Bluetooth: Remove superfluous extra empty line between functions
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

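/* The "dut_mode" debugfs attribute exposes Device Under Test mode as a
 * boolean: reading reports 'Y' or 'N', writing 'Y' enables DUT mode via
 * HCI_OP_ENABLE_DUT_MODE and writing 'N' leaves it again by resetting
 * the controller.
 */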
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

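/* Take the last received event out of hdev->recv_evt and check that it
 * matches what the caller waited for: either the explicitly requested
 * event code or a Command Complete for the given opcode. On any
 * mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */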
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

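/* Send a single HCI command and block until the matching Command
 * Complete event (or the explicitly requested event) arrives or the
 * timeout expires. Callers serialize against each other by holding
 * hdev->req_lock; on success the returned skb carries the response.
 */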
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
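
/* A minimal usage sketch for __hci_cmd_sync() (hypothetical caller, not
 * part of this file), assuming hdev->req_lock is held as in
 * dut_mode_write() above:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ...
 *      kfree_skb(skb);
 *
 * On success skb->data points at the command's return parameters,
 * starting with the status byte (here struct hci_rp_read_local_version).
 */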

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

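/* Like __hci_req_sync(), but refuses with -ENETDOWN while the device
 * is down and takes hdev->req_lock itself so that all synchronous
 * requests are serialized.
 */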
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

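/* In the event mask built below, byte i bit j unmasks the HCI event
 * with code (i * 8 + j + 1); for example events[4] |= 0x02 sets bit 33
 * and thus enables event 0x22, Inquiry Result with RSSI. (Encoding per
 * the Set Event Mask command in the Bluetooth Core Specification.)
 */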
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * need only the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

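/* Minimal init sequence for unconfigured controllers: read the local
 * version information and, only when the driver provides a set_bdaddr
 * callback, the public Bluetooth device address.
 */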
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

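/* Re-insert the entry into the resolve list, which is kept ordered by
 * ascending abs(rssi) so that names of the strongest devices are
 * resolved first; entries already pending name resolution are never
 * displaced.
 */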
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

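/* Add a fresh inquiry result to the cache or refresh an existing
 * entry, and return MGMT_DEV_FOUND_* flags telling the caller whether
 * legacy pairing applies and whether the name still needs confirming.
 */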
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

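/* Copy at most num entries from the inquiry cache into buf as struct
 * inquiry_info records. Runs under hdev->lock and must not sleep (see
 * hci_inquiry() below).
 */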
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

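/* Power on the controller and run the HCI init sequence, honouring the
 * setup, config and unconfigured states, rfkill and the requirement
 * for a valid public or static random address. On init failure the
 * transport is closed again and all queues are purged.
 */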
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
1540          * possible.
1541          *
1542          * When this function is called for a user channel, the flag
1543          * HCI_USER_CHANNEL will be set first before attempting to
1544          * open the device.
1545          */
1546         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1547             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1548                 err = -EOPNOTSUPP;
1549                 goto done;
1550         }
1551
1552         /* We need to ensure that no other power on/off work is pending
1553          * before proceeding to call hci_dev_do_open. This is
1554          * particularly important if the setup procedure has not yet
1555          * completed.
1556          */
1557         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1558                 cancel_delayed_work(&hdev->power_off);
1559
1560         /* After this call it is guaranteed that the setup procedure
1561          * has finished. This means that error conditions like an active
1562          * RFKILL switch or no valid public or static random address apply.
1563          */
1564         flush_workqueue(hdev->req_workqueue);
1565
1566         /* For controllers not using the management interface and that
1567          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1568          * so that pairing works for them. Once the management interface
1569          * is in use this bit will be cleared again and userspace has
1570          * to explicitly enable it.
1571          */
1572         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1573             !hci_dev_test_flag(hdev, HCI_MGMT))
1574                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1575
1576         err = hci_dev_do_open(hdev);
1577
1578 done:
1579         hci_dev_put(hdev);
1580         return err;
1581 }
1582
1583 /* This function requires the caller holds hdev->lock */
1584 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1585 {
1586         struct hci_conn_params *p;
1587
1588         list_for_each_entry(p, &hdev->le_conn_params, list) {
1589                 if (p->conn) {
1590                         hci_conn_drop(p->conn);
1591                         hci_conn_put(p->conn);
1592                         p->conn = NULL;
1593                 }
1594                 list_del_init(&p->action);
1595         }
1596
1597         BT_DBG("All LE pending actions cleared");
1598 }
1599
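/* Common close path, also used by rfkill blocking and the power-off and
 * error-reset work: flush pending work, stop discovery, drop connections
 * and queues, optionally reset the controller and finally invoke the
 * driver's close() callback.
 */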
1600 static int hci_dev_do_close(struct hci_dev *hdev)
1601 {
1602         BT_DBG("%s %p", hdev->name, hdev);
1603
1604         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1605                 /* Execute vendor specific shutdown routine */
1606                 if (hdev->shutdown)
1607                         hdev->shutdown(hdev);
1608         }
1609
1610         cancel_delayed_work(&hdev->power_off);
1611
1612         hci_req_cancel(hdev, ENODEV);
1613         hci_req_lock(hdev);
1614
1615         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1616                 cancel_delayed_work_sync(&hdev->cmd_timer);
1617                 hci_req_unlock(hdev);
1618                 return 0;
1619         }
1620
1621         /* Flush RX and TX works */
1622         flush_work(&hdev->tx_work);
1623         flush_work(&hdev->rx_work);
1624
1625         if (hdev->discov_timeout > 0) {
1626                 cancel_delayed_work(&hdev->discov_off);
1627                 hdev->discov_timeout = 0;
1628                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1629                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1630         }
1631
1632         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1633                 cancel_delayed_work(&hdev->service_cache);
1634
1635         cancel_delayed_work_sync(&hdev->le_scan_disable);
1636         cancel_delayed_work_sync(&hdev->le_scan_restart);
1637
1638         if (hci_dev_test_flag(hdev, HCI_MGMT))
1639                 cancel_delayed_work_sync(&hdev->rpa_expired);
1640
1641         /* Avoid potential lockdep warnings from the *_flush() calls by
1642          * ensuring the workqueue is empty up front.
1643          */
1644         drain_workqueue(hdev->workqueue);
1645
1646         hci_dev_lock(hdev);
1647
1648         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1649
1650         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1651                 if (hdev->dev_type == HCI_BREDR)
1652                         mgmt_powered(hdev, 0);
1653         }
1654
1655         hci_inquiry_cache_flush(hdev);
1656         hci_pend_le_actions_clear(hdev);
1657         hci_conn_hash_flush(hdev);
1658         hci_dev_unlock(hdev);
1659
1660         smp_unregister(hdev);
1661
1662         hci_notify(hdev, HCI_DEV_DOWN);
1663
1664         if (hdev->flush)
1665                 hdev->flush(hdev);
1666
1667         /* Reset device */
1668         skb_queue_purge(&hdev->cmd_q);
1669         atomic_set(&hdev->cmd_cnt, 1);
1670         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1671             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1672             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1673                 set_bit(HCI_INIT, &hdev->flags);
1674                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1675                 clear_bit(HCI_INIT, &hdev->flags);
1676         }
1677
1678         /* Flush cmd work */
1679         flush_work(&hdev->cmd_work);
1680
1681         /* Drop queues */
1682         skb_queue_purge(&hdev->rx_q);
1683         skb_queue_purge(&hdev->cmd_q);
1684         skb_queue_purge(&hdev->raw_q);
1685
1686         /* Drop last sent command */
1687         if (hdev->sent_cmd) {
1688                 cancel_delayed_work_sync(&hdev->cmd_timer);
1689                 kfree_skb(hdev->sent_cmd);
1690                 hdev->sent_cmd = NULL;
1691         }
1692
1693         kfree_skb(hdev->recv_evt);
1694         hdev->recv_evt = NULL;
1695
1696         /* After this point our queues are empty
1697          * and no tasks are scheduled. */
1698         hdev->close(hdev);
1699
1700         /* Clear flags */
1701         hdev->flags &= BIT(HCI_RAW);
1702         hci_dev_clear_volatile_flags(hdev);
1703
1704         /* Controller radio is available but is currently powered down */
1705         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1706
1707         memset(hdev->eir, 0, sizeof(hdev->eir));
1708         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1709         bacpy(&hdev->random_addr, BDADDR_ANY);
1710
1711         hci_req_unlock(hdev);
1712
1713         hci_dev_put(hdev);
1714         return 0;
1715 }
1716
1717 int hci_dev_close(__u16 dev)
1718 {
1719         struct hci_dev *hdev;
1720         int err;
1721
1722         hdev = hci_dev_get(dev);
1723         if (!hdev)
1724                 return -ENODEV;
1725
1726         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1727                 err = -EBUSY;
1728                 goto done;
1729         }
1730
1731         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1732                 cancel_delayed_work(&hdev->power_off);
1733
1734         err = hci_dev_do_close(hdev);
1735
1736 done:
1737         hci_dev_put(hdev);
1738         return err;
1739 }
1740
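/* Reset the controller while holding req_lock: drop the RX and command
 * queues, flush the inquiry cache and connection hash, zero the packet
 * counters and issue a synchronous HCI reset.
 */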
1741 static int hci_dev_do_reset(struct hci_dev *hdev)
1742 {
1743         int ret;
1744
1745         BT_DBG("%s %p", hdev->name, hdev);
1746
1747         hci_req_lock(hdev);
1748
1749         /* Drop queues */
1750         skb_queue_purge(&hdev->rx_q);
1751         skb_queue_purge(&hdev->cmd_q);
1752
1753         /* Avoid potential lockdep warnings from the *_flush() calls by
1754          * ensuring the workqueue is empty up front.
1755          */
1756         drain_workqueue(hdev->workqueue);
1757
1758         hci_dev_lock(hdev);
1759         hci_inquiry_cache_flush(hdev);
1760         hci_conn_hash_flush(hdev);
1761         hci_dev_unlock(hdev);
1762
1763         if (hdev->flush)
1764                 hdev->flush(hdev);
1765
1766         atomic_set(&hdev->cmd_cnt, 1);
1767         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1768
1769         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1770
1771         hci_req_unlock(hdev);
1772         return ret;
1773 }
1774
1775 int hci_dev_reset(__u16 dev)
1776 {
1777         struct hci_dev *hdev;
1778         int err;
1779
1780         hdev = hci_dev_get(dev);
1781         if (!hdev)
1782                 return -ENODEV;
1783
1784         if (!test_bit(HCI_UP, &hdev->flags)) {
1785                 err = -ENETDOWN;
1786                 goto done;
1787         }
1788
1789         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1790                 err = -EBUSY;
1791                 goto done;
1792         }
1793
1794         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1795                 err = -EOPNOTSUPP;
1796                 goto done;
1797         }
1798
1799         err = hci_dev_do_reset(hdev);
1800
1801 done:
1802         hci_dev_put(hdev);
1803         return err;
1804 }
1805
1806 int hci_dev_reset_stat(__u16 dev)
1807 {
1808         struct hci_dev *hdev;
1809         int ret = 0;
1810
1811         hdev = hci_dev_get(dev);
1812         if (!hdev)
1813                 return -ENODEV;
1814
1815         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1816                 ret = -EBUSY;
1817                 goto done;
1818         }
1819
1820         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1821                 ret = -EOPNOTSUPP;
1822                 goto done;
1823         }
1824
1825         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1826
1827 done:
1828         hci_dev_put(hdev);
1829         return ret;
1830 }
1831
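/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode change that was made outside of mgmt (e.g. via HCISETSCAN)
 * and, if mgmt is in use, announce the new settings.
 */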
1832 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1833 {
1834         bool conn_changed, discov_changed;
1835
1836         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1837
1838         if (scan & SCAN_PAGE)
1839                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1840                                                           HCI_CONNECTABLE);
1841         else
1842                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1843                                                            HCI_CONNECTABLE);
1844
1845         if (scan & SCAN_INQUIRY) {
1846                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1847                                                             HCI_DISCOVERABLE);
1848         } else {
1849                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1850                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1851                                                              HCI_DISCOVERABLE);
1852         }
1853
1854         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1855                 return;
1856
1857         if (conn_changed || discov_changed) {
1858                 /* In case this was disabled through mgmt */
1859                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1860
1861                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1862                         mgmt_update_adv_data(hdev);
1863
1864                 mgmt_new_settings(hdev);
1865         }
1866 }
1867
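/* Handle the legacy HCISET* device ioctls. These are rejected for user
 * channel users, unconfigured controllers and anything that is not a
 * BR/EDR capable and enabled controller.
 */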
1868 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1869 {
1870         struct hci_dev *hdev;
1871         struct hci_dev_req dr;
1872         int err = 0;
1873
1874         if (copy_from_user(&dr, arg, sizeof(dr)))
1875                 return -EFAULT;
1876
1877         hdev = hci_dev_get(dr.dev_id);
1878         if (!hdev)
1879                 return -ENODEV;
1880
1881         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1882                 err = -EBUSY;
1883                 goto done;
1884         }
1885
1886         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1887                 err = -EOPNOTSUPP;
1888                 goto done;
1889         }
1890
1891         if (hdev->dev_type != HCI_BREDR) {
1892                 err = -EOPNOTSUPP;
1893                 goto done;
1894         }
1895
1896         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1897                 err = -EOPNOTSUPP;
1898                 goto done;
1899         }
1900
1901         switch (cmd) {
1902         case HCISETAUTH:
1903                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1904                                    HCI_INIT_TIMEOUT);
1905                 break;
1906
1907         case HCISETENCRYPT:
1908                 if (!lmp_encrypt_capable(hdev)) {
1909                         err = -EOPNOTSUPP;
1910                         break;
1911                 }
1912
1913                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1914                         /* Auth must be enabled first */
1915                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1916                                            HCI_INIT_TIMEOUT);
1917                         if (err)
1918                                 break;
1919                 }
1920
1921                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1922                                    HCI_INIT_TIMEOUT);
1923                 break;
1924
1925         case HCISETSCAN:
1926                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1927                                    HCI_INIT_TIMEOUT);
1928
1929                 /* Ensure that the connectable and discoverable states
1930                  * get correctly modified as this was a non-mgmt change.
1931                  */
1932                 if (!err)
1933                         hci_update_scan_state(hdev, dr.dev_opt);
1934                 break;
1935
1936         case HCISETLINKPOL:
1937                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1938                                    HCI_INIT_TIMEOUT);
1939                 break;
1940
1941         case HCISETLINKMODE:
1942                 hdev->link_mode = ((__u16) dr.dev_opt) &
1943                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1944                 break;
1945
1946         case HCISETPTYPE:
1947                 hdev->pkt_type = (__u16) dr.dev_opt;
1948                 break;
1949
1950         case HCISETACLMTU:
1951                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1952                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1953                 break;
1954
1955         case HCISETSCOMTU:
1956                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1957                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1958                 break;
1959
1960         default:
1961                 err = -EINVAL;
1962                 break;
1963         }
1964
1965 done:
1966         hci_dev_put(hdev);
1967         return err;
1968 }
1969
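/* Copy the identifiers and flags of all registered controllers to
 * userspace. Controllers still pending auto-off are reported as down.
 */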
1970 int hci_get_dev_list(void __user *arg)
1971 {
1972         struct hci_dev *hdev;
1973         struct hci_dev_list_req *dl;
1974         struct hci_dev_req *dr;
1975         int n = 0, size, err;
1976         __u16 dev_num;
1977
1978         if (get_user(dev_num, (__u16 __user *) arg))
1979                 return -EFAULT;
1980
1981         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1982                 return -EINVAL;
1983
1984         size = sizeof(*dl) + dev_num * sizeof(*dr);
1985
1986         dl = kzalloc(size, GFP_KERNEL);
1987         if (!dl)
1988                 return -ENOMEM;
1989
1990         dr = dl->dev_req;
1991
1992         read_lock(&hci_dev_list_lock);
1993         list_for_each_entry(hdev, &hci_dev_list, list) {
1994                 unsigned long flags = hdev->flags;
1995
1996                 /* When the auto-off is configured, it means the transport
1997                  * is running, but in that case still indicate that the
1998                  * device is actually down.
1999                  */
2000                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001                         flags &= ~BIT(HCI_UP);
2002
2003                 (dr + n)->dev_id  = hdev->id;
2004                 (dr + n)->dev_opt = flags;
2005
2006                 if (++n >= dev_num)
2007                         break;
2008         }
2009         read_unlock(&hci_dev_list_lock);
2010
2011         dl->dev_num = n;
2012         size = sizeof(*dl) + n * sizeof(*dr);
2013
2014         err = copy_to_user(arg, dl, size);
2015         kfree(dl);
2016
2017         return err ? -EFAULT : 0;
2018 }
2019
2020 int hci_get_dev_info(void __user *arg)
2021 {
2022         struct hci_dev *hdev;
2023         struct hci_dev_info di;
2024         unsigned long flags;
2025         int err = 0;
2026
2027         if (copy_from_user(&di, arg, sizeof(di)))
2028                 return -EFAULT;
2029
2030         hdev = hci_dev_get(di.dev_id);
2031         if (!hdev)
2032                 return -ENODEV;
2033
2034         /* When the auto-off is configured, it means the transport
2035          * is running, but in that case still indicate that the
2036          * device is actually down.
2037          */
2038         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2039                 flags = hdev->flags & ~BIT(HCI_UP);
2040         else
2041                 flags = hdev->flags;
2042
2043         strcpy(di.name, hdev->name);
2044         di.bdaddr   = hdev->bdaddr;
2045         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2046         di.flags    = flags;
2047         di.pkt_type = hdev->pkt_type;
2048         if (lmp_bredr_capable(hdev)) {
2049                 di.acl_mtu  = hdev->acl_mtu;
2050                 di.acl_pkts = hdev->acl_pkts;
2051                 di.sco_mtu  = hdev->sco_mtu;
2052                 di.sco_pkts = hdev->sco_pkts;
2053         } else {
2054                 di.acl_mtu  = hdev->le_mtu;
2055                 di.acl_pkts = hdev->le_pkts;
2056                 di.sco_mtu  = 0;
2057                 di.sco_pkts = 0;
2058         }
2059         di.link_policy = hdev->link_policy;
2060         di.link_mode   = hdev->link_mode;
2061
2062         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2063         memcpy(&di.features, &hdev->features, sizeof(di.features));
2064
2065         if (copy_to_user(arg, &di, sizeof(di)))
2066                 err = -EFAULT;
2067
2068         hci_dev_put(hdev);
2069
2070         return err;
2071 }
2072
2073 /* ---- Interface to HCI drivers ---- */
2074
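/* rfkill callback: closing the device is deferred while setup or
 * configuration is still in progress; hci_power_on() re-checks the
 * rfkill state once setup has finished.
 */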
2075 static int hci_rfkill_set_block(void *data, bool blocked)
2076 {
2077         struct hci_dev *hdev = data;
2078
2079         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2080
2081         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2082                 return -EBUSY;
2083
2084         if (blocked) {
2085                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2086                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2087                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2088                         hci_dev_do_close(hdev);
2089         } else {
2090                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2091         }
2092
2093         return 0;
2094 }
2095
2096 static const struct rfkill_ops hci_rfkill_ops = {
2097         .set_block = hci_rfkill_set_block,
2098 };
2099
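/* Power-on work, queued from hci_register_dev() via req_workqueue. It
 * opens the device and then re-evaluates error conditions (rfkill, no
 * valid address, unconfigured) that were deliberately ignored during
 * the setup phase.
 */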
2100 static void hci_power_on(struct work_struct *work)
2101 {
2102         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2103         int err;
2104
2105         BT_DBG("%s", hdev->name);
2106
2107         err = hci_dev_do_open(hdev);
2108         if (err < 0) {
2109                 hci_dev_lock(hdev);
2110                 mgmt_set_powered_failed(hdev, err);
2111                 hci_dev_unlock(hdev);
2112                 return;
2113         }
2114
2115         /* During the HCI setup phase, a few error conditions are
2116          * ignored and they need to be checked now. If they are still
2117          * valid, it is important to turn the device back off.
2118          */
2119         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2120             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2121             (hdev->dev_type == HCI_BREDR &&
2122              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2123              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2124                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2125                 hci_dev_do_close(hdev);
2126         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2127                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2128                                    HCI_AUTO_OFF_TIMEOUT);
2129         }
2130
2131         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2132                 /* For unconfigured devices, set the HCI_RAW flag
2133                  * so that userspace can easily identify them.
2134                  */
2135                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2136                         set_bit(HCI_RAW, &hdev->flags);
2137
2138                 /* For fully configured devices, this will send
2139                  * the Index Added event. For unconfigured devices,
2140                  * it will send the Unconfigured Index Added event.
2141                  *
2142                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2143                  * and no event will be sent.
2144                  */
2145                 mgmt_index_added(hdev);
2146         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2147                 /* Now that the controller is configured, it is
2148                  * important to clear the HCI_RAW flag.
2149                  */
2150                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2151                         clear_bit(HCI_RAW, &hdev->flags);
2152
2153                 /* Powering on the controller with HCI_CONFIG set only
2154                  * happens with the transition from unconfigured to
2155                  * configured. This will send the Index Added event.
2156                  */
2157                 mgmt_index_added(hdev);
2158         }
2159 }
2160
2161 static void hci_power_off(struct work_struct *work)
2162 {
2163         struct hci_dev *hdev = container_of(work, struct hci_dev,
2164                                             power_off.work);
2165
2166         BT_DBG("%s", hdev->name);
2167
2168         hci_dev_do_close(hdev);
2169 }
2170
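/* Hardware error work: give the driver a chance to handle the error
 * through its hw_error() callback, then recover by cycling the device
 * with a close followed by an open.
 */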
2171 static void hci_error_reset(struct work_struct *work)
2172 {
2173         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2174
2175         BT_DBG("%s", hdev->name);
2176
2177         if (hdev->hw_error)
2178                 hdev->hw_error(hdev, hdev->hw_error_code);
2179         else
2180                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2181                        hdev->hw_error_code);
2182
2183         if (hci_dev_do_close(hdev))
2184                 return;
2185
2186         hci_dev_do_open(hdev);
2187 }
2188
2189 static void hci_discov_off(struct work_struct *work)
2190 {
2191         struct hci_dev *hdev;
2192
2193         hdev = container_of(work, struct hci_dev, discov_off.work);
2194
2195         BT_DBG("%s", hdev->name);
2196
2197         mgmt_discoverable_timeout(hdev);
2198 }
2199
2200 void hci_uuids_clear(struct hci_dev *hdev)
2201 {
2202         struct bt_uuid *uuid, *tmp;
2203
2204         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2205                 list_del(&uuid->list);
2206                 kfree(uuid);
2207         }
2208 }
2209
2210 void hci_link_keys_clear(struct hci_dev *hdev)
2211 {
2212         struct link_key *key;
2213
2214         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2215                 list_del_rcu(&key->list);
2216                 kfree_rcu(key, rcu);
2217         }
2218 }
2219
2220 void hci_smp_ltks_clear(struct hci_dev *hdev)
2221 {
2222         struct smp_ltk *k;
2223
2224         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2225                 list_del_rcu(&k->list);
2226                 kfree_rcu(k, rcu);
2227         }
2228 }
2229
2230 void hci_smp_irks_clear(struct hci_dev *hdev)
2231 {
2232         struct smp_irk *k;
2233
2234         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2235                 list_del_rcu(&k->list);
2236                 kfree_rcu(k, rcu);
2237         }
2238 }
2239
2240 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2241 {
2242         struct link_key *k;
2243
2244         rcu_read_lock();
2245         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2246                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2247                         rcu_read_unlock();
2248                         return k;
2249                 }
2250         }
2251         rcu_read_unlock();
2252
2253         return NULL;
2254 }
2255
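/* Decide whether a BR/EDR link key should be stored persistently, based
 * on the key type and on the bonding requirements that the local and
 * remote side used during pairing.
 */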
2256 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2257                                u8 key_type, u8 old_key_type)
2258 {
2259         /* Legacy key */
2260         if (key_type < 0x03)
2261                 return true;
2262
2263         /* Debug keys are insecure so don't store them persistently */
2264         if (key_type == HCI_LK_DEBUG_COMBINATION)
2265                 return false;
2266
2267         /* Changed combination key and there's no previous one */
2268         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2269                 return false;
2270
2271         /* Security mode 3 case */
2272         if (!conn)
2273                 return true;
2274
2275         /* BR/EDR key derived using SC from an LE link */
2276         if (conn->type == LE_LINK)
2277                 return true;
2278
2279         /* Neither the local nor the remote side had no-bonding as a requirement */
2280         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2281                 return true;
2282
2283         /* Local side had dedicated bonding as requirement */
2284         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2285                 return true;
2286
2287         /* Remote side had dedicated bonding as requirement */
2288         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2289                 return true;
2290
2291         /* If none of the above criteria match, then don't store the key
2292          * persistently */
2293         return false;
2294 }
2295
2296 static u8 ltk_role(u8 type)
2297 {
2298         if (type == SMP_LTK)
2299                 return HCI_ROLE_MASTER;
2300
2301         return HCI_ROLE_SLAVE;
2302 }
2303
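/* Look up an LTK for the given identity address and role. Keys derived
 * through Secure Connections are symmetric and therefore match either
 * role, while legacy keys only match the role they were distributed for.
 */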
2304 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2305                              u8 addr_type, u8 role)
2306 {
2307         struct smp_ltk *k;
2308
2309         rcu_read_lock();
2310         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2311                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2312                         continue;
2313
2314                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2315                         rcu_read_unlock();
2316                         return k;
2317                 }
2318         }
2319         rcu_read_unlock();
2320
2321         return NULL;
2322 }
2323
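/* Resolve a Resolvable Private Address to its IRK. A cached RPA match
 * is tried first; otherwise each IRK is tested cryptographically via
 * smp_irk_matches() and the RPA is cached on a hit.
 */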
2324 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2325 {
2326         struct smp_irk *irk;
2327
2328         rcu_read_lock();
2329         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2330                 if (!bacmp(&irk->rpa, rpa)) {
2331                         rcu_read_unlock();
2332                         return irk;
2333                 }
2334         }
2335
2336         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2337                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2338                         bacpy(&irk->rpa, rpa);
2339                         rcu_read_unlock();
2340                         return irk;
2341                 }
2342         }
2343         rcu_read_unlock();
2344
2345         return NULL;
2346 }
2347
2348 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2349                                      u8 addr_type)
2350 {
2351         struct smp_irk *irk;
2352
2353         /* Identity Address must be public or static random */
2354         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2355                 return NULL;
2356
2357         rcu_read_lock();
2358         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2359                 if (addr_type == irk->addr_type &&
2360                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2361                         rcu_read_unlock();
2362                         return irk;
2363                 }
2364         }
2365         rcu_read_unlock();
2366
2367         return NULL;
2368 }
2369
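/* Store or update the link key for bdaddr. If *persistent is requested,
 * it reports whether the key qualifies for persistent storage according
 * to hci_persistent_key().
 */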
2370 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2371                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2372                                   u8 pin_len, bool *persistent)
2373 {
2374         struct link_key *key, *old_key;
2375         u8 old_key_type;
2376
2377         old_key = hci_find_link_key(hdev, bdaddr);
2378         if (old_key) {
2379                 old_key_type = old_key->type;
2380                 key = old_key;
2381         } else {
2382                 old_key_type = conn ? conn->key_type : 0xff;
2383                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2384                 if (!key)
2385                         return NULL;
2386                 list_add_rcu(&key->list, &hdev->link_keys);
2387         }
2388
2389         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2390
2391         /* Some buggy controller combinations generate a changed
2392          * combination key for legacy pairing even when there's no
2393          * previous key */
2394         if (type == HCI_LK_CHANGED_COMBINATION &&
2395             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2396                 type = HCI_LK_COMBINATION;
2397                 if (conn)
2398                         conn->key_type = type;
2399         }
2400
2401         bacpy(&key->bdaddr, bdaddr);
2402         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2403         key->pin_len = pin_len;
2404
2405         if (type == HCI_LK_CHANGED_COMBINATION)
2406                 key->type = old_key_type;
2407         else
2408                 key->type = type;
2409
2410         if (persistent)
2411                 *persistent = hci_persistent_key(hdev, conn, type,
2412                                                  old_key_type);
2413
2414         return key;
2415 }
2416
2417 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2418                             u8 addr_type, u8 type, u8 authenticated,
2419                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2420 {
2421         struct smp_ltk *key, *old_key;
2422         u8 role = ltk_role(type);
2423
2424         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2425         if (old_key)
2426                 key = old_key;
2427         else {
2428                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2429                 if (!key)
2430                         return NULL;
2431                 list_add_rcu(&key->list, &hdev->long_term_keys);
2432         }
2433
2434         bacpy(&key->bdaddr, bdaddr);
2435         key->bdaddr_type = addr_type;
2436         memcpy(key->val, tk, sizeof(key->val));
2437         key->authenticated = authenticated;
2438         key->ediv = ediv;
2439         key->rand = rand;
2440         key->enc_size = enc_size;
2441         key->type = type;
2442
2443         return key;
2444 }
2445
2446 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2447                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2448 {
2449         struct smp_irk *irk;
2450
2451         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2452         if (!irk) {
2453                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2454                 if (!irk)
2455                         return NULL;
2456
2457                 bacpy(&irk->bdaddr, bdaddr);
2458                 irk->addr_type = addr_type;
2459
2460                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2461         }
2462
2463         memcpy(irk->val, val, 16);
2464         bacpy(&irk->rpa, rpa);
2465
2466         return irk;
2467 }
2468
2469 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2470 {
2471         struct link_key *key;
2472
2473         key = hci_find_link_key(hdev, bdaddr);
2474         if (!key)
2475                 return -ENOENT;
2476
2477         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2478
2479         list_del_rcu(&key->list);
2480         kfree_rcu(key, rcu);
2481
2482         return 0;
2483 }
2484
2485 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2486 {
2487         struct smp_ltk *k;
2488         int removed = 0;
2489
2490         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2491                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2492                         continue;
2493
2494                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2495
2496                 list_del_rcu(&k->list);
2497                 kfree_rcu(k, rcu);
2498                 removed++;
2499         }
2500
2501         return removed ? 0 : -ENOENT;
2502 }
2503
2504 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2505 {
2506         struct smp_irk *k;
2507
2508         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2509                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2510                         continue;
2511
2512                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2513
2514                 list_del_rcu(&k->list);
2515                 kfree_rcu(k, rcu);
2516         }
2517 }
2518
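/* Check whether pairing material is stored for the given address: a
 * link key for BR/EDR, or an LTK for LE after first resolving the
 * address through the IRK list.
 */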
2519 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2520 {
2521         struct smp_ltk *k;
2522         struct smp_irk *irk;
2523         u8 addr_type;
2524
2525         if (type == BDADDR_BREDR) {
2526                 if (hci_find_link_key(hdev, bdaddr))
2527                         return true;
2528                 return false;
2529         }
2530
2531         /* Convert to the HCI addr type, which struct smp_ltk uses */
2532         if (type == BDADDR_LE_PUBLIC)
2533                 addr_type = ADDR_LE_DEV_PUBLIC;
2534         else
2535                 addr_type = ADDR_LE_DEV_RANDOM;
2536
2537         irk = hci_get_irk(hdev, bdaddr, addr_type);
2538         if (irk) {
2539                 bdaddr = &irk->bdaddr;
2540                 addr_type = irk->addr_type;
2541         }
2542
2543         rcu_read_lock();
2544         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2545                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2546                         rcu_read_unlock();
2547                         return true;
2548                 }
2549         }
2550         rcu_read_unlock();
2551
2552         return false;
2553 }
2554
2555 /* HCI command timer function */
2556 static void hci_cmd_timeout(struct work_struct *work)
2557 {
2558         struct hci_dev *hdev = container_of(work, struct hci_dev,
2559                                             cmd_timer.work);
2560
2561         if (hdev->sent_cmd) {
2562                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2563                 u16 opcode = __le16_to_cpu(sent->opcode);
2564
2565                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2566         } else {
2567                 BT_ERR("%s command tx timeout", hdev->name);
2568         }
2569
2570         atomic_set(&hdev->cmd_cnt, 1);
2571         queue_work(hdev->workqueue, &hdev->cmd_work);
2572 }
2573
2574 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2575                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2576 {
2577         struct oob_data *data;
2578
2579         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2580                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2581                         continue;
2582                 if (data->bdaddr_type != bdaddr_type)
2583                         continue;
2584                 return data;
2585         }
2586
2587         return NULL;
2588 }
2589
2590 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2591                                u8 bdaddr_type)
2592 {
2593         struct oob_data *data;
2594
2595         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2596         if (!data)
2597                 return -ENOENT;
2598
2599         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2600
2601         list_del(&data->list);
2602         kfree(data);
2603
2604         return 0;
2605 }
2606
2607 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2608 {
2609         struct oob_data *data, *n;
2610
2611         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2612                 list_del(&data->list);
2613                 kfree(data);
2614         }
2615 }
2616
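/* Store remote OOB pairing data. The present field encodes which values
 * are valid: 0x01 for the 192-bit variant only, 0x02 for the 256-bit
 * variant only and 0x03 for both.
 */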
2617 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2618                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2619                             u8 *hash256, u8 *rand256)
2620 {
2621         struct oob_data *data;
2622
2623         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2624         if (!data) {
2625                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2626                 if (!data)
2627                         return -ENOMEM;
2628
2629                 bacpy(&data->bdaddr, bdaddr);
2630                 data->bdaddr_type = bdaddr_type;
2631                 list_add(&data->list, &hdev->remote_oob_data);
2632         }
2633
2634         if (hash192 && rand192) {
2635                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2636                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2637                 if (hash256 && rand256)
2638                         data->present = 0x03;
2639         } else {
2640                 memset(data->hash192, 0, sizeof(data->hash192));
2641                 memset(data->rand192, 0, sizeof(data->rand192));
2642                 if (hash256 && rand256)
2643                         data->present = 0x02;
2644                 else
2645                         data->present = 0x00;
2646         }
2647
2648         if (hash256 && rand256) {
2649                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2650                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2651         } else {
2652                 memset(data->hash256, 0, sizeof(data->hash256));
2653                 memset(data->rand256, 0, sizeof(data->rand256));
2654                 if (hash192 && rand192)
2655                         data->present = 0x01;
2656         }
2657
2658         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2659
2660         return 0;
2661 }
2662
2663 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2664                                          bdaddr_t *bdaddr, u8 type)
2665 {
2666         struct bdaddr_list *b;
2667
2668         list_for_each_entry(b, bdaddr_list, list) {
2669                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2670                         return b;
2671         }
2672
2673         return NULL;
2674 }
2675
2676 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2677 {
2678         struct list_head *p, *n;
2679
2680         list_for_each_safe(p, n, bdaddr_list) {
2681                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2682
2683                 list_del(p);
2684                 kfree(b);
2685         }
2686 }
2687
2688 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2689 {
2690         struct bdaddr_list *entry;
2691
2692         if (!bacmp(bdaddr, BDADDR_ANY))
2693                 return -EBADF;
2694
2695         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2696                 return -EEXIST;
2697
2698         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2699         if (!entry)
2700                 return -ENOMEM;
2701
2702         bacpy(&entry->bdaddr, bdaddr);
2703         entry->bdaddr_type = type;
2704
2705         list_add(&entry->list, list);
2706
2707         return 0;
2708 }
2709
2710 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2711 {
2712         struct bdaddr_list *entry;
2713
2714         if (!bacmp(bdaddr, BDADDR_ANY)) {
2715                 hci_bdaddr_list_clear(list);
2716                 return 0;
2717         }
2718
2719         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2720         if (!entry)
2721                 return -ENOENT;
2722
2723         list_del(&entry->list);
2724         kfree(entry);
2725
2726         return 0;
2727 }
2728
2729 /* This function requires the caller holds hdev->lock */
2730 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2731                                                bdaddr_t *addr, u8 addr_type)
2732 {
2733         struct hci_conn_params *params;
2734
2735         /* The conn params list only contains identity addresses */
2736         if (!hci_is_identity_address(addr, addr_type))
2737                 return NULL;
2738
2739         list_for_each_entry(params, &hdev->le_conn_params, list) {
2740                 if (bacmp(&params->addr, addr) == 0 &&
2741                     params->addr_type == addr_type) {
2742                         return params;
2743                 }
2744         }
2745
2746         return NULL;
2747 }
2748
2749 /* This function requires the caller holds hdev->lock */
2750 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2751                                                   bdaddr_t *addr, u8 addr_type)
2752 {
2753         struct hci_conn_params *param;
2754
2755         /* The list only contains identity addresses */
2756         if (!hci_is_identity_address(addr, addr_type))
2757                 return NULL;
2758
2759         list_for_each_entry(param, list, action) {
2760                 if (bacmp(&param->addr, addr) == 0 &&
2761                     param->addr_type == addr_type)
2762                         return param;
2763         }
2764
2765         return NULL;
2766 }
2767
2768 /* This function requires the caller holds hdev->lock */
2769 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2770                                             bdaddr_t *addr, u8 addr_type)
2771 {
2772         struct hci_conn_params *params;
2773
2774         if (!hci_is_identity_address(addr, addr_type))
2775                 return NULL;
2776
2777         params = hci_conn_params_lookup(hdev, addr, addr_type);
2778         if (params)
2779                 return params;
2780
2781         params = kzalloc(sizeof(*params), GFP_KERNEL);
2782         if (!params) {
2783                 BT_ERR("Out of memory");
2784                 return NULL;
2785         }
2786
2787         bacpy(&params->addr, addr);
2788         params->addr_type = addr_type;
2789
2790         list_add(&params->list, &hdev->le_conn_params);
2791         INIT_LIST_HEAD(&params->action);
2792
2793         params->conn_min_interval = hdev->le_conn_min_interval;
2794         params->conn_max_interval = hdev->le_conn_max_interval;
2795         params->conn_latency = hdev->le_conn_latency;
2796         params->supervision_timeout = hdev->le_supv_timeout;
2797         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2798
2799         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2800
2801         return params;
2802 }
2803
2804 static void hci_conn_params_free(struct hci_conn_params *params)
2805 {
2806         if (params->conn) {
2807                 hci_conn_drop(params->conn);
2808                 hci_conn_put(params->conn);
2809         }
2810
2811         list_del(&params->action);
2812         list_del(&params->list);
2813         kfree(params);
2814 }
2815
2816 /* This function requires the caller holds hdev->lock */
2817 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2818 {
2819         struct hci_conn_params *params;
2820
2821         params = hci_conn_params_lookup(hdev, addr, addr_type);
2822         if (!params)
2823                 return;
2824
2825         hci_conn_params_free(params);
2826
2827         hci_update_background_scan(hdev);
2828
2829         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2830 }
2831
2832 /* This function requires the caller holds hdev->lock */
2833 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2834 {
2835         struct hci_conn_params *params, *tmp;
2836
2837         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2838                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2839                         continue;
2840                 list_del(&params->list);
2841                 kfree(params);
2842         }
2843
2844         BT_DBG("All LE disabled connection parameters were removed");
2845 }
2846
2847 /* This function requires the caller holds hdev->lock */
2848 void hci_conn_params_clear_all(struct hci_dev *hdev)
2849 {
2850         struct hci_conn_params *params, *tmp;
2851
2852         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2853                 hci_conn_params_free(params);
2854
2855         hci_update_background_scan(hdev);
2856
2857         BT_DBG("All LE connection parameters were removed");
2858 }
2859
2860 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2861 {
2862         if (status) {
2863                 BT_ERR("Failed to start inquiry: status %d", status);
2864
2865                 hci_dev_lock(hdev);
2866                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2867                 hci_dev_unlock(hdev);
2868                 return;
2869         }
2870 }
2871
2872 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2873                                           u16 opcode)
2874 {
2875         /* General inquiry access code (GIAC) */
2876         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2877         struct hci_cp_inquiry cp;
2878         int err;
2879
2880         if (status) {
2881                 BT_ERR("Failed to disable LE scanning: status %d", status);
2882                 return;
2883         }
2884
2885         hdev->discovery.scan_start = 0;
2886
2887         switch (hdev->discovery.type) {
2888         case DISCOV_TYPE_LE:
2889                 hci_dev_lock(hdev);
2890                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2891                 hci_dev_unlock(hdev);
2892                 break;
2893
2894         case DISCOV_TYPE_INTERLEAVED:
2895                 hci_dev_lock(hdev);
2896
2897                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2898                              &hdev->quirks)) {
2899                         /* If we were running LE only scan, change discovery
2900                          * state. If we were running both LE and BR/EDR inquiry
2901                          * simultaneously, and BR/EDR inquiry is already
2902                          * finished, stop discovery, otherwise BR/EDR inquiry
2903                          * will stop discovery when finished.
2904                          */
2905                         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2906                                 hci_discovery_set_state(hdev,
2907                                                         DISCOVERY_STOPPED);
2908                 } else {
2909                         struct hci_request req;
2910
2911                         hci_inquiry_cache_flush(hdev);
2912
2913                         hci_req_init(&req, hdev);
2914
2915                         memset(&cp, 0, sizeof(cp));
2916                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2917                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2918                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2919
2920                         err = hci_req_run(&req, inquiry_complete);
2921                         if (err) {
2922                                 BT_ERR("Inquiry request failed: err %d", err);
2923                                 hci_discovery_set_state(hdev,
2924                                                         DISCOVERY_STOPPED);
2925                         }
2926                 }
2927
2928                 hci_dev_unlock(hdev);
2929                 break;
2930         }
2931 }
2932
2933 static void le_scan_disable_work(struct work_struct *work)
2934 {
2935         struct hci_dev *hdev = container_of(work, struct hci_dev,
2936                                             le_scan_disable.work);
2937         struct hci_request req;
2938         int err;
2939
2940         BT_DBG("%s", hdev->name);
2941
2942         cancel_delayed_work_sync(&hdev->le_scan_restart);
2943
2944         hci_req_init(&req, hdev);
2945
2946         hci_req_add_le_scan_disable(&req);
2947
2948         err = hci_req_run(&req, le_scan_disable_work_complete);
2949         if (err)
2950                 BT_ERR("Disable LE scanning request failed: err %d", err);
2951 }
2952
2953 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2954                                           u16 opcode)
2955 {
2956         unsigned long timeout, duration, scan_start, now;
2957
2958         BT_DBG("%s", hdev->name);
2959
2960         if (status) {
2961                 BT_ERR("Failed to restart LE scan: status %d", status);
2962                 return;
2963         }
2964
2965         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2966             !hdev->discovery.scan_start)
2967                 return;
2968
2969         /* When the scan was started, hdev->le_scan_disable was queued to
2970          * run one scan duration after scan_start. The restart canceled
2971          * that work, so queue it again with the remaining timeout to make
2972          * sure that the scan does not run indefinitely.
2973          */
2974         duration = hdev->discovery.scan_duration;
2975         scan_start = hdev->discovery.scan_start;
2976         now = jiffies;
2977         if (now - scan_start <= duration) {
2978                 int elapsed;
2979
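                /* jiffies may have wrapped around since scan_start, in
                 * which case the elapsed time has to be computed across
                 * the wrap.
                 */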
2980                 if (now >= scan_start)
2981                         elapsed = now - scan_start;
2982                 else
2983                         elapsed = ULONG_MAX - scan_start + now;
2984
2985                 timeout = duration - elapsed;
2986         } else {
2987                 timeout = 0;
2988         }
2989         queue_delayed_work(hdev->workqueue,
2990                            &hdev->le_scan_disable, timeout);
2991 }
2992
2993 static void le_scan_restart_work(struct work_struct *work)
2994 {
2995         struct hci_dev *hdev = container_of(work, struct hci_dev,
2996                                             le_scan_restart.work);
2997         struct hci_request req;
2998         struct hci_cp_le_set_scan_enable cp;
2999         int err;
3000
3001         BT_DBG("%s", hdev->name);
3002
3003         /* If controller is not scanning we are done. */
3004         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3005                 return;
3006
3007         hci_req_init(&req, hdev);
3008
3009         hci_req_add_le_scan_disable(&req);
3010
3011         memset(&cp, 0, sizeof(cp));
3012         cp.enable = LE_SCAN_ENABLE;
3013         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3014         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3015
3016         err = hci_req_run(&req, le_scan_restart_work_complete);
3017         if (err)
3018                 BT_ERR("Restart LE scan request failed: err %d", err);
3019 }
3020
3021 /* Copy the Identity Address of the controller.
3022  *
3023  * If the controller has a public BD_ADDR, then by default use that one.
3024  * If this is a LE only controller without a public address, default to
3025  * the static random address.
3026  *
3027  * For debugging purposes it is possible to force controllers with a
3028  * public address to use the static random address instead.
3029  *
3030  * In case BR/EDR has been disabled on a dual-mode controller and
3031  * userspace has configured a static address, then that address
3032  * becomes the identity address instead of the public BR/EDR address.
3033  */
3034 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3035                                u8 *bdaddr_type)
3036 {
3037         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3038             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3039             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3040              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3041                 bacpy(bdaddr, &hdev->static_addr);
3042                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3043         } else {
3044                 bacpy(bdaddr, &hdev->bdaddr);
3045                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3046         }
3047 }
3048
3049 /* Alloc HCI device */
3050 struct hci_dev *hci_alloc_dev(void)
3051 {
3052         struct hci_dev *hdev;
3053
3054         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3055         if (!hdev)
3056                 return NULL;
3057
3058         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3059         hdev->esco_type = (ESCO_HV1);
3060         hdev->link_mode = (HCI_LM_ACCEPT);
3061         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3062         hdev->io_capability = 0x03;     /* No Input No Output */
3063         hdev->manufacturer = 0xffff;    /* Default to internal use */
3064         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3065         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3066
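        /* Sniff intervals are expressed in baseband slots of 0.625 ms,
         * i.e. 80 slots = 50 ms and 800 slots = 500 ms.
         */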
3067         hdev->sniff_max_interval = 800;
3068         hdev->sniff_min_interval = 80;
3069
3070         hdev->le_adv_channel_map = 0x07;
3071         hdev->le_adv_min_interval = 0x0800;
3072         hdev->le_adv_max_interval = 0x0800;
3073         hdev->le_scan_interval = 0x0060;
3074         hdev->le_scan_window = 0x0030;
3075         hdev->le_conn_min_interval = 0x0028;
3076         hdev->le_conn_max_interval = 0x0038;
3077         hdev->le_conn_latency = 0x0000;
3078         hdev->le_supv_timeout = 0x002a;
3079         hdev->le_def_tx_len = 0x001b;
3080         hdev->le_def_tx_time = 0x0148;
3081         hdev->le_max_tx_len = 0x001b;
3082         hdev->le_max_tx_time = 0x0148;
3083         hdev->le_max_rx_len = 0x001b;
3084         hdev->le_max_rx_time = 0x0148;
3085
3086         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3087         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3088         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3089         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3090
3091         mutex_init(&hdev->lock);
3092         mutex_init(&hdev->req_lock);
3093
3094         INIT_LIST_HEAD(&hdev->mgmt_pending);
3095         INIT_LIST_HEAD(&hdev->blacklist);
3096         INIT_LIST_HEAD(&hdev->whitelist);
3097         INIT_LIST_HEAD(&hdev->uuids);
3098         INIT_LIST_HEAD(&hdev->link_keys);
3099         INIT_LIST_HEAD(&hdev->long_term_keys);
3100         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3101         INIT_LIST_HEAD(&hdev->remote_oob_data);
3102         INIT_LIST_HEAD(&hdev->le_white_list);
3103         INIT_LIST_HEAD(&hdev->le_conn_params);
3104         INIT_LIST_HEAD(&hdev->pend_le_conns);
3105         INIT_LIST_HEAD(&hdev->pend_le_reports);
3106         INIT_LIST_HEAD(&hdev->conn_hash.list);
3107
3108         INIT_WORK(&hdev->rx_work, hci_rx_work);
3109         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3110         INIT_WORK(&hdev->tx_work, hci_tx_work);
3111         INIT_WORK(&hdev->power_on, hci_power_on);
3112         INIT_WORK(&hdev->error_reset, hci_error_reset);
3113
3114         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3115         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3116         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3117         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3118
3119         skb_queue_head_init(&hdev->rx_q);
3120         skb_queue_head_init(&hdev->cmd_q);
3121         skb_queue_head_init(&hdev->raw_q);
3122
3123         init_waitqueue_head(&hdev->req_wait_q);
3124
3125         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3126
3127         hci_init_sysfs(hdev);
3128         discovery_init(hdev);
3129         adv_info_init(hdev);
3130
3131         return hdev;
3132 }
3133 EXPORT_SYMBOL(hci_alloc_dev);
3134
3135 /* Free HCI device */
3136 void hci_free_dev(struct hci_dev *hdev)
3137 {
3138         /* Will be freed via the device release callback */
3139         put_device(&hdev->dev);
3140 }
3141 EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        error = device_add(&hdev->dev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                hci_dev_set_flag(hdev, HCI_RFKILLED);

        hci_dev_set_flag(hdev, HCI_SETUP);
        hci_dev_set_flag(hdev, HCI_AUTO_OFF);

        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init).
                 */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
        }

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hci_dev_set_flag(hdev, HCI_UNREGISTER);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list
         */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        device_del(&hdev->dev);

        debugfs_remove_recursive(hdev->debugfs);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->blacklist);
        hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_white_list);
        hci_conn_params_clear_all(hdev);
        hci_discovery_filter_clear(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
        const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
        struct sk_buff *skb;

        skb = bt_skb_alloc(3, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, 3), hw_err, 3);

        /* Send Hardware Error to upper stack */
        return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
                      !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
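
/* Illustrative driver-side sketch: a transport driver allocates an
 * skb for a complete frame, tags bt_cb(skb)->pkt_type (HCI_EVENT_PKT,
 * HCI_ACLDATA_PKT or HCI_SCODATA_PKT) and hands it over:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * The frame is queued on rx_q and processed asynchronously by
 * hci_rx_work() on the per-device workqueue.
 */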

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
            index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min_t(uint, scb->expect, count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(hdev, skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
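
/* Note on hci_reassembly() semantics: the return value is the number
 * of input bytes left unconsumed (0 when all input was buffered but
 * the frame is still incomplete), or a negative errno. A completed
 * frame is passed up via hci_recv_frame() and the per-index
 * reassembly buffer is reset to NULL.
 */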

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else {
                        type = bt_cb(skb)->pkt_type;
                }

                rem = hci_reassembly(hdev, type, data, count,
                                     STREAM_REASSEMBLY);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
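
/* Illustrative use, e.g. from a UART transport's receive path
 * (rx_buf and rx_len are hypothetical names):
 *
 *	hci_recv_stream_fragment(hdev, rx_buf, rx_len);
 *
 * The first byte of every frame in the stream is the H:4 packet type
 * indicator; the payload behind it is collected by hci_reassembly()
 * above until the frame is complete.
 */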

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        mutex_lock(&hci_cb_list_lock);
        list_add_tail(&cb->list, &hci_cb_list);
        mutex_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        mutex_lock(&hci_cb_list_lock);
        list_del(&cb->list);
        mutex_unlock(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        int err;

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        /* Time stamp */
        __net_timestamp(skb);

        /* Send copy to monitor */
        hci_send_to_monitor(hdev, skb);

        if (atomic_read(&hdev->promisc)) {
                /* Send copy to the sockets */
                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        err = hdev->send(hdev, skb);
        if (err < 0) {
                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
                kfree_skb(skb);
        }
}

bool hci_req_pending(struct hci_dev *hdev)
{
        return (hdev->req_status == HCI_REQ_PEND);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
                 const void *param)
{
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        /* Stand-alone HCI commands must be flagged as
         * single-command requests.
         */
        bt_cb(skb)->req_start = 1;

        skb_queue_tail(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}
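
/* Example for a parameter-less command (HCI_OP_RESET carries no
 * payload, so plen is 0 and param is NULL):
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The command is queued on cmd_q and actually transmitted by
 * hci_cmd_work() once the controller has command credit available.
 */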

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}
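
/* The 16-bit handle field of the ACL header packs the 12-bit
 * connection handle with the packet boundary and broadcast flags in
 * the upper four bits; hci_handle_pack() encodes that layout here and
 * hci_handle()/hci_flags() decode it again on the receive side.
 */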

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                          struct sk_buff *skb, __u16 flags)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        skb->len = skb_headlen(skb);
        skb->data_len = 0;

        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

        switch (hdev->dev_type) {
        case HCI_BREDR:
                hci_add_acl_hdr(skb, conn->handle, flags);
                break;
        case HCI_AMP:
                hci_add_acl_hdr(skb, chan->handle, flags);
                break;
        default:
                BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
                return;
        }

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(queue, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically. We need to use spin_lock_bh
                 * here because of 6LoWPAN links, as there this function is
                 * called from softirq and using normal spin lock could cause
                 * deadlocks.
                 */
                spin_lock_bh(&queue->lock);

                __skb_queue_tail(queue, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list;
                        list = list->next;

                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(queue, skb);
                } while (list);

                spin_unlock_bh(&queue->lock);
        }
}
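
/* Fragmented skbs arrive here with the remaining fragments chained on
 * skb_shinfo(skb)->frag_list. Only the head fragment keeps the caller
 * supplied ACL_START flag; every continuation fragment is re-flagged
 * as ACL_CONT before it gets its own ACL header, so the controller
 * can stitch the higher-layer PDU back together.
 */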

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled.
         */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else {
                *quote = 0;
        }

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
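
/* Quota example: with hdev->acl_cnt == 10 free buffers and num == 3
 * eligible connections, the picked connection (the one with the
 * fewest packets in flight) gets a quote of 10 / 3 == 3. The quote
 * never drops below 1, so even a saturated controller keeps making
 * progress one packet at a time.
 */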

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_ERR("%s link tx timeout", hdev->name);

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
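
/* Worked example: with hdev->block_len == 64, a 339 byte skb (335
 * bytes of payload behind the 4 byte ACL header) occupies
 * DIV_ROUND_UP(335, 64) == 6 controller data blocks.
 */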

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}
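
/* Packet-based flow control accounts for buffers in whole packets
 * (as reported by HCI Read Buffer Size), while block-based flow
 * control accounts in fixed-size data blocks (HCI Read Data Block
 * Size) and is the scheme used by AMP controllers; flow_ctl_mode
 * selects which of the two schedulers above runs.
 */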

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
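
/* Controllers that advertise no dedicated LE buffers (le_pkts == 0
 * from LE Read Buffer Size) share the BR/EDR ACL buffer pool, which
 * is why the accounting above falls back to acl_cnt in that case.
 */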

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req_start;
}
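
/* Commands belonging to one request sit back-to-back on cmd_q and
 * only the first command of each request has req_start set. An empty
 * queue, or a queue whose head starts a new request, therefore means
 * the current request has fully drained.
 */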

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there's still more commands in
         * this request the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req_complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req_complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req_start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req_complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
}

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
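
/* The command credit consumed here is replenished by the event
 * processing path based on the Num_HCI_Command_Packets field carried
 * in Command Complete and Command Status events; further commands are
 * only issued once the controller signals it can accept them.
 */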