Bluetooth: Fix SMP channel registration for unconfigured controllers
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
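
/* Illustrative usage note (not part of the original file): once
 * __hci_init() below has created the "dut_mode" attribute, it can be
 * toggled from userspace, assuming the standard debugfs mount point
 * and "hci0" as an example device name:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE and writing "N" resets the
 * controller via HCI_OP_RESET, exactly as dut_mode_write() above does.
 */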

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
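
/* A minimal caller sketch (illustrative, not part of the original
 * file): issue a synchronous HCI Reset while holding the request lock,
 * following the same pattern as dut_mode_write() above. The function
 * name is hypothetical; the #if 0 guard keeps it out of the build.
 */
#if 0
static int example_sync_reset(struct hci_dev *hdev)
{
        struct sk_buff *skb;
        int err;

        hci_req_lock(hdev);
        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* First byte of the returned parameters is the status code */
        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        return err;
}
#endif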

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
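
/* Illustrative sketch of the request API (not part of the original
 * file): a builder callback queues one or more commands with
 * hci_req_add() and hci_req_sync() runs them synchronously.
 * hci_scan_req() further below is a real builder of this shape; the
 * function names here are hypothetical and the #if 0 guard keeps them
 * out of the build.
 */
#if 0
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
        /* SCAN_PAGE (0x02) enables page scan only */
        return hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
                            HCI_CMD_TIMEOUT);
}
#endif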

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 * 0.625 ms = 20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
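
/* Added reference note (not part of the original file): the values
 * returned above select the Write Inquiry Mode parameter from the
 * Bluetooth Core Specification: 0x00 = standard Inquiry Result,
 * 0x01 = Inquiry Result with RSSI, 0x02 = Inquiry Result with RSSI or
 * Extended Inquiry Result. The manufacturer/revision special cases
 * appear to cover controllers that handle RSSI inquiry results without
 * advertising it in their LMP feature bits.
 */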

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * max_page to a minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send it only if the command is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev)) {
                hci_debugfs_create_le(hdev);
                smp_register(hdev);
        }

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
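
/* Illustrative usage (not part of the original file): every successful
 * hci_dev_get() must be balanced with a hci_dev_put(), as hci_inquiry()
 * further below demonstrates. The function name here is hypothetical
 * and the #if 0 guard keeps it out of the build.
 */
#if 0
static int example_with_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        BT_DBG("%s up=%d", hdev->name, test_bit(HCI_UP, &hdev->flags));

        hci_dev_put(hdev);
        return 0;
}
#endif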

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

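/* Added descriptive note (not part of the original file): re-insert
 * the entry into the resolve list, which appears to be kept sorted by
 * ascending |RSSI| so that names of the strongest devices are resolved
 * first. Entries already in NAME_PENDING state are not displaced.
 */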
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                        ret = __hci_unconf_init(hdev);
        }

        if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
                    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}
1533
1534 /* ---- HCI ioctl helpers ---- */
1535
1536 int hci_dev_open(__u16 dev)
1537 {
1538         struct hci_dev *hdev;
1539         int err;
1540
1541         hdev = hci_dev_get(dev);
1542         if (!hdev)
1543                 return -ENODEV;
1544
1545         /* Devices that are marked as unconfigured can only be powered
1546          * up as user channel. Trying to bring them up as normal devices
1547          * will result into a failure. Only user channel operation is
1548          * possible.
1549          *
1550          * When this function is called for a user channel, the flag
1551          * HCI_USER_CHANNEL will be set first before attempting to
1552          * open the device.
1553          */
1554         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1555             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1556                 err = -EOPNOTSUPP;
1557                 goto done;
1558         }
1559
1560         /* We need to ensure that no other power on/off work is pending
1561          * before proceeding to call hci_dev_do_open. This is
1562          * particularly important if the setup procedure has not yet
1563          * completed.
1564          */
1565         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1566                 cancel_delayed_work(&hdev->power_off);
1567
1568         /* After this call it is guaranteed that the setup procedure
1569          * has finished. This means that error conditions like RFKILL
1570          * or the lack of a valid public or static random address apply.
1571          */
1572         flush_workqueue(hdev->req_workqueue);
1573
1574         /* For controllers not using the management interface and that
1575          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1576          * so that pairing works for them. Once the management interface
1577          * is in use this bit will be cleared again and userspace has
1578          * to explicitly enable it.
1579          */
1580         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1581             !test_bit(HCI_MGMT, &hdev->dev_flags))
1582                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1583
1584         err = hci_dev_do_open(hdev);
1585
1586 done:
1587         hci_dev_put(hdev);
1588         return err;
1589 }
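
/* Usage sketch (illustrative, not from this file): userspace reaches this
 * helper through the HCIDEVUP ioctl on a raw HCI socket. Error handling is
 * trimmed, and EALREADY simply means the device was already up.
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(sk, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *	close(sk);
 */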
1590
1591 /* This function requires the caller holds hdev->lock */
1592 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1593 {
1594         struct hci_conn_params *p;
1595
1596         list_for_each_entry(p, &hdev->le_conn_params, list) {
1597                 if (p->conn) {
1598                         hci_conn_drop(p->conn);
1599                         hci_conn_put(p->conn);
1600                         p->conn = NULL;
1601                 }
1602                 list_del_init(&p->action);
1603         }
1604
1605         BT_DBG("All LE pending actions cleared");
1606 }
1607
1608 static int hci_dev_do_close(struct hci_dev *hdev)
1609 {
1610         BT_DBG("%s %p", hdev->name, hdev);
1611
1612         cancel_delayed_work(&hdev->power_off);
1613
1614         hci_req_cancel(hdev, ENODEV);
1615         hci_req_lock(hdev);
1616
1617         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1618                 cancel_delayed_work_sync(&hdev->cmd_timer);
1619                 hci_req_unlock(hdev);
1620                 return 0;
1621         }
1622
1623         /* Flush RX and TX works */
1624         flush_work(&hdev->tx_work);
1625         flush_work(&hdev->rx_work);
1626
1627         if (hdev->discov_timeout > 0) {
1628                 cancel_delayed_work(&hdev->discov_off);
1629                 hdev->discov_timeout = 0;
1630                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1631                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1632         }
1633
1634         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1635                 cancel_delayed_work(&hdev->service_cache);
1636
1637         cancel_delayed_work_sync(&hdev->le_scan_disable);
1638
1639         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1640                 cancel_delayed_work_sync(&hdev->rpa_expired);
1641
1642         /* Avoid potential lockdep warnings from the *_flush() calls by
1643          * ensuring the workqueue is empty up front.
1644          */
1645         drain_workqueue(hdev->workqueue);
1646
1647         hci_dev_lock(hdev);
1648
1649         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1650                 if (hdev->dev_type == HCI_BREDR)
1651                         mgmt_powered(hdev, 0);
1652         }
1653
1654         hci_inquiry_cache_flush(hdev);
1655         hci_pend_le_actions_clear(hdev);
1656         hci_conn_hash_flush(hdev);
1657         hci_dev_unlock(hdev);
1658
1659         hci_notify(hdev, HCI_DEV_DOWN);
1660
1661         if (hdev->flush)
1662                 hdev->flush(hdev);
1663
1664         /* Reset device */
1665         skb_queue_purge(&hdev->cmd_q);
1666         atomic_set(&hdev->cmd_cnt, 1);
1667         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1668             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1669             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1670                 set_bit(HCI_INIT, &hdev->flags);
1671                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1672                 clear_bit(HCI_INIT, &hdev->flags);
1673         }
1674
1675         /* Flush the command work */
1676         flush_work(&hdev->cmd_work);
1677
1678         /* Drop queues */
1679         skb_queue_purge(&hdev->rx_q);
1680         skb_queue_purge(&hdev->cmd_q);
1681         skb_queue_purge(&hdev->raw_q);
1682
1683         /* Drop last sent command */
1684         if (hdev->sent_cmd) {
1685                 cancel_delayed_work_sync(&hdev->cmd_timer);
1686                 kfree_skb(hdev->sent_cmd);
1687                 hdev->sent_cmd = NULL;
1688         }
1689
1690         kfree_skb(hdev->recv_evt);
1691         hdev->recv_evt = NULL;
1692
1693         /* After this point our queues are empty
1694          * and no tasks are scheduled. */
1695         hdev->close(hdev);
1696
1697         /* Clear flags */
1698         hdev->flags &= BIT(HCI_RAW);
1699         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1700
1701         /* Controller radio is available but is currently powered down */
1702         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1703
1704         memset(hdev->eir, 0, sizeof(hdev->eir));
1705         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1706         bacpy(&hdev->random_addr, BDADDR_ANY);
1707
1708         hci_req_unlock(hdev);
1709
1710         hci_dev_put(hdev);
1711         return 0;
1712 }
1713
1714 int hci_dev_close(__u16 dev)
1715 {
1716         struct hci_dev *hdev;
1717         int err;
1718
1719         hdev = hci_dev_get(dev);
1720         if (!hdev)
1721                 return -ENODEV;
1722
1723         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1724                 err = -EBUSY;
1725                 goto done;
1726         }
1727
1728         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1729                 cancel_delayed_work(&hdev->power_off);
1730
1731         err = hci_dev_do_close(hdev);
1732
1733 done:
1734         hci_dev_put(hdev);
1735         return err;
1736 }
1737
1738 int hci_dev_reset(__u16 dev)
1739 {
1740         struct hci_dev *hdev;
1741         int ret = 0;
1742
1743         hdev = hci_dev_get(dev);
1744         if (!hdev)
1745                 return -ENODEV;
1746
1747         hci_req_lock(hdev);
1748
1749         if (!test_bit(HCI_UP, &hdev->flags)) {
1750                 ret = -ENETDOWN;
1751                 goto done;
1752         }
1753
1754         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1755                 ret = -EBUSY;
1756                 goto done;
1757         }
1758
1759         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1760                 ret = -EOPNOTSUPP;
1761                 goto done;
1762         }
1763
1764         /* Drop queues */
1765         skb_queue_purge(&hdev->rx_q);
1766         skb_queue_purge(&hdev->cmd_q);
1767
1768         /* Avoid potential lockdep warnings from the *_flush() calls by
1769          * ensuring the workqueue is empty up front.
1770          */
1771         drain_workqueue(hdev->workqueue);
1772
1773         hci_dev_lock(hdev);
1774         hci_inquiry_cache_flush(hdev);
1775         hci_conn_hash_flush(hdev);
1776         hci_dev_unlock(hdev);
1777
1778         if (hdev->flush)
1779                 hdev->flush(hdev);
1780
1781         atomic_set(&hdev->cmd_cnt, 1);
1782         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1783
1784         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1785
1786 done:
1787         hci_req_unlock(hdev);
1788         hci_dev_put(hdev);
1789         return ret;
1790 }
1791
1792 int hci_dev_reset_stat(__u16 dev)
1793 {
1794         struct hci_dev *hdev;
1795         int ret = 0;
1796
1797         hdev = hci_dev_get(dev);
1798         if (!hdev)
1799                 return -ENODEV;
1800
1801         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1802                 ret = -EBUSY;
1803                 goto done;
1804         }
1805
1806         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1807                 ret = -EOPNOTSUPP;
1808                 goto done;
1809         }
1810
1811         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1812
1813 done:
1814         hci_dev_put(hdev);
1815         return ret;
1816 }
1817
1818 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1819 {
1820         bool conn_changed, discov_changed;
1821
1822         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1823
1824         if (scan & SCAN_PAGE)
1825                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1826                                                  &hdev->dev_flags);
1827         else
1828                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1829                                                   &hdev->dev_flags);
1830
1831         if (scan & SCAN_INQUIRY) {
1832                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1833                                                    &hdev->dev_flags);
1834         } else {
1835                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1836                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1837                                                     &hdev->dev_flags);
1838         }
1839
1840         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1841                 return;
1842
1843         if (conn_changed || discov_changed) {
1844                 /* In case this was disabled through mgmt */
1845                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1846
1847                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1848                         mgmt_update_adv_data(hdev);
1849
1850                 mgmt_new_settings(hdev);
1851         }
1852 }
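
/* For example (illustrative): a legacy "hciconfig hci0 piscan" issues
 * HCISETSCAN with dev_opt = SCAN_PAGE | SCAN_INQUIRY, which the logic above
 * turns into HCI_CONNECTABLE plus HCI_DISCOVERABLE, while plain SCAN_PAGE
 * keeps the device connectable but clears both discoverable flags.
 */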
1853
1854 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1855 {
1856         struct hci_dev *hdev;
1857         struct hci_dev_req dr;
1858         int err = 0;
1859
1860         if (copy_from_user(&dr, arg, sizeof(dr)))
1861                 return -EFAULT;
1862
1863         hdev = hci_dev_get(dr.dev_id);
1864         if (!hdev)
1865                 return -ENODEV;
1866
1867         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1868                 err = -EBUSY;
1869                 goto done;
1870         }
1871
1872         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1873                 err = -EOPNOTSUPP;
1874                 goto done;
1875         }
1876
1877         if (hdev->dev_type != HCI_BREDR) {
1878                 err = -EOPNOTSUPP;
1879                 goto done;
1880         }
1881
1882         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1883                 err = -EOPNOTSUPP;
1884                 goto done;
1885         }
1886
1887         switch (cmd) {
1888         case HCISETAUTH:
1889                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1890                                    HCI_INIT_TIMEOUT);
1891                 break;
1892
1893         case HCISETENCRYPT:
1894                 if (!lmp_encrypt_capable(hdev)) {
1895                         err = -EOPNOTSUPP;
1896                         break;
1897                 }
1898
1899                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1900                         /* Auth must be enabled first */
1901                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1902                                            HCI_INIT_TIMEOUT);
1903                         if (err)
1904                                 break;
1905                 }
1906
1907                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1908                                    HCI_INIT_TIMEOUT);
1909                 break;
1910
1911         case HCISETSCAN:
1912                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1913                                    HCI_INIT_TIMEOUT);
1914
1915                 /* Ensure that the connectable and discoverable states
1916                  * get correctly modified as this was a non-mgmt change.
1917                  */
1918                 if (!err)
1919                         hci_update_scan_state(hdev, dr.dev_opt);
1920                 break;
1921
1922         case HCISETLINKPOL:
1923                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1924                                    HCI_INIT_TIMEOUT);
1925                 break;
1926
1927         case HCISETLINKMODE:
1928                 hdev->link_mode = ((__u16) dr.dev_opt) &
1929                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1930                 break;
1931
1932         case HCISETPTYPE:
1933                 hdev->pkt_type = (__u16) dr.dev_opt;
1934                 break;
1935
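        /* Note: for the MTU commands below, dev_opt is reinterpreted as two
         * adjacent __u16 values, so the layout is endianness dependent. On a
         * little-endian host a caller would pack the parameters roughly as
         * (illustrative):
         *
         *	dr.dev_opt = ((__u32) mtu << 16) | pkt_count;
         */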
1936         case HCISETACLMTU:
1937                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1938                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1939                 break;
1940
1941         case HCISETSCOMTU:
1942                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1943                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1944                 break;
1945
1946         default:
1947                 err = -EINVAL;
1948                 break;
1949         }
1950
1951 done:
1952         hci_dev_put(hdev);
1953         return err;
1954 }
1955
1956 int hci_get_dev_list(void __user *arg)
1957 {
1958         struct hci_dev *hdev;
1959         struct hci_dev_list_req *dl;
1960         struct hci_dev_req *dr;
1961         int n = 0, size, err;
1962         __u16 dev_num;
1963
1964         if (get_user(dev_num, (__u16 __user *) arg))
1965                 return -EFAULT;
1966
1967         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1968                 return -EINVAL;
1969
1970         size = sizeof(*dl) + dev_num * sizeof(*dr);
1971
1972         dl = kzalloc(size, GFP_KERNEL);
1973         if (!dl)
1974                 return -ENOMEM;
1975
1976         dr = dl->dev_req;
1977
1978         read_lock(&hci_dev_list_lock);
1979         list_for_each_entry(hdev, &hci_dev_list, list) {
1980                 unsigned long flags = hdev->flags;
1981
1982                 /* When HCI_AUTO_OFF is set the transport is actually
1983                  * running, but userspace must still see the device as
1984                  * being down.
1985                  */
1986                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1987                         flags &= ~BIT(HCI_UP);
1988
1989                 (dr + n)->dev_id  = hdev->id;
1990                 (dr + n)->dev_opt = flags;
1991
1992                 if (++n >= dev_num)
1993                         break;
1994         }
1995         read_unlock(&hci_dev_list_lock);
1996
1997         dl->dev_num = n;
1998         size = sizeof(*dl) + n * sizeof(*dr);
1999
2000         err = copy_to_user(arg, dl, size);
2001         kfree(dl);
2002
2003         return err ? -EFAULT : 0;
2004 }
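
/* Userspace sketch (illustrative): enumerating controllers through the
 * matching HCIGETDEVLIST ioctl. HCI_MAX_DEV bounds the request size.
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u\n", dl->dev_req[i].dev_id);
 *	free(dl);
 */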
2005
2006 int hci_get_dev_info(void __user *arg)
2007 {
2008         struct hci_dev *hdev;
2009         struct hci_dev_info di;
2010         unsigned long flags;
2011         int err = 0;
2012
2013         if (copy_from_user(&di, arg, sizeof(di)))
2014                 return -EFAULT;
2015
2016         hdev = hci_dev_get(di.dev_id);
2017         if (!hdev)
2018                 return -ENODEV;
2019
2020         /* When HCI_AUTO_OFF is set the transport is actually
2021          * running, but userspace must still see the device as
2022          * being down.
2023          */
2024         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2025                 flags = hdev->flags & ~BIT(HCI_UP);
2026         else
2027                 flags = hdev->flags;
2028
2029         strcpy(di.name, hdev->name);
2030         di.bdaddr   = hdev->bdaddr;
2031         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2032         di.flags    = flags;
2033         di.pkt_type = hdev->pkt_type;
2034         if (lmp_bredr_capable(hdev)) {
2035                 di.acl_mtu  = hdev->acl_mtu;
2036                 di.acl_pkts = hdev->acl_pkts;
2037                 di.sco_mtu  = hdev->sco_mtu;
2038                 di.sco_pkts = hdev->sco_pkts;
2039         } else {
2040                 di.acl_mtu  = hdev->le_mtu;
2041                 di.acl_pkts = hdev->le_pkts;
2042                 di.sco_mtu  = 0;
2043                 di.sco_pkts = 0;
2044         }
2045         di.link_policy = hdev->link_policy;
2046         di.link_mode   = hdev->link_mode;
2047
2048         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2049         memcpy(&di.features, &hdev->features, sizeof(di.features));
2050
2051         if (copy_to_user(arg, &di, sizeof(di)))
2052                 err = -EFAULT;
2053
2054         hci_dev_put(hdev);
2055
2056         return err;
2057 }
2058
2059 /* ---- Interface to HCI drivers ---- */
2060
2061 static int hci_rfkill_set_block(void *data, bool blocked)
2062 {
2063         struct hci_dev *hdev = data;
2064
2065         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2066
2067         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2068                 return -EBUSY;
2069
2070         if (blocked) {
2071                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2072                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2073                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2074                         hci_dev_do_close(hdev);
2075         } else {
2076                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2077         }
2078
2079         return 0;
2080 }
2081
2082 static const struct rfkill_ops hci_rfkill_ops = {
2083         .set_block = hci_rfkill_set_block,
2084 };
2085
2086 static void hci_power_on(struct work_struct *work)
2087 {
2088         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2089         int err;
2090
2091         BT_DBG("%s", hdev->name);
2092
2093         err = hci_dev_do_open(hdev);
2094         if (err < 0) {
2095                 hci_dev_lock(hdev);
2096                 mgmt_set_powered_failed(hdev, err);
2097                 hci_dev_unlock(hdev);
2098                 return;
2099         }
2100
2101         /* During the HCI setup phase, a few error conditions are
2102          * ignored and they need to be checked now. If they are still
2103          * valid, it is important to turn the device back off.
2104          */
2105         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2106             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2107             (hdev->dev_type == HCI_BREDR &&
2108              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2109              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2110                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2111                 hci_dev_do_close(hdev);
2112         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2113                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2114                                    HCI_AUTO_OFF_TIMEOUT);
2115         }
2116
2117         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2118                 /* For unconfigured devices, set the HCI_RAW flag
2119                  * so that userspace can easily identify them.
2120                  */
2121                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2122                         set_bit(HCI_RAW, &hdev->flags);
2123
2124                 /* For fully configured devices, this will send
2125                  * the Index Added event. For unconfigured devices,
2126                  * it will send the Unconfigured Index Added event.
2127                  *
2128                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2129                  * and no event will be sent.
2130                  */
2131                 mgmt_index_added(hdev);
2132         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2133                 /* If the controller is now fully configured, it
2134                  * is important to clear the HCI_RAW flag.
2135                  */
2136                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2137                         clear_bit(HCI_RAW, &hdev->flags);
2138
2139                 /* Powering on the controller with HCI_CONFIG set only
2140                  * happens with the transition from unconfigured to
2141                  * configured. This will send the Index Added event.
2142                  */
2143                 mgmt_index_added(hdev);
2144         }
2145 }
2146
2147 static void hci_power_off(struct work_struct *work)
2148 {
2149         struct hci_dev *hdev = container_of(work, struct hci_dev,
2150                                             power_off.work);
2151
2152         BT_DBG("%s", hdev->name);
2153
2154         hci_dev_do_close(hdev);
2155 }
2156
2157 static void hci_discov_off(struct work_struct *work)
2158 {
2159         struct hci_dev *hdev;
2160
2161         hdev = container_of(work, struct hci_dev, discov_off.work);
2162
2163         BT_DBG("%s", hdev->name);
2164
2165         mgmt_discoverable_timeout(hdev);
2166 }
2167
2168 void hci_uuids_clear(struct hci_dev *hdev)
2169 {
2170         struct bt_uuid *uuid, *tmp;
2171
2172         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2173                 list_del(&uuid->list);
2174                 kfree(uuid);
2175         }
2176 }
2177
2178 void hci_link_keys_clear(struct hci_dev *hdev)
2179 {
2180         struct link_key *key;
2181
2182         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2183                 list_del_rcu(&key->list);
2184                 kfree_rcu(key, rcu);
2185         }
2186 }
2187
2188 void hci_smp_ltks_clear(struct hci_dev *hdev)
2189 {
2190         struct smp_ltk *k;
2191
2192         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2193                 list_del_rcu(&k->list);
2194                 kfree_rcu(k, rcu);
2195         }
2196 }
2197
2198 void hci_smp_irks_clear(struct hci_dev *hdev)
2199 {
2200         struct smp_irk *k;
2201
2202         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2203                 list_del_rcu(&k->list);
2204                 kfree_rcu(k, rcu);
2205         }
2206 }
2207
2208 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2209 {
2210         struct link_key *k;
2211
2212         rcu_read_lock();
2213         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2214                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2215                         rcu_read_unlock();
2216                         return k;
2217                 }
2218         }
2219         rcu_read_unlock();
2220
2221         return NULL;
2222 }
2223
2224 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2225                                u8 key_type, u8 old_key_type)
2226 {
2227         /* Legacy key */
2228         if (key_type < 0x03)
2229                 return true;
2230
2231         /* Debug keys are insecure so don't store them persistently */
2232         if (key_type == HCI_LK_DEBUG_COMBINATION)
2233                 return false;
2234
2235         /* Changed combination key and there's no previous one */
2236         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2237                 return false;
2238
2239         /* Security mode 3 case */
2240         if (!conn)
2241                 return true;
2242
2243         /* BR/EDR key derived using SC from an LE link */
2244         if (conn->type == LE_LINK)
2245                 return true;
2246
2247         /* Neither local nor remote side set no-bonding as the requirement */
2248         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2249                 return true;
2250
2251         /* Local side had dedicated bonding as requirement */
2252         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2253                 return true;
2254
2255         /* Remote side had dedicated bonding as requirement */
2256         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2257                 return true;
2258
2259         /* If none of the above criteria match, then don't store the key
2260          * persistently */
2261         return false;
2262 }
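
/* Worked example of the checks above (illustrative): a pairing where both
 * sides advertise general bonding (auth requirement 0x04) passes the
 * "> 0x01" test, so the resulting combination key is stored persistently;
 * an outgoing no-bonding pairing (0x00/0x01 on both ends) with a key_type
 * of 0x03 or higher falls through every check and the key lives only as
 * long as the connection.
 */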
2263
2264 static u8 ltk_role(u8 type)
2265 {
2266         if (type == SMP_LTK)
2267                 return HCI_ROLE_MASTER;
2268
2269         return HCI_ROLE_SLAVE;
2270 }
2271
2272 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2273                              u8 addr_type, u8 role)
2274 {
2275         struct smp_ltk *k;
2276
2277         rcu_read_lock();
2278         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2279                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2280                         continue;
2281
2282                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2283                         rcu_read_unlock();
2284                         return k;
2285                 }
2286         }
2287         rcu_read_unlock();
2288
2289         return NULL;
2290 }
2291
2292 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2293 {
2294         struct smp_irk *irk;
2295
2296         rcu_read_lock();
2297         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2298                 if (!bacmp(&irk->rpa, rpa)) {
2299                         rcu_read_unlock();
2300                         return irk;
2301                 }
2302         }
2303
2304         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2305                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2306                         bacpy(&irk->rpa, rpa);
2307                         rcu_read_unlock();
2308                         return irk;
2309                 }
2310         }
2311         rcu_read_unlock();
2312
2313         return NULL;
2314 }
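
/* Background for the second pass above: a resolvable private address
 * carries a 24-bit prand (top two bits 0b01) plus a 24-bit hash computed
 * as ah(IRK, prand); smp_irk_matches() recomputes that hash for each
 * stored IRK whenever the cached ->rpa value does not match.
 */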
2315
2316 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2317                                      u8 addr_type)
2318 {
2319         struct smp_irk *irk;
2320
2321         /* Identity Address must be public or static random */
2322         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2323                 return NULL;
2324
2325         rcu_read_lock();
2326         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2327                 if (addr_type == irk->addr_type &&
2328                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2329                         rcu_read_unlock();
2330                         return irk;
2331                 }
2332         }
2333         rcu_read_unlock();
2334
2335         return NULL;
2336 }
2337
2338 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2339                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2340                                   u8 pin_len, bool *persistent)
2341 {
2342         struct link_key *key, *old_key;
2343         u8 old_key_type;
2344
2345         old_key = hci_find_link_key(hdev, bdaddr);
2346         if (old_key) {
2347                 old_key_type = old_key->type;
2348                 key = old_key;
2349         } else {
2350                 old_key_type = conn ? conn->key_type : 0xff;
2351                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2352                 if (!key)
2353                         return NULL;
2354                 list_add_rcu(&key->list, &hdev->link_keys);
2355         }
2356
2357         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2358
2359         /* Some buggy controller combinations generate a changed
2360          * combination key for legacy pairing even when there's no
2361          * previous key */
2362         if (type == HCI_LK_CHANGED_COMBINATION &&
2363             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2364                 type = HCI_LK_COMBINATION;
2365                 if (conn)
2366                         conn->key_type = type;
2367         }
2368
2369         bacpy(&key->bdaddr, bdaddr);
2370         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2371         key->pin_len = pin_len;
2372
2373         if (type == HCI_LK_CHANGED_COMBINATION)
2374                 key->type = old_key_type;
2375         else
2376                 key->type = type;
2377
2378         if (persistent)
2379                 *persistent = hci_persistent_key(hdev, conn, type,
2380                                                  old_key_type);
2381
2382         return key;
2383 }
2384
2385 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2386                             u8 addr_type, u8 type, u8 authenticated,
2387                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2388 {
2389         struct smp_ltk *key, *old_key;
2390         u8 role = ltk_role(type);
2391
2392         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2393         if (old_key)
2394                 key = old_key;
2395         else {
2396                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2397                 if (!key)
2398                         return NULL;
2399                 list_add_rcu(&key->list, &hdev->long_term_keys);
2400         }
2401
2402         bacpy(&key->bdaddr, bdaddr);
2403         key->bdaddr_type = addr_type;
2404         memcpy(key->val, tk, sizeof(key->val));
2405         key->authenticated = authenticated;
2406         key->ediv = ediv;
2407         key->rand = rand;
2408         key->enc_size = enc_size;
2409         key->type = type;
2410
2411         return key;
2412 }
2413
2414 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2415                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2416 {
2417         struct smp_irk *irk;
2418
2419         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2420         if (!irk) {
2421                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2422                 if (!irk)
2423                         return NULL;
2424
2425                 bacpy(&irk->bdaddr, bdaddr);
2426                 irk->addr_type = addr_type;
2427
2428                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2429         }
2430
2431         memcpy(irk->val, val, 16);
2432         bacpy(&irk->rpa, rpa);
2433
2434         return irk;
2435 }
2436
2437 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2438 {
2439         struct link_key *key;
2440
2441         key = hci_find_link_key(hdev, bdaddr);
2442         if (!key)
2443                 return -ENOENT;
2444
2445         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2446
2447         list_del_rcu(&key->list);
2448         kfree_rcu(key, rcu);
2449
2450         return 0;
2451 }
2452
2453 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2454 {
2455         struct smp_ltk *k;
2456         int removed = 0;
2457
2458         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2459                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2460                         continue;
2461
2462                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2463
2464                 list_del_rcu(&k->list);
2465                 kfree_rcu(k, rcu);
2466                 removed++;
2467         }
2468
2469         return removed ? 0 : -ENOENT;
2470 }
2471
2472 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2473 {
2474         struct smp_irk *k;
2475
2476         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2477                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2478                         continue;
2479
2480                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2481
2482                 list_del_rcu(&k->list);
2483                 kfree_rcu(k, rcu);
2484         }
2485 }
2486
2487 /* HCI command timer function */
2488 static void hci_cmd_timeout(struct work_struct *work)
2489 {
2490         struct hci_dev *hdev = container_of(work, struct hci_dev,
2491                                             cmd_timer.work);
2492
2493         if (hdev->sent_cmd) {
2494                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2495                 u16 opcode = __le16_to_cpu(sent->opcode);
2496
2497                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2498         } else {
2499                 BT_ERR("%s command tx timeout", hdev->name);
2500         }
2501
2502         atomic_set(&hdev->cmd_cnt, 1);
2503         queue_work(hdev->workqueue, &hdev->cmd_work);
2504 }
2505
2506 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2507                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2508 {
2509         struct oob_data *data;
2510
2511         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2512                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2513                         continue;
2514                 if (data->bdaddr_type != bdaddr_type)
2515                         continue;
2516                 return data;
2517         }
2518
2519         return NULL;
2520 }
2521
2522 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2523                                u8 bdaddr_type)
2524 {
2525         struct oob_data *data;
2526
2527         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2528         if (!data)
2529                 return -ENOENT;
2530
2531         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2532
2533         list_del(&data->list);
2534         kfree(data);
2535
2536         return 0;
2537 }
2538
2539 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2540 {
2541         struct oob_data *data, *n;
2542
2543         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2544                 list_del(&data->list);
2545                 kfree(data);
2546         }
2547 }
2548
2549 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2550                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2551                             u8 *hash256, u8 *rand256)
2552 {
2553         struct oob_data *data;
2554
2555         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2556         if (!data) {
2557                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2558                 if (!data)
2559                         return -ENOMEM;
2560
2561                 bacpy(&data->bdaddr, bdaddr);
2562                 data->bdaddr_type = bdaddr_type;
2563                 list_add(&data->list, &hdev->remote_oob_data);
2564         }
2565
2566         if (hash192 && rand192) {
2567                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2568                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2569         } else {
2570                 memset(data->hash192, 0, sizeof(data->hash192));
2571                 memset(data->rand192, 0, sizeof(data->rand192));
2572         }
2573
2574         if (hash256 && rand256) {
2575                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2576                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2577         } else {
2578                 memset(data->hash256, 0, sizeof(data->hash256));
2579                 memset(data->rand256, 0, sizeof(data->rand256));
2580         }
2581
2582         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2583
2584         return 0;
2585 }
2586
2587 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2588                                          bdaddr_t *bdaddr, u8 type)
2589 {
2590         struct bdaddr_list *b;
2591
2592         list_for_each_entry(b, bdaddr_list, list) {
2593                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2594                         return b;
2595         }
2596
2597         return NULL;
2598 }
2599
2600 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2601 {
2602         struct list_head *p, *n;
2603
2604         list_for_each_safe(p, n, bdaddr_list) {
2605                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2606
2607                 list_del(p);
2608                 kfree(b);
2609         }
2610 }
2611
2612 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2613 {
2614         struct bdaddr_list *entry;
2615
2616         if (!bacmp(bdaddr, BDADDR_ANY))
2617                 return -EBADF;
2618
2619         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2620                 return -EEXIST;
2621
2622         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2623         if (!entry)
2624                 return -ENOMEM;
2625
2626         bacpy(&entry->bdaddr, bdaddr);
2627         entry->bdaddr_type = type;
2628
2629         list_add(&entry->list, list);
2630
2631         return 0;
2632 }
2633
2634 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2635 {
2636         struct bdaddr_list *entry;
2637
2638         if (!bacmp(bdaddr, BDADDR_ANY)) {
2639                 hci_bdaddr_list_clear(list);
2640                 return 0;
2641         }
2642
2643         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2644         if (!entry)
2645                 return -ENOENT;
2646
2647         list_del(&entry->list);
2648         kfree(entry);
2649
2650         return 0;
2651 }
2652
2653 /* This function requires the caller holds hdev->lock */
2654 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2655                                                bdaddr_t *addr, u8 addr_type)
2656 {
2657         struct hci_conn_params *params;
2658
2659         /* The conn params list only contains identity addresses */
2660         if (!hci_is_identity_address(addr, addr_type))
2661                 return NULL;
2662
2663         list_for_each_entry(params, &hdev->le_conn_params, list) {
2664                 if (bacmp(&params->addr, addr) == 0 &&
2665                     params->addr_type == addr_type) {
2666                         return params;
2667                 }
2668         }
2669
2670         return NULL;
2671 }
2672
2673 /* This function requires the caller holds hdev->lock */
2674 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2675                                                   bdaddr_t *addr, u8 addr_type)
2676 {
2677         struct hci_conn_params *param;
2678
2679         /* The list only contains identity addresses */
2680         if (!hci_is_identity_address(addr, addr_type))
2681                 return NULL;
2682
2683         list_for_each_entry(param, list, action) {
2684                 if (bacmp(&param->addr, addr) == 0 &&
2685                     param->addr_type == addr_type)
2686                         return param;
2687         }
2688
2689         return NULL;
2690 }
2691
2692 /* This function requires the caller holds hdev->lock */
2693 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2694                                             bdaddr_t *addr, u8 addr_type)
2695 {
2696         struct hci_conn_params *params;
2697
2698         if (!hci_is_identity_address(addr, addr_type))
2699                 return NULL;
2700
2701         params = hci_conn_params_lookup(hdev, addr, addr_type);
2702         if (params)
2703                 return params;
2704
2705         params = kzalloc(sizeof(*params), GFP_KERNEL);
2706         if (!params) {
2707                 BT_ERR("Out of memory");
2708                 return NULL;
2709         }
2710
2711         bacpy(&params->addr, addr);
2712         params->addr_type = addr_type;
2713
2714         list_add(&params->list, &hdev->le_conn_params);
2715         INIT_LIST_HEAD(&params->action);
2716
2717         params->conn_min_interval = hdev->le_conn_min_interval;
2718         params->conn_max_interval = hdev->le_conn_max_interval;
2719         params->conn_latency = hdev->le_conn_latency;
2720         params->supervision_timeout = hdev->le_supv_timeout;
2721         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2722
2723         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2724
2725         return params;
2726 }
2727
2728 static void hci_conn_params_free(struct hci_conn_params *params)
2729 {
2730         if (params->conn) {
2731                 hci_conn_drop(params->conn);
2732                 hci_conn_put(params->conn);
2733         }
2734
2735         list_del(&params->action);
2736         list_del(&params->list);
2737         kfree(params);
2738 }
2739
2740 /* This function requires the caller holds hdev->lock */
2741 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2742 {
2743         struct hci_conn_params *params;
2744
2745         params = hci_conn_params_lookup(hdev, addr, addr_type);
2746         if (!params)
2747                 return;
2748
2749         hci_conn_params_free(params);
2750
2751         hci_update_background_scan(hdev);
2752
2753         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754 }
2755
2756 /* This function requires the caller holds hdev->lock */
2757 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2758 {
2759         struct hci_conn_params *params, *tmp;
2760
2761         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2762                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2763                         continue;
2764                 list_del(&params->list);
2765                 kfree(params);
2766         }
2767
2768         BT_DBG("All LE disabled connection parameters were removed");
2769 }
2770
2771 /* This function requires the caller holds hdev->lock */
2772 void hci_conn_params_clear_all(struct hci_dev *hdev)
2773 {
2774         struct hci_conn_params *params, *tmp;
2775
2776         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2777                 hci_conn_params_free(params);
2778
2779         hci_update_background_scan(hdev);
2780
2781         BT_DBG("All LE connection parameters were removed");
2782 }
2783
2784 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2785 {
2786         if (status) {
2787                 BT_ERR("Failed to start inquiry: status %d", status);
2788
2789                 hci_dev_lock(hdev);
2790                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2791                 hci_dev_unlock(hdev);
2792                 return;
2793         }
2794 }
2795
2796 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2797 {
2798         /* General inquiry access code (GIAC) */
2799         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2800         struct hci_request req;
2801         struct hci_cp_inquiry cp;
2802         int err;
2803
2804         if (status) {
2805                 BT_ERR("Failed to disable LE scanning: status %d", status);
2806                 return;
2807         }
2808
2809         switch (hdev->discovery.type) {
2810         case DISCOV_TYPE_LE:
2811                 hci_dev_lock(hdev);
2812                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2813                 hci_dev_unlock(hdev);
2814                 break;
2815
2816         case DISCOV_TYPE_INTERLEAVED:
2817                 hci_req_init(&req, hdev);
2818
2819                 memset(&cp, 0, sizeof(cp));
2820                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2821                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2822                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2823
2824                 hci_dev_lock(hdev);
2825
2826                 hci_inquiry_cache_flush(hdev);
2827
2828                 err = hci_req_run(&req, inquiry_complete);
2829                 if (err) {
2830                         BT_ERR("Inquiry request failed: err %d", err);
2831                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2832                 }
2833
2834                 hci_dev_unlock(hdev);
2835                 break;
2836         }
2837 }
2838
2839 static void le_scan_disable_work(struct work_struct *work)
2840 {
2841         struct hci_dev *hdev = container_of(work, struct hci_dev,
2842                                             le_scan_disable.work);
2843         struct hci_request req;
2844         int err;
2845
2846         BT_DBG("%s", hdev->name);
2847
2848         hci_req_init(&req, hdev);
2849
2850         hci_req_add_le_scan_disable(&req);
2851
2852         err = hci_req_run(&req, le_scan_disable_work_complete);
2853         if (err)
2854                 BT_ERR("Disable LE scanning request failed: err %d", err);
2855 }
2856
2857 /* Copy the Identity Address of the controller.
2858  *
2859  * If the controller has a public BD_ADDR, then by default use that one.
2860  * If this is an LE-only controller without a public address, default to
2861  * the static random address.
2862  *
2863  * For debugging purposes it is possible to force controllers with a
2864  * public address to use the static random address instead.
2865  *
2866  * If BR/EDR has been disabled on a dual-mode controller and
2867  * userspace has configured a static address, that address
2868  * becomes the identity address instead of the public BR/EDR address.
2869  */
2870 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2871                                u8 *bdaddr_type)
2872 {
2873         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2874             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2875             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2876              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2877                 bacpy(bdaddr, &hdev->static_addr);
2878                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2879         } else {
2880                 bacpy(bdaddr, &hdev->bdaddr);
2881                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2882         }
2883 }
2884
2885 /* Alloc HCI device */
2886 struct hci_dev *hci_alloc_dev(void)
2887 {
2888         struct hci_dev *hdev;
2889
2890         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2891         if (!hdev)
2892                 return NULL;
2893
2894         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2895         hdev->esco_type = (ESCO_HV1);
2896         hdev->link_mode = (HCI_LM_ACCEPT);
2897         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2898         hdev->io_capability = 0x03;     /* No Input No Output */
2899         hdev->manufacturer = 0xffff;    /* Default to internal use */
2900         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2901         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2902
2903         hdev->sniff_max_interval = 800;
2904         hdev->sniff_min_interval = 80;
2905
2906         hdev->le_adv_channel_map = 0x07;
2907         hdev->le_adv_min_interval = 0x0800;
2908         hdev->le_adv_max_interval = 0x0800;
2909         hdev->le_scan_interval = 0x0060;
2910         hdev->le_scan_window = 0x0030;
2911         hdev->le_conn_min_interval = 0x0028;
2912         hdev->le_conn_max_interval = 0x0038;
2913         hdev->le_conn_latency = 0x0000;
2914         hdev->le_supv_timeout = 0x002a;
2915         hdev->le_def_tx_len = 0x001b;
2916         hdev->le_def_tx_time = 0x0148;
2917         hdev->le_max_tx_len = 0x001b;
2918         hdev->le_max_tx_time = 0x0148;
2919         hdev->le_max_rx_len = 0x001b;
2920         hdev->le_max_rx_time = 0x0148;
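        /* The LE defaults above are in controller units: advertising and
         * scan parameters tick in 0.625 ms (0x0800 -> 1.28 s, 0x0060 ->
         * 60 ms, 0x0030 -> 30 ms), connection intervals in 1.25 ms
         * (0x0028 -> 50 ms, 0x0038 -> 70 ms), the supervision timeout in
         * 10 ms (0x002a -> 420 ms), and the data length values in octets
         * and microseconds (0x001b -> 27 bytes, 0x0148 -> 328 us).
         */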
2921
2922         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2923         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2924         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2925         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2926
2927         mutex_init(&hdev->lock);
2928         mutex_init(&hdev->req_lock);
2929
2930         INIT_LIST_HEAD(&hdev->mgmt_pending);
2931         INIT_LIST_HEAD(&hdev->blacklist);
2932         INIT_LIST_HEAD(&hdev->whitelist);
2933         INIT_LIST_HEAD(&hdev->uuids);
2934         INIT_LIST_HEAD(&hdev->link_keys);
2935         INIT_LIST_HEAD(&hdev->long_term_keys);
2936         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2937         INIT_LIST_HEAD(&hdev->remote_oob_data);
2938         INIT_LIST_HEAD(&hdev->le_white_list);
2939         INIT_LIST_HEAD(&hdev->le_conn_params);
2940         INIT_LIST_HEAD(&hdev->pend_le_conns);
2941         INIT_LIST_HEAD(&hdev->pend_le_reports);
2942         INIT_LIST_HEAD(&hdev->conn_hash.list);
2943
2944         INIT_WORK(&hdev->rx_work, hci_rx_work);
2945         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2946         INIT_WORK(&hdev->tx_work, hci_tx_work);
2947         INIT_WORK(&hdev->power_on, hci_power_on);
2948
2949         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2950         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2951         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2952
2953         skb_queue_head_init(&hdev->rx_q);
2954         skb_queue_head_init(&hdev->cmd_q);
2955         skb_queue_head_init(&hdev->raw_q);
2956
2957         init_waitqueue_head(&hdev->req_wait_q);
2958
2959         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2960
2961         hci_init_sysfs(hdev);
2962         discovery_init(hdev);
2963
2964         return hdev;
2965 }
2966 EXPORT_SYMBOL(hci_alloc_dev);
2967
2968 /* Free HCI device */
2969 void hci_free_dev(struct hci_dev *hdev)
2970 {
2971         /* will free via device release */
2972         put_device(&hdev->dev);
2973 }
2974 EXPORT_SYMBOL(hci_free_dev);
2975
2976 /* Register HCI device */
2977 int hci_register_dev(struct hci_dev *hdev)
2978 {
2979         int id, error;
2980
2981         if (!hdev->open || !hdev->close || !hdev->send)
2982                 return -EINVAL;
2983
2984         /* Do not allow HCI_AMP devices to register at index 0,
2985          * so the index can be used as the AMP controller ID.
2986          */
2987         switch (hdev->dev_type) {
2988         case HCI_BREDR:
2989                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2990                 break;
2991         case HCI_AMP:
2992                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2993                 break;
2994         default:
2995                 return -EINVAL;
2996         }
2997
2998         if (id < 0)
2999                 return id;
3000
3001         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3002         hdev->id = id;
3003
3004         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3005
3006         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3007                                           WQ_MEM_RECLAIM, 1, hdev->name);
3008         if (!hdev->workqueue) {
3009                 error = -ENOMEM;
3010                 goto err;
3011         }
3012
3013         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3014                                               WQ_MEM_RECLAIM, 1, hdev->name);
3015         if (!hdev->req_workqueue) {
3016                 destroy_workqueue(hdev->workqueue);
3017                 error = -ENOMEM;
3018                 goto err;
3019         }
3020
3021         if (!IS_ERR_OR_NULL(bt_debugfs))
3022                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3023
3024         dev_set_name(&hdev->dev, "%s", hdev->name);
3025
3026         error = device_add(&hdev->dev);
3027         if (error < 0)
3028                 goto err_wqueue;
3029
3030         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3031                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3032                                     hdev);
3033         if (hdev->rfkill) {
3034                 if (rfkill_register(hdev->rfkill) < 0) {
3035                         rfkill_destroy(hdev->rfkill);
3036                         hdev->rfkill = NULL;
3037                 }
3038         }
3039
3040         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3041                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3042
3043         set_bit(HCI_SETUP, &hdev->dev_flags);
3044         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3045
3046         if (hdev->dev_type == HCI_BREDR) {
3047                 /* Assume BR/EDR support until proven otherwise (such as
3048                  * through reading supported features during init).
3049                  */
3050                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3051         }
3052
3053         write_lock(&hci_dev_list_lock);
3054         list_add(&hdev->list, &hci_dev_list);
3055         write_unlock(&hci_dev_list_lock);
3056
3057         /* Devices that are marked for raw-only usage are unconfigured
3058          * and should not be included in normal operation.
3059          */
3060         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3061                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3062
3063         hci_notify(hdev, HCI_DEV_REG);
3064         hci_dev_hold(hdev);
3065
3066         queue_work(hdev->req_workqueue, &hdev->power_on);
3067
3068         return id;
3069
3070 err_wqueue:
3071         destroy_workqueue(hdev->workqueue);
3072         destroy_workqueue(hdev->req_workqueue);
3073 err:
3074         ida_simple_remove(&hci_index_ida, hdev->id);
3075
3076         return error;
3077 }
3078 EXPORT_SYMBOL(hci_register_dev);
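
/* Driver-side sketch (illustrative; my_open/my_close/my_send are
 * placeholder callbacks): a minimal transport driver registers itself
 * roughly like this.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_VIRTUAL;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, priv);
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */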
3079
3080 /* Unregister HCI device */
3081 void hci_unregister_dev(struct hci_dev *hdev)
3082 {
3083         int i, id;
3084
3085         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3086
3087         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3088
3089         id = hdev->id;
3090
3091         write_lock(&hci_dev_list_lock);
3092         list_del(&hdev->list);
3093         write_unlock(&hci_dev_list_lock);
3094
3095         hci_dev_do_close(hdev);
3096
3097         for (i = 0; i < NUM_REASSEMBLY; i++)
3098                 kfree_skb(hdev->reassembly[i]);
3099
3100         cancel_work_sync(&hdev->power_on);
3101
3102         if (!test_bit(HCI_INIT, &hdev->flags) &&
3103             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3104             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3105                 hci_dev_lock(hdev);
3106                 mgmt_index_removed(hdev);
3107                 hci_dev_unlock(hdev);
3108         }
3109
3110         /* mgmt_index_removed should take care of emptying the
3111          * pending list */
3112         BUG_ON(!list_empty(&hdev->mgmt_pending));
3113
3114         hci_notify(hdev, HCI_DEV_UNREG);
3115
3116         if (hdev->rfkill) {
3117                 rfkill_unregister(hdev->rfkill);
3118                 rfkill_destroy(hdev->rfkill);
3119         }
3120
3121         smp_unregister(hdev);
3122
3123         device_del(&hdev->dev);
3124
3125         debugfs_remove_recursive(hdev->debugfs);
3126
3127         destroy_workqueue(hdev->workqueue);
3128         destroy_workqueue(hdev->req_workqueue);
3129
3130         hci_dev_lock(hdev);
3131         hci_bdaddr_list_clear(&hdev->blacklist);
3132         hci_bdaddr_list_clear(&hdev->whitelist);
3133         hci_uuids_clear(hdev);
3134         hci_link_keys_clear(hdev);
3135         hci_smp_ltks_clear(hdev);
3136         hci_smp_irks_clear(hdev);
3137         hci_remote_oob_data_clear(hdev);
3138         hci_bdaddr_list_clear(&hdev->le_white_list);
3139         hci_conn_params_clear_all(hdev);
3140         hci_discovery_filter_clear(hdev);
3141         hci_dev_unlock(hdev);
3142
3143         hci_dev_put(hdev);
3144
3145         ida_simple_remove(&hci_index_ida, id);
3146 }
3147 EXPORT_SYMBOL(hci_unregister_dev);
3148
3149 /* Suspend HCI device */
3150 int hci_suspend_dev(struct hci_dev *hdev)
3151 {
3152         hci_notify(hdev, HCI_DEV_SUSPEND);
3153         return 0;
3154 }
3155 EXPORT_SYMBOL(hci_suspend_dev);
3156
3157 /* Resume HCI device */
3158 int hci_resume_dev(struct hci_dev *hdev)
3159 {
3160         hci_notify(hdev, HCI_DEV_RESUME);
3161         return 0;
3162 }
3163 EXPORT_SYMBOL(hci_resume_dev);
3164
3165 /* Reset HCI device */
3166 int hci_reset_dev(struct hci_dev *hdev)
3167 {
3168         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3169         struct sk_buff *skb;
3170
3171         skb = bt_skb_alloc(3, GFP_ATOMIC);
3172         if (!skb)
3173                 return -ENOMEM;
3174
3175         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3176         memcpy(skb_put(skb, 3), hw_err, 3);
3177
3178         /* Send Hardware Error to upper stack */
3179         return hci_recv_frame(hdev, skb);
3180 }
3181 EXPORT_SYMBOL(hci_reset_dev);
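
/* A transport driver can call hci_reset_dev() when its hardware stops
 * responding, e.g. (illustrative) after repeated transfer failures in a
 * completion handler; the injected event is then processed exactly like
 * a controller-generated Hardware Error.
 */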
3182
3183 /* Receive frame from HCI drivers */
3184 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3185 {
3186         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3187                       && !test_bit(HCI_INIT, &hdev->flags))) {
3188                 kfree_skb(skb);
3189                 return -ENXIO;
3190         }
3191
3192         /* Incoming skb */
3193         bt_cb(skb)->incoming = 1;
3194
3195         /* Time stamp */
3196         __net_timestamp(skb);
3197
3198         skb_queue_tail(&hdev->rx_q, skb);
3199         queue_work(hdev->workqueue, &hdev->rx_work);
3200
3201         return 0;
3202 }
3203 EXPORT_SYMBOL(hci_recv_frame);
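
/* Receive-path sketch (illustrative, not compiled here): a driver tags
 * the skb with its packet type and hands it over; hci_recv_frame()
 * takes ownership of the skb either way, freeing it on error.
 */
#if 0
static int my_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), buf, len);

        return hci_recv_frame(hdev, skb);
}
#endif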
3204
3205 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3206                           int count, __u8 index)
3207 {
3208         int len = 0;
3209         int hlen = 0;
3210         int remain = count;
3211         struct sk_buff *skb;
3212         struct bt_skb_cb *scb;
3213
3214         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3215             index >= NUM_REASSEMBLY)
3216                 return -EILSEQ;
3217
3218         skb = hdev->reassembly[index];
3219
3220         if (!skb) {
3221                 switch (type) {
3222                 case HCI_ACLDATA_PKT:
3223                         len = HCI_MAX_FRAME_SIZE;
3224                         hlen = HCI_ACL_HDR_SIZE;
3225                         break;
3226                 case HCI_EVENT_PKT:
3227                         len = HCI_MAX_EVENT_SIZE;
3228                         hlen = HCI_EVENT_HDR_SIZE;
3229                         break;
3230                 case HCI_SCODATA_PKT:
3231                         len = HCI_MAX_SCO_SIZE;
3232                         hlen = HCI_SCO_HDR_SIZE;
3233                         break;
3234                 }
3235
3236                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3237                 if (!skb)
3238                         return -ENOMEM;
3239
3240                 scb = (void *) skb->cb;
3241                 scb->expect = hlen;
3242                 scb->pkt_type = type;
3243
3244                 hdev->reassembly[index] = skb;
3245         }
3246
3247         while (count) {
3248                 scb = (void *) skb->cb;
3249                 len = min_t(uint, scb->expect, count);
3250
3251                 memcpy(skb_put(skb, len), data, len);
3252
3253                 count -= len;
3254                 data += len;
3255                 scb->expect -= len;
3256                 remain = count;
3257
3258                 switch (type) {
3259                 case HCI_EVENT_PKT:
3260                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3261                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3262                                 scb->expect = h->plen;
3263
3264                                 if (skb_tailroom(skb) < scb->expect) {
3265                                         kfree_skb(skb);
3266                                         hdev->reassembly[index] = NULL;
3267                                         return -ENOMEM;
3268                                 }
3269                         }
3270                         break;
3271
3272                 case HCI_ACLDATA_PKT:
3273                         if (skb->len == HCI_ACL_HDR_SIZE) {
3274                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3275                                 scb->expect = __le16_to_cpu(h->dlen);
3276
3277                                 if (skb_tailroom(skb) < scb->expect) {
3278                                         kfree_skb(skb);
3279                                         hdev->reassembly[index] = NULL;
3280                                         return -ENOMEM;
3281                                 }
3282                         }
3283                         break;
3284
3285                 case HCI_SCODATA_PKT:
3286                         if (skb->len == HCI_SCO_HDR_SIZE) {
3287                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3288                                 scb->expect = h->dlen;
3289
3290                                 if (skb_tailroom(skb) < scb->expect) {
3291                                         kfree_skb(skb);
3292                                         hdev->reassembly[index] = NULL;
3293                                         return -ENOMEM;
3294                                 }
3295                         }
3296                         break;
3297                 }
3298
3299                 if (scb->expect == 0) {
3300                         /* Complete frame */
3301
3302                         bt_cb(skb)->pkt_type = type;
3303                         hci_recv_frame(hdev, skb);
3304
3305                         hdev->reassembly[index] = NULL;
3306                         return remain;
3307                 }
3308         }
3309
3310         return remain;
3311 }
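
/* Note the return convention above: negative values are errors, and a
 * non-negative return is the number of input bytes not yet consumed.
 * hci_recv_stream_fragment() below relies on this to advance its data
 * pointer and feed the leftover bytes back in on the next iteration.
 */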
3312
3313 #define STREAM_REASSEMBLY 0
3314
3315 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3316 {
3317         int type;
3318         int rem = 0;
3319
3320         while (count) {
3321                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3322
3323                 if (!skb) {
3324                         struct { char type; } *pkt;
3325
3326                         /* Start of the frame */
3327                         pkt = data;
3328                         type = pkt->type;
3329
3330                         data++;
3331                         count--;
3332                 } else
3333                         type = bt_cb(skb)->pkt_type;
3334
3335                 rem = hci_reassembly(hdev, type, data, count,
3336                                      STREAM_REASSEMBLY);
3337                 if (rem < 0)
3338                         return rem;
3339
3340                 data += (count - rem);
3341                 count = rem;
3342         }
3343
3344         return rem;
3345 }
3346 EXPORT_SYMBOL(hci_recv_stream_fragment);
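
/* Byte-stream transports such as an H:4 UART have no packet boundaries,
 * so their receive path simply forwards whatever arrived and lets the
 * core reassemble, e.g. (illustrative sketch, not compiled here):
 */
#if 0
static void my_uart_rx(struct hci_dev *hdev, const u8 *data, int count)
{
        /* Consumes the H:4 packet-type byte, buffers partial frames in
         * hdev->reassembly[] and delivers complete ones via
         * hci_recv_frame().
         */
        hci_recv_stream_fragment(hdev, (void *)data, count);
}
#endif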
3347
3348 /* ---- Interface to upper protocols ---- */
3349
3350 int hci_register_cb(struct hci_cb *cb)
3351 {
3352         BT_DBG("%p name %s", cb, cb->name);
3353
3354         write_lock(&hci_cb_list_lock);
3355         list_add(&cb->list, &hci_cb_list);
3356         write_unlock(&hci_cb_list_lock);
3357
3358         return 0;
3359 }
3360 EXPORT_SYMBOL(hci_register_cb);
3361
3362 int hci_unregister_cb(struct hci_cb *cb)
3363 {
3364         BT_DBG("%p name %s", cb, cb->name);
3365
3366         write_lock(&hci_cb_list_lock);
3367         list_del(&cb->list);
3368         write_unlock(&hci_cb_list_lock);
3369
3370         return 0;
3371 }
3372 EXPORT_SYMBOL(hci_unregister_cb);
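
/* An upper protocol hooks into HCI by registering a struct hci_cb; in
 * this version of the stack the callbacks are confirmation hooks such
 * as security_cfm (L2CAP and SCO are the in-tree users).  A sketch,
 * with my_proto_* as placeholders:
 */
#if 0
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        /* react to an authentication/encryption change on conn */
}

static struct hci_cb my_proto_cb = {
        .name           = "my_proto",
        .security_cfm   = my_security_cfm,
};

static int __init my_proto_init(void)
{
        return hci_register_cb(&my_proto_cb);
}

static void __exit my_proto_exit(void)
{
        hci_unregister_cb(&my_proto_cb);
}
#endif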
3373
3374 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3375 {
3376         int err;
3377
3378         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3379
3380         /* Time stamp */
3381         __net_timestamp(skb);
3382
3383         /* Send copy to monitor */
3384         hci_send_to_monitor(hdev, skb);
3385
3386         if (atomic_read(&hdev->promisc)) {
3387                 /* Send copy to the sockets */
3388                 hci_send_to_sock(hdev, skb);
3389         }
3390
3391         /* Get rid of the skb owner prior to sending to the driver. */
3392         skb_orphan(skb);
3393
3394         err = hdev->send(hdev, skb);
3395         if (err < 0) {
3396                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3397                 kfree_skb(skb);
3398         }
3399 }
3400
3401 bool hci_req_pending(struct hci_dev *hdev)
3402 {
3403         return (hdev->req_status == HCI_REQ_PEND);
3404 }
3405
3406 /* Send HCI command */
3407 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3408                  const void *param)
3409 {
3410         struct sk_buff *skb;
3411
3412         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3413
3414         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3415         if (!skb) {
3416                 BT_ERR("%s no memory for command", hdev->name);
3417                 return -ENOMEM;
3418         }
3419
3420         /* Stand-alone HCI commands must be flagged as
3421          * single-command requests.
3422          */
3423         bt_cb(skb)->req.start = true;
3424
3425         skb_queue_tail(&hdev->cmd_q, skb);
3426         queue_work(hdev->workqueue, &hdev->cmd_work);
3427
3428         return 0;
3429 }
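
/* Sending a stand-alone command, sketched (not compiled here): the
 * parameter block is copied into the skb by hci_prepare_cmd(), so a
 * stack variable is fine, and completion arrives asynchronously via
 * the matching Command Complete/Status event.
 */
#if 0
static int my_enable_page_scan(struct hci_dev *hdev)
{
        __u8 scan = SCAN_PAGE;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                            sizeof(scan), &scan);
}
#endif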
3430
3431 /* Get data from the previously sent command */
3432 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3433 {
3434         struct hci_command_hdr *hdr;
3435
3436         if (!hdev->sent_cmd)
3437                 return NULL;
3438
3439         hdr = (void *) hdev->sent_cmd->data;
3440
3441         if (hdr->opcode != cpu_to_le16(opcode))
3442                 return NULL;
3443
3444         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3445
3446         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3447 }
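
/* Event handlers use this to recover the parameters of the command
 * they are completing, as hci_event.c does for a Command Status of
 * HCI_OP_AUTH_REQUESTED (fragment shown for illustration):
 */
#if 0
        struct hci_cp_auth_requested *cp;

        cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
        if (!cp)
                return;         /* not the command that was last sent */

        /* cp->handle identifies the connection being authenticated */
#endif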
3448
3449 /* Send ACL data */
3450 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3451 {
3452         struct hci_acl_hdr *hdr;
3453         int len = skb->len;
3454
3455         skb_push(skb, HCI_ACL_HDR_SIZE);
3456         skb_reset_transport_header(skb);
3457         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3458         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3459         hdr->dlen   = cpu_to_le16(len);
3460 }
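
/* hci_handle_pack() folds the 12-bit connection handle and the 4-bit
 * packet boundary/broadcast flags into one 16-bit field.  For example,
 * handle 0x002a sent with ACL_START (0x02) packs to 0x202a, emitted
 * little-endian by the cpu_to_le16() above.
 */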
3461
3462 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3463                           struct sk_buff *skb, __u16 flags)
3464 {
3465         struct hci_conn *conn = chan->conn;
3466         struct hci_dev *hdev = conn->hdev;
3467         struct sk_buff *list;
3468
3469         skb->len = skb_headlen(skb);
3470         skb->data_len = 0;
3471
3472         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3473
3474         switch (hdev->dev_type) {
3475         case HCI_BREDR:
3476                 hci_add_acl_hdr(skb, conn->handle, flags);
3477                 break;
3478         case HCI_AMP:
3479                 hci_add_acl_hdr(skb, chan->handle, flags);
3480                 break;
3481         default:
3482                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3483                 return;
3484         }
3485
3486         list = skb_shinfo(skb)->frag_list;
3487         if (!list) {
3488                 /* Non-fragmented */
3489                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3490
3491                 skb_queue_tail(queue, skb);
3492         } else {
3493                 /* Fragmented */
3494                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3495
3496                 skb_shinfo(skb)->frag_list = NULL;
3497
3498                 /* Queue all fragments atomically. We need to use spin_lock_bh
3499                  * here because of 6LoWPAN links: there, this function is
3500                  * called from softirq context and a normal spin lock could cause
3501                  * deadlocks.
3502                  */
3503                 spin_lock_bh(&queue->lock);
3504
3505                 __skb_queue_tail(queue, skb);
3506
3507                 flags &= ~ACL_START;
3508                 flags |= ACL_CONT;
3509                 do {
3510                         skb = list; list = list->next;
3511
3512                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3513                         hci_add_acl_hdr(skb, conn->handle, flags);
3514
3515                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3516
3517                         __skb_queue_tail(queue, skb);
3518                 } while (list);
3519
3520                 spin_unlock_bh(&queue->lock);
3521         }
3522 }
3523
3524 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3525 {
3526         struct hci_dev *hdev = chan->conn->hdev;
3527
3528         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3529
3530         hci_queue_acl(chan, &chan->data_q, skb, flags);
3531
3532         queue_work(hdev->workqueue, &hdev->tx_work);
3533 }
3534
3535 /* Send SCO data */
3536 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3537 {
3538         struct hci_dev *hdev = conn->hdev;
3539         struct hci_sco_hdr hdr;
3540
3541         BT_DBG("%s len %d", hdev->name, skb->len);
3542
3543         hdr.handle = cpu_to_le16(conn->handle);
3544         hdr.dlen   = skb->len;
3545
3546         skb_push(skb, HCI_SCO_HDR_SIZE);
3547         skb_reset_transport_header(skb);
3548         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3549
3550         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3551
3552         skb_queue_tail(&conn->data_q, skb);
3553         queue_work(hdev->workqueue, &hdev->tx_work);
3554 }
3555
3556 /* ---- HCI TX task (outgoing data) ---- */
3557
3558 /* HCI Connection scheduler */
3559 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3560                                      int *quote)
3561 {
3562         struct hci_conn_hash *h = &hdev->conn_hash;
3563         struct hci_conn *conn = NULL, *c;
3564         unsigned int num = 0, min = ~0;
3565
3566         /* We don't have to lock the device here. Connections are always
3567          * added and removed with the TX task disabled. */
3568
3569         rcu_read_lock();
3570
3571         list_for_each_entry_rcu(c, &h->list, list) {
3572                 if (c->type != type || skb_queue_empty(&c->data_q))
3573                         continue;
3574
3575                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3576                         continue;
3577
3578                 num++;
3579
3580                 if (c->sent < min) {
3581                         min  = c->sent;
3582                         conn = c;
3583                 }
3584
3585                 if (hci_conn_num(hdev, type) == num)
3586                         break;
3587         }
3588
3589         rcu_read_unlock();
3590
3591         if (conn) {
3592                 int cnt, q;
3593
3594                 switch (conn->type) {
3595                 case ACL_LINK:
3596                         cnt = hdev->acl_cnt;
3597                         break;
3598                 case SCO_LINK:
3599                 case ESCO_LINK:
3600                         cnt = hdev->sco_cnt;
3601                         break;
3602                 case LE_LINK:
3603                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3604                         break;
3605                 default:
3606                         cnt = 0;
3607                         BT_ERR("Unknown link type");
3608                 }
3609
3610                 q = cnt / num;
3611                 *quote = q ? q : 1;
3612         } else
3613                 *quote = 0;
3614
3615         BT_DBG("conn %p quote %d", conn, *quote);
3616         return conn;
3617 }
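
/* The quota is a fair share of the free controller buffers: with, say,
 * hdev->acl_cnt == 5 free ACL buffers and num == 2 busy ACL connections,
 * each pick may send q = 5 / 2 = 2 packets, and the "q ? q : 1"
 * fallback keeps traffic moving even when cnt < num.
 */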
3618
3619 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3620 {
3621         struct hci_conn_hash *h = &hdev->conn_hash;
3622         struct hci_conn *c;
3623
3624         BT_ERR("%s link tx timeout", hdev->name);
3625
3626         rcu_read_lock();
3627
3628         /* Kill stalled connections */
3629         list_for_each_entry_rcu(c, &h->list, list) {
3630                 if (c->type == type && c->sent) {
3631                         BT_ERR("%s killing stalled connection %pMR",
3632                                hdev->name, &c->dst);
3633                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3634                 }
3635         }
3636
3637         rcu_read_unlock();
3638 }
3639
3640 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3641                                       int *quote)
3642 {
3643         struct hci_conn_hash *h = &hdev->conn_hash;
3644         struct hci_chan *chan = NULL;
3645         unsigned int num = 0, min = ~0, cur_prio = 0;
3646         struct hci_conn *conn;
3647         int cnt, q, conn_num = 0;
3648
3649         BT_DBG("%s", hdev->name);
3650
3651         rcu_read_lock();
3652
3653         list_for_each_entry_rcu(conn, &h->list, list) {
3654                 struct hci_chan *tmp;
3655
3656                 if (conn->type != type)
3657                         continue;
3658
3659                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3660                         continue;
3661
3662                 conn_num++;
3663
3664                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3665                         struct sk_buff *skb;
3666
3667                         if (skb_queue_empty(&tmp->data_q))
3668                                 continue;
3669
3670                         skb = skb_peek(&tmp->data_q);
3671                         if (skb->priority < cur_prio)
3672                                 continue;
3673
3674                         if (skb->priority > cur_prio) {
3675                                 num = 0;
3676                                 min = ~0;
3677                                 cur_prio = skb->priority;
3678                         }
3679
3680                         num++;
3681
3682                         if (conn->sent < min) {
3683                                 min  = conn->sent;
3684                                 chan = tmp;
3685                         }
3686                 }
3687
3688                 if (hci_conn_num(hdev, type) == conn_num)
3689                         break;
3690         }
3691
3692         rcu_read_unlock();
3693
3694         if (!chan)
3695                 return NULL;
3696
3697         switch (chan->conn->type) {
3698         case ACL_LINK:
3699                 cnt = hdev->acl_cnt;
3700                 break;
3701         case AMP_LINK:
3702                 cnt = hdev->block_cnt;
3703                 break;
3704         case SCO_LINK:
3705         case ESCO_LINK:
3706                 cnt = hdev->sco_cnt;
3707                 break;
3708         case LE_LINK:
3709                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3710                 break;
3711         default:
3712                 cnt = 0;
3713                 BT_ERR("Unknown link type");
3714         }
3715
3716         q = cnt / num;
3717         *quote = q ? q : 1;
3718         BT_DBG("chan %p quote %d", chan, *quote);
3719         return chan;
3720 }
3721
3722 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3723 {
3724         struct hci_conn_hash *h = &hdev->conn_hash;
3725         struct hci_conn *conn;
3726         int num = 0;
3727
3728         BT_DBG("%s", hdev->name);
3729
3730         rcu_read_lock();
3731
3732         list_for_each_entry_rcu(conn, &h->list, list) {
3733                 struct hci_chan *chan;
3734
3735                 if (conn->type != type)
3736                         continue;
3737
3738                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3739                         continue;
3740
3741                 num++;
3742
3743                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3744                         struct sk_buff *skb;
3745
3746                         if (chan->sent) {
3747                                 chan->sent = 0;
3748                                 continue;
3749                         }
3750
3751                         if (skb_queue_empty(&chan->data_q))
3752                                 continue;
3753
3754                         skb = skb_peek(&chan->data_q);
3755                         if (skb->priority >= HCI_PRIO_MAX - 1)
3756                                 continue;
3757
3758                         skb->priority = HCI_PRIO_MAX - 1;
3759
3760                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3761                                skb->priority);
3762                 }
3763
3764                 if (hci_conn_num(hdev, type) == num)
3765                         break;
3766         }
3767
3768         rcu_read_unlock();
3770 }
3771
3772 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3773 {
3774         /* Calculate count of blocks used by this packet */
3775         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3776 }
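
/* Worked example: with hdev->block_len == 384, an ACL frame of
 * skb->len == 1021 bytes occupies DIV_ROUND_UP(1021 - 4, 384) =
 * DIV_ROUND_UP(1017, 384) = 3 data blocks; the 4-byte ACL header is
 * not charged against the controller's block pool.
 */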
3777
3778 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3779 {
3780         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3781                 /* ACL tx timeout must be longer than the maximum
3782                  * link supervision timeout (40.9 seconds) */
3783                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3784                                        HCI_ACL_TX_TIMEOUT))
3785                         hci_link_tx_to(hdev, ACL_LINK);
3786         }
3787 }
3788
3789 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3790 {
3791         unsigned int cnt = hdev->acl_cnt;
3792         struct hci_chan *chan;
3793         struct sk_buff *skb;
3794         int quote;
3795
3796         __check_timeout(hdev, cnt);
3797
3798         while (hdev->acl_cnt &&
3799                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3800                 u32 priority = (skb_peek(&chan->data_q))->priority;
3801                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3802                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3803                                skb->len, skb->priority);
3804
3805                         /* Stop if priority has changed */
3806                         if (skb->priority < priority)
3807                                 break;
3808
3809                         skb = skb_dequeue(&chan->data_q);
3810
3811                         hci_conn_enter_active_mode(chan->conn,
3812                                                    bt_cb(skb)->force_active);
3813
3814                         hci_send_frame(hdev, skb);
3815                         hdev->acl_last_tx = jiffies;
3816
3817                         hdev->acl_cnt--;
3818                         chan->sent++;
3819                         chan->conn->sent++;
3820                 }
3821         }
3822
3823         if (cnt != hdev->acl_cnt)
3824                 hci_prio_recalculate(hdev, ACL_LINK);
3825 }
3826
3827 static void hci_sched_acl_blk(struct hci_dev *hdev)
3828 {
3829         unsigned int cnt = hdev->block_cnt;
3830         struct hci_chan *chan;
3831         struct sk_buff *skb;
3832         int quote;
3833         u8 type;
3834
3835         __check_timeout(hdev, cnt);
3836
3837         BT_DBG("%s", hdev->name);
3838
3839         if (hdev->dev_type == HCI_AMP)
3840                 type = AMP_LINK;
3841         else
3842                 type = ACL_LINK;
3843
3844         while (hdev->block_cnt > 0 &&
3845                (chan = hci_chan_sent(hdev, type, &quote))) {
3846                 u32 priority = (skb_peek(&chan->data_q))->priority;
3847                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3848                         int blocks;
3849
3850                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3851                                skb->len, skb->priority);
3852
3853                         /* Stop if priority has changed */
3854                         if (skb->priority < priority)
3855                                 break;
3856
3857                         skb = skb_dequeue(&chan->data_q);
3858
3859                         blocks = __get_blocks(hdev, skb);
3860                         if (blocks > hdev->block_cnt)
3861                                 return;
3862
3863                         hci_conn_enter_active_mode(chan->conn,
3864                                                    bt_cb(skb)->force_active);
3865
3866                         hci_send_frame(hdev, skb);
3867                         hdev->acl_last_tx = jiffies;
3868
3869                         hdev->block_cnt -= blocks;
3870                         quote -= blocks;
3871
3872                         chan->sent += blocks;
3873                         chan->conn->sent += blocks;
3874                 }
3875         }
3876
3877         if (cnt != hdev->block_cnt)
3878                 hci_prio_recalculate(hdev, type);
3879 }
3880
3881 static void hci_sched_acl(struct hci_dev *hdev)
3882 {
3883         BT_DBG("%s", hdev->name);
3884
3885         /* No ACL links on this BR/EDR controller, nothing to schedule */
3886         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3887                 return;
3888
3889         /* No AMP links on this AMP controller, nothing to schedule */
3890         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3891                 return;
3892
3893         switch (hdev->flow_ctl_mode) {
3894         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3895                 hci_sched_acl_pkt(hdev);
3896                 break;
3897
3898         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3899                 hci_sched_acl_blk(hdev);
3900                 break;
3901         }
3902 }
3903
3904 /* Schedule SCO */
3905 static void hci_sched_sco(struct hci_dev *hdev)
3906 {
3907         struct hci_conn *conn;
3908         struct sk_buff *skb;
3909         int quote;
3910
3911         BT_DBG("%s", hdev->name);
3912
3913         if (!hci_conn_num(hdev, SCO_LINK))
3914                 return;
3915
3916         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3917                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3918                         BT_DBG("skb %p len %d", skb, skb->len);
3919                         hci_send_frame(hdev, skb);
3920
3921                         conn->sent++;
3922                         if (conn->sent == ~0)
3923                                 conn->sent = 0;
3924                 }
3925         }
3926 }
3927
3928 static void hci_sched_esco(struct hci_dev *hdev)
3929 {
3930         struct hci_conn *conn;
3931         struct sk_buff *skb;
3932         int quote;
3933
3934         BT_DBG("%s", hdev->name);
3935
3936         if (!hci_conn_num(hdev, ESCO_LINK))
3937                 return;
3938
3939         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3940                                                      &quote))) {
3941                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3942                         BT_DBG("skb %p len %d", skb, skb->len);
3943                         hci_send_frame(hdev, skb);
3944
3945                         conn->sent++;
3946                         if (conn->sent == ~0)
3947                                 conn->sent = 0;
3948                 }
3949         }
3950 }
3951
3952 static void hci_sched_le(struct hci_dev *hdev)
3953 {
3954         struct hci_chan *chan;
3955         struct sk_buff *skb;
3956         int quote, cnt, tmp;
3957
3958         BT_DBG("%s", hdev->name);
3959
3960         if (!hci_conn_num(hdev, LE_LINK))
3961                 return;
3962
3963         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3964                 /* LE tx timeout must be longer than the maximum
3965                  * link supervision timeout (40.9 seconds) */
3966                 if (!hdev->le_cnt && hdev->le_pkts &&
3967                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3968                         hci_link_tx_to(hdev, LE_LINK);
3969         }
3970
3971         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3972         tmp = cnt;
3973         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3974                 u32 priority = (skb_peek(&chan->data_q))->priority;
3975                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3976                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3977                                skb->len, skb->priority);
3978
3979                         /* Stop if priority has changed */
3980                         if (skb->priority < priority)
3981                                 break;
3982
3983                         skb = skb_dequeue(&chan->data_q);
3984
3985                         hci_send_frame(hdev, skb);
3986                         hdev->le_last_tx = jiffies;
3987
3988                         cnt--;
3989                         chan->sent++;
3990                         chan->conn->sent++;
3991                 }
3992         }
3993
3994         if (hdev->le_pkts)
3995                 hdev->le_cnt = cnt;
3996         else
3997                 hdev->acl_cnt = cnt;
3998
3999         if (cnt != tmp)
4000                 hci_prio_recalculate(hdev, LE_LINK);
4001 }
4002
4003 static void hci_tx_work(struct work_struct *work)
4004 {
4005         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4006         struct sk_buff *skb;
4007
4008         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4009                hdev->sco_cnt, hdev->le_cnt);
4010
4011         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4012                 /* Schedule queues and send stuff to HCI driver */
4013                 hci_sched_acl(hdev);
4014                 hci_sched_sco(hdev);
4015                 hci_sched_esco(hdev);
4016                 hci_sched_le(hdev);
4017         }
4018
4019         /* Send next queued raw (unknown type) packet */
4020         while ((skb = skb_dequeue(&hdev->raw_q)))
4021                 hci_send_frame(hdev, skb);
4022 }
4023
4024 /* ----- HCI RX task (incoming data processing) ----- */
4025
4026 /* ACL data packet */
4027 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4028 {
4029         struct hci_acl_hdr *hdr = (void *) skb->data;
4030         struct hci_conn *conn;
4031         __u16 handle, flags;
4032
4033         skb_pull(skb, HCI_ACL_HDR_SIZE);
4034
4035         handle = __le16_to_cpu(hdr->handle);
4036         flags  = hci_flags(handle);
4037         handle = hci_handle(handle);
4038
4039         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4040                handle, flags);
4041
4042         hdev->stat.acl_rx++;
4043
4044         hci_dev_lock(hdev);
4045         conn = hci_conn_hash_lookup_handle(hdev, handle);
4046         hci_dev_unlock(hdev);
4047
4048         if (conn) {
4049                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4050
4051                 /* Send to upper protocol */
4052                 l2cap_recv_acldata(conn, skb, flags);
4053                 return;
4054         } else {
4055                 BT_ERR("%s ACL packet for unknown connection handle %d",
4056                        hdev->name, handle);
4057         }
4058
4059         kfree_skb(skb);
4060 }
4061
4062 /* SCO data packet */
4063 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4064 {
4065         struct hci_sco_hdr *hdr = (void *) skb->data;
4066         struct hci_conn *conn;
4067         __u16 handle;
4068
4069         skb_pull(skb, HCI_SCO_HDR_SIZE);
4070
4071         handle = __le16_to_cpu(hdr->handle);
4072
4073         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4074
4075         hdev->stat.sco_rx++;
4076
4077         hci_dev_lock(hdev);
4078         conn = hci_conn_hash_lookup_handle(hdev, handle);
4079         hci_dev_unlock(hdev);
4080
4081         if (conn) {
4082                 /* Send to upper protocol */
4083                 sco_recv_scodata(conn, skb);
4084                 return;
4085         } else {
4086                 BT_ERR("%s SCO packet for unknown connection handle %d",
4087                        hdev->name, handle);
4088         }
4089
4090         kfree_skb(skb);
4091 }
4092
4093 static bool hci_req_is_complete(struct hci_dev *hdev)
4094 {
4095         struct sk_buff *skb;
4096
4097         skb = skb_peek(&hdev->cmd_q);
4098         if (!skb)
4099                 return true;
4100
4101         return bt_cb(skb)->req.start;
4102 }
4103
4104 static void hci_resend_last(struct hci_dev *hdev)
4105 {
4106         struct hci_command_hdr *sent;
4107         struct sk_buff *skb;
4108         u16 opcode;
4109
4110         if (!hdev->sent_cmd)
4111                 return;
4112
4113         sent = (void *) hdev->sent_cmd->data;
4114         opcode = __le16_to_cpu(sent->opcode);
4115         if (opcode == HCI_OP_RESET)
4116                 return;
4117
4118         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4119         if (!skb)
4120                 return;
4121
4122         skb_queue_head(&hdev->cmd_q, skb);
4123         queue_work(hdev->workqueue, &hdev->cmd_work);
4124 }
4125
4126 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4127 {
4128         hci_req_complete_t req_complete = NULL;
4129         struct sk_buff *skb;
4130         unsigned long flags;
4131
4132         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4133
4134         /* If the completed command doesn't match the last one that was
4135          * sent, we need to do special handling of it.
4136          */
4137         if (!hci_sent_cmd_data(hdev, opcode)) {
4138                 /* Some CSR based controllers generate a spontaneous
4139                  * reset complete event during init and any pending
4140                  * command will never be completed. In such a case we
4141                  * need to resend whatever was the last sent
4142                  * command.
4143                  */
4144                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4145                         hci_resend_last(hdev);
4146
4147                 return;
4148         }
4149
4150         /* If the command succeeded and there are still more commands in
4151          * this request, the request is not yet complete.
4152          */
4153         if (!status && !hci_req_is_complete(hdev))
4154                 return;
4155
4156         /* If this was the last command in a request, the complete
4157          * callback would be found in hdev->sent_cmd instead of the
4158          * command queue (hdev->cmd_q).
4159          */
4160         if (hdev->sent_cmd) {
4161                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4162
4163                 if (req_complete) {
4164                         /* We must set the complete callback to NULL to
4165                          * avoid calling the callback more than once if
4166                          * this function gets called again.
4167                          */
4168                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4169
4170                         goto call_complete;
4171                 }
4172         }
4173
4174         /* Remove all pending commands belonging to this request */
4175         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4176         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4177                 if (bt_cb(skb)->req.start) {
4178                         __skb_queue_head(&hdev->cmd_q, skb);
4179                         break;
4180                 }
4181
4182                 req_complete = bt_cb(skb)->req.complete;
4183                 kfree_skb(skb);
4184         }
4185         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4186
4187 call_complete:
4188         if (req_complete)
4189                 req_complete(hdev, status);
4190 }
4191
4192 static void hci_rx_work(struct work_struct *work)
4193 {
4194         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4195         struct sk_buff *skb;
4196
4197         BT_DBG("%s", hdev->name);
4198
4199         while ((skb = skb_dequeue(&hdev->rx_q))) {
4200                 /* Send copy to monitor */
4201                 hci_send_to_monitor(hdev, skb);
4202
4203                 if (atomic_read(&hdev->promisc)) {
4204                         /* Send copy to the sockets */
4205                         hci_send_to_sock(hdev, skb);
4206                 }
4207
4208                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4209                         kfree_skb(skb);
4210                         continue;
4211                 }
4212
4213                 if (test_bit(HCI_INIT, &hdev->flags)) {
4214                         /* Don't process data packets in this state. */
4215                         switch (bt_cb(skb)->pkt_type) {
4216                         case HCI_ACLDATA_PKT:
4217                         case HCI_SCODATA_PKT:
4218                                 kfree_skb(skb);
4219                                 continue;
4220                         }
4221                 }
4222
4223                 /* Process frame */
4224                 switch (bt_cb(skb)->pkt_type) {
4225                 case HCI_EVENT_PKT:
4226                         BT_DBG("%s Event packet", hdev->name);
4227                         hci_event_packet(hdev, skb);
4228                         break;
4229
4230                 case HCI_ACLDATA_PKT:
4231                         BT_DBG("%s ACL data packet", hdev->name);
4232                         hci_acldata_packet(hdev, skb);
4233                         break;
4234
4235                 case HCI_SCODATA_PKT:
4236                         BT_DBG("%s SCO data packet", hdev->name);
4237                         hci_scodata_packet(hdev, skb);
4238                         break;
4239
4240                 default:
4241                         kfree_skb(skb);
4242                         break;
4243                 }
4244         }
4245 }
4246
4247 static void hci_cmd_work(struct work_struct *work)
4248 {
4249         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4250         struct sk_buff *skb;
4251
4252         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4253                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4254
4255         /* Send queued commands */
4256         if (atomic_read(&hdev->cmd_cnt)) {
4257                 skb = skb_dequeue(&hdev->cmd_q);
4258                 if (!skb)
4259                         return;
4260
4261                 kfree_skb(hdev->sent_cmd);
4262
4263                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4264                 if (hdev->sent_cmd) {
4265                         atomic_dec(&hdev->cmd_cnt);
4266                         hci_send_frame(hdev, skb);
4267                         if (test_bit(HCI_RESET, &hdev->flags))
4268                                 cancel_delayed_work(&hdev->cmd_timer);
4269                         else
4270                                 schedule_delayed_work(&hdev->cmd_timer,
4271                                                       HCI_CMD_TIMEOUT);
4272                 } else {
4273                         skb_queue_head(&hdev->cmd_q, skb);
4274                         queue_work(hdev->workqueue, &hdev->cmd_work);
4275                 }
4276         }
4277 }