Merge tag 'iwlwifi-next-for-kalle-2014-12-30' of https://git.kernel.org/pub/scm/linux...
[firefly-linux-kernel-4.4.55.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_RWLOCK(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE      0
62 #define HCI_REQ_PEND      1
63 #define HCI_REQ_CANCELED  2
64
65 #define hci_req_lock(d)         mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72         hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI debugfs entries ---- */
76
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78                              size_t count, loff_t *ppos)
79 {
80         struct hci_dev *hdev = file->private_data;
81         char buf[3];
82
83         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
84         buf[1] = '\n';
85         buf[2] = '\0';
86         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90                               size_t count, loff_t *ppos)
91 {
92         struct hci_dev *hdev = file->private_data;
93         struct sk_buff *skb;
94         char buf[32];
95         size_t buf_size = min(count, (sizeof(buf)-1));
96         bool enable;
97         int err;
98
99         if (!test_bit(HCI_UP, &hdev->flags))
100                 return -ENETDOWN;
101
102         if (copy_from_user(buf, user_buf, buf_size))
103                 return -EFAULT;
104
105         buf[buf_size] = '\0';
106         if (strtobool(buf, &enable))
107                 return -EINVAL;
108
109         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
110                 return -EALREADY;
111
112         hci_req_lock(hdev);
113         if (enable)
114                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
115                                      HCI_CMD_TIMEOUT);
116         else
117                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
118                                      HCI_CMD_TIMEOUT);
119         hci_req_unlock(hdev);
120
121         if (IS_ERR(skb))
122                 return PTR_ERR(skb);
123
124         err = -bt_to_errno(skb->data[0]);
125         kfree_skb(skb);
126
127         if (err < 0)
128                 return err;
129
130         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
131
132         return count;
133 }
134
135 static const struct file_operations dut_mode_fops = {
136         .open           = simple_open,
137         .read           = dut_mode_read,
138         .write          = dut_mode_write,
139         .llseek         = default_llseek,
140 };
141
142 /* ---- HCI requests ---- */
143
144 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
145 {
146         BT_DBG("%s result 0x%2.2x", hdev->name, result);
147
148         if (hdev->req_status == HCI_REQ_PEND) {
149                 hdev->req_result = result;
150                 hdev->req_status = HCI_REQ_DONE;
151                 wake_up_interruptible(&hdev->req_wait_q);
152         }
153 }
154
155 static void hci_req_cancel(struct hci_dev *hdev, int err)
156 {
157         BT_DBG("%s err 0x%2.2x", hdev->name, err);
158
159         if (hdev->req_status == HCI_REQ_PEND) {
160                 hdev->req_result = err;
161                 hdev->req_status = HCI_REQ_CANCELED;
162                 wake_up_interruptible(&hdev->req_wait_q);
163         }
164 }
165
166 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
167                                             u8 event)
168 {
169         struct hci_ev_cmd_complete *ev;
170         struct hci_event_hdr *hdr;
171         struct sk_buff *skb;
172
173         hci_dev_lock(hdev);
174
175         skb = hdev->recv_evt;
176         hdev->recv_evt = NULL;
177
178         hci_dev_unlock(hdev);
179
180         if (!skb)
181                 return ERR_PTR(-ENODATA);
182
183         if (skb->len < sizeof(*hdr)) {
184                 BT_ERR("Too short HCI event");
185                 goto failed;
186         }
187
188         hdr = (void *) skb->data;
189         skb_pull(skb, HCI_EVENT_HDR_SIZE);
190
191         if (event) {
192                 if (hdr->evt != event)
193                         goto failed;
194                 return skb;
195         }
196
197         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
198                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
199                 goto failed;
200         }
201
202         if (skb->len < sizeof(*ev)) {
203                 BT_ERR("Too short cmd_complete event");
204                 goto failed;
205         }
206
207         ev = (void *) skb->data;
208         skb_pull(skb, sizeof(*ev));
209
210         if (opcode == __le16_to_cpu(ev->opcode))
211                 return skb;
212
213         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
214                __le16_to_cpu(ev->opcode));
215
216 failed:
217         kfree_skb(skb);
218         return ERR_PTR(-ENODATA);
219 }
220
221 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
222                                   const void *param, u8 event, u32 timeout)
223 {
224         DECLARE_WAITQUEUE(wait, current);
225         struct hci_request req;
226         int err = 0;
227
228         BT_DBG("%s", hdev->name);
229
230         hci_req_init(&req, hdev);
231
232         hci_req_add_ev(&req, opcode, plen, param, event);
233
234         hdev->req_status = HCI_REQ_PEND;
235
236         add_wait_queue(&hdev->req_wait_q, &wait);
237         set_current_state(TASK_INTERRUPTIBLE);
238
239         err = hci_req_run(&req, hci_req_sync_complete);
240         if (err < 0) {
241                 remove_wait_queue(&hdev->req_wait_q, &wait);
242                 set_current_state(TASK_RUNNING);
243                 return ERR_PTR(err);
244         }
245
246         schedule_timeout(timeout);
247
248         remove_wait_queue(&hdev->req_wait_q, &wait);
249
250         if (signal_pending(current))
251                 return ERR_PTR(-EINTR);
252
253         switch (hdev->req_status) {
254         case HCI_REQ_DONE:
255                 err = -bt_to_errno(hdev->req_result);
256                 break;
257
258         case HCI_REQ_CANCELED:
259                 err = -hdev->req_result;
260                 break;
261
262         default:
263                 err = -ETIMEDOUT;
264                 break;
265         }
266
267         hdev->req_status = hdev->req_result = 0;
268
269         BT_DBG("%s end: err %d", hdev->name, err);
270
271         if (err < 0)
272                 return ERR_PTR(err);
273
274         return hci_get_cmd_complete(hdev, opcode, event);
275 }
276 EXPORT_SYMBOL(__hci_cmd_sync_ev);
277
278 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
279                                const void *param, u32 timeout)
280 {
281         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
282 }
283 EXPORT_SYMBOL(__hci_cmd_sync);
284
285 /* Execute request and wait for completion. */
286 static int __hci_req_sync(struct hci_dev *hdev,
287                           void (*func)(struct hci_request *req,
288                                       unsigned long opt),
289                           unsigned long opt, __u32 timeout)
290 {
291         struct hci_request req;
292         DECLARE_WAITQUEUE(wait, current);
293         int err = 0;
294
295         BT_DBG("%s start", hdev->name);
296
297         hci_req_init(&req, hdev);
298
299         hdev->req_status = HCI_REQ_PEND;
300
301         func(&req, opt);
302
303         add_wait_queue(&hdev->req_wait_q, &wait);
304         set_current_state(TASK_INTERRUPTIBLE);
305
306         err = hci_req_run(&req, hci_req_sync_complete);
307         if (err < 0) {
308                 hdev->req_status = 0;
309
310                 remove_wait_queue(&hdev->req_wait_q, &wait);
311                 set_current_state(TASK_RUNNING);
312
313                 /* ENODATA means the HCI request command queue is empty.
314                  * This can happen when a request with conditionals doesn't
315                  * trigger any commands to be sent. This is normal behavior
316                  * and should not trigger an error return.
317                  */
318                 if (err == -ENODATA)
319                         return 0;
320
321                 return err;
322         }
323
324         schedule_timeout(timeout);
325
326         remove_wait_queue(&hdev->req_wait_q, &wait);
327
328         if (signal_pending(current))
329                 return -EINTR;
330
331         switch (hdev->req_status) {
332         case HCI_REQ_DONE:
333                 err = -bt_to_errno(hdev->req_result);
334                 break;
335
336         case HCI_REQ_CANCELED:
337                 err = -hdev->req_result;
338                 break;
339
340         default:
341                 err = -ETIMEDOUT;
342                 break;
343         }
344
345         hdev->req_status = hdev->req_result = 0;
346
347         BT_DBG("%s end: err %d", hdev->name, err);
348
349         return err;
350 }
351
352 static int hci_req_sync(struct hci_dev *hdev,
353                         void (*req)(struct hci_request *req,
354                                     unsigned long opt),
355                         unsigned long opt, __u32 timeout)
356 {
357         int ret;
358
359         if (!test_bit(HCI_UP, &hdev->flags))
360                 return -ENETDOWN;
361
362         /* Serialize all requests */
363         hci_req_lock(hdev);
364         ret = __hci_req_sync(hdev, req, opt, timeout);
365         hci_req_unlock(hdev);
366
367         return ret;
368 }
369
370 static void hci_reset_req(struct hci_request *req, unsigned long opt)
371 {
372         BT_DBG("%s %ld", req->hdev->name, opt);
373
374         /* Reset device */
375         set_bit(HCI_RESET, &req->hdev->flags);
376         hci_req_add(req, HCI_OP_RESET, 0, NULL);
377 }
378
379 static void bredr_init(struct hci_request *req)
380 {
381         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
382
383         /* Read Local Supported Features */
384         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
385
386         /* Read Local Version */
387         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
388
389         /* Read BD Address */
390         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
391 }
392
393 static void amp_init(struct hci_request *req)
394 {
395         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
396
397         /* Read Local Version */
398         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
399
400         /* Read Local Supported Commands */
401         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
402
403         /* Read Local Supported Features */
404         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
405
406         /* Read Local AMP Info */
407         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
408
409         /* Read Data Blk size */
410         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
411
412         /* Read Flow Control Mode */
413         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
414
415         /* Read Location Data */
416         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
417 }
418
419 static void hci_init1_req(struct hci_request *req, unsigned long opt)
420 {
421         struct hci_dev *hdev = req->hdev;
422
423         BT_DBG("%s %ld", hdev->name, opt);
424
425         /* Reset */
426         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
427                 hci_reset_req(req, 0);
428
429         switch (hdev->dev_type) {
430         case HCI_BREDR:
431                 bredr_init(req);
432                 break;
433
434         case HCI_AMP:
435                 amp_init(req);
436                 break;
437
438         default:
439                 BT_ERR("Unknown device type %d", hdev->dev_type);
440                 break;
441         }
442 }
443
444 static void bredr_setup(struct hci_request *req)
445 {
446         __le16 param;
447         __u8 flt_type;
448
449         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
450         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
451
452         /* Read Class of Device */
453         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
454
455         /* Read Local Name */
456         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
457
458         /* Read Voice Setting */
459         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
460
461         /* Read Number of Supported IAC */
462         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
463
464         /* Read Current IAC LAP */
465         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
466
467         /* Clear Event Filters */
468         flt_type = HCI_FLT_CLEAR_ALL;
469         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
470
471         /* Connection accept timeout ~20 secs */
472         param = cpu_to_le16(0x7d00);
473         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
474 }
475
476 static void le_setup(struct hci_request *req)
477 {
478         struct hci_dev *hdev = req->hdev;
479
480         /* Read LE Buffer Size */
481         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
482
483         /* Read LE Local Supported Features */
484         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
485
486         /* Read LE Supported States */
487         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
488
489         /* Read LE White List Size */
490         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
491
492         /* Clear LE White List */
493         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
494
495         /* LE-only controllers have LE implicitly enabled */
496         if (!lmp_bredr_capable(hdev))
497                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
498 }
499
500 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
501 {
502         if (lmp_ext_inq_capable(hdev))
503                 return 0x02;
504
505         if (lmp_inq_rssi_capable(hdev))
506                 return 0x01;
507
508         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
509             hdev->lmp_subver == 0x0757)
510                 return 0x01;
511
512         if (hdev->manufacturer == 15) {
513                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
514                         return 0x01;
515                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
516                         return 0x01;
517                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
518                         return 0x01;
519         }
520
521         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
522             hdev->lmp_subver == 0x1805)
523                 return 0x01;
524
525         return 0x00;
526 }
527
528 static void hci_setup_inquiry_mode(struct hci_request *req)
529 {
530         u8 mode;
531
532         mode = hci_get_inquiry_mode(req->hdev);
533
534         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
535 }
536
537 static void hci_setup_event_mask(struct hci_request *req)
538 {
539         struct hci_dev *hdev = req->hdev;
540
541         /* The second byte is 0xff instead of 0x9f (two reserved bits
542          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
543          * command otherwise.
544          */
545         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
546
547         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
548          * any event mask for pre 1.2 devices.
549          */
550         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
551                 return;
552
553         if (lmp_bredr_capable(hdev)) {
554                 events[4] |= 0x01; /* Flow Specification Complete */
555                 events[4] |= 0x02; /* Inquiry Result with RSSI */
556                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
557                 events[5] |= 0x08; /* Synchronous Connection Complete */
558                 events[5] |= 0x10; /* Synchronous Connection Changed */
559         } else {
560                 /* Use a different default for LE-only devices */
561                 memset(events, 0, sizeof(events));
562                 events[0] |= 0x10; /* Disconnection Complete */
563                 events[1] |= 0x08; /* Read Remote Version Information Complete */
564                 events[1] |= 0x20; /* Command Complete */
565                 events[1] |= 0x40; /* Command Status */
566                 events[1] |= 0x80; /* Hardware Error */
567                 events[2] |= 0x04; /* Number of Completed Packets */
568                 events[3] |= 0x02; /* Data Buffer Overflow */
569
570                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
571                         events[0] |= 0x80; /* Encryption Change */
572                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
573                 }
574         }
575
576         if (lmp_inq_rssi_capable(hdev))
577                 events[4] |= 0x02; /* Inquiry Result with RSSI */
578
579         if (lmp_sniffsubr_capable(hdev))
580                 events[5] |= 0x20; /* Sniff Subrating */
581
582         if (lmp_pause_enc_capable(hdev))
583                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
584
585         if (lmp_ext_inq_capable(hdev))
586                 events[5] |= 0x40; /* Extended Inquiry Result */
587
588         if (lmp_no_flush_capable(hdev))
589                 events[7] |= 0x01; /* Enhanced Flush Complete */
590
591         if (lmp_lsto_capable(hdev))
592                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
593
594         if (lmp_ssp_capable(hdev)) {
595                 events[6] |= 0x01;      /* IO Capability Request */
596                 events[6] |= 0x02;      /* IO Capability Response */
597                 events[6] |= 0x04;      /* User Confirmation Request */
598                 events[6] |= 0x08;      /* User Passkey Request */
599                 events[6] |= 0x10;      /* Remote OOB Data Request */
600                 events[6] |= 0x20;      /* Simple Pairing Complete */
601                 events[7] |= 0x04;      /* User Passkey Notification */
602                 events[7] |= 0x08;      /* Keypress Notification */
603                 events[7] |= 0x10;      /* Remote Host Supported
604                                          * Features Notification
605                                          */
606         }
607
608         if (lmp_le_capable(hdev))
609                 events[7] |= 0x20;      /* LE Meta-Event */
610
611         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
612 }
613
614 static void hci_init2_req(struct hci_request *req, unsigned long opt)
615 {
616         struct hci_dev *hdev = req->hdev;
617
618         if (lmp_bredr_capable(hdev))
619                 bredr_setup(req);
620         else
621                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
622
623         if (lmp_le_capable(hdev))
624                 le_setup(req);
625
626         /* All Bluetooth 1.2 and later controllers should support the
627          * HCI command for reading the local supported commands.
628          *
629          * Unfortunately some controllers indicate Bluetooth 1.2 support,
630          * but do not have support for this command. If that is the case,
631          * the driver can quirk the behavior and skip reading the local
632          * supported commands.
633          */
634         if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
635             !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
636                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
637
638         if (lmp_ssp_capable(hdev)) {
639                 /* When SSP is available, then the host features page
640                  * should also be available as well. However some
641                  * controllers list the max_page as 0 as long as SSP
642                  * has not been enabled. To achieve proper debugging
643                  * output, force the minimum max_page to 1 at least.
644                  */
645                 hdev->max_page = 0x01;
646
647                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
648                         u8 mode = 0x01;
649                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
650                                     sizeof(mode), &mode);
651                 } else {
652                         struct hci_cp_write_eir cp;
653
654                         memset(hdev->eir, 0, sizeof(hdev->eir));
655                         memset(&cp, 0, sizeof(cp));
656
657                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
658                 }
659         }
660
661         if (lmp_inq_rssi_capable(hdev))
662                 hci_setup_inquiry_mode(req);
663
664         if (lmp_inq_tx_pwr_capable(hdev))
665                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
666
667         if (lmp_ext_feat_capable(hdev)) {
668                 struct hci_cp_read_local_ext_features cp;
669
670                 cp.page = 0x01;
671                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
672                             sizeof(cp), &cp);
673         }
674
675         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
676                 u8 enable = 1;
677                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
678                             &enable);
679         }
680 }
681
682 static void hci_setup_link_policy(struct hci_request *req)
683 {
684         struct hci_dev *hdev = req->hdev;
685         struct hci_cp_write_def_link_policy cp;
686         u16 link_policy = 0;
687
688         if (lmp_rswitch_capable(hdev))
689                 link_policy |= HCI_LP_RSWITCH;
690         if (lmp_hold_capable(hdev))
691                 link_policy |= HCI_LP_HOLD;
692         if (lmp_sniff_capable(hdev))
693                 link_policy |= HCI_LP_SNIFF;
694         if (lmp_park_capable(hdev))
695                 link_policy |= HCI_LP_PARK;
696
697         cp.policy = cpu_to_le16(link_policy);
698         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
699 }
700
701 static void hci_set_le_support(struct hci_request *req)
702 {
703         struct hci_dev *hdev = req->hdev;
704         struct hci_cp_write_le_host_supported cp;
705
706         /* LE-only devices do not support explicit enablement */
707         if (!lmp_bredr_capable(hdev))
708                 return;
709
710         memset(&cp, 0, sizeof(cp));
711
712         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
713                 cp.le = 0x01;
714                 cp.simul = 0x00;
715         }
716
717         if (cp.le != lmp_host_le_capable(hdev))
718                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
719                             &cp);
720 }
721
722 static void hci_set_event_mask_page_2(struct hci_request *req)
723 {
724         struct hci_dev *hdev = req->hdev;
725         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
726
727         /* If Connectionless Slave Broadcast master role is supported
728          * enable all necessary events for it.
729          */
730         if (lmp_csb_master_capable(hdev)) {
731                 events[1] |= 0x40;      /* Triggered Clock Capture */
732                 events[1] |= 0x80;      /* Synchronization Train Complete */
733                 events[2] |= 0x10;      /* Slave Page Response Timeout */
734                 events[2] |= 0x20;      /* CSB Channel Map Change */
735         }
736
737         /* If Connectionless Slave Broadcast slave role is supported
738          * enable all necessary events for it.
739          */
740         if (lmp_csb_slave_capable(hdev)) {
741                 events[2] |= 0x01;      /* Synchronization Train Received */
742                 events[2] |= 0x02;      /* CSB Receive */
743                 events[2] |= 0x04;      /* CSB Timeout */
744                 events[2] |= 0x08;      /* Truncated Page Complete */
745         }
746
747         /* Enable Authenticated Payload Timeout Expired event if supported */
748         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
749                 events[2] |= 0x80;
750
751         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
752 }
753
754 static void hci_init3_req(struct hci_request *req, unsigned long opt)
755 {
756         struct hci_dev *hdev = req->hdev;
757         u8 p;
758
759         hci_setup_event_mask(req);
760
761         /* Some Broadcom based Bluetooth controllers do not support the
762          * Delete Stored Link Key command. They are clearly indicating its
763          * absence in the bit mask of supported commands.
764          *
765          * Check the supported commands and only if the the command is marked
766          * as supported send it. If not supported assume that the controller
767          * does not have actual support for stored link keys which makes this
768          * command redundant anyway.
769          *
770          * Some controllers indicate that they support handling deleting
771          * stored link keys, but they don't. The quirk lets a driver
772          * just disable this command.
773          */
774         if (hdev->commands[6] & 0x80 &&
775             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
776                 struct hci_cp_delete_stored_link_key cp;
777
778                 bacpy(&cp.bdaddr, BDADDR_ANY);
779                 cp.delete_all = 0x01;
780                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
781                             sizeof(cp), &cp);
782         }
783
784         if (hdev->commands[5] & 0x10)
785                 hci_setup_link_policy(req);
786
787         if (hdev->commands[8] & 0x01)
788                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
789
790         /* Some older Broadcom based Bluetooth 1.2 controllers do not
791          * support the Read Page Scan Type command. Check support for
792          * this command in the bit mask of supported commands.
793          */
794         if (hdev->commands[13] & 0x01)
795                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
796
797         if (lmp_le_capable(hdev)) {
798                 u8 events[8];
799
800                 memset(events, 0, sizeof(events));
801                 events[0] = 0x0f;
802
803                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
804                         events[0] |= 0x10;      /* LE Long Term Key Request */
805
806                 /* If controller supports the Connection Parameters Request
807                  * Link Layer Procedure, enable the corresponding event.
808                  */
809                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
810                         events[0] |= 0x20;      /* LE Remote Connection
811                                                  * Parameter Request
812                                                  */
813
814                 /* If the controller supports the Data Length Extension
815                  * feature, enable the corresponding event.
816                  */
817                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
818                         events[0] |= 0x40;      /* LE Data Length Change */
819
820                 /* If the controller supports Extended Scanner Filter
821                  * Policies, enable the correspondig event.
822                  */
823                 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
824                         events[1] |= 0x04;      /* LE Direct Advertising
825                                                  * Report
826                                                  */
827
828                 /* If the controller supports the LE Read Local P-256
829                  * Public Key command, enable the corresponding event.
830                  */
831                 if (hdev->commands[34] & 0x02)
832                         events[0] |= 0x80;      /* LE Read Local P-256
833                                                  * Public Key Complete
834                                                  */
835
836                 /* If the controller supports the LE Generate DHKey
837                  * command, enable the corresponding event.
838                  */
839                 if (hdev->commands[34] & 0x04)
840                         events[1] |= 0x01;      /* LE Generate DHKey Complete */
841
842                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
843                             events);
844
845                 if (hdev->commands[25] & 0x40) {
846                         /* Read LE Advertising Channel TX Power */
847                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
848                 }
849
850                 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
851                         /* Read LE Maximum Data Length */
852                         hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
853
854                         /* Read LE Suggested Default Data Length */
855                         hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
856                 }
857
858                 hci_set_le_support(req);
859         }
860
861         /* Read features beyond page 1 if available */
862         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
863                 struct hci_cp_read_local_ext_features cp;
864
865                 cp.page = p;
866                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
867                             sizeof(cp), &cp);
868         }
869 }
870
871 static void hci_init4_req(struct hci_request *req, unsigned long opt)
872 {
873         struct hci_dev *hdev = req->hdev;
874
875         /* Set event mask page 2 if the HCI command for it is supported */
876         if (hdev->commands[22] & 0x04)
877                 hci_set_event_mask_page_2(req);
878
879         /* Read local codec list if the HCI command is supported */
880         if (hdev->commands[29] & 0x20)
881                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
882
883         /* Get MWS transport configuration if the HCI command is supported */
884         if (hdev->commands[30] & 0x08)
885                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
886
887         /* Check for Synchronization Train support */
888         if (lmp_sync_train_capable(hdev))
889                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
890
891         /* Enable Secure Connections if supported and configured */
892         if (bredr_sc_enabled(hdev)) {
893                 u8 support = 0x01;
894                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
895                             sizeof(support), &support);
896         }
897 }
898
899 static int __hci_init(struct hci_dev *hdev)
900 {
901         int err;
902
903         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
904         if (err < 0)
905                 return err;
906
907         /* The Device Under Test (DUT) mode is special and available for
908          * all controller types. So just create it early on.
909          */
910         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
911                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
912                                     &dut_mode_fops);
913         }
914
915         /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
916          * BR/EDR/LE type controllers. AMP controllers only need the
917          * first stage init.
918          */
919         if (hdev->dev_type != HCI_BREDR)
920                 return 0;
921
922         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
923         if (err < 0)
924                 return err;
925
926         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
927         if (err < 0)
928                 return err;
929
930         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
931         if (err < 0)
932                 return err;
933
934         /* Only create debugfs entries during the initial setup
935          * phase and not every time the controller gets powered on.
936          */
937         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
938                 return 0;
939
940         hci_debugfs_create_common(hdev);
941
942         if (lmp_bredr_capable(hdev))
943                 hci_debugfs_create_bredr(hdev);
944
945         if (lmp_le_capable(hdev)) {
946                 hci_debugfs_create_le(hdev);
947                 smp_register(hdev);
948         }
949
950         return 0;
951 }
952
953 static void hci_init0_req(struct hci_request *req, unsigned long opt)
954 {
955         struct hci_dev *hdev = req->hdev;
956
957         BT_DBG("%s %ld", hdev->name, opt);
958
959         /* Reset */
960         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
961                 hci_reset_req(req, 0);
962
963         /* Read Local Version */
964         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
965
966         /* Read BD Address */
967         if (hdev->set_bdaddr)
968                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
969 }
970
971 static int __hci_unconf_init(struct hci_dev *hdev)
972 {
973         int err;
974
975         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
976                 return 0;
977
978         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
979         if (err < 0)
980                 return err;
981
982         return 0;
983 }
984
985 static void hci_scan_req(struct hci_request *req, unsigned long opt)
986 {
987         __u8 scan = opt;
988
989         BT_DBG("%s %x", req->hdev->name, scan);
990
991         /* Inquiry and Page scans */
992         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
993 }
994
995 static void hci_auth_req(struct hci_request *req, unsigned long opt)
996 {
997         __u8 auth = opt;
998
999         BT_DBG("%s %x", req->hdev->name, auth);
1000
1001         /* Authentication */
1002         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1003 }
1004
1005 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1006 {
1007         __u8 encrypt = opt;
1008
1009         BT_DBG("%s %x", req->hdev->name, encrypt);
1010
1011         /* Encryption */
1012         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1013 }
1014
1015 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1016 {
1017         __le16 policy = cpu_to_le16(opt);
1018
1019         BT_DBG("%s %x", req->hdev->name, policy);
1020
1021         /* Default link policy */
1022         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1023 }
1024
1025 /* Get HCI device by index.
1026  * Device is held on return. */
1027 struct hci_dev *hci_dev_get(int index)
1028 {
1029         struct hci_dev *hdev = NULL, *d;
1030
1031         BT_DBG("%d", index);
1032
1033         if (index < 0)
1034                 return NULL;
1035
1036         read_lock(&hci_dev_list_lock);
1037         list_for_each_entry(d, &hci_dev_list, list) {
1038                 if (d->id == index) {
1039                         hdev = hci_dev_hold(d);
1040                         break;
1041                 }
1042         }
1043         read_unlock(&hci_dev_list_lock);
1044         return hdev;
1045 }
1046
1047 /* ---- Inquiry support ---- */
1048
1049 bool hci_discovery_active(struct hci_dev *hdev)
1050 {
1051         struct discovery_state *discov = &hdev->discovery;
1052
1053         switch (discov->state) {
1054         case DISCOVERY_FINDING:
1055         case DISCOVERY_RESOLVING:
1056                 return true;
1057
1058         default:
1059                 return false;
1060         }
1061 }
1062
1063 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1064 {
1065         int old_state = hdev->discovery.state;
1066
1067         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1068
1069         if (old_state == state)
1070                 return;
1071
1072         hdev->discovery.state = state;
1073
1074         switch (state) {
1075         case DISCOVERY_STOPPED:
1076                 hci_update_background_scan(hdev);
1077
1078                 if (old_state != DISCOVERY_STARTING)
1079                         mgmt_discovering(hdev, 0);
1080                 break;
1081         case DISCOVERY_STARTING:
1082                 break;
1083         case DISCOVERY_FINDING:
1084                 mgmt_discovering(hdev, 1);
1085                 break;
1086         case DISCOVERY_RESOLVING:
1087                 break;
1088         case DISCOVERY_STOPPING:
1089                 break;
1090         }
1091 }
1092
1093 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1094 {
1095         struct discovery_state *cache = &hdev->discovery;
1096         struct inquiry_entry *p, *n;
1097
1098         list_for_each_entry_safe(p, n, &cache->all, all) {
1099                 list_del(&p->all);
1100                 kfree(p);
1101         }
1102
1103         INIT_LIST_HEAD(&cache->unknown);
1104         INIT_LIST_HEAD(&cache->resolve);
1105 }
1106
1107 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1108                                                bdaddr_t *bdaddr)
1109 {
1110         struct discovery_state *cache = &hdev->discovery;
1111         struct inquiry_entry *e;
1112
1113         BT_DBG("cache %p, %pMR", cache, bdaddr);
1114
1115         list_for_each_entry(e, &cache->all, all) {
1116                 if (!bacmp(&e->data.bdaddr, bdaddr))
1117                         return e;
1118         }
1119
1120         return NULL;
1121 }
1122
1123 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1124                                                        bdaddr_t *bdaddr)
1125 {
1126         struct discovery_state *cache = &hdev->discovery;
1127         struct inquiry_entry *e;
1128
1129         BT_DBG("cache %p, %pMR", cache, bdaddr);
1130
1131         list_for_each_entry(e, &cache->unknown, list) {
1132                 if (!bacmp(&e->data.bdaddr, bdaddr))
1133                         return e;
1134         }
1135
1136         return NULL;
1137 }
1138
1139 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1140                                                        bdaddr_t *bdaddr,
1141                                                        int state)
1142 {
1143         struct discovery_state *cache = &hdev->discovery;
1144         struct inquiry_entry *e;
1145
1146         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1147
1148         list_for_each_entry(e, &cache->resolve, list) {
1149                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1150                         return e;
1151                 if (!bacmp(&e->data.bdaddr, bdaddr))
1152                         return e;
1153         }
1154
1155         return NULL;
1156 }
1157
1158 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1159                                       struct inquiry_entry *ie)
1160 {
1161         struct discovery_state *cache = &hdev->discovery;
1162         struct list_head *pos = &cache->resolve;
1163         struct inquiry_entry *p;
1164
1165         list_del(&ie->list);
1166
1167         list_for_each_entry(p, &cache->resolve, list) {
1168                 if (p->name_state != NAME_PENDING &&
1169                     abs(p->data.rssi) >= abs(ie->data.rssi))
1170                         break;
1171                 pos = &p->list;
1172         }
1173
1174         list_add(&ie->list, pos);
1175 }
1176
1177 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1178                              bool name_known)
1179 {
1180         struct discovery_state *cache = &hdev->discovery;
1181         struct inquiry_entry *ie;
1182         u32 flags = 0;
1183
1184         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1185
1186         hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1187
1188         if (!data->ssp_mode)
1189                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1190
1191         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1192         if (ie) {
1193                 if (!ie->data.ssp_mode)
1194                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1195
1196                 if (ie->name_state == NAME_NEEDED &&
1197                     data->rssi != ie->data.rssi) {
1198                         ie->data.rssi = data->rssi;
1199                         hci_inquiry_cache_update_resolve(hdev, ie);
1200                 }
1201
1202                 goto update;
1203         }
1204
1205         /* Entry not in the cache. Add new one. */
1206         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1207         if (!ie) {
1208                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1209                 goto done;
1210         }
1211
1212         list_add(&ie->all, &cache->all);
1213
1214         if (name_known) {
1215                 ie->name_state = NAME_KNOWN;
1216         } else {
1217                 ie->name_state = NAME_NOT_KNOWN;
1218                 list_add(&ie->list, &cache->unknown);
1219         }
1220
1221 update:
1222         if (name_known && ie->name_state != NAME_KNOWN &&
1223             ie->name_state != NAME_PENDING) {
1224                 ie->name_state = NAME_KNOWN;
1225                 list_del(&ie->list);
1226         }
1227
1228         memcpy(&ie->data, data, sizeof(*data));
1229         ie->timestamp = jiffies;
1230         cache->timestamp = jiffies;
1231
1232         if (ie->name_state == NAME_NOT_KNOWN)
1233                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1234
1235 done:
1236         return flags;
1237 }
1238
1239 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1240 {
1241         struct discovery_state *cache = &hdev->discovery;
1242         struct inquiry_info *info = (struct inquiry_info *) buf;
1243         struct inquiry_entry *e;
1244         int copied = 0;
1245
1246         list_for_each_entry(e, &cache->all, all) {
1247                 struct inquiry_data *data = &e->data;
1248
1249                 if (copied >= num)
1250                         break;
1251
1252                 bacpy(&info->bdaddr, &data->bdaddr);
1253                 info->pscan_rep_mode    = data->pscan_rep_mode;
1254                 info->pscan_period_mode = data->pscan_period_mode;
1255                 info->pscan_mode        = data->pscan_mode;
1256                 memcpy(info->dev_class, data->dev_class, 3);
1257                 info->clock_offset      = data->clock_offset;
1258
1259                 info++;
1260                 copied++;
1261         }
1262
1263         BT_DBG("cache %p, copied %d", cache, copied);
1264         return copied;
1265 }
1266
1267 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1268 {
1269         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1270         struct hci_dev *hdev = req->hdev;
1271         struct hci_cp_inquiry cp;
1272
1273         BT_DBG("%s", hdev->name);
1274
1275         if (test_bit(HCI_INQUIRY, &hdev->flags))
1276                 return;
1277
1278         /* Start Inquiry */
1279         memcpy(&cp.lap, &ir->lap, 3);
1280         cp.length  = ir->length;
1281         cp.num_rsp = ir->num_rsp;
1282         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1283 }
1284
1285 int hci_inquiry(void __user *arg)
1286 {
1287         __u8 __user *ptr = arg;
1288         struct hci_inquiry_req ir;
1289         struct hci_dev *hdev;
1290         int err = 0, do_inquiry = 0, max_rsp;
1291         long timeo;
1292         __u8 *buf;
1293
1294         if (copy_from_user(&ir, ptr, sizeof(ir)))
1295                 return -EFAULT;
1296
1297         hdev = hci_dev_get(ir.dev_id);
1298         if (!hdev)
1299                 return -ENODEV;
1300
1301         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1302                 err = -EBUSY;
1303                 goto done;
1304         }
1305
1306         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1307                 err = -EOPNOTSUPP;
1308                 goto done;
1309         }
1310
1311         if (hdev->dev_type != HCI_BREDR) {
1312                 err = -EOPNOTSUPP;
1313                 goto done;
1314         }
1315
1316         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1317                 err = -EOPNOTSUPP;
1318                 goto done;
1319         }
1320
1321         hci_dev_lock(hdev);
1322         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1323             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1324                 hci_inquiry_cache_flush(hdev);
1325                 do_inquiry = 1;
1326         }
1327         hci_dev_unlock(hdev);
1328
1329         timeo = ir.length * msecs_to_jiffies(2000);
1330
1331         if (do_inquiry) {
1332                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1333                                    timeo);
1334                 if (err < 0)
1335                         goto done;
1336
1337                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1338                  * cleared). If it is interrupted by a signal, return -EINTR.
1339                  */
1340                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1341                                 TASK_INTERRUPTIBLE))
1342                         return -EINTR;
1343         }
1344
1345         /* for unlimited number of responses we will use buffer with
1346          * 255 entries
1347          */
1348         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1349
1350         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1351          * copy it to the user space.
1352          */
1353         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1354         if (!buf) {
1355                 err = -ENOMEM;
1356                 goto done;
1357         }
1358
1359         hci_dev_lock(hdev);
1360         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1361         hci_dev_unlock(hdev);
1362
1363         BT_DBG("num_rsp %d", ir.num_rsp);
1364
1365         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1366                 ptr += sizeof(ir);
1367                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1368                                  ir.num_rsp))
1369                         err = -EFAULT;
1370         } else
1371                 err = -EFAULT;
1372
1373         kfree(buf);
1374
1375 done:
1376         hci_dev_put(hdev);
1377         return err;
1378 }
1379
1380 static int hci_dev_do_open(struct hci_dev *hdev)
1381 {
1382         int ret = 0;
1383
1384         BT_DBG("%s %p", hdev->name, hdev);
1385
1386         hci_req_lock(hdev);
1387
1388         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1389                 ret = -ENODEV;
1390                 goto done;
1391         }
1392
1393         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1394             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1395                 /* Check for rfkill but allow the HCI setup stage to
1396                  * proceed (which in itself doesn't cause any RF activity).
1397                  */
1398                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1399                         ret = -ERFKILL;
1400                         goto done;
1401                 }
1402
1403                 /* Check for valid public address or a configured static
1404                  * random adddress, but let the HCI setup proceed to
1405                  * be able to determine if there is a public address
1406                  * or not.
1407                  *
1408                  * In case of user channel usage, it is not important
1409                  * if a public address or static random address is
1410                  * available.
1411                  *
1412                  * This check is only valid for BR/EDR controllers
1413                  * since AMP controllers do not have an address.
1414                  */
1415                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1416                     hdev->dev_type == HCI_BREDR &&
1417                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1418                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1419                         ret = -EADDRNOTAVAIL;
1420                         goto done;
1421                 }
1422         }
1423
1424         if (test_bit(HCI_UP, &hdev->flags)) {
1425                 ret = -EALREADY;
1426                 goto done;
1427         }
1428
1429         if (hdev->open(hdev)) {
1430                 ret = -EIO;
1431                 goto done;
1432         }
1433
1434         atomic_set(&hdev->cmd_cnt, 1);
1435         set_bit(HCI_INIT, &hdev->flags);
1436
1437         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1438                 if (hdev->setup)
1439                         ret = hdev->setup(hdev);
1440
1441                 /* The transport driver can set these quirks before
1442                  * creating the HCI device or in its setup callback.
1443                  *
1444                  * In case any of them is set, the controller has to
1445                  * start up as unconfigured.
1446                  */
1447                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1448                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1449                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1450
1451                 /* For an unconfigured controller it is required to
1452                  * read at least the version information provided by
1453                  * the Read Local Version Information command.
1454                  *
1455                  * If the set_bdaddr driver callback is provided, then
1456                  * also the original Bluetooth public device address
1457                  * will be read using the Read BD Address command.
1458                  */
1459                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1460                         ret = __hci_unconf_init(hdev);
1461         }
1462
1463         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1464                 /* If public address change is configured, ensure that
1465                  * the address gets programmed. If the driver does not
1466                  * support changing the public address, fail the power
1467                  * on procedure.
1468                  */
1469                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1470                     hdev->set_bdaddr)
1471                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1472                 else
1473                         ret = -EADDRNOTAVAIL;
1474         }
1475
1476         if (!ret) {
1477                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1478                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1479                         ret = __hci_init(hdev);
1480         }
1481
1482         clear_bit(HCI_INIT, &hdev->flags);
1483
1484         if (!ret) {
1485                 hci_dev_hold(hdev);
1486                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1487                 set_bit(HCI_UP, &hdev->flags);
1488                 hci_notify(hdev, HCI_DEV_UP);
1489                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1490                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1491                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1492                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1493                     hdev->dev_type == HCI_BREDR) {
1494                         hci_dev_lock(hdev);
1495                         mgmt_powered(hdev, 1);
1496                         hci_dev_unlock(hdev);
1497                 }
1498         } else {
1499                 /* Init failed, cleanup */
1500                 flush_work(&hdev->tx_work);
1501                 flush_work(&hdev->cmd_work);
1502                 flush_work(&hdev->rx_work);
1503
1504                 skb_queue_purge(&hdev->cmd_q);
1505                 skb_queue_purge(&hdev->rx_q);
1506
1507                 if (hdev->flush)
1508                         hdev->flush(hdev);
1509
1510                 if (hdev->sent_cmd) {
1511                         kfree_skb(hdev->sent_cmd);
1512                         hdev->sent_cmd = NULL;
1513                 }
1514
1515                 hdev->close(hdev);
1516                 hdev->flags &= BIT(HCI_RAW);
1517         }
1518
1519 done:
1520         hci_req_unlock(hdev);
1521         return ret;
1522 }
1523
1524 /* ---- HCI ioctl helpers ---- */
1525
1526 int hci_dev_open(__u16 dev)
1527 {
1528         struct hci_dev *hdev;
1529         int err;
1530
1531         hdev = hci_dev_get(dev);
1532         if (!hdev)
1533                 return -ENODEV;
1534
1535         /* Devices that are marked as unconfigured can only be powered
1536          * up as user channel. Trying to bring them up as normal devices
1537          * will result into a failure. Only user channel operation is
1538          * possible.
1539          *
1540          * When this function is called for a user channel, the flag
1541          * HCI_USER_CHANNEL will be set first before attempting to
1542          * open the device.
1543          */
1544         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1545             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1546                 err = -EOPNOTSUPP;
1547                 goto done;
1548         }
1549
1550         /* We need to ensure that no other power on/off work is pending
1551          * before proceeding to call hci_dev_do_open. This is
1552          * particularly important if the setup procedure has not yet
1553          * completed.
1554          */
1555         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1556                 cancel_delayed_work(&hdev->power_off);
1557
1558         /* After this call it is guaranteed that the setup procedure
1559          * has finished. This means that error conditions like RFKILL
1560          * or no valid public or static random address apply.
1561          */
1562         flush_workqueue(hdev->req_workqueue);
1563
1564         /* For controllers not using the management interface and that
1565          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1566          * so that pairing works for them. Once the management interface
1567          * is in use this bit will be cleared again and userspace has
1568          * to explicitly enable it.
1569          */
1570         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1571             !test_bit(HCI_MGMT, &hdev->dev_flags))
1572                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1573
1574         err = hci_dev_do_open(hdev);
1575
1576 done:
1577         hci_dev_put(hdev);
1578         return err;
1579 }
1580
1581 /* This function requires the caller holds hdev->lock */
1582 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1583 {
1584         struct hci_conn_params *p;
1585
1586         list_for_each_entry(p, &hdev->le_conn_params, list) {
1587                 if (p->conn) {
1588                         hci_conn_drop(p->conn);
1589                         hci_conn_put(p->conn);
1590                         p->conn = NULL;
1591                 }
1592                 list_del_init(&p->action);
1593         }
1594
1595         BT_DBG("All LE pending actions cleared");
1596 }
1597
1598 static int hci_dev_do_close(struct hci_dev *hdev)
1599 {
1600         BT_DBG("%s %p", hdev->name, hdev);
1601
1602         cancel_delayed_work(&hdev->power_off);
1603
1604         hci_req_cancel(hdev, ENODEV);
1605         hci_req_lock(hdev);
1606
1607         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1608                 cancel_delayed_work_sync(&hdev->cmd_timer);
1609                 hci_req_unlock(hdev);
1610                 return 0;
1611         }
1612
1613         /* Flush RX and TX works */
1614         flush_work(&hdev->tx_work);
1615         flush_work(&hdev->rx_work);
1616
1617         if (hdev->discov_timeout > 0) {
1618                 cancel_delayed_work(&hdev->discov_off);
1619                 hdev->discov_timeout = 0;
1620                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1621                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1622         }
1623
1624         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1625                 cancel_delayed_work(&hdev->service_cache);
1626
1627         cancel_delayed_work_sync(&hdev->le_scan_disable);
1628
1629         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1630                 cancel_delayed_work_sync(&hdev->rpa_expired);
1631
1632         /* Avoid potential lockdep warnings from the *_flush() calls by
1633          * ensuring the workqueue is empty up front.
1634          */
1635         drain_workqueue(hdev->workqueue);
1636
1637         hci_dev_lock(hdev);
1638
1639         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1640                 if (hdev->dev_type == HCI_BREDR)
1641                         mgmt_powered(hdev, 0);
1642         }
1643
1644         hci_inquiry_cache_flush(hdev);
1645         hci_pend_le_actions_clear(hdev);
1646         hci_conn_hash_flush(hdev);
1647         hci_dev_unlock(hdev);
1648
1649         hci_notify(hdev, HCI_DEV_DOWN);
1650
1651         if (hdev->flush)
1652                 hdev->flush(hdev);
1653
1654         /* Reset device */
1655         skb_queue_purge(&hdev->cmd_q);
1656         atomic_set(&hdev->cmd_cnt, 1);
1657         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1658             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1659             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1660                 set_bit(HCI_INIT, &hdev->flags);
1661                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1662                 clear_bit(HCI_INIT, &hdev->flags);
1663         }
1664
1665         /* flush cmd  work */
1666         flush_work(&hdev->cmd_work);
1667
1668         /* Drop queues */
1669         skb_queue_purge(&hdev->rx_q);
1670         skb_queue_purge(&hdev->cmd_q);
1671         skb_queue_purge(&hdev->raw_q);
1672
1673         /* Drop last sent command */
1674         if (hdev->sent_cmd) {
1675                 cancel_delayed_work_sync(&hdev->cmd_timer);
1676                 kfree_skb(hdev->sent_cmd);
1677                 hdev->sent_cmd = NULL;
1678         }
1679
1680         kfree_skb(hdev->recv_evt);
1681         hdev->recv_evt = NULL;
1682
1683         /* After this point our queues are empty
1684          * and no tasks are scheduled. */
1685         hdev->close(hdev);
1686
1687         /* Clear flags */
1688         hdev->flags &= BIT(HCI_RAW);
1689         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1690
1691         /* Controller radio is available but is currently powered down */
1692         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1693
1694         memset(hdev->eir, 0, sizeof(hdev->eir));
1695         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1696         bacpy(&hdev->random_addr, BDADDR_ANY);
1697
1698         hci_req_unlock(hdev);
1699
1700         hci_dev_put(hdev);
1701         return 0;
1702 }
1703
1704 int hci_dev_close(__u16 dev)
1705 {
1706         struct hci_dev *hdev;
1707         int err;
1708
1709         hdev = hci_dev_get(dev);
1710         if (!hdev)
1711                 return -ENODEV;
1712
1713         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1714                 err = -EBUSY;
1715                 goto done;
1716         }
1717
1718         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1719                 cancel_delayed_work(&hdev->power_off);
1720
1721         err = hci_dev_do_close(hdev);
1722
1723 done:
1724         hci_dev_put(hdev);
1725         return err;
1726 }
1727
1728 int hci_dev_reset(__u16 dev)
1729 {
1730         struct hci_dev *hdev;
1731         int ret = 0;
1732
1733         hdev = hci_dev_get(dev);
1734         if (!hdev)
1735                 return -ENODEV;
1736
1737         hci_req_lock(hdev);
1738
1739         if (!test_bit(HCI_UP, &hdev->flags)) {
1740                 ret = -ENETDOWN;
1741                 goto done;
1742         }
1743
1744         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1745                 ret = -EBUSY;
1746                 goto done;
1747         }
1748
1749         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1750                 ret = -EOPNOTSUPP;
1751                 goto done;
1752         }
1753
1754         /* Drop queues */
1755         skb_queue_purge(&hdev->rx_q);
1756         skb_queue_purge(&hdev->cmd_q);
1757
1758         /* Avoid potential lockdep warnings from the *_flush() calls by
1759          * ensuring the workqueue is empty up front.
1760          */
1761         drain_workqueue(hdev->workqueue);
1762
1763         hci_dev_lock(hdev);
1764         hci_inquiry_cache_flush(hdev);
1765         hci_conn_hash_flush(hdev);
1766         hci_dev_unlock(hdev);
1767
1768         if (hdev->flush)
1769                 hdev->flush(hdev);
1770
1771         atomic_set(&hdev->cmd_cnt, 1);
1772         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1773
1774         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1775
1776 done:
1777         hci_req_unlock(hdev);
1778         hci_dev_put(hdev);
1779         return ret;
1780 }
1781
1782 int hci_dev_reset_stat(__u16 dev)
1783 {
1784         struct hci_dev *hdev;
1785         int ret = 0;
1786
1787         hdev = hci_dev_get(dev);
1788         if (!hdev)
1789                 return -ENODEV;
1790
1791         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1792                 ret = -EBUSY;
1793                 goto done;
1794         }
1795
1796         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1797                 ret = -EOPNOTSUPP;
1798                 goto done;
1799         }
1800
1801         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1802
1803 done:
1804         hci_dev_put(hdev);
1805         return ret;
1806 }
1807
1808 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1809 {
1810         bool conn_changed, discov_changed;
1811
1812         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1813
1814         if ((scan & SCAN_PAGE))
1815                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1816                                                  &hdev->dev_flags);
1817         else
1818                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1819                                                   &hdev->dev_flags);
1820
1821         if ((scan & SCAN_INQUIRY)) {
1822                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1823                                                    &hdev->dev_flags);
1824         } else {
1825                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1826                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1827                                                     &hdev->dev_flags);
1828         }
1829
1830         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1831                 return;
1832
1833         if (conn_changed || discov_changed) {
1834                 /* In case this was disabled through mgmt */
1835                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1836
1837                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1838                         mgmt_update_adv_data(hdev);
1839
1840                 mgmt_new_settings(hdev);
1841         }
1842 }
1843
1844 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1845 {
1846         struct hci_dev *hdev;
1847         struct hci_dev_req dr;
1848         int err = 0;
1849
1850         if (copy_from_user(&dr, arg, sizeof(dr)))
1851                 return -EFAULT;
1852
1853         hdev = hci_dev_get(dr.dev_id);
1854         if (!hdev)
1855                 return -ENODEV;
1856
1857         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1858                 err = -EBUSY;
1859                 goto done;
1860         }
1861
1862         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1863                 err = -EOPNOTSUPP;
1864                 goto done;
1865         }
1866
1867         if (hdev->dev_type != HCI_BREDR) {
1868                 err = -EOPNOTSUPP;
1869                 goto done;
1870         }
1871
1872         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1873                 err = -EOPNOTSUPP;
1874                 goto done;
1875         }
1876
1877         switch (cmd) {
1878         case HCISETAUTH:
1879                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1880                                    HCI_INIT_TIMEOUT);
1881                 break;
1882
1883         case HCISETENCRYPT:
1884                 if (!lmp_encrypt_capable(hdev)) {
1885                         err = -EOPNOTSUPP;
1886                         break;
1887                 }
1888
1889                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1890                         /* Auth must be enabled first */
1891                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1892                                            HCI_INIT_TIMEOUT);
1893                         if (err)
1894                                 break;
1895                 }
1896
1897                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1898                                    HCI_INIT_TIMEOUT);
1899                 break;
1900
1901         case HCISETSCAN:
1902                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1903                                    HCI_INIT_TIMEOUT);
1904
1905                 /* Ensure that the connectable and discoverable states
1906                  * get correctly modified as this was a non-mgmt change.
1907                  */
1908                 if (!err)
1909                         hci_update_scan_state(hdev, dr.dev_opt);
1910                 break;
1911
1912         case HCISETLINKPOL:
1913                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1914                                    HCI_INIT_TIMEOUT);
1915                 break;
1916
1917         case HCISETLINKMODE:
1918                 hdev->link_mode = ((__u16) dr.dev_opt) &
1919                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1920                 break;
1921
1922         case HCISETPTYPE:
1923                 hdev->pkt_type = (__u16) dr.dev_opt;
1924                 break;
1925
1926         case HCISETACLMTU:
1927                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1928                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1929                 break;
1930
1931         case HCISETSCOMTU:
1932                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1933                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1934                 break;
1935
1936         default:
1937                 err = -EINVAL;
1938                 break;
1939         }
1940
1941 done:
1942         hci_dev_put(hdev);
1943         return err;
1944 }
1945
1946 int hci_get_dev_list(void __user *arg)
1947 {
1948         struct hci_dev *hdev;
1949         struct hci_dev_list_req *dl;
1950         struct hci_dev_req *dr;
1951         int n = 0, size, err;
1952         __u16 dev_num;
1953
1954         if (get_user(dev_num, (__u16 __user *) arg))
1955                 return -EFAULT;
1956
1957         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1958                 return -EINVAL;
1959
1960         size = sizeof(*dl) + dev_num * sizeof(*dr);
1961
1962         dl = kzalloc(size, GFP_KERNEL);
1963         if (!dl)
1964                 return -ENOMEM;
1965
1966         dr = dl->dev_req;
1967
1968         read_lock(&hci_dev_list_lock);
1969         list_for_each_entry(hdev, &hci_dev_list, list) {
1970                 unsigned long flags = hdev->flags;
1971
1972                 /* When the auto-off is configured it means the transport
1973                  * is running, but in that case still indicate that the
1974                  * device is actually down.
1975                  */
1976                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1977                         flags &= ~BIT(HCI_UP);
1978
1979                 (dr + n)->dev_id  = hdev->id;
1980                 (dr + n)->dev_opt = flags;
1981
1982                 if (++n >= dev_num)
1983                         break;
1984         }
1985         read_unlock(&hci_dev_list_lock);
1986
1987         dl->dev_num = n;
1988         size = sizeof(*dl) + n * sizeof(*dr);
1989
1990         err = copy_to_user(arg, dl, size);
1991         kfree(dl);
1992
1993         return err ? -EFAULT : 0;
1994 }
1995
1996 int hci_get_dev_info(void __user *arg)
1997 {
1998         struct hci_dev *hdev;
1999         struct hci_dev_info di;
2000         unsigned long flags;
2001         int err = 0;
2002
2003         if (copy_from_user(&di, arg, sizeof(di)))
2004                 return -EFAULT;
2005
2006         hdev = hci_dev_get(di.dev_id);
2007         if (!hdev)
2008                 return -ENODEV;
2009
2010         /* When the auto-off is configured it means the transport
2011          * is running, but in that case still indicate that the
2012          * device is actually down.
2013          */
2014         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2015                 flags = hdev->flags & ~BIT(HCI_UP);
2016         else
2017                 flags = hdev->flags;
2018
2019         strcpy(di.name, hdev->name);
2020         di.bdaddr   = hdev->bdaddr;
2021         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2022         di.flags    = flags;
2023         di.pkt_type = hdev->pkt_type;
2024         if (lmp_bredr_capable(hdev)) {
2025                 di.acl_mtu  = hdev->acl_mtu;
2026                 di.acl_pkts = hdev->acl_pkts;
2027                 di.sco_mtu  = hdev->sco_mtu;
2028                 di.sco_pkts = hdev->sco_pkts;
2029         } else {
2030                 di.acl_mtu  = hdev->le_mtu;
2031                 di.acl_pkts = hdev->le_pkts;
2032                 di.sco_mtu  = 0;
2033                 di.sco_pkts = 0;
2034         }
2035         di.link_policy = hdev->link_policy;
2036         di.link_mode   = hdev->link_mode;
2037
2038         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2039         memcpy(&di.features, &hdev->features, sizeof(di.features));
2040
2041         if (copy_to_user(arg, &di, sizeof(di)))
2042                 err = -EFAULT;
2043
2044         hci_dev_put(hdev);
2045
2046         return err;
2047 }
2048
2049 /* ---- Interface to HCI drivers ---- */
2050
2051 static int hci_rfkill_set_block(void *data, bool blocked)
2052 {
2053         struct hci_dev *hdev = data;
2054
2055         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2056
2057         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2058                 return -EBUSY;
2059
2060         if (blocked) {
2061                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2062                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2063                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2064                         hci_dev_do_close(hdev);
2065         } else {
2066                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2067         }
2068
2069         return 0;
2070 }
2071
2072 static const struct rfkill_ops hci_rfkill_ops = {
2073         .set_block = hci_rfkill_set_block,
2074 };
2075
2076 static void hci_power_on(struct work_struct *work)
2077 {
2078         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2079         int err;
2080
2081         BT_DBG("%s", hdev->name);
2082
2083         err = hci_dev_do_open(hdev);
2084         if (err < 0) {
2085                 hci_dev_lock(hdev);
2086                 mgmt_set_powered_failed(hdev, err);
2087                 hci_dev_unlock(hdev);
2088                 return;
2089         }
2090
2091         /* During the HCI setup phase, a few error conditions are
2092          * ignored and they need to be checked now. If they are still
2093          * valid, it is important to turn the device back off.
2094          */
2095         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2096             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2097             (hdev->dev_type == HCI_BREDR &&
2098              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2099              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2100                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2101                 hci_dev_do_close(hdev);
2102         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2103                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2104                                    HCI_AUTO_OFF_TIMEOUT);
2105         }
2106
2107         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2108                 /* For unconfigured devices, set the HCI_RAW flag
2109                  * so that userspace can easily identify them.
2110                  */
2111                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2112                         set_bit(HCI_RAW, &hdev->flags);
2113
2114                 /* For fully configured devices, this will send
2115                  * the Index Added event. For unconfigured devices,
2116                  * it will send Unconfigued Index Added event.
2117                  *
2118                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2119                  * and no event will be send.
2120                  */
2121                 mgmt_index_added(hdev);
2122         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2123                 /* When the controller is now configured, then it
2124                  * is important to clear the HCI_RAW flag.
2125                  */
2126                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2127                         clear_bit(HCI_RAW, &hdev->flags);
2128
2129                 /* Powering on the controller with HCI_CONFIG set only
2130                  * happens with the transition from unconfigured to
2131                  * configured. This will send the Index Added event.
2132                  */
2133                 mgmt_index_added(hdev);
2134         }
2135 }
2136
2137 static void hci_power_off(struct work_struct *work)
2138 {
2139         struct hci_dev *hdev = container_of(work, struct hci_dev,
2140                                             power_off.work);
2141
2142         BT_DBG("%s", hdev->name);
2143
2144         hci_dev_do_close(hdev);
2145 }
2146
2147 static void hci_discov_off(struct work_struct *work)
2148 {
2149         struct hci_dev *hdev;
2150
2151         hdev = container_of(work, struct hci_dev, discov_off.work);
2152
2153         BT_DBG("%s", hdev->name);
2154
2155         mgmt_discoverable_timeout(hdev);
2156 }
2157
2158 void hci_uuids_clear(struct hci_dev *hdev)
2159 {
2160         struct bt_uuid *uuid, *tmp;
2161
2162         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2163                 list_del(&uuid->list);
2164                 kfree(uuid);
2165         }
2166 }
2167
2168 void hci_link_keys_clear(struct hci_dev *hdev)
2169 {
2170         struct link_key *key;
2171
2172         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2173                 list_del_rcu(&key->list);
2174                 kfree_rcu(key, rcu);
2175         }
2176 }
2177
2178 void hci_smp_ltks_clear(struct hci_dev *hdev)
2179 {
2180         struct smp_ltk *k;
2181
2182         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2183                 list_del_rcu(&k->list);
2184                 kfree_rcu(k, rcu);
2185         }
2186 }
2187
2188 void hci_smp_irks_clear(struct hci_dev *hdev)
2189 {
2190         struct smp_irk *k;
2191
2192         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2193                 list_del_rcu(&k->list);
2194                 kfree_rcu(k, rcu);
2195         }
2196 }
2197
2198 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2199 {
2200         struct link_key *k;
2201
2202         rcu_read_lock();
2203         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2204                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2205                         rcu_read_unlock();
2206                         return k;
2207                 }
2208         }
2209         rcu_read_unlock();
2210
2211         return NULL;
2212 }
2213
2214 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2215                                u8 key_type, u8 old_key_type)
2216 {
2217         /* Legacy key */
2218         if (key_type < 0x03)
2219                 return true;
2220
2221         /* Debug keys are insecure so don't store them persistently */
2222         if (key_type == HCI_LK_DEBUG_COMBINATION)
2223                 return false;
2224
2225         /* Changed combination key and there's no previous one */
2226         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2227                 return false;
2228
2229         /* Security mode 3 case */
2230         if (!conn)
2231                 return true;
2232
2233         /* BR/EDR key derived using SC from an LE link */
2234         if (conn->type == LE_LINK)
2235                 return true;
2236
2237         /* Neither local nor remote side had no-bonding as requirement */
2238         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2239                 return true;
2240
2241         /* Local side had dedicated bonding as requirement */
2242         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2243                 return true;
2244
2245         /* Remote side had dedicated bonding as requirement */
2246         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2247                 return true;
2248
2249         /* If none of the above criteria match, then don't store the key
2250          * persistently */
2251         return false;
2252 }
2253
2254 static u8 ltk_role(u8 type)
2255 {
2256         if (type == SMP_LTK)
2257                 return HCI_ROLE_MASTER;
2258
2259         return HCI_ROLE_SLAVE;
2260 }
2261
2262 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2263                              u8 addr_type, u8 role)
2264 {
2265         struct smp_ltk *k;
2266
2267         rcu_read_lock();
2268         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2269                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2270                         continue;
2271
2272                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2273                         rcu_read_unlock();
2274                         return k;
2275                 }
2276         }
2277         rcu_read_unlock();
2278
2279         return NULL;
2280 }
2281
2282 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2283 {
2284         struct smp_irk *irk;
2285
2286         rcu_read_lock();
2287         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2288                 if (!bacmp(&irk->rpa, rpa)) {
2289                         rcu_read_unlock();
2290                         return irk;
2291                 }
2292         }
2293
2294         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2295                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2296                         bacpy(&irk->rpa, rpa);
2297                         rcu_read_unlock();
2298                         return irk;
2299                 }
2300         }
2301         rcu_read_unlock();
2302
2303         return NULL;
2304 }
2305
2306 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2307                                      u8 addr_type)
2308 {
2309         struct smp_irk *irk;
2310
2311         /* Identity Address must be public or static random */
2312         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2313                 return NULL;
2314
2315         rcu_read_lock();
2316         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2317                 if (addr_type == irk->addr_type &&
2318                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2319                         rcu_read_unlock();
2320                         return irk;
2321                 }
2322         }
2323         rcu_read_unlock();
2324
2325         return NULL;
2326 }
2327
2328 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2329                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2330                                   u8 pin_len, bool *persistent)
2331 {
2332         struct link_key *key, *old_key;
2333         u8 old_key_type;
2334
2335         old_key = hci_find_link_key(hdev, bdaddr);
2336         if (old_key) {
2337                 old_key_type = old_key->type;
2338                 key = old_key;
2339         } else {
2340                 old_key_type = conn ? conn->key_type : 0xff;
2341                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2342                 if (!key)
2343                         return NULL;
2344                 list_add_rcu(&key->list, &hdev->link_keys);
2345         }
2346
2347         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2348
2349         /* Some buggy controller combinations generate a changed
2350          * combination key for legacy pairing even when there's no
2351          * previous key */
2352         if (type == HCI_LK_CHANGED_COMBINATION &&
2353             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2354                 type = HCI_LK_COMBINATION;
2355                 if (conn)
2356                         conn->key_type = type;
2357         }
2358
2359         bacpy(&key->bdaddr, bdaddr);
2360         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2361         key->pin_len = pin_len;
2362
2363         if (type == HCI_LK_CHANGED_COMBINATION)
2364                 key->type = old_key_type;
2365         else
2366                 key->type = type;
2367
2368         if (persistent)
2369                 *persistent = hci_persistent_key(hdev, conn, type,
2370                                                  old_key_type);
2371
2372         return key;
2373 }
2374
2375 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2376                             u8 addr_type, u8 type, u8 authenticated,
2377                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2378 {
2379         struct smp_ltk *key, *old_key;
2380         u8 role = ltk_role(type);
2381
2382         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2383         if (old_key)
2384                 key = old_key;
2385         else {
2386                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2387                 if (!key)
2388                         return NULL;
2389                 list_add_rcu(&key->list, &hdev->long_term_keys);
2390         }
2391
2392         bacpy(&key->bdaddr, bdaddr);
2393         key->bdaddr_type = addr_type;
2394         memcpy(key->val, tk, sizeof(key->val));
2395         key->authenticated = authenticated;
2396         key->ediv = ediv;
2397         key->rand = rand;
2398         key->enc_size = enc_size;
2399         key->type = type;
2400
2401         return key;
2402 }
2403
2404 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2406 {
2407         struct smp_irk *irk;
2408
2409         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2410         if (!irk) {
2411                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2412                 if (!irk)
2413                         return NULL;
2414
2415                 bacpy(&irk->bdaddr, bdaddr);
2416                 irk->addr_type = addr_type;
2417
2418                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2419         }
2420
2421         memcpy(irk->val, val, 16);
2422         bacpy(&irk->rpa, rpa);
2423
2424         return irk;
2425 }
2426
2427 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2428 {
2429         struct link_key *key;
2430
2431         key = hci_find_link_key(hdev, bdaddr);
2432         if (!key)
2433                 return -ENOENT;
2434
2435         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2436
2437         list_del_rcu(&key->list);
2438         kfree_rcu(key, rcu);
2439
2440         return 0;
2441 }
2442
2443 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2444 {
2445         struct smp_ltk *k;
2446         int removed = 0;
2447
2448         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2449                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2450                         continue;
2451
2452                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2453
2454                 list_del_rcu(&k->list);
2455                 kfree_rcu(k, rcu);
2456                 removed++;
2457         }
2458
2459         return removed ? 0 : -ENOENT;
2460 }
2461
2462 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2463 {
2464         struct smp_irk *k;
2465
2466         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2467                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2468                         continue;
2469
2470                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2471
2472                 list_del_rcu(&k->list);
2473                 kfree_rcu(k, rcu);
2474         }
2475 }
2476
2477 /* HCI command timer function */
2478 static void hci_cmd_timeout(struct work_struct *work)
2479 {
2480         struct hci_dev *hdev = container_of(work, struct hci_dev,
2481                                             cmd_timer.work);
2482
2483         if (hdev->sent_cmd) {
2484                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2485                 u16 opcode = __le16_to_cpu(sent->opcode);
2486
2487                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2488         } else {
2489                 BT_ERR("%s command tx timeout", hdev->name);
2490         }
2491
2492         atomic_set(&hdev->cmd_cnt, 1);
2493         queue_work(hdev->workqueue, &hdev->cmd_work);
2494 }
2495
2496 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2497                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2498 {
2499         struct oob_data *data;
2500
2501         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2502                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2503                         continue;
2504                 if (data->bdaddr_type != bdaddr_type)
2505                         continue;
2506                 return data;
2507         }
2508
2509         return NULL;
2510 }
2511
2512 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2513                                u8 bdaddr_type)
2514 {
2515         struct oob_data *data;
2516
2517         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2518         if (!data)
2519                 return -ENOENT;
2520
2521         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2522
2523         list_del(&data->list);
2524         kfree(data);
2525
2526         return 0;
2527 }
2528
2529 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2530 {
2531         struct oob_data *data, *n;
2532
2533         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2534                 list_del(&data->list);
2535                 kfree(data);
2536         }
2537 }
2538
2539 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2540                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2541                             u8 *hash256, u8 *rand256)
2542 {
2543         struct oob_data *data;
2544
2545         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2546         if (!data) {
2547                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2548                 if (!data)
2549                         return -ENOMEM;
2550
2551                 bacpy(&data->bdaddr, bdaddr);
2552                 data->bdaddr_type = bdaddr_type;
2553                 list_add(&data->list, &hdev->remote_oob_data);
2554         }
2555
2556         if (hash192 && rand192) {
2557                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2558                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2559         } else {
2560                 memset(data->hash192, 0, sizeof(data->hash192));
2561                 memset(data->rand192, 0, sizeof(data->rand192));
2562         }
2563
2564         if (hash256 && rand256) {
2565                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2566                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2567         } else {
2568                 memset(data->hash256, 0, sizeof(data->hash256));
2569                 memset(data->rand256, 0, sizeof(data->rand256));
2570         }
2571
2572         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2573
2574         return 0;
2575 }
2576
2577 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2578                                          bdaddr_t *bdaddr, u8 type)
2579 {
2580         struct bdaddr_list *b;
2581
2582         list_for_each_entry(b, bdaddr_list, list) {
2583                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2584                         return b;
2585         }
2586
2587         return NULL;
2588 }
2589
2590 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2591 {
2592         struct list_head *p, *n;
2593
2594         list_for_each_safe(p, n, bdaddr_list) {
2595                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2596
2597                 list_del(p);
2598                 kfree(b);
2599         }
2600 }
2601
2602 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2603 {
2604         struct bdaddr_list *entry;
2605
2606         if (!bacmp(bdaddr, BDADDR_ANY))
2607                 return -EBADF;
2608
2609         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2610                 return -EEXIST;
2611
2612         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2613         if (!entry)
2614                 return -ENOMEM;
2615
2616         bacpy(&entry->bdaddr, bdaddr);
2617         entry->bdaddr_type = type;
2618
2619         list_add(&entry->list, list);
2620
2621         return 0;
2622 }
2623
2624 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2625 {
2626         struct bdaddr_list *entry;
2627
2628         if (!bacmp(bdaddr, BDADDR_ANY)) {
2629                 hci_bdaddr_list_clear(list);
2630                 return 0;
2631         }
2632
2633         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2634         if (!entry)
2635                 return -ENOENT;
2636
2637         list_del(&entry->list);
2638         kfree(entry);
2639
2640         return 0;
2641 }
2642
2643 /* This function requires the caller holds hdev->lock */
2644 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2645                                                bdaddr_t *addr, u8 addr_type)
2646 {
2647         struct hci_conn_params *params;
2648
2649         /* The conn params list only contains identity addresses */
2650         if (!hci_is_identity_address(addr, addr_type))
2651                 return NULL;
2652
2653         list_for_each_entry(params, &hdev->le_conn_params, list) {
2654                 if (bacmp(&params->addr, addr) == 0 &&
2655                     params->addr_type == addr_type) {
2656                         return params;
2657                 }
2658         }
2659
2660         return NULL;
2661 }
2662
2663 /* This function requires the caller holds hdev->lock */
2664 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2665                                                   bdaddr_t *addr, u8 addr_type)
2666 {
2667         struct hci_conn_params *param;
2668
2669         /* The list only contains identity addresses */
2670         if (!hci_is_identity_address(addr, addr_type))
2671                 return NULL;
2672
2673         list_for_each_entry(param, list, action) {
2674                 if (bacmp(&param->addr, addr) == 0 &&
2675                     param->addr_type == addr_type)
2676                         return param;
2677         }
2678
2679         return NULL;
2680 }
2681
2682 /* This function requires the caller holds hdev->lock */
2683 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2684                                             bdaddr_t *addr, u8 addr_type)
2685 {
2686         struct hci_conn_params *params;
2687
2688         if (!hci_is_identity_address(addr, addr_type))
2689                 return NULL;
2690
2691         params = hci_conn_params_lookup(hdev, addr, addr_type);
2692         if (params)
2693                 return params;
2694
2695         params = kzalloc(sizeof(*params), GFP_KERNEL);
2696         if (!params) {
2697                 BT_ERR("Out of memory");
2698                 return NULL;
2699         }
2700
2701         bacpy(&params->addr, addr);
2702         params->addr_type = addr_type;
2703
2704         list_add(&params->list, &hdev->le_conn_params);
2705         INIT_LIST_HEAD(&params->action);
2706
2707         params->conn_min_interval = hdev->le_conn_min_interval;
2708         params->conn_max_interval = hdev->le_conn_max_interval;
2709         params->conn_latency = hdev->le_conn_latency;
2710         params->supervision_timeout = hdev->le_supv_timeout;
2711         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2712
2713         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2714
2715         return params;
2716 }
2717
2718 static void hci_conn_params_free(struct hci_conn_params *params)
2719 {
2720         if (params->conn) {
2721                 hci_conn_drop(params->conn);
2722                 hci_conn_put(params->conn);
2723         }
2724
2725         list_del(&params->action);
2726         list_del(&params->list);
2727         kfree(params);
2728 }
2729
2730 /* This function requires the caller holds hdev->lock */
2731 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2732 {
2733         struct hci_conn_params *params;
2734
2735         params = hci_conn_params_lookup(hdev, addr, addr_type);
2736         if (!params)
2737                 return;
2738
2739         hci_conn_params_free(params);
2740
2741         hci_update_background_scan(hdev);
2742
2743         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2744 }
2745
2746 /* This function requires the caller holds hdev->lock */
2747 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2748 {
2749         struct hci_conn_params *params, *tmp;
2750
2751         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2752                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2753                         continue;
2754                 list_del(&params->list);
2755                 kfree(params);
2756         }
2757
2758         BT_DBG("All LE disabled connection parameters were removed");
2759 }
2760
2761 /* This function requires the caller holds hdev->lock */
2762 void hci_conn_params_clear_all(struct hci_dev *hdev)
2763 {
2764         struct hci_conn_params *params, *tmp;
2765
2766         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2767                 hci_conn_params_free(params);
2768
2769         hci_update_background_scan(hdev);
2770
2771         BT_DBG("All LE connection parameters were removed");
2772 }
2773
2774 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2775 {
2776         if (status) {
2777                 BT_ERR("Failed to start inquiry: status %d", status);
2778
2779                 hci_dev_lock(hdev);
2780                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2781                 hci_dev_unlock(hdev);
2782                 return;
2783         }
2784 }
2785
2786 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2787 {
2788         /* General inquiry access code (GIAC) */
2789         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2790         struct hci_request req;
2791         struct hci_cp_inquiry cp;
2792         int err;
2793
2794         if (status) {
2795                 BT_ERR("Failed to disable LE scanning: status %d", status);
2796                 return;
2797         }
2798
2799         switch (hdev->discovery.type) {
2800         case DISCOV_TYPE_LE:
2801                 hci_dev_lock(hdev);
2802                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2803                 hci_dev_unlock(hdev);
2804                 break;
2805
2806         case DISCOV_TYPE_INTERLEAVED:
2807                 hci_req_init(&req, hdev);
2808
2809                 memset(&cp, 0, sizeof(cp));
2810                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2811                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2812                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2813
2814                 hci_dev_lock(hdev);
2815
2816                 hci_inquiry_cache_flush(hdev);
2817
2818                 err = hci_req_run(&req, inquiry_complete);
2819                 if (err) {
2820                         BT_ERR("Inquiry request failed: err %d", err);
2821                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2822                 }
2823
2824                 hci_dev_unlock(hdev);
2825                 break;
2826         }
2827 }
2828
2829 static void le_scan_disable_work(struct work_struct *work)
2830 {
2831         struct hci_dev *hdev = container_of(work, struct hci_dev,
2832                                             le_scan_disable.work);
2833         struct hci_request req;
2834         int err;
2835
2836         BT_DBG("%s", hdev->name);
2837
2838         hci_req_init(&req, hdev);
2839
2840         hci_req_add_le_scan_disable(&req);
2841
2842         err = hci_req_run(&req, le_scan_disable_work_complete);
2843         if (err)
2844                 BT_ERR("Disable LE scanning request failed: err %d", err);
2845 }
2846
2847 /* Copy the Identity Address of the controller.
2848  *
2849  * If the controller has a public BD_ADDR, then by default use that one.
2850  * If this is a LE only controller without a public address, default to
2851  * the static random address.
2852  *
2853  * For debugging purposes it is possible to force controllers with a
2854  * public address to use the static random address instead.
2855  *
2856  * In case BR/EDR has been disabled on a dual-mode controller and
2857  * userspace has configured a static address, then that address
2858  * becomes the identity address instead of the public BR/EDR address.
2859  */
2860 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2861                                u8 *bdaddr_type)
2862 {
2863         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2864             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2865             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2866              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2867                 bacpy(bdaddr, &hdev->static_addr);
2868                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2869         } else {
2870                 bacpy(bdaddr, &hdev->bdaddr);
2871                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2872         }
2873 }
2874
2875 /* Alloc HCI device */
2876 struct hci_dev *hci_alloc_dev(void)
2877 {
2878         struct hci_dev *hdev;
2879
2880         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2881         if (!hdev)
2882                 return NULL;
2883
2884         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2885         hdev->esco_type = (ESCO_HV1);
2886         hdev->link_mode = (HCI_LM_ACCEPT);
2887         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2888         hdev->io_capability = 0x03;     /* No Input No Output */
2889         hdev->manufacturer = 0xffff;    /* Default to internal use */
2890         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2891         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2892
2893         hdev->sniff_max_interval = 800;
2894         hdev->sniff_min_interval = 80;
2895
2896         hdev->le_adv_channel_map = 0x07;
2897         hdev->le_adv_min_interval = 0x0800;
2898         hdev->le_adv_max_interval = 0x0800;
2899         hdev->le_scan_interval = 0x0060;
2900         hdev->le_scan_window = 0x0030;
2901         hdev->le_conn_min_interval = 0x0028;
2902         hdev->le_conn_max_interval = 0x0038;
2903         hdev->le_conn_latency = 0x0000;
2904         hdev->le_supv_timeout = 0x002a;
2905         hdev->le_def_tx_len = 0x001b;
2906         hdev->le_def_tx_time = 0x0148;
2907         hdev->le_max_tx_len = 0x001b;
2908         hdev->le_max_tx_time = 0x0148;
2909         hdev->le_max_rx_len = 0x001b;
2910         hdev->le_max_rx_time = 0x0148;
2911
2912         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2913         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2914         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2915         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2916
2917         mutex_init(&hdev->lock);
2918         mutex_init(&hdev->req_lock);
2919
2920         INIT_LIST_HEAD(&hdev->mgmt_pending);
2921         INIT_LIST_HEAD(&hdev->blacklist);
2922         INIT_LIST_HEAD(&hdev->whitelist);
2923         INIT_LIST_HEAD(&hdev->uuids);
2924         INIT_LIST_HEAD(&hdev->link_keys);
2925         INIT_LIST_HEAD(&hdev->long_term_keys);
2926         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2927         INIT_LIST_HEAD(&hdev->remote_oob_data);
2928         INIT_LIST_HEAD(&hdev->le_white_list);
2929         INIT_LIST_HEAD(&hdev->le_conn_params);
2930         INIT_LIST_HEAD(&hdev->pend_le_conns);
2931         INIT_LIST_HEAD(&hdev->pend_le_reports);
2932         INIT_LIST_HEAD(&hdev->conn_hash.list);
2933
2934         INIT_WORK(&hdev->rx_work, hci_rx_work);
2935         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2936         INIT_WORK(&hdev->tx_work, hci_tx_work);
2937         INIT_WORK(&hdev->power_on, hci_power_on);
2938
2939         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2940         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2941         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2942
2943         skb_queue_head_init(&hdev->rx_q);
2944         skb_queue_head_init(&hdev->cmd_q);
2945         skb_queue_head_init(&hdev->raw_q);
2946
2947         init_waitqueue_head(&hdev->req_wait_q);
2948
2949         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2950
2951         hci_init_sysfs(hdev);
2952         discovery_init(hdev);
2953
2954         return hdev;
2955 }
2956 EXPORT_SYMBOL(hci_alloc_dev);
2957
2958 /* Free HCI device */
2959 void hci_free_dev(struct hci_dev *hdev)
2960 {
2961         /* will free via device release */
2962         put_device(&hdev->dev);
2963 }
2964 EXPORT_SYMBOL(hci_free_dev);
2965
2966 /* Register HCI device */
2967 int hci_register_dev(struct hci_dev *hdev)
2968 {
2969         int id, error;
2970
2971         if (!hdev->open || !hdev->close || !hdev->send)
2972                 return -EINVAL;
2973
2974         /* Do not allow HCI_AMP devices to register at index 0,
2975          * so the index can be used as the AMP controller ID.
2976          */
2977         switch (hdev->dev_type) {
2978         case HCI_BREDR:
2979                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2980                 break;
2981         case HCI_AMP:
2982                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2983                 break;
2984         default:
2985                 return -EINVAL;
2986         }
2987
2988         if (id < 0)
2989                 return id;
2990
2991         sprintf(hdev->name, "hci%d", id);
2992         hdev->id = id;
2993
2994         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2995
2996         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2997                                           WQ_MEM_RECLAIM, 1, hdev->name);
2998         if (!hdev->workqueue) {
2999                 error = -ENOMEM;
3000                 goto err;
3001         }
3002
3003         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3004                                               WQ_MEM_RECLAIM, 1, hdev->name);
3005         if (!hdev->req_workqueue) {
3006                 destroy_workqueue(hdev->workqueue);
3007                 error = -ENOMEM;
3008                 goto err;
3009         }
3010
3011         if (!IS_ERR_OR_NULL(bt_debugfs))
3012                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3013
3014         dev_set_name(&hdev->dev, "%s", hdev->name);
3015
3016         error = device_add(&hdev->dev);
3017         if (error < 0)
3018                 goto err_wqueue;
3019
3020         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3021                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3022                                     hdev);
3023         if (hdev->rfkill) {
3024                 if (rfkill_register(hdev->rfkill) < 0) {
3025                         rfkill_destroy(hdev->rfkill);
3026                         hdev->rfkill = NULL;
3027                 }
3028         }
3029
3030         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3031                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3032
3033         set_bit(HCI_SETUP, &hdev->dev_flags);
3034         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3035
3036         if (hdev->dev_type == HCI_BREDR) {
3037                 /* Assume BR/EDR support until proven otherwise (such as
3038                  * through reading supported features during init.
3039                  */
3040                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3041         }
3042
3043         write_lock(&hci_dev_list_lock);
3044         list_add(&hdev->list, &hci_dev_list);
3045         write_unlock(&hci_dev_list_lock);
3046
3047         /* Devices that are marked for raw-only usage are unconfigured
3048          * and should not be included in normal operation.
3049          */
3050         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3051                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3052
3053         hci_notify(hdev, HCI_DEV_REG);
3054         hci_dev_hold(hdev);
3055
3056         queue_work(hdev->req_workqueue, &hdev->power_on);
3057
3058         return id;
3059
3060 err_wqueue:
3061         destroy_workqueue(hdev->workqueue);
3062         destroy_workqueue(hdev->req_workqueue);
3063 err:
3064         ida_simple_remove(&hci_index_ida, hdev->id);
3065
3066         return error;
3067 }
3068 EXPORT_SYMBOL(hci_register_dev);
3069
3070 /* Unregister HCI device */
3071 void hci_unregister_dev(struct hci_dev *hdev)
3072 {
3073         int i, id;
3074
3075         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3076
3077         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3078
3079         id = hdev->id;
3080
3081         write_lock(&hci_dev_list_lock);
3082         list_del(&hdev->list);
3083         write_unlock(&hci_dev_list_lock);
3084
3085         hci_dev_do_close(hdev);
3086
3087         for (i = 0; i < NUM_REASSEMBLY; i++)
3088                 kfree_skb(hdev->reassembly[i]);
3089
3090         cancel_work_sync(&hdev->power_on);
3091
3092         if (!test_bit(HCI_INIT, &hdev->flags) &&
3093             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3094             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3095                 hci_dev_lock(hdev);
3096                 mgmt_index_removed(hdev);
3097                 hci_dev_unlock(hdev);
3098         }
3099
3100         /* mgmt_index_removed should take care of emptying the
3101          * pending list */
3102         BUG_ON(!list_empty(&hdev->mgmt_pending));
3103
3104         hci_notify(hdev, HCI_DEV_UNREG);
3105
3106         if (hdev->rfkill) {
3107                 rfkill_unregister(hdev->rfkill);
3108                 rfkill_destroy(hdev->rfkill);
3109         }
3110
3111         smp_unregister(hdev);
3112
3113         device_del(&hdev->dev);
3114
3115         debugfs_remove_recursive(hdev->debugfs);
3116
3117         destroy_workqueue(hdev->workqueue);
3118         destroy_workqueue(hdev->req_workqueue);
3119
3120         hci_dev_lock(hdev);
3121         hci_bdaddr_list_clear(&hdev->blacklist);
3122         hci_bdaddr_list_clear(&hdev->whitelist);
3123         hci_uuids_clear(hdev);
3124         hci_link_keys_clear(hdev);
3125         hci_smp_ltks_clear(hdev);
3126         hci_smp_irks_clear(hdev);
3127         hci_remote_oob_data_clear(hdev);
3128         hci_bdaddr_list_clear(&hdev->le_white_list);
3129         hci_conn_params_clear_all(hdev);
3130         hci_discovery_filter_clear(hdev);
3131         hci_dev_unlock(hdev);
3132
3133         hci_dev_put(hdev);
3134
3135         ida_simple_remove(&hci_index_ida, id);
3136 }
3137 EXPORT_SYMBOL(hci_unregister_dev);
3138
3139 /* Suspend HCI device */
3140 int hci_suspend_dev(struct hci_dev *hdev)
3141 {
3142         hci_notify(hdev, HCI_DEV_SUSPEND);
3143         return 0;
3144 }
3145 EXPORT_SYMBOL(hci_suspend_dev);
3146
3147 /* Resume HCI device */
3148 int hci_resume_dev(struct hci_dev *hdev)
3149 {
3150         hci_notify(hdev, HCI_DEV_RESUME);
3151         return 0;
3152 }
3153 EXPORT_SYMBOL(hci_resume_dev);
3154
3155 /* Reset HCI device */
3156 int hci_reset_dev(struct hci_dev *hdev)
3157 {
3158         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3159         struct sk_buff *skb;
3160
3161         skb = bt_skb_alloc(3, GFP_ATOMIC);
3162         if (!skb)
3163                 return -ENOMEM;
3164
3165         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3166         memcpy(skb_put(skb, 3), hw_err, 3);
3167
3168         /* Send Hardware Error to upper stack */
3169         return hci_recv_frame(hdev, skb);
3170 }
3171 EXPORT_SYMBOL(hci_reset_dev);
3172
3173 /* Receive frame from HCI drivers */
3174 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3175 {
3176         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3177                       && !test_bit(HCI_INIT, &hdev->flags))) {
3178                 kfree_skb(skb);
3179                 return -ENXIO;
3180         }
3181
3182         /* Incoming skb */
3183         bt_cb(skb)->incoming = 1;
3184
3185         /* Time stamp */
3186         __net_timestamp(skb);
3187
3188         skb_queue_tail(&hdev->rx_q, skb);
3189         queue_work(hdev->workqueue, &hdev->rx_work);
3190
3191         return 0;
3192 }
3193 EXPORT_SYMBOL(hci_recv_frame);
3194
3195 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3196                           int count, __u8 index)
3197 {
3198         int len = 0;
3199         int hlen = 0;
3200         int remain = count;
3201         struct sk_buff *skb;
3202         struct bt_skb_cb *scb;
3203
3204         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3205             index >= NUM_REASSEMBLY)
3206                 return -EILSEQ;
3207
3208         skb = hdev->reassembly[index];
3209
3210         if (!skb) {
3211                 switch (type) {
3212                 case HCI_ACLDATA_PKT:
3213                         len = HCI_MAX_FRAME_SIZE;
3214                         hlen = HCI_ACL_HDR_SIZE;
3215                         break;
3216                 case HCI_EVENT_PKT:
3217                         len = HCI_MAX_EVENT_SIZE;
3218                         hlen = HCI_EVENT_HDR_SIZE;
3219                         break;
3220                 case HCI_SCODATA_PKT:
3221                         len = HCI_MAX_SCO_SIZE;
3222                         hlen = HCI_SCO_HDR_SIZE;
3223                         break;
3224                 }
3225
3226                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3227                 if (!skb)
3228                         return -ENOMEM;
3229
3230                 scb = (void *) skb->cb;
3231                 scb->expect = hlen;
3232                 scb->pkt_type = type;
3233
3234                 hdev->reassembly[index] = skb;
3235         }
3236
3237         while (count) {
3238                 scb = (void *) skb->cb;
3239                 len = min_t(uint, scb->expect, count);
3240
3241                 memcpy(skb_put(skb, len), data, len);
3242
3243                 count -= len;
3244                 data += len;
3245                 scb->expect -= len;
3246                 remain = count;
3247
3248                 switch (type) {
3249                 case HCI_EVENT_PKT:
3250                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3251                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3252                                 scb->expect = h->plen;
3253
3254                                 if (skb_tailroom(skb) < scb->expect) {
3255                                         kfree_skb(skb);
3256                                         hdev->reassembly[index] = NULL;
3257                                         return -ENOMEM;
3258                                 }
3259                         }
3260                         break;
3261
3262                 case HCI_ACLDATA_PKT:
3263                         if (skb->len  == HCI_ACL_HDR_SIZE) {
3264                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3265                                 scb->expect = __le16_to_cpu(h->dlen);
3266
3267                                 if (skb_tailroom(skb) < scb->expect) {
3268                                         kfree_skb(skb);
3269                                         hdev->reassembly[index] = NULL;
3270                                         return -ENOMEM;
3271                                 }
3272                         }
3273                         break;
3274
3275                 case HCI_SCODATA_PKT:
3276                         if (skb->len == HCI_SCO_HDR_SIZE) {
3277                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3278                                 scb->expect = h->dlen;
3279
3280                                 if (skb_tailroom(skb) < scb->expect) {
3281                                         kfree_skb(skb);
3282                                         hdev->reassembly[index] = NULL;
3283                                         return -ENOMEM;
3284                                 }
3285                         }
3286                         break;
3287                 }
3288
3289                 if (scb->expect == 0) {
3290                         /* Complete frame */
3291
3292                         bt_cb(skb)->pkt_type = type;
3293                         hci_recv_frame(hdev, skb);
3294
3295                         hdev->reassembly[index] = NULL;
3296                         return remain;
3297                 }
3298         }
3299
3300         return remain;
3301 }
3302
3303 #define STREAM_REASSEMBLY 0
3304
3305 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3306 {
3307         int type;
3308         int rem = 0;
3309
3310         while (count) {
3311                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3312
3313                 if (!skb) {
3314                         struct { char type; } *pkt;
3315
3316                         /* Start of the frame */
3317                         pkt = data;
3318                         type = pkt->type;
3319
3320                         data++;
3321                         count--;
3322                 } else
3323                         type = bt_cb(skb)->pkt_type;
3324
3325                 rem = hci_reassembly(hdev, type, data, count,
3326                                      STREAM_REASSEMBLY);
3327                 if (rem < 0)
3328                         return rem;
3329
3330                 data += (count - rem);
3331                 count = rem;
3332         }
3333
3334         return rem;
3335 }
3336 EXPORT_SYMBOL(hci_recv_stream_fragment);
3337
3338 /* ---- Interface to upper protocols ---- */
3339
3340 int hci_register_cb(struct hci_cb *cb)
3341 {
3342         BT_DBG("%p name %s", cb, cb->name);
3343
3344         write_lock(&hci_cb_list_lock);
3345         list_add(&cb->list, &hci_cb_list);
3346         write_unlock(&hci_cb_list_lock);
3347
3348         return 0;
3349 }
3350 EXPORT_SYMBOL(hci_register_cb);
3351
3352 int hci_unregister_cb(struct hci_cb *cb)
3353 {
3354         BT_DBG("%p name %s", cb, cb->name);
3355
3356         write_lock(&hci_cb_list_lock);
3357         list_del(&cb->list);
3358         write_unlock(&hci_cb_list_lock);
3359
3360         return 0;
3361 }
3362 EXPORT_SYMBOL(hci_unregister_cb);
3363
3364 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3365 {
3366         int err;
3367
3368         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3369
3370         /* Time stamp */
3371         __net_timestamp(skb);
3372
3373         /* Send copy to monitor */
3374         hci_send_to_monitor(hdev, skb);
3375
3376         if (atomic_read(&hdev->promisc)) {
3377                 /* Send copy to the sockets */
3378                 hci_send_to_sock(hdev, skb);
3379         }
3380
3381         /* Get rid of skb owner, prior to sending to the driver. */
3382         skb_orphan(skb);
3383
3384         err = hdev->send(hdev, skb);
3385         if (err < 0) {
3386                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3387                 kfree_skb(skb);
3388         }
3389 }
3390
3391 bool hci_req_pending(struct hci_dev *hdev)
3392 {
3393         return (hdev->req_status == HCI_REQ_PEND);
3394 }
3395
3396 /* Send HCI command */
3397 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3398                  const void *param)
3399 {
3400         struct sk_buff *skb;
3401
3402         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3403
3404         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3405         if (!skb) {
3406                 BT_ERR("%s no memory for command", hdev->name);
3407                 return -ENOMEM;
3408         }
3409
3410         /* Stand-alone HCI commands must be flagged as
3411          * single-command requests.
3412          */
3413         bt_cb(skb)->req.start = true;
3414
3415         skb_queue_tail(&hdev->cmd_q, skb);
3416         queue_work(hdev->workqueue, &hdev->cmd_work);
3417
3418         return 0;
3419 }
3420
3421 /* Get data from the previously sent command */
3422 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3423 {
3424         struct hci_command_hdr *hdr;
3425
3426         if (!hdev->sent_cmd)
3427                 return NULL;
3428
3429         hdr = (void *) hdev->sent_cmd->data;
3430
3431         if (hdr->opcode != cpu_to_le16(opcode))
3432                 return NULL;
3433
3434         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3435
3436         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3437 }
3438
3439 /* Send ACL data */
3440 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3441 {
3442         struct hci_acl_hdr *hdr;
3443         int len = skb->len;
3444
3445         skb_push(skb, HCI_ACL_HDR_SIZE);
3446         skb_reset_transport_header(skb);
3447         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3448         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3449         hdr->dlen   = cpu_to_le16(len);
3450 }
3451
3452 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3453                           struct sk_buff *skb, __u16 flags)
3454 {
3455         struct hci_conn *conn = chan->conn;
3456         struct hci_dev *hdev = conn->hdev;
3457         struct sk_buff *list;
3458
3459         skb->len = skb_headlen(skb);
3460         skb->data_len = 0;
3461
3462         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3463
3464         switch (hdev->dev_type) {
3465         case HCI_BREDR:
3466                 hci_add_acl_hdr(skb, conn->handle, flags);
3467                 break;
3468         case HCI_AMP:
3469                 hci_add_acl_hdr(skb, chan->handle, flags);
3470                 break;
3471         default:
3472                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3473                 return;
3474         }
3475
3476         list = skb_shinfo(skb)->frag_list;
3477         if (!list) {
3478                 /* Non fragmented */
3479                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3480
3481                 skb_queue_tail(queue, skb);
3482         } else {
3483                 /* Fragmented */
3484                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3485
3486                 skb_shinfo(skb)->frag_list = NULL;
3487
3488                 /* Queue all fragments atomically. We need to use spin_lock_bh
3489                  * here because of 6LoWPAN links, as there this function is
3490                  * called from softirq and using normal spin lock could cause
3491                  * deadlocks.
3492                  */
3493                 spin_lock_bh(&queue->lock);
3494
3495                 __skb_queue_tail(queue, skb);
3496
3497                 flags &= ~ACL_START;
3498                 flags |= ACL_CONT;
3499                 do {
3500                         skb = list; list = list->next;
3501
3502                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3503                         hci_add_acl_hdr(skb, conn->handle, flags);
3504
3505                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3506
3507                         __skb_queue_tail(queue, skb);
3508                 } while (list);
3509
3510                 spin_unlock_bh(&queue->lock);
3511         }
3512 }
3513
3514 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3515 {
3516         struct hci_dev *hdev = chan->conn->hdev;
3517
3518         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3519
3520         hci_queue_acl(chan, &chan->data_q, skb, flags);
3521
3522         queue_work(hdev->workqueue, &hdev->tx_work);
3523 }
3524
3525 /* Send SCO data */
3526 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3527 {
3528         struct hci_dev *hdev = conn->hdev;
3529         struct hci_sco_hdr hdr;
3530
3531         BT_DBG("%s len %d", hdev->name, skb->len);
3532
3533         hdr.handle = cpu_to_le16(conn->handle);
3534         hdr.dlen   = skb->len;
3535
3536         skb_push(skb, HCI_SCO_HDR_SIZE);
3537         skb_reset_transport_header(skb);
3538         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3539
3540         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3541
3542         skb_queue_tail(&conn->data_q, skb);
3543         queue_work(hdev->workqueue, &hdev->tx_work);
3544 }
3545
3546 /* ---- HCI TX task (outgoing data) ---- */
3547
3548 /* HCI Connection scheduler */
3549 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3550                                      int *quote)
3551 {
3552         struct hci_conn_hash *h = &hdev->conn_hash;
3553         struct hci_conn *conn = NULL, *c;
3554         unsigned int num = 0, min = ~0;
3555
3556         /* We don't have to lock device here. Connections are always
3557          * added and removed with TX task disabled. */
3558
3559         rcu_read_lock();
3560
3561         list_for_each_entry_rcu(c, &h->list, list) {
3562                 if (c->type != type || skb_queue_empty(&c->data_q))
3563                         continue;
3564
3565                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3566                         continue;
3567
3568                 num++;
3569
3570                 if (c->sent < min) {
3571                         min  = c->sent;
3572                         conn = c;
3573                 }
3574
3575                 if (hci_conn_num(hdev, type) == num)
3576                         break;
3577         }
3578
3579         rcu_read_unlock();
3580
3581         if (conn) {
3582                 int cnt, q;
3583
3584                 switch (conn->type) {
3585                 case ACL_LINK:
3586                         cnt = hdev->acl_cnt;
3587                         break;
3588                 case SCO_LINK:
3589                 case ESCO_LINK:
3590                         cnt = hdev->sco_cnt;
3591                         break;
3592                 case LE_LINK:
3593                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3594                         break;
3595                 default:
3596                         cnt = 0;
3597                         BT_ERR("Unknown link type");
3598                 }
3599
3600                 q = cnt / num;
3601                 *quote = q ? q : 1;
3602         } else
3603                 *quote = 0;
3604
3605         BT_DBG("conn %p quote %d", conn, *quote);
3606         return conn;
3607 }
3608
3609 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3610 {
3611         struct hci_conn_hash *h = &hdev->conn_hash;
3612         struct hci_conn *c;
3613
3614         BT_ERR("%s link tx timeout", hdev->name);
3615
3616         rcu_read_lock();
3617
3618         /* Kill stalled connections */
3619         list_for_each_entry_rcu(c, &h->list, list) {
3620                 if (c->type == type && c->sent) {
3621                         BT_ERR("%s killing stalled connection %pMR",
3622                                hdev->name, &c->dst);
3623                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3624                 }
3625         }
3626
3627         rcu_read_unlock();
3628 }
3629
3630 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3631                                       int *quote)
3632 {
3633         struct hci_conn_hash *h = &hdev->conn_hash;
3634         struct hci_chan *chan = NULL;
3635         unsigned int num = 0, min = ~0, cur_prio = 0;
3636         struct hci_conn *conn;
3637         int cnt, q, conn_num = 0;
3638
3639         BT_DBG("%s", hdev->name);
3640
3641         rcu_read_lock();
3642
3643         list_for_each_entry_rcu(conn, &h->list, list) {
3644                 struct hci_chan *tmp;
3645
3646                 if (conn->type != type)
3647                         continue;
3648
3649                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3650                         continue;
3651
3652                 conn_num++;
3653
3654                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3655                         struct sk_buff *skb;
3656
3657                         if (skb_queue_empty(&tmp->data_q))
3658                                 continue;
3659
3660                         skb = skb_peek(&tmp->data_q);
3661                         if (skb->priority < cur_prio)
3662                                 continue;
3663
3664                         if (skb->priority > cur_prio) {
3665                                 num = 0;
3666                                 min = ~0;
3667                                 cur_prio = skb->priority;
3668                         }
3669
3670                         num++;
3671
3672                         if (conn->sent < min) {
3673                                 min  = conn->sent;
3674                                 chan = tmp;
3675                         }
3676                 }
3677
3678                 if (hci_conn_num(hdev, type) == conn_num)
3679                         break;
3680         }
3681
3682         rcu_read_unlock();
3683
3684         if (!chan)
3685                 return NULL;
3686
3687         switch (chan->conn->type) {
3688         case ACL_LINK:
3689                 cnt = hdev->acl_cnt;
3690                 break;
3691         case AMP_LINK:
3692                 cnt = hdev->block_cnt;
3693                 break;
3694         case SCO_LINK:
3695         case ESCO_LINK:
3696                 cnt = hdev->sco_cnt;
3697                 break;
3698         case LE_LINK:
3699                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3700                 break;
3701         default:
3702                 cnt = 0;
3703                 BT_ERR("Unknown link type");
3704         }
3705
3706         q = cnt / num;
3707         *quote = q ? q : 1;
3708         BT_DBG("chan %p quote %d", chan, *quote);
3709         return chan;
3710 }
3711
3712 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3713 {
3714         struct hci_conn_hash *h = &hdev->conn_hash;
3715         struct hci_conn *conn;
3716         int num = 0;
3717
3718         BT_DBG("%s", hdev->name);
3719
3720         rcu_read_lock();
3721
3722         list_for_each_entry_rcu(conn, &h->list, list) {
3723                 struct hci_chan *chan;
3724
3725                 if (conn->type != type)
3726                         continue;
3727
3728                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3729                         continue;
3730
3731                 num++;
3732
3733                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3734                         struct sk_buff *skb;
3735
3736                         if (chan->sent) {
3737                                 chan->sent = 0;
3738                                 continue;
3739                         }
3740
3741                         if (skb_queue_empty(&chan->data_q))
3742                                 continue;
3743
3744                         skb = skb_peek(&chan->data_q);
3745                         if (skb->priority >= HCI_PRIO_MAX - 1)
3746                                 continue;
3747
3748                         skb->priority = HCI_PRIO_MAX - 1;
3749
3750                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3751                                skb->priority);
3752                 }
3753
3754                 if (hci_conn_num(hdev, type) == num)
3755                         break;
3756         }
3757
3758         rcu_read_unlock();
3759
3760 }
3761
3762 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3763 {
3764         /* Calculate count of blocks used by this packet */
3765         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3766 }
3767
3768 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3769 {
3770         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3771                 /* ACL tx timeout must be longer than maximum
3772                  * link supervision timeout (40.9 seconds) */
3773                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3774                                        HCI_ACL_TX_TIMEOUT))
3775                         hci_link_tx_to(hdev, ACL_LINK);
3776         }
3777 }
3778
3779 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3780 {
3781         unsigned int cnt = hdev->acl_cnt;
3782         struct hci_chan *chan;
3783         struct sk_buff *skb;
3784         int quote;
3785
3786         __check_timeout(hdev, cnt);
3787
3788         while (hdev->acl_cnt &&
3789                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3790                 u32 priority = (skb_peek(&chan->data_q))->priority;
3791                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3792                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3793                                skb->len, skb->priority);
3794
3795                         /* Stop if priority has changed */
3796                         if (skb->priority < priority)
3797                                 break;
3798
3799                         skb = skb_dequeue(&chan->data_q);
3800
3801                         hci_conn_enter_active_mode(chan->conn,
3802                                                    bt_cb(skb)->force_active);
3803
3804                         hci_send_frame(hdev, skb);
3805                         hdev->acl_last_tx = jiffies;
3806
3807                         hdev->acl_cnt--;
3808                         chan->sent++;
3809                         chan->conn->sent++;
3810                 }
3811         }
3812
3813         if (cnt != hdev->acl_cnt)
3814                 hci_prio_recalculate(hdev, ACL_LINK);
3815 }
3816
3817 static void hci_sched_acl_blk(struct hci_dev *hdev)
3818 {
3819         unsigned int cnt = hdev->block_cnt;
3820         struct hci_chan *chan;
3821         struct sk_buff *skb;
3822         int quote;
3823         u8 type;
3824
3825         __check_timeout(hdev, cnt);
3826
3827         BT_DBG("%s", hdev->name);
3828
3829         if (hdev->dev_type == HCI_AMP)
3830                 type = AMP_LINK;
3831         else
3832                 type = ACL_LINK;
3833
3834         while (hdev->block_cnt > 0 &&
3835                (chan = hci_chan_sent(hdev, type, &quote))) {
3836                 u32 priority = (skb_peek(&chan->data_q))->priority;
3837                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3838                         int blocks;
3839
3840                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3841                                skb->len, skb->priority);
3842
3843                         /* Stop if priority has changed */
3844                         if (skb->priority < priority)
3845                                 break;
3846
3847                         skb = skb_dequeue(&chan->data_q);
3848
3849                         blocks = __get_blocks(hdev, skb);
3850                         if (blocks > hdev->block_cnt)
3851                                 return;
3852
3853                         hci_conn_enter_active_mode(chan->conn,
3854                                                    bt_cb(skb)->force_active);
3855
3856                         hci_send_frame(hdev, skb);
3857                         hdev->acl_last_tx = jiffies;
3858
3859                         hdev->block_cnt -= blocks;
3860                         quote -= blocks;
3861
3862                         chan->sent += blocks;
3863                         chan->conn->sent += blocks;
3864                 }
3865         }
3866
3867         if (cnt != hdev->block_cnt)
3868                 hci_prio_recalculate(hdev, type);
3869 }
3870
3871 static void hci_sched_acl(struct hci_dev *hdev)
3872 {
3873         BT_DBG("%s", hdev->name);
3874
3875         /* No ACL link over BR/EDR controller */
3876         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3877                 return;
3878
3879         /* No AMP link over AMP controller */
3880         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3881                 return;
3882
3883         switch (hdev->flow_ctl_mode) {
3884         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3885                 hci_sched_acl_pkt(hdev);
3886                 break;
3887
3888         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3889                 hci_sched_acl_blk(hdev);
3890                 break;
3891         }
3892 }
3893
3894 /* Schedule SCO */
3895 static void hci_sched_sco(struct hci_dev *hdev)
3896 {
3897         struct hci_conn *conn;
3898         struct sk_buff *skb;
3899         int quote;
3900
3901         BT_DBG("%s", hdev->name);
3902
3903         if (!hci_conn_num(hdev, SCO_LINK))
3904                 return;
3905
3906         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3907                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3908                         BT_DBG("skb %p len %d", skb, skb->len);
3909                         hci_send_frame(hdev, skb);
3910
3911                         conn->sent++;
3912                         if (conn->sent == ~0)
3913                                 conn->sent = 0;
3914                 }
3915         }
3916 }
3917
3918 static void hci_sched_esco(struct hci_dev *hdev)
3919 {
3920         struct hci_conn *conn;
3921         struct sk_buff *skb;
3922         int quote;
3923
3924         BT_DBG("%s", hdev->name);
3925
3926         if (!hci_conn_num(hdev, ESCO_LINK))
3927                 return;
3928
3929         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3930                                                      &quote))) {
3931                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3932                         BT_DBG("skb %p len %d", skb, skb->len);
3933                         hci_send_frame(hdev, skb);
3934
3935                         conn->sent++;
3936                         if (conn->sent == ~0)
3937                                 conn->sent = 0;
3938                 }
3939         }
3940 }
3941
3942 static void hci_sched_le(struct hci_dev *hdev)
3943 {
3944         struct hci_chan *chan;
3945         struct sk_buff *skb;
3946         int quote, cnt, tmp;
3947
3948         BT_DBG("%s", hdev->name);
3949
3950         if (!hci_conn_num(hdev, LE_LINK))
3951                 return;
3952
3953         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3954                 /* LE tx timeout must be longer than maximum
3955                  * link supervision timeout (40.9 seconds) */
3956                 if (!hdev->le_cnt && hdev->le_pkts &&
3957                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3958                         hci_link_tx_to(hdev, LE_LINK);
3959         }
3960
3961         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3962         tmp = cnt;
3963         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3964                 u32 priority = (skb_peek(&chan->data_q))->priority;
3965                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3966                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3967                                skb->len, skb->priority);
3968
3969                         /* Stop if priority has changed */
3970                         if (skb->priority < priority)
3971                                 break;
3972
3973                         skb = skb_dequeue(&chan->data_q);
3974
3975                         hci_send_frame(hdev, skb);
3976                         hdev->le_last_tx = jiffies;
3977
3978                         cnt--;
3979                         chan->sent++;
3980                         chan->conn->sent++;
3981                 }
3982         }
3983
3984         if (hdev->le_pkts)
3985                 hdev->le_cnt = cnt;
3986         else
3987                 hdev->acl_cnt = cnt;
3988
3989         if (cnt != tmp)
3990                 hci_prio_recalculate(hdev, LE_LINK);
3991 }
3992
3993 static void hci_tx_work(struct work_struct *work)
3994 {
3995         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3996         struct sk_buff *skb;
3997
3998         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3999                hdev->sco_cnt, hdev->le_cnt);
4000
4001         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4002                 /* Schedule queues and send stuff to HCI driver */
4003                 hci_sched_acl(hdev);
4004                 hci_sched_sco(hdev);
4005                 hci_sched_esco(hdev);
4006                 hci_sched_le(hdev);
4007         }
4008
4009         /* Send next queued raw (unknown type) packet */
4010         while ((skb = skb_dequeue(&hdev->raw_q)))
4011                 hci_send_frame(hdev, skb);
4012 }
4013
4014 /* ----- HCI RX task (incoming data processing) ----- */
4015
4016 /* ACL data packet */
4017 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4018 {
4019         struct hci_acl_hdr *hdr = (void *) skb->data;
4020         struct hci_conn *conn;
4021         __u16 handle, flags;
4022
4023         skb_pull(skb, HCI_ACL_HDR_SIZE);
4024
4025         handle = __le16_to_cpu(hdr->handle);
4026         flags  = hci_flags(handle);
4027         handle = hci_handle(handle);
4028
4029         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4030                handle, flags);
4031
4032         hdev->stat.acl_rx++;
4033
4034         hci_dev_lock(hdev);
4035         conn = hci_conn_hash_lookup_handle(hdev, handle);
4036         hci_dev_unlock(hdev);
4037
4038         if (conn) {
4039                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4040
4041                 /* Send to upper protocol */
4042                 l2cap_recv_acldata(conn, skb, flags);
4043                 return;
4044         } else {
4045                 BT_ERR("%s ACL packet for unknown connection handle %d",
4046                        hdev->name, handle);
4047         }
4048
4049         kfree_skb(skb);
4050 }
4051
4052 /* SCO data packet */
4053 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4054 {
4055         struct hci_sco_hdr *hdr = (void *) skb->data;
4056         struct hci_conn *conn;
4057         __u16 handle;
4058
4059         skb_pull(skb, HCI_SCO_HDR_SIZE);
4060
4061         handle = __le16_to_cpu(hdr->handle);
4062
4063         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4064
4065         hdev->stat.sco_rx++;
4066
4067         hci_dev_lock(hdev);
4068         conn = hci_conn_hash_lookup_handle(hdev, handle);
4069         hci_dev_unlock(hdev);
4070
4071         if (conn) {
4072                 /* Send to upper protocol */
4073                 sco_recv_scodata(conn, skb);
4074                 return;
4075         } else {
4076                 BT_ERR("%s SCO packet for unknown connection handle %d",
4077                        hdev->name, handle);
4078         }
4079
4080         kfree_skb(skb);
4081 }
4082
4083 static bool hci_req_is_complete(struct hci_dev *hdev)
4084 {
4085         struct sk_buff *skb;
4086
4087         skb = skb_peek(&hdev->cmd_q);
4088         if (!skb)
4089                 return true;
4090
4091         return bt_cb(skb)->req.start;
4092 }
4093
4094 static void hci_resend_last(struct hci_dev *hdev)
4095 {
4096         struct hci_command_hdr *sent;
4097         struct sk_buff *skb;
4098         u16 opcode;
4099
4100         if (!hdev->sent_cmd)
4101                 return;
4102
4103         sent = (void *) hdev->sent_cmd->data;
4104         opcode = __le16_to_cpu(sent->opcode);
4105         if (opcode == HCI_OP_RESET)
4106                 return;
4107
4108         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4109         if (!skb)
4110                 return;
4111
4112         skb_queue_head(&hdev->cmd_q, skb);
4113         queue_work(hdev->workqueue, &hdev->cmd_work);
4114 }
4115
4116 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4117 {
4118         hci_req_complete_t req_complete = NULL;
4119         struct sk_buff *skb;
4120         unsigned long flags;
4121
4122         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4123
4124         /* If the completed command doesn't match the last one that was
4125          * sent we need to do special handling of it.
4126          */
4127         if (!hci_sent_cmd_data(hdev, opcode)) {
4128                 /* Some CSR based controllers generate a spontaneous
4129                  * reset complete event during init and any pending
4130                  * command will never be completed. In such a case we
4131                  * need to resend whatever was the last sent
4132                  * command.
4133                  */
4134                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4135                         hci_resend_last(hdev);
4136
4137                 return;
4138         }
4139
4140         /* If the command succeeded and there's still more commands in
4141          * this request the request is not yet complete.
4142          */
4143         if (!status && !hci_req_is_complete(hdev))
4144                 return;
4145
4146         /* If this was the last command in a request the complete
4147          * callback would be found in hdev->sent_cmd instead of the
4148          * command queue (hdev->cmd_q).
4149          */
4150         if (hdev->sent_cmd) {
4151                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4152
4153                 if (req_complete) {
4154                         /* We must set the complete callback to NULL to
4155                          * avoid calling the callback more than once if
4156                          * this function gets called again.
4157                          */
4158                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4159
4160                         goto call_complete;
4161                 }
4162         }
4163
4164         /* Remove all pending commands belonging to this request */
4165         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4166         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4167                 if (bt_cb(skb)->req.start) {
4168                         __skb_queue_head(&hdev->cmd_q, skb);
4169                         break;
4170                 }
4171
4172                 req_complete = bt_cb(skb)->req.complete;
4173                 kfree_skb(skb);
4174         }
4175         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4176
4177 call_complete:
4178         if (req_complete)
4179                 req_complete(hdev, status);
4180 }
4181
4182 static void hci_rx_work(struct work_struct *work)
4183 {
4184         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4185         struct sk_buff *skb;
4186
4187         BT_DBG("%s", hdev->name);
4188
4189         while ((skb = skb_dequeue(&hdev->rx_q))) {
4190                 /* Send copy to monitor */
4191                 hci_send_to_monitor(hdev, skb);
4192
4193                 if (atomic_read(&hdev->promisc)) {
4194                         /* Send copy to the sockets */
4195                         hci_send_to_sock(hdev, skb);
4196                 }
4197
4198                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4199                         kfree_skb(skb);
4200                         continue;
4201                 }
4202
4203                 if (test_bit(HCI_INIT, &hdev->flags)) {
4204                         /* Don't process data packets in this states. */
4205                         switch (bt_cb(skb)->pkt_type) {
4206                         case HCI_ACLDATA_PKT:
4207                         case HCI_SCODATA_PKT:
4208                                 kfree_skb(skb);
4209                                 continue;
4210                         }
4211                 }
4212
4213                 /* Process frame */
4214                 switch (bt_cb(skb)->pkt_type) {
4215                 case HCI_EVENT_PKT:
4216                         BT_DBG("%s Event packet", hdev->name);
4217                         hci_event_packet(hdev, skb);
4218                         break;
4219
4220                 case HCI_ACLDATA_PKT:
4221                         BT_DBG("%s ACL data packet", hdev->name);
4222                         hci_acldata_packet(hdev, skb);
4223                         break;
4224
4225                 case HCI_SCODATA_PKT:
4226                         BT_DBG("%s SCO data packet", hdev->name);
4227                         hci_scodata_packet(hdev, skb);
4228                         break;
4229
4230                 default:
4231                         kfree_skb(skb);
4232                         break;
4233                 }
4234         }
4235 }
4236
4237 static void hci_cmd_work(struct work_struct *work)
4238 {
4239         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4240         struct sk_buff *skb;
4241
4242         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4243                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4244
4245         /* Send queued commands */
4246         if (atomic_read(&hdev->cmd_cnt)) {
4247                 skb = skb_dequeue(&hdev->cmd_q);
4248                 if (!skb)
4249                         return;
4250
4251                 kfree_skb(hdev->sent_cmd);
4252
4253                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4254                 if (hdev->sent_cmd) {
4255                         atomic_dec(&hdev->cmd_cnt);
4256                         hci_send_frame(hdev, skb);
4257                         if (test_bit(HCI_RESET, &hdev->flags))
4258                                 cancel_delayed_work(&hdev->cmd_timer);
4259                         else
4260                                 schedule_delayed_work(&hdev->cmd_timer,
4261                                                       HCI_CMD_TIMEOUT);
4262                 } else {
4263                         skb_queue_head(&hdev->cmd_q, skb);
4264                         queue_work(hdev->workqueue, &hdev->cmd_work);
4265                 }
4266         }
4267 }