/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2
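
/* Lifecycle of a synchronous request: req_status moves from
 * HCI_REQ_PEND to HCI_REQ_DONE once the completion callback runs, or
 * to HCI_REQ_CANCELED on abort. Waiters sleep on hdev->req_wait_q and
 * re-check req_status after wake-up or timeout (see __hci_req_sync()
 * below).
 */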

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
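
/* The "dut_mode" entry toggles Device Under Test mode from user space,
 * e.g. (assuming debugfs is mounted in the usual place):
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE; writing "N" sends
 * HCI_OP_RESET to take the controller out of DUT mode again.
 */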

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

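/* Retrieve the last received event (hdev->recv_evt) and verify that it
 * matches the request: either the specific event that was asked for, or
 * a Command Complete carrying the expected opcode. The skb is consumed
 * in all cases; on a mismatch it is freed and -ENODATA returned.
 */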
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
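
/* Typical usage of the synchronous command helpers (a sketch mirroring
 * the dut_mode_write() pattern above; error handling elided):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      status = skb->data[0];
 *      kfree_skb(skb);
 */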

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20000 ms) */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
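        /* Each events[i] byte covers bits i*8 .. i*8+7 of the 64-bit
         * event mask, so e.g. events[4] |= 0x01 below sets bit 32
         * (Flow Specification Complete).
         */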

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

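        /* hdev->commands[] mirrors the Read Local Supported Commands
         * response, so commands[n] & bit tests a single command bit in
         * octet n; e.g. octet 6 bit 5 below is Read Stored Link Key.
         */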
        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

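/* Controller bring-up runs up to four synchronous request stages:
 * init1 (reset plus basic device information), init2 (BR/EDR and LE
 * base setup), init3 (event masks and feature dependent reads) and
 * init4 (optional commands such as stored link key cleanup and Secure
 * Connections). AMP controllers stop after the first stage.
 */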
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
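
/* Every successful hci_dev_get() must be balanced with hci_dev_put()
 * once the caller is done with the device (see for example the done:
 * label in hci_inquiry() below).
 */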

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

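/* Re-insert @ie so that the resolve list stays sorted by ascending
 * abs(rssi), i.e. strongest signal first, while skipping over entries
 * whose name resolution is already in flight (NAME_PENDING).
 */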
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

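        /* ir.length counts inquiry intervals of 1.28 s each; budgeting
         * 2000 ms of wait time per interval (rather than a tight bound)
         * presumably leaves the controller headroom to finish before
         * the request times out.
         */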
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
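
/* hci_inquiry() is reached through the HCIINQUIRY ioctl on a raw HCI
 * socket. A minimal user-space sketch (assuming the usual libbluetooth
 * definitions; error handling elided):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } buf = { 0 };
 *      int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      buf.ir.dev_id  = 0;                        // hci0
 *      buf.ir.length  = 8;                        // 8 * 1.28 s inquiry
 *      buf.ir.num_rsp = 8;
 *      memcpy(buf.ir.lap, "\x33\x8b\x9e", 3);     // GIAC
 *      ioctl(dd, HCIINQUIRY, &buf);
 */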

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                        ret = __hci_unconf_init(hdev);
        }

        if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
                    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
1527          * possible.
1528          *
1529          * When this function is called for a user channel, the flag
1530          * HCI_USER_CHANNEL will be set first before attempting to
1531          * open the device.
1532          */
1533         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1534             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1535                 err = -EOPNOTSUPP;
1536                 goto done;
1537         }
1538
1539         /* We need to ensure that no other power on/off work is pending
1540          * before proceeding to call hci_dev_do_open. This is
1541          * particularly important if the setup procedure has not yet
1542          * completed.
1543          */
1544         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1545                 cancel_delayed_work(&hdev->power_off);
1546
1547         /* After this call it is guaranteed that the setup procedure
1548          * has finished. This means that error conditions like RFKILL
1549          * or no valid public or static random address apply.
1550          */
1551         flush_workqueue(hdev->req_workqueue);
1552
1553         /* For controllers not using the management interface and that
1554          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1555          * so that pairing works for them. Once the management interface
1556          * is in use this bit will be cleared again and userspace has
1557          * to explicitly enable it.
1558          */
1559         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1560             !test_bit(HCI_MGMT, &hdev->dev_flags))
1561                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1562
1563         err = hci_dev_do_open(hdev);
1564
1565 done:
1566         hci_dev_put(hdev);
1567         return err;
1568 }
1569
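/* Usage sketch (illustrative only, not part of this file): from
 * userspace this entry point is normally reached through the
 * HCIDEVUP ioctl on a raw HCI control socket, with the device
 * index as argument:
 *
 *     int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *     if (ioctl(ctl, HCIDEVUP, 0) < 0)        // power on hci0
 *             perror("HCIDEVUP");
 *
 * The mgmt interface does not use this path; it calls
 * hci_dev_do_open() from the power_on work instead.
 */
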
1570 /* This function requires the caller holds hdev->lock */
1571 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1572 {
1573         struct hci_conn_params *p;
1574
1575         list_for_each_entry(p, &hdev->le_conn_params, list) {
1576                 if (p->conn) {
1577                         hci_conn_drop(p->conn);
1578                         hci_conn_put(p->conn);
1579                         p->conn = NULL;
1580                 }
1581                 list_del_init(&p->action);
1582         }
1583
1584         BT_DBG("All LE pending actions cleared");
1585 }
1586
1587 static int hci_dev_do_close(struct hci_dev *hdev)
1588 {
1589         BT_DBG("%s %p", hdev->name, hdev);
1590
1591         cancel_delayed_work(&hdev->power_off);
1592
1593         hci_req_cancel(hdev, ENODEV);
1594         hci_req_lock(hdev);
1595
1596         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1597                 cancel_delayed_work_sync(&hdev->cmd_timer);
1598                 hci_req_unlock(hdev);
1599                 return 0;
1600         }
1601
1602         /* Flush RX and TX works */
1603         flush_work(&hdev->tx_work);
1604         flush_work(&hdev->rx_work);
1605
1606         if (hdev->discov_timeout > 0) {
1607                 cancel_delayed_work(&hdev->discov_off);
1608                 hdev->discov_timeout = 0;
1609                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1610                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1611         }
1612
1613         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1614                 cancel_delayed_work(&hdev->service_cache);
1615
1616         cancel_delayed_work_sync(&hdev->le_scan_disable);
1617
1618         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1619                 cancel_delayed_work_sync(&hdev->rpa_expired);
1620
1621         /* Avoid potential lockdep warnings from the *_flush() calls by
1622          * ensuring the workqueue is empty up front.
1623          */
1624         drain_workqueue(hdev->workqueue);
1625
1626         hci_dev_lock(hdev);
1627
1628         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1629                 if (hdev->dev_type == HCI_BREDR)
1630                         mgmt_powered(hdev, 0);
1631         }
1632
1633         hci_inquiry_cache_flush(hdev);
1634         hci_pend_le_actions_clear(hdev);
1635         hci_conn_hash_flush(hdev);
1636         hci_dev_unlock(hdev);
1637
1638         hci_notify(hdev, HCI_DEV_DOWN);
1639
1640         if (hdev->flush)
1641                 hdev->flush(hdev);
1642
1643         /* Reset device */
1644         skb_queue_purge(&hdev->cmd_q);
1645         atomic_set(&hdev->cmd_cnt, 1);
1646         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1647             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1648             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1649                 set_bit(HCI_INIT, &hdev->flags);
1650                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1651                 clear_bit(HCI_INIT, &hdev->flags);
1652         }
1653
1654         /* Flush cmd work */
1655         flush_work(&hdev->cmd_work);
1656
1657         /* Drop queues */
1658         skb_queue_purge(&hdev->rx_q);
1659         skb_queue_purge(&hdev->cmd_q);
1660         skb_queue_purge(&hdev->raw_q);
1661
1662         /* Drop last sent command */
1663         if (hdev->sent_cmd) {
1664                 cancel_delayed_work_sync(&hdev->cmd_timer);
1665                 kfree_skb(hdev->sent_cmd);
1666                 hdev->sent_cmd = NULL;
1667         }
1668
1669         kfree_skb(hdev->recv_evt);
1670         hdev->recv_evt = NULL;
1671
1672         /* After this point our queues are empty
1673          * and no tasks are scheduled. */
1674         hdev->close(hdev);
1675
1676         /* Clear flags, keeping only the HCI_RAW bit */
1677         hdev->flags &= BIT(HCI_RAW);
1678         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1679
1680         /* Controller radio is available but is currently powered down */
1681         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1682
1683         memset(hdev->eir, 0, sizeof(hdev->eir));
1684         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1685         bacpy(&hdev->random_addr, BDADDR_ANY);
1686
1687         hci_req_unlock(hdev);
1688
1689         hci_dev_put(hdev);
1690         return 0;
1691 }
1692
1693 int hci_dev_close(__u16 dev)
1694 {
1695         struct hci_dev *hdev;
1696         int err;
1697
1698         hdev = hci_dev_get(dev);
1699         if (!hdev)
1700                 return -ENODEV;
1701
1702         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1703                 err = -EBUSY;
1704                 goto done;
1705         }
1706
1707         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1708                 cancel_delayed_work(&hdev->power_off);
1709
1710         err = hci_dev_do_close(hdev);
1711
1712 done:
1713         hci_dev_put(hdev);
1714         return err;
1715 }
1716
1717 int hci_dev_reset(__u16 dev)
1718 {
1719         struct hci_dev *hdev;
1720         int ret = 0;
1721
1722         hdev = hci_dev_get(dev);
1723         if (!hdev)
1724                 return -ENODEV;
1725
1726         hci_req_lock(hdev);
1727
1728         if (!test_bit(HCI_UP, &hdev->flags)) {
1729                 ret = -ENETDOWN;
1730                 goto done;
1731         }
1732
1733         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1734                 ret = -EBUSY;
1735                 goto done;
1736         }
1737
1738         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1739                 ret = -EOPNOTSUPP;
1740                 goto done;
1741         }
1742
1743         /* Drop queues */
1744         skb_queue_purge(&hdev->rx_q);
1745         skb_queue_purge(&hdev->cmd_q);
1746
1747         /* Avoid potential lockdep warnings from the *_flush() calls by
1748          * ensuring the workqueue is empty up front.
1749          */
1750         drain_workqueue(hdev->workqueue);
1751
1752         hci_dev_lock(hdev);
1753         hci_inquiry_cache_flush(hdev);
1754         hci_conn_hash_flush(hdev);
1755         hci_dev_unlock(hdev);
1756
1757         if (hdev->flush)
1758                 hdev->flush(hdev);
1759
1760         atomic_set(&hdev->cmd_cnt, 1);
1761         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1762
1763         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1764
1765 done:
1766         hci_req_unlock(hdev);
1767         hci_dev_put(hdev);
1768         return ret;
1769 }
1770
1771 int hci_dev_reset_stat(__u16 dev)
1772 {
1773         struct hci_dev *hdev;
1774         int ret = 0;
1775
1776         hdev = hci_dev_get(dev);
1777         if (!hdev)
1778                 return -ENODEV;
1779
1780         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1781                 ret = -EBUSY;
1782                 goto done;
1783         }
1784
1785         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1786                 ret = -EOPNOTSUPP;
1787                 goto done;
1788         }
1789
1790         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1791
1792 done:
1793         hci_dev_put(hdev);
1794         return ret;
1795 }
1796
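/* Keep the mgmt connectable/discoverable settings in sync after a
 * legacy HCISETSCAN ioctl. The scan value follows the HCI
 * Write Scan Enable coding: 0x00 scans disabled, 0x01 inquiry scan
 * only, 0x02 page scan only, 0x03 both. Page scan maps to the
 * connectable setting and inquiry scan to the discoverable one.
 */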
1797 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1798 {
1799         bool conn_changed, discov_changed;
1800
1801         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1802
1803         if ((scan & SCAN_PAGE))
1804                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1805                                                  &hdev->dev_flags);
1806         else
1807                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1808                                                   &hdev->dev_flags);
1809
1810         if ((scan & SCAN_INQUIRY)) {
1811                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1812                                                    &hdev->dev_flags);
1813         } else {
1814                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1815                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1816                                                     &hdev->dev_flags);
1817         }
1818
1819         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1820                 return;
1821
1822         if (conn_changed || discov_changed) {
1823                 /* In case this was disabled through mgmt */
1824                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1825
1826                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1827                         mgmt_update_adv_data(hdev);
1828
1829                 mgmt_new_settings(hdev);
1830         }
1831 }
1832
1833 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1834 {
1835         struct hci_dev *hdev;
1836         struct hci_dev_req dr;
1837         int err = 0;
1838
1839         if (copy_from_user(&dr, arg, sizeof(dr)))
1840                 return -EFAULT;
1841
1842         hdev = hci_dev_get(dr.dev_id);
1843         if (!hdev)
1844                 return -ENODEV;
1845
1846         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1847                 err = -EBUSY;
1848                 goto done;
1849         }
1850
1851         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1852                 err = -EOPNOTSUPP;
1853                 goto done;
1854         }
1855
1856         if (hdev->dev_type != HCI_BREDR) {
1857                 err = -EOPNOTSUPP;
1858                 goto done;
1859         }
1860
1861         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1862                 err = -EOPNOTSUPP;
1863                 goto done;
1864         }
1865
1866         switch (cmd) {
1867         case HCISETAUTH:
1868                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869                                    HCI_INIT_TIMEOUT);
1870                 break;
1871
1872         case HCISETENCRYPT:
1873                 if (!lmp_encrypt_capable(hdev)) {
1874                         err = -EOPNOTSUPP;
1875                         break;
1876                 }
1877
1878                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1879                         /* Auth must be enabled first */
1880                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1881                                            HCI_INIT_TIMEOUT);
1882                         if (err)
1883                                 break;
1884                 }
1885
1886                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1887                                    HCI_INIT_TIMEOUT);
1888                 break;
1889
1890         case HCISETSCAN:
1891                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1892                                    HCI_INIT_TIMEOUT);
1893
1894                 /* Ensure that the connectable and discoverable states
1895                  * get correctly modified as this was a non-mgmt change.
1896                  */
1897                 if (!err)
1898                         hci_update_scan_state(hdev, dr.dev_opt);
1899                 break;
1900
1901         case HCISETLINKPOL:
1902                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1903                                    HCI_INIT_TIMEOUT);
1904                 break;
1905
1906         case HCISETLINKMODE:
1907                 hdev->link_mode = ((__u16) dr.dev_opt) &
1908                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1909                 break;
1910
1911         case HCISETPTYPE:
1912                 hdev->pkt_type = (__u16) dr.dev_opt;
1913                 break;
1914
1915         case HCISETACLMTU:
1916                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1917                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1918                 break;
1919
1920         case HCISETSCOMTU:
1921                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1922                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1923                 break;
1924
1925         default:
1926                 err = -EINVAL;
1927                 break;
1928         }
1929
1930 done:
1931         hci_dev_put(hdev);
1932         return err;
1933 }
1934
1935 int hci_get_dev_list(void __user *arg)
1936 {
1937         struct hci_dev *hdev;
1938         struct hci_dev_list_req *dl;
1939         struct hci_dev_req *dr;
1940         int n = 0, size, err;
1941         __u16 dev_num;
1942
1943         if (get_user(dev_num, (__u16 __user *) arg))
1944                 return -EFAULT;
1945
1946         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1947                 return -EINVAL;
1948
1949         size = sizeof(*dl) + dev_num * sizeof(*dr);
1950
1951         dl = kzalloc(size, GFP_KERNEL);
1952         if (!dl)
1953                 return -ENOMEM;
1954
1955         dr = dl->dev_req;
1956
1957         read_lock(&hci_dev_list_lock);
1958         list_for_each_entry(hdev, &hci_dev_list, list) {
1959                 unsigned long flags = hdev->flags;
1960
1961                 /* When HCI_AUTO_OFF is still pending, the transport is
1962                  * running, but userspace should nevertheless see the
1963                  * device as being down.
1964                  */
1965                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1966                         flags &= ~BIT(HCI_UP);
1967
1968                 (dr + n)->dev_id  = hdev->id;
1969                 (dr + n)->dev_opt = flags;
1970
1971                 if (++n >= dev_num)
1972                         break;
1973         }
1974         read_unlock(&hci_dev_list_lock);
1975
1976         dl->dev_num = n;
1977         size = sizeof(*dl) + n * sizeof(*dr);
1978
1979         err = copy_to_user(arg, dl, size);
1980         kfree(dl);
1981
1982         return err ? -EFAULT : 0;
1983 }
1984
1985 int hci_get_dev_info(void __user *arg)
1986 {
1987         struct hci_dev *hdev;
1988         struct hci_dev_info di;
1989         unsigned long flags;
1990         int err = 0;
1991
1992         if (copy_from_user(&di, arg, sizeof(di)))
1993                 return -EFAULT;
1994
1995         hdev = hci_dev_get(di.dev_id);
1996         if (!hdev)
1997                 return -ENODEV;
1998
1999         /* When HCI_AUTO_OFF is still pending, the transport is
2000          * running, but userspace should nevertheless see the
2001          * device as being down.
2002          */
2003         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2004                 flags = hdev->flags & ~BIT(HCI_UP);
2005         else
2006                 flags = hdev->flags;
2007
2008         strcpy(di.name, hdev->name);
2009         di.bdaddr   = hdev->bdaddr;
2010         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2011         di.flags    = flags;
2012         di.pkt_type = hdev->pkt_type;
2013         if (lmp_bredr_capable(hdev)) {
2014                 di.acl_mtu  = hdev->acl_mtu;
2015                 di.acl_pkts = hdev->acl_pkts;
2016                 di.sco_mtu  = hdev->sco_mtu;
2017                 di.sco_pkts = hdev->sco_pkts;
2018         } else {
2019                 di.acl_mtu  = hdev->le_mtu;
2020                 di.acl_pkts = hdev->le_pkts;
2021                 di.sco_mtu  = 0;
2022                 di.sco_pkts = 0;
2023         }
2024         di.link_policy = hdev->link_policy;
2025         di.link_mode   = hdev->link_mode;
2026
2027         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2028         memcpy(&di.features, &hdev->features, sizeof(di.features));
2029
2030         if (copy_to_user(arg, &di, sizeof(di)))
2031                 err = -EFAULT;
2032
2033         hci_dev_put(hdev);
2034
2035         return err;
2036 }
2037
2038 /* ---- Interface to HCI drivers ---- */
2039
2040 static int hci_rfkill_set_block(void *data, bool blocked)
2041 {
2042         struct hci_dev *hdev = data;
2043
2044         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2045
2046         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2047                 return -EBUSY;
2048
2049         if (blocked) {
2050                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2051                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2052                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2053                         hci_dev_do_close(hdev);
2054         } else {
2055                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2056         }
2057
2058         return 0;
2059 }
2060
2061 static const struct rfkill_ops hci_rfkill_ops = {
2062         .set_block = hci_rfkill_set_block,
2063 };
2064
2065 static void hci_power_on(struct work_struct *work)
2066 {
2067         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2068         int err;
2069
2070         BT_DBG("%s", hdev->name);
2071
2072         err = hci_dev_do_open(hdev);
2073         if (err < 0) {
2074                 hci_dev_lock(hdev);
2075                 mgmt_set_powered_failed(hdev, err);
2076                 hci_dev_unlock(hdev);
2077                 return;
2078         }
2079
2080         /* During the HCI setup phase, a few error conditions are
2081          * ignored and they need to be checked now. If they are still
2082          * valid, it is important to turn the device back off.
2083          */
2084         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2085             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2086             (hdev->dev_type == HCI_BREDR &&
2087              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2088              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2089                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2090                 hci_dev_do_close(hdev);
2091         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2092                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2093                                    HCI_AUTO_OFF_TIMEOUT);
2094         }
2095
2096         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2097                 /* For unconfigured devices, set the HCI_RAW flag
2098                  * so that userspace can easily identify them.
2099                  */
2100                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2101                         set_bit(HCI_RAW, &hdev->flags);
2102
2103                 /* For fully configured devices, this will send
2104                  * the Index Added event. For unconfigured devices,
2105                  * it will send the Unconfigured Index Added event.
2106                  *
2107                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2108                  * and no event will be sent.
2109                  */
2110                 mgmt_index_added(hdev);
2111         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2112                 /* Now that the controller is configured, it is
2113                  * important to clear the HCI_RAW flag.
2114                  */
2115                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2116                         clear_bit(HCI_RAW, &hdev->flags);
2117
2118                 /* Powering on the controller with HCI_CONFIG set only
2119                  * happens with the transition from unconfigured to
2120                  * configured. This will send the Index Added event.
2121                  */
2122                 mgmt_index_added(hdev);
2123         }
2124 }
2125
2126 static void hci_power_off(struct work_struct *work)
2127 {
2128         struct hci_dev *hdev = container_of(work, struct hci_dev,
2129                                             power_off.work);
2130
2131         BT_DBG("%s", hdev->name);
2132
2133         hci_dev_do_close(hdev);
2134
2135         smp_unregister(hdev);
2136 }
2137
2138 static void hci_discov_off(struct work_struct *work)
2139 {
2140         struct hci_dev *hdev;
2141
2142         hdev = container_of(work, struct hci_dev, discov_off.work);
2143
2144         BT_DBG("%s", hdev->name);
2145
2146         mgmt_discoverable_timeout(hdev);
2147 }
2148
2149 void hci_uuids_clear(struct hci_dev *hdev)
2150 {
2151         struct bt_uuid *uuid, *tmp;
2152
2153         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2154                 list_del(&uuid->list);
2155                 kfree(uuid);
2156         }
2157 }
2158
2159 void hci_link_keys_clear(struct hci_dev *hdev)
2160 {
2161         struct link_key *key;
2162
2163         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2164                 list_del_rcu(&key->list);
2165                 kfree_rcu(key, rcu);
2166         }
2167 }
2168
2169 void hci_smp_ltks_clear(struct hci_dev *hdev)
2170 {
2171         struct smp_ltk *k;
2172
2173         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2174                 list_del_rcu(&k->list);
2175                 kfree_rcu(k, rcu);
2176         }
2177 }
2178
2179 void hci_smp_irks_clear(struct hci_dev *hdev)
2180 {
2181         struct smp_irk *k;
2182
2183         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2184                 list_del_rcu(&k->list);
2185                 kfree_rcu(k, rcu);
2186         }
2187 }
2188
2189 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2190 {
2191         struct link_key *k;
2192
2193         rcu_read_lock();
2194         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2195                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2196                         rcu_read_unlock();
2197                         return k;
2198                 }
2199         }
2200         rcu_read_unlock();
2201
2202         return NULL;
2203 }
2204
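/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. The auth_type/remote_auth values checked below use
 * the Authentication Requirements coding from the Bluetooth Core
 * specification: 0x00/0x01 mean no bonding, 0x02/0x03 dedicated
 * bonding and 0x04/0x05 general bonding, where the odd values
 * additionally request MITM protection.
 */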
2205 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2206                                u8 key_type, u8 old_key_type)
2207 {
2208         /* Legacy key */
2209         if (key_type < 0x03)
2210                 return true;
2211
2212         /* Debug keys are insecure so don't store them persistently */
2213         if (key_type == HCI_LK_DEBUG_COMBINATION)
2214                 return false;
2215
2216         /* Changed combination key and there's no previous one */
2217         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2218                 return false;
2219
2220         /* Security mode 3 case */
2221         if (!conn)
2222                 return true;
2223
2224         /* BR/EDR key derived using SC from an LE link */
2225         if (conn->type == LE_LINK)
2226                 return true;
2227
2228         /* Neither local nor remote side had no-bonding as a requirement */
2229         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2230                 return true;
2231
2232         /* Local side had dedicated bonding as a requirement */
2233         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2234                 return true;
2235
2236         /* Remote side had dedicated bonding as a requirement */
2237         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2238                 return true;
2239
2240         /* If none of the above criteria match, then don't store the key
2241          * persistently */
2242         return false;
2243 }
2244
2245 static u8 ltk_role(u8 type)
2246 {
2247         if (type == SMP_LTK)
2248                 return HCI_ROLE_MASTER;
2249
2250         return HCI_ROLE_SLAVE;
2251 }
2252
2253 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2254                              u8 addr_type, u8 role)
2255 {
2256         struct smp_ltk *k;
2257
2258         rcu_read_lock();
2259         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2260                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2261                         continue;
2262
2263                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2264                         rcu_read_unlock();
2265                         return k;
2266                 }
2267         }
2268         rcu_read_unlock();
2269
2270         return NULL;
2271 }
2272
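/* Resolve a Resolvable Private Address to a stored IRK. The first
 * pass is a cheap comparison against the RPA last cached for each
 * key; only when that fails is the AES-based smp_irk_matches()
 * computation run, and a successful match refreshes the cached RPA
 * for the next lookup.
 */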
2273 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2274 {
2275         struct smp_irk *irk;
2276
2277         rcu_read_lock();
2278         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2279                 if (!bacmp(&irk->rpa, rpa)) {
2280                         rcu_read_unlock();
2281                         return irk;
2282                 }
2283         }
2284
2285         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2286                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2287                         bacpy(&irk->rpa, rpa);
2288                         rcu_read_unlock();
2289                         return irk;
2290                 }
2291         }
2292         rcu_read_unlock();
2293
2294         return NULL;
2295 }
2296
2297 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2298                                      u8 addr_type)
2299 {
2300         struct smp_irk *irk;
2301
2302         /* Identity Address must be public or static random */
2303         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2304                 return NULL;
2305
2306         rcu_read_lock();
2307         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2308                 if (addr_type == irk->addr_type &&
2309                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2310                         rcu_read_unlock();
2311                         return irk;
2312                 }
2313         }
2314         rcu_read_unlock();
2315
2316         return NULL;
2317 }
2318
2319 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2320                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2321                                   u8 pin_len, bool *persistent)
2322 {
2323         struct link_key *key, *old_key;
2324         u8 old_key_type;
2325
2326         old_key = hci_find_link_key(hdev, bdaddr);
2327         if (old_key) {
2328                 old_key_type = old_key->type;
2329                 key = old_key;
2330         } else {
2331                 old_key_type = conn ? conn->key_type : 0xff;
2332                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2333                 if (!key)
2334                         return NULL;
2335                 list_add_rcu(&key->list, &hdev->link_keys);
2336         }
2337
2338         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2339
2340         /* Some buggy controller combinations generate a changed
2341          * combination key for legacy pairing even when there's no
2342          * previous key */
2343         if (type == HCI_LK_CHANGED_COMBINATION &&
2344             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2345                 type = HCI_LK_COMBINATION;
2346                 if (conn)
2347                         conn->key_type = type;
2348         }
2349
2350         bacpy(&key->bdaddr, bdaddr);
2351         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2352         key->pin_len = pin_len;
2353
2354         if (type == HCI_LK_CHANGED_COMBINATION)
2355                 key->type = old_key_type;
2356         else
2357                 key->type = type;
2358
2359         if (persistent)
2360                 *persistent = hci_persistent_key(hdev, conn, type,
2361                                                  old_key_type);
2362
2363         return key;
2364 }
2365
2366 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2367                             u8 addr_type, u8 type, u8 authenticated,
2368                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2369 {
2370         struct smp_ltk *key, *old_key;
2371         u8 role = ltk_role(type);
2372
2373         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2374         if (old_key)
2375                 key = old_key;
2376         else {
2377                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2378                 if (!key)
2379                         return NULL;
2380                 list_add_rcu(&key->list, &hdev->long_term_keys);
2381         }
2382
2383         bacpy(&key->bdaddr, bdaddr);
2384         key->bdaddr_type = addr_type;
2385         memcpy(key->val, tk, sizeof(key->val));
2386         key->authenticated = authenticated;
2387         key->ediv = ediv;
2388         key->rand = rand;
2389         key->enc_size = enc_size;
2390         key->type = type;
2391
2392         return key;
2393 }
2394
2395 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2396                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2397 {
2398         struct smp_irk *irk;
2399
2400         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2401         if (!irk) {
2402                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2403                 if (!irk)
2404                         return NULL;
2405
2406                 bacpy(&irk->bdaddr, bdaddr);
2407                 irk->addr_type = addr_type;
2408
2409                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2410         }
2411
2412         memcpy(irk->val, val, 16);
2413         bacpy(&irk->rpa, rpa);
2414
2415         return irk;
2416 }
2417
2418 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2419 {
2420         struct link_key *key;
2421
2422         key = hci_find_link_key(hdev, bdaddr);
2423         if (!key)
2424                 return -ENOENT;
2425
2426         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2427
2428         list_del_rcu(&key->list);
2429         kfree_rcu(key, rcu);
2430
2431         return 0;
2432 }
2433
2434 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2435 {
2436         struct smp_ltk *k;
2437         int removed = 0;
2438
2439         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2440                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2441                         continue;
2442
2443                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444
2445                 list_del_rcu(&k->list);
2446                 kfree_rcu(k, rcu);
2447                 removed++;
2448         }
2449
2450         return removed ? 0 : -ENOENT;
2451 }
2452
2453 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2454 {
2455         struct smp_irk *k;
2456
2457         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2458                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2459                         continue;
2460
2461                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462
2463                 list_del_rcu(&k->list);
2464                 kfree_rcu(k, rcu);
2465         }
2466 }
2467
2468 /* HCI command timeout handler */
2469 static void hci_cmd_timeout(struct work_struct *work)
2470 {
2471         struct hci_dev *hdev = container_of(work, struct hci_dev,
2472                                             cmd_timer.work);
2473
2474         if (hdev->sent_cmd) {
2475                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476                 u16 opcode = __le16_to_cpu(sent->opcode);
2477
2478                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2479         } else {
2480                 BT_ERR("%s command tx timeout", hdev->name);
2481         }
2482
2483         atomic_set(&hdev->cmd_cnt, 1);
2484         queue_work(hdev->workqueue, &hdev->cmd_work);
2485 }
2486
2487 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2488                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2489 {
2490         struct oob_data *data;
2491
2492         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2494                         continue;
2495                 if (data->bdaddr_type != bdaddr_type)
2496                         continue;
2497                 return data;
2498         }
2499
2500         return NULL;
2501 }
2502
2503 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504                                u8 bdaddr_type)
2505 {
2506         struct oob_data *data;
2507
2508         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2509         if (!data)
2510                 return -ENOENT;
2511
2512         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2513
2514         list_del(&data->list);
2515         kfree(data);
2516
2517         return 0;
2518 }
2519
2520 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2521 {
2522         struct oob_data *data, *n;
2523
2524         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525                 list_del(&data->list);
2526                 kfree(data);
2527         }
2528 }
2529
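/* Store out-of-band pairing data for a remote device. hash192 and
 * rand192 carry the P-192 values used by legacy Secure Simple
 * Pairing, while hash256 and rand256 carry the P-256 values used
 * by Secure Connections. Absent variants are zeroed so that later
 * users can detect which set is valid.
 */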
2530 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532                             u8 *hash256, u8 *rand256)
2533 {
2534         struct oob_data *data;
2535
2536         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2537         if (!data) {
2538                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2539                 if (!data)
2540                         return -ENOMEM;
2541
2542                 bacpy(&data->bdaddr, bdaddr);
2543                 data->bdaddr_type = bdaddr_type;
2544                 list_add(&data->list, &hdev->remote_oob_data);
2545         }
2546
2547         if (hash192 && rand192) {
2548                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2550         } else {
2551                 memset(data->hash192, 0, sizeof(data->hash192));
2552                 memset(data->rand192, 0, sizeof(data->rand192));
2553         }
2554
2555         if (hash256 && rand256) {
2556                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2557                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2558         } else {
2559                 memset(data->hash256, 0, sizeof(data->hash256));
2560                 memset(data->rand256, 0, sizeof(data->rand256));
2561         }
2562
2563         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2564
2565         return 0;
2566 }
2567
2568 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2569                                          bdaddr_t *bdaddr, u8 type)
2570 {
2571         struct bdaddr_list *b;
2572
2573         list_for_each_entry(b, bdaddr_list, list) {
2574                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2575                         return b;
2576         }
2577
2578         return NULL;
2579 }
2580
2581 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2582 {
2583         struct list_head *p, *n;
2584
2585         list_for_each_safe(p, n, bdaddr_list) {
2586                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2587
2588                 list_del(p);
2589                 kfree(b);
2590         }
2591 }
2592
2593 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2594 {
2595         struct bdaddr_list *entry;
2596
2597         if (!bacmp(bdaddr, BDADDR_ANY))
2598                 return -EBADF;
2599
2600         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2601                 return -EEXIST;
2602
2603         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2604         if (!entry)
2605                 return -ENOMEM;
2606
2607         bacpy(&entry->bdaddr, bdaddr);
2608         entry->bdaddr_type = type;
2609
2610         list_add(&entry->list, list);
2611
2612         return 0;
2613 }
2614
2615 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2616 {
2617         struct bdaddr_list *entry;
2618
2619         if (!bacmp(bdaddr, BDADDR_ANY)) {
2620                 hci_bdaddr_list_clear(list);
2621                 return 0;
2622         }
2623
2624         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2625         if (!entry)
2626                 return -ENOENT;
2627
2628         list_del(&entry->list);
2629         kfree(entry);
2630
2631         return 0;
2632 }
2633
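/* Usage sketch (illustrative only; the address below is made up and
 * error handling is omitted): adding a remote LE device to the
 * white list and removing it again.
 *
 *     bdaddr_t peer = { .b = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
 *
 *     hci_bdaddr_list_add(&hdev->le_white_list, &peer,
 *                         ADDR_LE_DEV_PUBLIC);
 *     ...
 *     hci_bdaddr_list_del(&hdev->le_white_list, &peer,
 *                         ADDR_LE_DEV_PUBLIC);
 *
 * Note that passing BDADDR_ANY to hci_bdaddr_list_del() clears the
 * whole list.
 */
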
2634 /* This function requires the caller holds hdev->lock */
2635 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2636                                                bdaddr_t *addr, u8 addr_type)
2637 {
2638         struct hci_conn_params *params;
2639
2640         /* The conn params list only contains identity addresses */
2641         if (!hci_is_identity_address(addr, addr_type))
2642                 return NULL;
2643
2644         list_for_each_entry(params, &hdev->le_conn_params, list) {
2645                 if (bacmp(&params->addr, addr) == 0 &&
2646                     params->addr_type == addr_type) {
2647                         return params;
2648                 }
2649         }
2650
2651         return NULL;
2652 }
2653
2654 /* This function requires the caller holds hdev->lock */
2655 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2656                                                   bdaddr_t *addr, u8 addr_type)
2657 {
2658         struct hci_conn_params *param;
2659
2660         /* The list only contains identity addresses */
2661         if (!hci_is_identity_address(addr, addr_type))
2662                 return NULL;
2663
2664         list_for_each_entry(param, list, action) {
2665                 if (bacmp(&param->addr, addr) == 0 &&
2666                     param->addr_type == addr_type)
2667                         return param;
2668         }
2669
2670         return NULL;
2671 }
2672
2673 /* This function requires the caller holds hdev->lock */
2674 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2675                                             bdaddr_t *addr, u8 addr_type)
2676 {
2677         struct hci_conn_params *params;
2678
2679         if (!hci_is_identity_address(addr, addr_type))
2680                 return NULL;
2681
2682         params = hci_conn_params_lookup(hdev, addr, addr_type);
2683         if (params)
2684                 return params;
2685
2686         params = kzalloc(sizeof(*params), GFP_KERNEL);
2687         if (!params) {
2688                 BT_ERR("Out of memory");
2689                 return NULL;
2690         }
2691
2692         bacpy(&params->addr, addr);
2693         params->addr_type = addr_type;
2694
2695         list_add(&params->list, &hdev->le_conn_params);
2696         INIT_LIST_HEAD(&params->action);
2697
2698         params->conn_min_interval = hdev->le_conn_min_interval;
2699         params->conn_max_interval = hdev->le_conn_max_interval;
2700         params->conn_latency = hdev->le_conn_latency;
2701         params->supervision_timeout = hdev->le_supv_timeout;
2702         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2703
2704         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2705
2706         return params;
2707 }
2708
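/* Note on hci_conn_params_add() above: new parameters inherit the
 * controller-wide le_conn_* defaults and start out with
 * auto_connect disabled. Callers (for example the mgmt Add Device
 * handling) later adjust auto_connect and link params->action into
 * pend_le_conns or pend_le_reports.
 */
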
2709 static void hci_conn_params_free(struct hci_conn_params *params)
2710 {
2711         if (params->conn) {
2712                 hci_conn_drop(params->conn);
2713                 hci_conn_put(params->conn);
2714         }
2715
2716         list_del(&params->action);
2717         list_del(&params->list);
2718         kfree(params);
2719 }
2720
2721 /* This function requires the caller holds hdev->lock */
2722 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2723 {
2724         struct hci_conn_params *params;
2725
2726         params = hci_conn_params_lookup(hdev, addr, addr_type);
2727         if (!params)
2728                 return;
2729
2730         hci_conn_params_free(params);
2731
2732         hci_update_background_scan(hdev);
2733
2734         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2735 }
2736
2737 /* This function requires the caller holds hdev->lock */
2738 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2739 {
2740         struct hci_conn_params *params, *tmp;
2741
2742         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2743                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2744                         continue;
2745                 list_del(&params->list);
2746                 kfree(params);
2747         }
2748
2749         BT_DBG("All LE disabled connection parameters were removed");
2750 }
2751
2752 /* This function requires the caller holds hdev->lock */
2753 void hci_conn_params_clear_all(struct hci_dev *hdev)
2754 {
2755         struct hci_conn_params *params, *tmp;
2756
2757         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2758                 hci_conn_params_free(params);
2759
2760         hci_update_background_scan(hdev);
2761
2762         BT_DBG("All LE connection parameters were removed");
2763 }
2764
2765 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2766 {
2767         if (status) {
2768                 BT_ERR("Failed to start inquiry: status %d", status);
2769
2770                 hci_dev_lock(hdev);
2771                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2772                 hci_dev_unlock(hdev);
2773                 return;
2774         }
2775 }
2776
2777 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2778                                           u16 opcode)
2779 {
2780         /* General inquiry access code (GIAC) */
2781         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2782         struct hci_request req;
2783         struct hci_cp_inquiry cp;
2784         int err;
2785
2786         if (status) {
2787                 BT_ERR("Failed to disable LE scanning: status %d", status);
2788                 return;
2789         }
2790
2791         switch (hdev->discovery.type) {
2792         case DISCOV_TYPE_LE:
2793                 hci_dev_lock(hdev);
2794                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2795                 hci_dev_unlock(hdev);
2796                 break;
2797
2798         case DISCOV_TYPE_INTERLEAVED:
2799                 hci_req_init(&req, hdev);
2800
2801                 memset(&cp, 0, sizeof(cp));
2802                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2803                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2804                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2805
2806                 hci_dev_lock(hdev);
2807
2808                 hci_inquiry_cache_flush(hdev);
2809
2810                 err = hci_req_run(&req, inquiry_complete);
2811                 if (err) {
2812                         BT_ERR("Inquiry request failed: err %d", err);
2813                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2814                 }
2815
2816                 hci_dev_unlock(hdev);
2817                 break;
2818         }
2819 }
2820
2821 static void le_scan_disable_work(struct work_struct *work)
2822 {
2823         struct hci_dev *hdev = container_of(work, struct hci_dev,
2824                                             le_scan_disable.work);
2825         struct hci_request req;
2826         int err;
2827
2828         BT_DBG("%s", hdev->name);
2829
2830         hci_req_init(&req, hdev);
2831
2832         hci_req_add_le_scan_disable(&req);
2833
2834         err = hci_req_run(&req, le_scan_disable_work_complete);
2835         if (err)
2836                 BT_ERR("Disable LE scanning request failed: err %d", err);
2837 }
2838
2839 /* Copy the Identity Address of the controller.
2840  *
2841  * If the controller has a public BD_ADDR, then by default use that one.
2842  * If this is an LE-only controller without a public address, default to
2843  * the static random address.
2844  *
2845  * For debugging purposes it is possible to force controllers with a
2846  * public address to use the static random address instead.
2847  *
2848  * In case BR/EDR has been disabled on a dual-mode controller and
2849  * userspace has configured a static address, then that address
2850  * becomes the identity address instead of the public BR/EDR address.
2851  */
2852 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2853                                u8 *bdaddr_type)
2854 {
2855         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2856             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2857             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2858              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2859                 bacpy(bdaddr, &hdev->static_addr);
2860                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2861         } else {
2862                 bacpy(bdaddr, &hdev->bdaddr);
2863                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2864         }
2865 }
2866
2867 /* Alloc HCI device */
2868 struct hci_dev *hci_alloc_dev(void)
2869 {
2870         struct hci_dev *hdev;
2871
2872         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2873         if (!hdev)
2874                 return NULL;
2875
2876         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2877         hdev->esco_type = (ESCO_HV1);
2878         hdev->link_mode = (HCI_LM_ACCEPT);
2879         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2880         hdev->io_capability = 0x03;     /* No Input No Output */
2881         hdev->manufacturer = 0xffff;    /* Default to internal use */
2882         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2883         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2884
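        /* Sniff interval limits, in 0.625 ms baseband slots:
         * 80 = 50 ms, 800 = 500 ms.
         */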
2885         hdev->sniff_max_interval = 800;
2886         hdev->sniff_min_interval = 80;
2887
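        /* LE defaults, in standard HCI units: advertising interval
         * 0x0800 = 1.28 s, scan interval 0x0060 = 60 ms, scan window
         * 0x0030 = 30 ms (all 0.625 ms units), connection interval
         * 0x0028-0x0038 = 50-70 ms (1.25 ms units), supervision
         * timeout 0x002a = 420 ms (10 ms units). The data length
         * defaults 0x001b/0x0148 are the spec minimum of 27 bytes
         * and 328 us.
         */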
2888         hdev->le_adv_channel_map = 0x07;
2889         hdev->le_adv_min_interval = 0x0800;
2890         hdev->le_adv_max_interval = 0x0800;
2891         hdev->le_scan_interval = 0x0060;
2892         hdev->le_scan_window = 0x0030;
2893         hdev->le_conn_min_interval = 0x0028;
2894         hdev->le_conn_max_interval = 0x0038;
2895         hdev->le_conn_latency = 0x0000;
2896         hdev->le_supv_timeout = 0x002a;
2897         hdev->le_def_tx_len = 0x001b;
2898         hdev->le_def_tx_time = 0x0148;
2899         hdev->le_max_tx_len = 0x001b;
2900         hdev->le_max_tx_time = 0x0148;
2901         hdev->le_max_rx_len = 0x001b;
2902         hdev->le_max_rx_time = 0x0148;
2903
2904         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2905         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2906         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2907         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2908
2909         mutex_init(&hdev->lock);
2910         mutex_init(&hdev->req_lock);
2911
2912         INIT_LIST_HEAD(&hdev->mgmt_pending);
2913         INIT_LIST_HEAD(&hdev->blacklist);
2914         INIT_LIST_HEAD(&hdev->whitelist);
2915         INIT_LIST_HEAD(&hdev->uuids);
2916         INIT_LIST_HEAD(&hdev->link_keys);
2917         INIT_LIST_HEAD(&hdev->long_term_keys);
2918         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2919         INIT_LIST_HEAD(&hdev->remote_oob_data);
2920         INIT_LIST_HEAD(&hdev->le_white_list);
2921         INIT_LIST_HEAD(&hdev->le_conn_params);
2922         INIT_LIST_HEAD(&hdev->pend_le_conns);
2923         INIT_LIST_HEAD(&hdev->pend_le_reports);
2924         INIT_LIST_HEAD(&hdev->conn_hash.list);
2925
2926         INIT_WORK(&hdev->rx_work, hci_rx_work);
2927         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2928         INIT_WORK(&hdev->tx_work, hci_tx_work);
2929         INIT_WORK(&hdev->power_on, hci_power_on);
2930
2931         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2932         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2933         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2934
2935         skb_queue_head_init(&hdev->rx_q);
2936         skb_queue_head_init(&hdev->cmd_q);
2937         skb_queue_head_init(&hdev->raw_q);
2938
2939         init_waitqueue_head(&hdev->req_wait_q);
2940
2941         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2942
2943         hci_init_sysfs(hdev);
2944         discovery_init(hdev);
2945
2946         return hdev;
2947 }
2948 EXPORT_SYMBOL(hci_alloc_dev);
2949
2950 /* Free HCI device */
2951 void hci_free_dev(struct hci_dev *hdev)
2952 {
2953         /* Will be freed via the device release callback */
2954         put_device(&hdev->dev);
2955 }
2956 EXPORT_SYMBOL(hci_free_dev);
2957
2958 /* Register HCI device */
2959 int hci_register_dev(struct hci_dev *hdev)
2960 {
2961         int id, error;
2962
2963         if (!hdev->open || !hdev->close || !hdev->send)
2964                 return -EINVAL;
2965
2966         /* Do not allow HCI_AMP devices to register at index 0,
2967          * so the index can be used as the AMP controller ID.
2968          */
2969         switch (hdev->dev_type) {
2970         case HCI_BREDR:
2971                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2972                 break;
2973         case HCI_AMP:
2974                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2975                 break;
2976         default:
2977                 return -EINVAL;
2978         }
2979
2980         if (id < 0)
2981                 return id;
2982
2983         sprintf(hdev->name, "hci%d", id);
2984         hdev->id = id;
2985
2986         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2987
2988         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2989                                           WQ_MEM_RECLAIM, 1, hdev->name);
2990         if (!hdev->workqueue) {
2991                 error = -ENOMEM;
2992                 goto err;
2993         }
2994
2995         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2996                                               WQ_MEM_RECLAIM, 1, hdev->name);
2997         if (!hdev->req_workqueue) {
2998                 destroy_workqueue(hdev->workqueue);
2999                 error = -ENOMEM;
3000                 goto err;
3001         }
3002
3003         if (!IS_ERR_OR_NULL(bt_debugfs))
3004                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3005
3006         dev_set_name(&hdev->dev, "%s", hdev->name);
3007
3008         error = device_add(&hdev->dev);
3009         if (error < 0)
3010                 goto err_wqueue;
3011
3012         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3013                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3014                                     hdev);
3015         if (hdev->rfkill) {
3016                 if (rfkill_register(hdev->rfkill) < 0) {
3017                         rfkill_destroy(hdev->rfkill);
3018                         hdev->rfkill = NULL;
3019                 }
3020         }
3021
3022         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3023                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3024
3025         set_bit(HCI_SETUP, &hdev->dev_flags);
3026         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3027
3028         if (hdev->dev_type == HCI_BREDR) {
3029                 /* Assume BR/EDR support until proven otherwise (such as
3030                  * through reading supported features during init).
3031                  */
3032                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3033         }
3034
3035         write_lock(&hci_dev_list_lock);
3036         list_add(&hdev->list, &hci_dev_list);
3037         write_unlock(&hci_dev_list_lock);
3038
3039         /* Devices that are marked for raw-only usage are unconfigured
3040          * and should not be included in normal operation.
3041          */
3042         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3043                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3044
3045         hci_notify(hdev, HCI_DEV_REG);
3046         hci_dev_hold(hdev);
3047
3048         queue_work(hdev->req_workqueue, &hdev->power_on);
3049
3050         return id;
3051
3052 err_wqueue:
3053         destroy_workqueue(hdev->workqueue);
3054         destroy_workqueue(hdev->req_workqueue);
3055 err:
3056         ida_simple_remove(&hci_index_ida, hdev->id);
3057
3058         return error;
3059 }
3060 EXPORT_SYMBOL(hci_register_dev);
3061
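/* Driver usage sketch (illustrative only; my_open, my_close,
 * my_send and my_flush are hypothetical callbacks, and error
 * handling is omitted):
 *
 *     struct hci_dev *hdev = hci_alloc_dev();
 *
 *     hdev->bus   = HCI_USB;
 *     hdev->open  = my_open;
 *     hdev->close = my_close;
 *     hdev->send  = my_send;
 *     hdev->flush = my_flush;
 *
 *     if (hci_register_dev(hdev) < 0)
 *             hci_free_dev(hdev);
 *
 * open, close and send are mandatory, as checked above; flush and
 * the remaining callbacks are optional.
 */
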
3062 /* Unregister HCI device */
3063 void hci_unregister_dev(struct hci_dev *hdev)
3064 {
3065         int i, id;
3066
3067         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3068
3069         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3070
3071         id = hdev->id;
3072
3073         write_lock(&hci_dev_list_lock);
3074         list_del(&hdev->list);
3075         write_unlock(&hci_dev_list_lock);
3076
3077         hci_dev_do_close(hdev);
3078
3079         for (i = 0; i < NUM_REASSEMBLY; i++)
3080                 kfree_skb(hdev->reassembly[i]);
3081
3082         cancel_work_sync(&hdev->power_on);
3083
3084         if (!test_bit(HCI_INIT, &hdev->flags) &&
3085             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3086             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3087                 hci_dev_lock(hdev);
3088                 mgmt_index_removed(hdev);
3089                 hci_dev_unlock(hdev);
3090         }
3091
3092         /* mgmt_index_removed should take care of emptying the
3093          * pending list */
3094         BUG_ON(!list_empty(&hdev->mgmt_pending));
3095
3096         hci_notify(hdev, HCI_DEV_UNREG);
3097
3098         if (hdev->rfkill) {
3099                 rfkill_unregister(hdev->rfkill);
3100                 rfkill_destroy(hdev->rfkill);
3101         }
3102
3103         smp_unregister(hdev);
3104
3105         device_del(&hdev->dev);
3106
3107         debugfs_remove_recursive(hdev->debugfs);
3108
3109         destroy_workqueue(hdev->workqueue);
3110         destroy_workqueue(hdev->req_workqueue);
3111
3112         hci_dev_lock(hdev);
3113         hci_bdaddr_list_clear(&hdev->blacklist);
3114         hci_bdaddr_list_clear(&hdev->whitelist);
3115         hci_uuids_clear(hdev);
3116         hci_link_keys_clear(hdev);
3117         hci_smp_ltks_clear(hdev);
3118         hci_smp_irks_clear(hdev);
3119         hci_remote_oob_data_clear(hdev);
3120         hci_bdaddr_list_clear(&hdev->le_white_list);
3121         hci_conn_params_clear_all(hdev);
3122         hci_discovery_filter_clear(hdev);
3123         hci_dev_unlock(hdev);
3124
3125         hci_dev_put(hdev);
3126
3127         ida_simple_remove(&hci_index_ida, id);
3128 }
3129 EXPORT_SYMBOL(hci_unregister_dev);
3130
3131 /* Suspend HCI device */
3132 int hci_suspend_dev(struct hci_dev *hdev)
3133 {
3134         hci_notify(hdev, HCI_DEV_SUSPEND);
3135         return 0;
3136 }
3137 EXPORT_SYMBOL(hci_suspend_dev);
3138
3139 /* Resume HCI device */
3140 int hci_resume_dev(struct hci_dev *hdev)
3141 {
3142         hci_notify(hdev, HCI_DEV_RESUME);
3143         return 0;
3144 }
3145 EXPORT_SYMBOL(hci_resume_dev);
3146
3147 /* Reset HCI device */
3148 int hci_reset_dev(struct hci_dev *hdev)
3149 {
3150         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
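        /* These three bytes form one complete HCI event packet:
         * event code HCI_EV_HARDWARE_ERROR (0x10), parameter total
         * length 0x01 and a hardware error code of 0x00.
         */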
3151         struct sk_buff *skb;
3152
3153         skb = bt_skb_alloc(3, GFP_ATOMIC);
3154         if (!skb)
3155                 return -ENOMEM;
3156
3157         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3158         memcpy(skb_put(skb, 3), hw_err, 3);
3159
3160         /* Send Hardware Error to upper stack */
3161         return hci_recv_frame(hdev, skb);
3162 }
3163 EXPORT_SYMBOL(hci_reset_dev);
3164
3165 /* Receive frame from HCI drivers */
3166 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3167 {
3168         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3169                       !test_bit(HCI_INIT, &hdev->flags))) {
3170                 kfree_skb(skb);
3171                 return -ENXIO;
3172         }
3173
3174         /* Incoming skb */
3175         bt_cb(skb)->incoming = 1;
3176
3177         /* Time stamp */
3178         __net_timestamp(skb);
3179
3180         skb_queue_tail(&hdev->rx_q, skb);
3181         queue_work(hdev->workqueue, &hdev->rx_work);
3182
3183         return 0;
3184 }
3185 EXPORT_SYMBOL(hci_recv_frame);
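
/* Illustrative sketch: a driver that already receives complete frames
 * (e.g. over USB) tags the skb with its packet type and hands it over
 * like this, where buf/len are hypothetical receive-buffer names:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 */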
3186
3187 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3188                           int count, __u8 index)
3189 {
3190         int len = 0;
3191         int hlen = 0;
3192         int remain = count;
3193         struct sk_buff *skb;
3194         struct bt_skb_cb *scb;
3195
3196         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3197             index >= NUM_REASSEMBLY)
3198                 return -EILSEQ;
3199
3200         skb = hdev->reassembly[index];
3201
3202         if (!skb) {
3203                 switch (type) {
3204                 case HCI_ACLDATA_PKT:
3205                         len = HCI_MAX_FRAME_SIZE;
3206                         hlen = HCI_ACL_HDR_SIZE;
3207                         break;
3208                 case HCI_EVENT_PKT:
3209                         len = HCI_MAX_EVENT_SIZE;
3210                         hlen = HCI_EVENT_HDR_SIZE;
3211                         break;
3212                 case HCI_SCODATA_PKT:
3213                         len = HCI_MAX_SCO_SIZE;
3214                         hlen = HCI_SCO_HDR_SIZE;
3215                         break;
3216                 }
3217
3218                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3219                 if (!skb)
3220                         return -ENOMEM;
3221
3222                 scb = (void *) skb->cb;
3223                 scb->expect = hlen;
3224                 scb->pkt_type = type;
3225
3226                 hdev->reassembly[index] = skb;
3227         }
3228
3229         while (count) {
3230                 scb = (void *) skb->cb;
3231                 len = min_t(uint, scb->expect, count);
3232
3233                 memcpy(skb_put(skb, len), data, len);
3234
3235                 count -= len;
3236                 data += len;
3237                 scb->expect -= len;
3238                 remain = count;
3239
3240                 switch (type) {
3241                 case HCI_EVENT_PKT:
3242                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3243                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3244                                 scb->expect = h->plen;
3245
3246                                 if (skb_tailroom(skb) < scb->expect) {
3247                                         kfree_skb(skb);
3248                                         hdev->reassembly[index] = NULL;
3249                                         return -ENOMEM;
3250                                 }
3251                         }
3252                         break;
3253
3254                 case HCI_ACLDATA_PKT:
3255                         if (skb->len == HCI_ACL_HDR_SIZE) {
3256                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3257                                 scb->expect = __le16_to_cpu(h->dlen);
3258
3259                                 if (skb_tailroom(skb) < scb->expect) {
3260                                         kfree_skb(skb);
3261                                         hdev->reassembly[index] = NULL;
3262                                         return -ENOMEM;
3263                                 }
3264                         }
3265                         break;
3266
3267                 case HCI_SCODATA_PKT:
3268                         if (skb->len == HCI_SCO_HDR_SIZE) {
3269                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3270                                 scb->expect = h->dlen;
3271
3272                                 if (skb_tailroom(skb) < scb->expect) {
3273                                         kfree_skb(skb);
3274                                         hdev->reassembly[index] = NULL;
3275                                         return -ENOMEM;
3276                                 }
3277                         }
3278                         break;
3279                 }
3280
3281                 if (scb->expect == 0) {
3282                         /* Complete frame */
3283
3284                         bt_cb(skb)->pkt_type = type;
3285                         hci_recv_frame(hdev, skb);
3286
3287                         hdev->reassembly[index] = NULL;
3288                         return remain;
3289                 }
3290         }
3291
3292         return remain;
3293 }
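
/* Example of the return value above: for an event frame of 2 header
 * bytes plus plen = 10, a first call with 5 bytes consumes all of them
 * and returns 0; a second call with 9 bytes completes the frame after
 * 7 bytes and returns 2, i.e. 2 bytes of the next frame remain for the
 * caller to feed back in.
 */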
3294
3295 #define STREAM_REASSEMBLY 0
3296
3297 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3298 {
3299         int type;
3300         int rem = 0;
3301
3302         while (count) {
3303                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3304
3305                 if (!skb) {
3306                         struct { char type; } *pkt;
3307
3308                         /* Start of the frame */
3309                         pkt = data;
3310                         type = pkt->type;
3311
3312                         data++;
3313                         count--;
3314                 } else {
3315                         type = bt_cb(skb)->pkt_type;
                }
3316
3317                 rem = hci_reassembly(hdev, type, data, count,
3318                                      STREAM_REASSEMBLY);
3319                 if (rem < 0)
3320                         return rem;
3321
3322                 data += (count - rem);
3323                 count = rem;
3324         }
3325
3326         return rem;
3327 }
3328 EXPORT_SYMBOL(hci_recv_stream_fragment);
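
/* Illustrative sketch: a UART style driver feeds raw bytes, including
 * the leading packet type octet, straight from its receive callback
 * (hypothetical foo_* names):
 *
 *	static void foo_rx(struct foo_uart *u, const u8 *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(u->hdev, (void *)buf, len) < 0)
 *			BT_ERR("foo: corrupted HCI stream");
 *	}
 */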
3329
3330 /* ---- Interface to upper protocols ---- */
3331
3332 int hci_register_cb(struct hci_cb *cb)
3333 {
3334         BT_DBG("%p name %s", cb, cb->name);
3335
3336         write_lock(&hci_cb_list_lock);
3337         list_add(&cb->list, &hci_cb_list);
3338         write_unlock(&hci_cb_list_lock);
3339
3340         return 0;
3341 }
3342 EXPORT_SYMBOL(hci_register_cb);
3343
3344 int hci_unregister_cb(struct hci_cb *cb)
3345 {
3346         BT_DBG("%p name %s", cb, cb->name);
3347
3348         write_lock(&hci_cb_list_lock);
3349         list_del(&cb->list);
3350         write_unlock(&hci_cb_list_lock);
3351
3352         return 0;
3353 }
3354 EXPORT_SYMBOL(hci_unregister_cb);
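
/* Illustrative sketch: an upper layer registers a callback table once
 * at module init and removes it on exit (hypothetical foo_* names;
 * only the callbacks of interest need to be set):
 *
 *	static struct hci_cb foo_cb = {
 *		.name		= "foo",
 *		.security_cfm	= foo_security_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */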
3355
3356 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3357 {
3358         int err;
3359
3360         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
                    skb->len);
3361
3362         /* Time stamp */
3363         __net_timestamp(skb);
3364
3365         /* Send copy to monitor */
3366         hci_send_to_monitor(hdev, skb);
3367
3368         if (atomic_read(&hdev->promisc)) {
3369                 /* Send copy to the sockets */
3370                 hci_send_to_sock(hdev, skb);
3371         }
3372
3373         /* Get rid of skb owner, prior to sending to the driver. */
3374         skb_orphan(skb);
3375
3376         err = hdev->send(hdev, skb);
3377         if (err < 0) {
3378                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3379                 kfree_skb(skb);
3380         }
3381 }
3382
3383 bool hci_req_pending(struct hci_dev *hdev)
3384 {
3385         return hdev->req_status == HCI_REQ_PEND;
3386 }
3387
3388 /* Send HCI command */
3389 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3390                  const void *param)
3391 {
3392         struct sk_buff *skb;
3393
3394         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3395
3396         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3397         if (!skb) {
3398                 BT_ERR("%s no memory for command", hdev->name);
3399                 return -ENOMEM;
3400         }
3401
3402         /* Stand-alone HCI commands must be flagged as
3403          * single-command requests.
3404          */
3405         bt_cb(skb)->req.start = true;
3406
3407         skb_queue_tail(&hdev->cmd_q, skb);
3408         queue_work(hdev->workqueue, &hdev->cmd_work);
3409
3410         return 0;
3411 }
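
/* Illustrative sketch: a one byte parameter command such as Write Scan
 * Enable can be queued like this:
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */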
3412
3413 /* Get data from the previously sent command */
3414 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3415 {
3416         struct hci_command_hdr *hdr;
3417
3418         if (!hdev->sent_cmd)
3419                 return NULL;
3420
3421         hdr = (void *) hdev->sent_cmd->data;
3422
3423         if (hdr->opcode != cpu_to_le16(opcode))
3424                 return NULL;
3425
3426         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3427
3428         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3429 }
3430
3431 /* Send ACL data */
3432 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3433 {
3434         struct hci_acl_hdr *hdr;
3435         int len = skb->len;
3436
3437         skb_push(skb, HCI_ACL_HDR_SIZE);
3438         skb_reset_transport_header(skb);
3439         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3440         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3441         hdr->dlen   = cpu_to_le16(len);
3442 }
3443
3444 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3445                           struct sk_buff *skb, __u16 flags)
3446 {
3447         struct hci_conn *conn = chan->conn;
3448         struct hci_dev *hdev = conn->hdev;
3449         struct sk_buff *list;
3450
3451         skb->len = skb_headlen(skb);
3452         skb->data_len = 0;
3453
3454         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3455
3456         switch (hdev->dev_type) {
3457         case HCI_BREDR:
3458                 hci_add_acl_hdr(skb, conn->handle, flags);
3459                 break;
3460         case HCI_AMP:
3461                 hci_add_acl_hdr(skb, chan->handle, flags);
3462                 break;
3463         default:
3464                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3465                 return;
3466         }
3467
3468         list = skb_shinfo(skb)->frag_list;
3469         if (!list) {
3470                 /* Non-fragmented */
3471                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3472
3473                 skb_queue_tail(queue, skb);
3474         } else {
3475                 /* Fragmented */
3476                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3477
3478                 skb_shinfo(skb)->frag_list = NULL;
3479
3480                 /* Queue all fragments atomically. We need to use spin_lock_bh
3481                  * here because of 6LoWPAN links, as there this function is
3482                  * called from softirq and using normal spin lock could cause
3483                  * deadlocks.
3484                  */
3485                 spin_lock_bh(&queue->lock);
3486
3487                 __skb_queue_tail(queue, skb);
3488
3489                 flags &= ~ACL_START;
3490                 flags |= ACL_CONT;
3491                 do {
3492                         skb = list;
                        list = list->next;
3493
3494                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3495                         hci_add_acl_hdr(skb, conn->handle, flags);
3496
3497                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3498
3499                         __skb_queue_tail(queue, skb);
3500                 } while (list);
3501
3502                 spin_unlock_bh(&queue->lock);
3503         }
3504 }
3505
3506 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3507 {
3508         struct hci_dev *hdev = chan->conn->hdev;
3509
3510         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3511
3512         hci_queue_acl(chan, &chan->data_q, skb, flags);
3513
3514         queue_work(hdev->workqueue, &hdev->tx_work);
3515 }
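
/* Illustrative sketch: L2CAP is the main user of hci_send_acl(); it
 * submits an outgoing skb on its connection's HCI channel roughly as
 *
 *	hci_send_acl(conn->hchan, skb, ACL_START_NO_FLUSH);
 *
 * where conn is the struct l2cap_conn owning the channel.
 */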
3516
3517 /* Send SCO data */
3518 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3519 {
3520         struct hci_dev *hdev = conn->hdev;
3521         struct hci_sco_hdr hdr;
3522
3523         BT_DBG("%s len %d", hdev->name, skb->len);
3524
3525         hdr.handle = cpu_to_le16(conn->handle);
3526         hdr.dlen   = skb->len;
3527
3528         skb_push(skb, HCI_SCO_HDR_SIZE);
3529         skb_reset_transport_header(skb);
3530         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3531
3532         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3533
3534         skb_queue_tail(&conn->data_q, skb);
3535         queue_work(hdev->workqueue, &hdev->tx_work);
3536 }
3537
3538 /* ---- HCI TX task (outgoing data) ---- */
3539
3540 /* HCI Connection scheduler */
3541 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3542                                      int *quote)
3543 {
3544         struct hci_conn_hash *h = &hdev->conn_hash;
3545         struct hci_conn *conn = NULL, *c;
3546         unsigned int num = 0, min = ~0;
3547
3548         /* We don't have to lock device here. Connections are always
3549          * added and removed with TX task disabled. */
3550
3551         rcu_read_lock();
3552
3553         list_for_each_entry_rcu(c, &h->list, list) {
3554                 if (c->type != type || skb_queue_empty(&c->data_q))
3555                         continue;
3556
3557                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3558                         continue;
3559
3560                 num++;
3561
3562                 if (c->sent < min) {
3563                         min  = c->sent;
3564                         conn = c;
3565                 }
3566
3567                 if (hci_conn_num(hdev, type) == num)
3568                         break;
3569         }
3570
3571         rcu_read_unlock();
3572
3573         if (conn) {
3574                 int cnt, q;
3575
3576                 switch (conn->type) {
3577                 case ACL_LINK:
3578                         cnt = hdev->acl_cnt;
3579                         break;
3580                 case SCO_LINK:
3581                 case ESCO_LINK:
3582                         cnt = hdev->sco_cnt;
3583                         break;
3584                 case LE_LINK:
3585                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3586                         break;
3587                 default:
3588                         cnt = 0;
3589                         BT_ERR("Unknown link type");
3590                 }
3591
3592                 q = cnt / num;
3593                 *quote = q ? q : 1;
3594         } else {
3595                 *quote = 0;
        }
3596
3597         BT_DBG("conn %p quote %d", conn, *quote);
3598         return conn;
3599 }
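
/* Example of the quota math above: with hdev->acl_cnt = 5 free slots
 * shared by num = 2 busy ACL connections, the picked connection gets a
 * quote of 5 / 2 = 2 packets; the quote never drops below 1, so a
 * connection with queued data cannot starve completely.
 */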
3600
3601 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3602 {
3603         struct hci_conn_hash *h = &hdev->conn_hash;
3604         struct hci_conn *c;
3605
3606         BT_ERR("%s link tx timeout", hdev->name);
3607
3608         rcu_read_lock();
3609
3610         /* Kill stalled connections */
3611         list_for_each_entry_rcu(c, &h->list, list) {
3612                 if (c->type == type && c->sent) {
3613                         BT_ERR("%s killing stalled connection %pMR",
3614                                hdev->name, &c->dst);
3615                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3616                 }
3617         }
3618
3619         rcu_read_unlock();
3620 }
3621
3622 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3623                                       int *quote)
3624 {
3625         struct hci_conn_hash *h = &hdev->conn_hash;
3626         struct hci_chan *chan = NULL;
3627         unsigned int num = 0, min = ~0, cur_prio = 0;
3628         struct hci_conn *conn;
3629         int cnt, q, conn_num = 0;
3630
3631         BT_DBG("%s", hdev->name);
3632
3633         rcu_read_lock();
3634
3635         list_for_each_entry_rcu(conn, &h->list, list) {
3636                 struct hci_chan *tmp;
3637
3638                 if (conn->type != type)
3639                         continue;
3640
3641                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3642                         continue;
3643
3644                 conn_num++;
3645
3646                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3647                         struct sk_buff *skb;
3648
3649                         if (skb_queue_empty(&tmp->data_q))
3650                                 continue;
3651
3652                         skb = skb_peek(&tmp->data_q);
3653                         if (skb->priority < cur_prio)
3654                                 continue;
3655
3656                         if (skb->priority > cur_prio) {
3657                                 num = 0;
3658                                 min = ~0;
3659                                 cur_prio = skb->priority;
3660                         }
3661
3662                         num++;
3663
3664                         if (conn->sent < min) {
3665                                 min  = conn->sent;
3666                                 chan = tmp;
3667                         }
3668                 }
3669
3670                 if (hci_conn_num(hdev, type) == conn_num)
3671                         break;
3672         }
3673
3674         rcu_read_unlock();
3675
3676         if (!chan)
3677                 return NULL;
3678
3679         switch (chan->conn->type) {
3680         case ACL_LINK:
3681                 cnt = hdev->acl_cnt;
3682                 break;
3683         case AMP_LINK:
3684                 cnt = hdev->block_cnt;
3685                 break;
3686         case SCO_LINK:
3687         case ESCO_LINK:
3688                 cnt = hdev->sco_cnt;
3689                 break;
3690         case LE_LINK:
3691                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3692                 break;
3693         default:
3694                 cnt = 0;
3695                 BT_ERR("Unknown link type");
3696         }
3697
3698         q = cnt / num;
3699         *quote = q ? q : 1;
3700         BT_DBG("chan %p quote %d", chan, *quote);
3701         return chan;
3702 }
3703
3704 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3705 {
3706         struct hci_conn_hash *h = &hdev->conn_hash;
3707         struct hci_conn *conn;
3708         int num = 0;
3709
3710         BT_DBG("%s", hdev->name);
3711
3712         rcu_read_lock();
3713
3714         list_for_each_entry_rcu(conn, &h->list, list) {
3715                 struct hci_chan *chan;
3716
3717                 if (conn->type != type)
3718                         continue;
3719
3720                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3721                         continue;
3722
3723                 num++;
3724
3725                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3726                         struct sk_buff *skb;
3727
3728                         if (chan->sent) {
3729                                 chan->sent = 0;
3730                                 continue;
3731                         }
3732
3733                         if (skb_queue_empty(&chan->data_q))
3734                                 continue;
3735
3736                         skb = skb_peek(&chan->data_q);
3737                         if (skb->priority >= HCI_PRIO_MAX - 1)
3738                                 continue;
3739
3740                         skb->priority = HCI_PRIO_MAX - 1;
3741
3742                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3743                                skb->priority);
3744                 }
3745
3746                 if (hci_conn_num(hdev, type) == num)
3747                         break;
3748         }
3749
3750         rcu_read_unlock();
3752 }
3753
3754 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3755 {
3756         /* Calculate count of blocks used by this packet */
3757         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3758 }
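
/* Example: with hdev->block_len = 64, a 100 byte ACL skb carries 96
 * payload bytes after the 4 byte ACL header and therefore occupies
 * DIV_ROUND_UP(96, 64) = 2 controller blocks.
 */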
3759
3760 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3761 {
3762         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3763                 /* ACL tx timeout must be longer than maximum
3764                  * link supervision timeout (40.9 seconds) */
3765                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3766                                        HCI_ACL_TX_TIMEOUT))
3767                         hci_link_tx_to(hdev, ACL_LINK);
3768         }
3769 }
3770
3771 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3772 {
3773         unsigned int cnt = hdev->acl_cnt;
3774         struct hci_chan *chan;
3775         struct sk_buff *skb;
3776         int quote;
3777
3778         __check_timeout(hdev, cnt);
3779
3780         while (hdev->acl_cnt &&
3781                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3782                 u32 priority = (skb_peek(&chan->data_q))->priority;
3783                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3784                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3785                                skb->len, skb->priority);
3786
3787                         /* Stop if priority has changed */
3788                         if (skb->priority < priority)
3789                                 break;
3790
3791                         skb = skb_dequeue(&chan->data_q);
3792
3793                         hci_conn_enter_active_mode(chan->conn,
3794                                                    bt_cb(skb)->force_active);
3795
3796                         hci_send_frame(hdev, skb);
3797                         hdev->acl_last_tx = jiffies;
3798
3799                         hdev->acl_cnt--;
3800                         chan->sent++;
3801                         chan->conn->sent++;
3802                 }
3803         }
3804
3805         if (cnt != hdev->acl_cnt)
3806                 hci_prio_recalculate(hdev, ACL_LINK);
3807 }
3808
3809 static void hci_sched_acl_blk(struct hci_dev *hdev)
3810 {
3811         unsigned int cnt = hdev->block_cnt;
3812         struct hci_chan *chan;
3813         struct sk_buff *skb;
3814         int quote;
3815         u8 type;
3816
3817         __check_timeout(hdev, cnt);
3818
3819         BT_DBG("%s", hdev->name);
3820
3821         if (hdev->dev_type == HCI_AMP)
3822                 type = AMP_LINK;
3823         else
3824                 type = ACL_LINK;
3825
3826         while (hdev->block_cnt > 0 &&
3827                (chan = hci_chan_sent(hdev, type, &quote))) {
3828                 u32 priority = (skb_peek(&chan->data_q))->priority;
3829                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3830                         int blocks;
3831
3832                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3833                                skb->len, skb->priority);
3834
3835                         /* Stop if priority has changed */
3836                         if (skb->priority < priority)
3837                                 break;
3838
3839                         skb = skb_dequeue(&chan->data_q);
3840
3841                         blocks = __get_blocks(hdev, skb);
3842                         if (blocks > hdev->block_cnt)
3843                                 return;
3844
3845                         hci_conn_enter_active_mode(chan->conn,
3846                                                    bt_cb(skb)->force_active);
3847
3848                         hci_send_frame(hdev, skb);
3849                         hdev->acl_last_tx = jiffies;
3850
3851                         hdev->block_cnt -= blocks;
3852                         quote -= blocks;
3853
3854                         chan->sent += blocks;
3855                         chan->conn->sent += blocks;
3856                 }
3857         }
3858
3859         if (cnt != hdev->block_cnt)
3860                 hci_prio_recalculate(hdev, type);
3861 }
3862
3863 static void hci_sched_acl(struct hci_dev *hdev)
3864 {
3865         BT_DBG("%s", hdev->name);
3866
3867         /* No ACL link over BR/EDR controller */
3868         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3869                 return;
3870
3871         /* No AMP link over AMP controller */
3872         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3873                 return;
3874
3875         switch (hdev->flow_ctl_mode) {
3876         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3877                 hci_sched_acl_pkt(hdev);
3878                 break;
3879
3880         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3881                 hci_sched_acl_blk(hdev);
3882                 break;
3883         }
3884 }
3885
3886 /* Schedule SCO */
3887 static void hci_sched_sco(struct hci_dev *hdev)
3888 {
3889         struct hci_conn *conn;
3890         struct sk_buff *skb;
3891         int quote;
3892
3893         BT_DBG("%s", hdev->name);
3894
3895         if (!hci_conn_num(hdev, SCO_LINK))
3896                 return;
3897
3898         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3899                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3900                         BT_DBG("skb %p len %d", skb, skb->len);
3901                         hci_send_frame(hdev, skb);
3902
3903                         conn->sent++;
3904                         if (conn->sent == ~0)
3905                                 conn->sent = 0;
3906                 }
3907         }
3908 }
3909
3910 static void hci_sched_esco(struct hci_dev *hdev)
3911 {
3912         struct hci_conn *conn;
3913         struct sk_buff *skb;
3914         int quote;
3915
3916         BT_DBG("%s", hdev->name);
3917
3918         if (!hci_conn_num(hdev, ESCO_LINK))
3919                 return;
3920
3921         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3922                                                      &quote))) {
3923                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3924                         BT_DBG("skb %p len %d", skb, skb->len);
3925                         hci_send_frame(hdev, skb);
3926
3927                         conn->sent++;
3928                         if (conn->sent == ~0)
3929                                 conn->sent = 0;
3930                 }
3931         }
3932 }
3933
3934 static void hci_sched_le(struct hci_dev *hdev)
3935 {
3936         struct hci_chan *chan;
3937         struct sk_buff *skb;
3938         int quote, cnt, tmp;
3939
3940         BT_DBG("%s", hdev->name);
3941
3942         if (!hci_conn_num(hdev, LE_LINK))
3943                 return;
3944
3945         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3946                 /* LE tx timeout must be longer than maximum
3947                  * link supervision timeout (40.9 seconds) */
3948                 if (!hdev->le_cnt && hdev->le_pkts &&
3949                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3950                         hci_link_tx_to(hdev, LE_LINK);
3951         }
3952
3953         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3954         tmp = cnt;
3955         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3956                 u32 priority = (skb_peek(&chan->data_q))->priority;
3957                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3958                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3959                                skb->len, skb->priority);
3960
3961                         /* Stop if priority has changed */
3962                         if (skb->priority < priority)
3963                                 break;
3964
3965                         skb = skb_dequeue(&chan->data_q);
3966
3967                         hci_send_frame(hdev, skb);
3968                         hdev->le_last_tx = jiffies;
3969
3970                         cnt--;
3971                         chan->sent++;
3972                         chan->conn->sent++;
3973                 }
3974         }
3975
3976         if (hdev->le_pkts)
3977                 hdev->le_cnt = cnt;
3978         else
3979                 hdev->acl_cnt = cnt;
3980
3981         if (cnt != tmp)
3982                 hci_prio_recalculate(hdev, LE_LINK);
3983 }
3984
3985 static void hci_tx_work(struct work_struct *work)
3986 {
3987         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3988         struct sk_buff *skb;
3989
3990         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3991                hdev->sco_cnt, hdev->le_cnt);
3992
3993         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3994                 /* Schedule queues and send stuff to HCI driver */
3995                 hci_sched_acl(hdev);
3996                 hci_sched_sco(hdev);
3997                 hci_sched_esco(hdev);
3998                 hci_sched_le(hdev);
3999         }
4000
4001         /* Send next queued raw (unknown type) packet */
4002         while ((skb = skb_dequeue(&hdev->raw_q)))
4003                 hci_send_frame(hdev, skb);
4004 }
4005
4006 /* ----- HCI RX task (incoming data processing) ----- */
4007
4008 /* ACL data packet */
4009 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4010 {
4011         struct hci_acl_hdr *hdr = (void *) skb->data;
4012         struct hci_conn *conn;
4013         __u16 handle, flags;
4014
4015         skb_pull(skb, HCI_ACL_HDR_SIZE);
4016
4017         handle = __le16_to_cpu(hdr->handle);
4018         flags  = hci_flags(handle);
4019         handle = hci_handle(handle);
4020
4021         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4022                handle, flags);
4023
4024         hdev->stat.acl_rx++;
4025
4026         hci_dev_lock(hdev);
4027         conn = hci_conn_hash_lookup_handle(hdev, handle);
4028         hci_dev_unlock(hdev);
4029
4030         if (conn) {
4031                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4032
4033                 /* Send to upper protocol */
4034                 l2cap_recv_acldata(conn, skb, flags);
4035                 return;
4036         }
4037
4038         BT_ERR("%s ACL packet for unknown connection handle %d",
4039                hdev->name, handle);
4040
4041         kfree_skb(skb);
4042 }
4043
4044 /* SCO data packet */
4045 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4046 {
4047         struct hci_sco_hdr *hdr = (void *) skb->data;
4048         struct hci_conn *conn;
4049         __u16 handle;
4050
4051         skb_pull(skb, HCI_SCO_HDR_SIZE);
4052
4053         handle = __le16_to_cpu(hdr->handle);
4054
4055         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4056
4057         hdev->stat.sco_rx++;
4058
4059         hci_dev_lock(hdev);
4060         conn = hci_conn_hash_lookup_handle(hdev, handle);
4061         hci_dev_unlock(hdev);
4062
4063         if (conn) {
4064                 /* Send to upper protocol */
4065                 sco_recv_scodata(conn, skb);
4066                 return;
4067         }
4068
4069         BT_ERR("%s SCO packet for unknown connection handle %d",
4070                hdev->name, handle);
4071
4072         kfree_skb(skb);
4073 }
4074
4075 static bool hci_req_is_complete(struct hci_dev *hdev)
4076 {
4077         struct sk_buff *skb;
4078
4079         skb = skb_peek(&hdev->cmd_q);
4080         if (!skb)
4081                 return true;
4082
4083         return bt_cb(skb)->req.start;
4084 }
4085
4086 static void hci_resend_last(struct hci_dev *hdev)
4087 {
4088         struct hci_command_hdr *sent;
4089         struct sk_buff *skb;
4090         u16 opcode;
4091
4092         if (!hdev->sent_cmd)
4093                 return;
4094
4095         sent = (void *) hdev->sent_cmd->data;
4096         opcode = __le16_to_cpu(sent->opcode);
4097         if (opcode == HCI_OP_RESET)
4098                 return;
4099
4100         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4101         if (!skb)
4102                 return;
4103
4104         skb_queue_head(&hdev->cmd_q, skb);
4105         queue_work(hdev->workqueue, &hdev->cmd_work);
4106 }
4107
4108 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4109 {
4110         hci_req_complete_t req_complete = NULL;
4111         struct sk_buff *skb;
4112         unsigned long flags;
4113
4114         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4115
4116         /* If the completed command doesn't match the last one that was
4117          * sent, we need to do special handling of it.
4118          */
4119         if (!hci_sent_cmd_data(hdev, opcode)) {
4120                 /* Some CSR based controllers generate a spontaneous
4121                  * reset complete event during init and any pending
4122                  * command will never be completed. In such a case we
4123                  * need to resend whatever was the last sent
4124                  * command.
4125                  */
4126                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4127                         hci_resend_last(hdev);
4128
4129                 return;
4130         }
4131
4132         /* If the command succeeded and there are still more commands in
4133          * this request, the request is not yet complete.
4134          */
4135         if (!status && !hci_req_is_complete(hdev))
4136                 return;
4137
4138         /* If this was the last command in a request the complete
4139          * callback would be found in hdev->sent_cmd instead of the
4140          * command queue (hdev->cmd_q).
4141          */
4142         if (hdev->sent_cmd) {
4143                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4144
4145                 if (req_complete) {
4146                         /* We must set the complete callback to NULL to
4147                          * avoid calling the callback more than once if
4148                          * this function gets called again.
4149                          */
4150                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4151
4152                         goto call_complete;
4153                 }
4154         }
4155
4156         /* Remove all pending commands belonging to this request */
4157         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4158         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4159                 if (bt_cb(skb)->req.start) {
4160                         __skb_queue_head(&hdev->cmd_q, skb);
4161                         break;
4162                 }
4163
4164                 req_complete = bt_cb(skb)->req.complete;
4165                 kfree_skb(skb);
4166         }
4167         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4168
4169 call_complete:
4170         if (req_complete)
4171                 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4172 }
4173
4174 static void hci_rx_work(struct work_struct *work)
4175 {
4176         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4177         struct sk_buff *skb;
4178
4179         BT_DBG("%s", hdev->name);
4180
4181         while ((skb = skb_dequeue(&hdev->rx_q))) {
4182                 /* Send copy to monitor */
4183                 hci_send_to_monitor(hdev, skb);
4184
4185                 if (atomic_read(&hdev->promisc)) {
4186                         /* Send copy to the sockets */
4187                         hci_send_to_sock(hdev, skb);
4188                 }
4189
4190                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4191                         kfree_skb(skb);
4192                         continue;
4193                 }
4194
4195                 if (test_bit(HCI_INIT, &hdev->flags)) {
4196                         /* Don't process data packets in this state. */
4197                         switch (bt_cb(skb)->pkt_type) {
4198                         case HCI_ACLDATA_PKT:
4199                         case HCI_SCODATA_PKT:
4200                                 kfree_skb(skb);
4201                                 continue;
4202                         }
4203                 }
4204
4205                 /* Process frame */
4206                 switch (bt_cb(skb)->pkt_type) {
4207                 case HCI_EVENT_PKT:
4208                         BT_DBG("%s Event packet", hdev->name);
4209                         hci_event_packet(hdev, skb);
4210                         break;
4211
4212                 case HCI_ACLDATA_PKT:
4213                         BT_DBG("%s ACL data packet", hdev->name);
4214                         hci_acldata_packet(hdev, skb);
4215                         break;
4216
4217                 case HCI_SCODATA_PKT:
4218                         BT_DBG("%s SCO data packet", hdev->name);
4219                         hci_scodata_packet(hdev, skb);
4220                         break;
4221
4222                 default:
4223                         kfree_skb(skb);
4224                         break;
4225                 }
4226         }
4227 }
4228
4229 static void hci_cmd_work(struct work_struct *work)
4230 {
4231         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4232         struct sk_buff *skb;
4233
4234         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4235                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4236
4237         /* Send queued commands */
4238         if (atomic_read(&hdev->cmd_cnt)) {
4239                 skb = skb_dequeue(&hdev->cmd_q);
4240                 if (!skb)
4241                         return;
4242
4243                 kfree_skb(hdev->sent_cmd);
4244
4245                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4246                 if (hdev->sent_cmd) {
4247                         atomic_dec(&hdev->cmd_cnt);
4248                         hci_send_frame(hdev, skb);
4249                         if (test_bit(HCI_RESET, &hdev->flags))
4250                                 cancel_delayed_work(&hdev->cmd_timer);
4251                         else
4252                                 schedule_delayed_work(&hdev->cmd_timer,
4253                                                       HCI_CMD_TIMEOUT);
4254                 } else {
4255                         skb_queue_head(&hdev->cmd_q, skb);
4256                         queue_work(hdev->workqueue, &hdev->cmd_work);
4257                 }
4258         }
4259 }