Bluetooth: Introduce a new HCI_RFKILLED flag
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
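
This is the post-patch state of net/bluetooth/hci_core.c (listing truncated).
The new HCI_RFKILLED flag latches the controller's rfkill state in
hdev->dev_flags: the rfkill callback sets or clears the bit, and
hci_dev_open() refuses to bring a blocked device up. A minimal sketch of the
flag's lifecycle (illustrative only; the real call sites are
hci_rfkill_set_block() and hci_dev_open() in the listing below):

        /* rfkill callback: latch the radio state into a dev_flags bit */
        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                hci_dev_do_close(hdev); /* power the device down at once */
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        /* hci_dev_open(): a blocked radio fails cleanly with -ERFKILL */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }
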
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
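
/* Usage sketch (illustrative, not part of this file): a caller can issue a
 * command synchronously and parse the Command Complete parameters from the
 * returned skb, e.g. for Read BD_ADDR:
 *
 *      struct sk_buff *skb;
 *      struct hci_rp_read_bd_addr *rp;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      rp = (struct hci_rp_read_bd_addr *) skb->data;
 *      ... use rp->bdaddr ...
 *      kfree_skb(skb);
 */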

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

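/* Pick the richest inquiry result format the controller can deliver for the
 * Write Inquiry Mode command: 0x02 = inquiry result with RSSI or extended
 * inquiry result, 0x01 = inquiry result with RSSI, 0x00 = standard inquiry
 * result. The manufacturer/revision checks below appear to be quirks for
 * specific controllers that handle RSSI mode without advertising support
 * for it in their feature bits.
 */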
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * an event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
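
/* Note on the LE event mask set above: events[0] = 0x1f enables the first
 * five LE meta events (LE Connection Complete, LE Advertising Report,
 * LE Connection Update Complete, LE Read Remote Used Features Complete and
 * LE Long Term Key Request).
 */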

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Only send HCI_Delete_Stored_Link_Key if it is supported */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
         * BR/EDR/LE controllers. AMP controllers only need the first stage
         * init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

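/* Re-insert @ie into the resolve list so that the list stays ordered by
 * ascending abs(rssi), i.e. names are resolved for the strongest signals
 * first; entries with a name request already pending are skipped past.
 */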
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

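/* Action function for wait_on_bit() below: yield the CPU and report
 * whether a signal woke us up (a non-zero return aborts the wait).
 */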
static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer of
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so dump into a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

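/* Build the LE advertising data payload as a sequence of
 * (length, AD type, data) structures: an optional Flags field, an optional
 * TX power field, and a (possibly shortened) local name. Returns the number
 * of bytes written to @ptr.
 */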
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
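
/* Registration sketch (illustrative; the rfkill hookup is typically done in
 * hci_register_dev(), later in this file, along the lines of):
 *
 *      hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *                                  RFKILL_TYPE_BLUETOOTH,
 *                                  &hci_rfkill_ops, hdev);
 *      if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *              rfkill_destroy(hdev->rfkill);
 *              hdev->rfkill = NULL;
 *      }
 */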

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
1838                         continue;
1839
1840                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1841
1842                 list_del(&k->list);
1843                 kfree(k);
1844         }
1845
1846         return 0;
1847 }
1848
1849 /* HCI command timer function */
1850 static void hci_cmd_timeout(unsigned long arg)
1851 {
1852         struct hci_dev *hdev = (void *) arg;
1853
1854         if (hdev->sent_cmd) {
1855                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1856                 u16 opcode = __le16_to_cpu(sent->opcode);
1857
1858                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1859         } else {
1860                 BT_ERR("%s command tx timeout", hdev->name);
1861         }
1862
1863         atomic_set(&hdev->cmd_cnt, 1);
1864         queue_work(hdev->workqueue, &hdev->cmd_work);
1865 }
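
/* A minimal sketch of how this timer is driven (illustrative; the actual
 * arming happens in hci_cmd_work() when a command skb is handed to the
 * driver, and HCI_CMD_TIMEOUT is assumed to be the command timeout
 * constant from the HCI headers):
 *
 *	// after sending a command to the driver:
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 *	// once the matching Command Complete/Status event arrives:
 *	del_timer(&hdev->cmd_timer);
 *
 * If neither event shows up, hci_cmd_timeout() above fires, logs the stuck
 * opcode and restores one command credit so the queue can make progress.
 */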
1866
1867 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1868                                           bdaddr_t *bdaddr)
1869 {
1870         struct oob_data *data;
1871
1872         list_for_each_entry(data, &hdev->remote_oob_data, list)
1873                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1874                         return data;
1875
1876         return NULL;
1877 }
1878
1879 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1880 {
1881         struct oob_data *data;
1882
1883         data = hci_find_remote_oob_data(hdev, bdaddr);
1884         if (!data)
1885                 return -ENOENT;
1886
1887         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1888
1889         list_del(&data->list);
1890         kfree(data);
1891
1892         return 0;
1893 }
1894
1895 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1896 {
1897         struct oob_data *data, *n;
1898
1899         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1900                 list_del(&data->list);
1901                 kfree(data);
1902         }
1903
1904         return 0;
1905 }
1906
1907 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1908                             u8 *randomizer)
1909 {
1910         struct oob_data *data;
1911
1912         data = hci_find_remote_oob_data(hdev, bdaddr);
1913
1914         if (!data) {
1915                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1916                 if (!data)
1917                         return -ENOMEM;
1918
1919                 bacpy(&data->bdaddr, bdaddr);
1920                 list_add(&data->list, &hdev->remote_oob_data);
1921         }
1922
1923         memcpy(data->hash, hash, sizeof(data->hash));
1924         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1925
1926         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1927
1928         return 0;
1929 }
1930
1931 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1932 {
1933         struct bdaddr_list *b;
1934
1935         list_for_each_entry(b, &hdev->blacklist, list)
1936                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1937                         return b;
1938
1939         return NULL;
1940 }
1941
1942 int hci_blacklist_clear(struct hci_dev *hdev)
1943 {
1944         struct list_head *p, *n;
1945
1946         list_for_each_safe(p, n, &hdev->blacklist) {
1947                 struct bdaddr_list *b;
1948
1949                 b = list_entry(p, struct bdaddr_list, list);
1950
1951                 list_del(p);
1952                 kfree(b);
1953         }
1954
1955         return 0;
1956 }
1957
1958 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1959 {
1960         struct bdaddr_list *entry;
1961
1962         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1963                 return -EBADF;
1964
1965         if (hci_blacklist_lookup(hdev, bdaddr))
1966                 return -EEXIST;
1967
1968         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1969         if (!entry)
1970                 return -ENOMEM;
1971
1972         bacpy(&entry->bdaddr, bdaddr);
1973
1974         list_add(&entry->list, &hdev->blacklist);
1975
1976         return mgmt_device_blocked(hdev, bdaddr, type);
1977 }
1978
1979 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1980 {
1981         struct bdaddr_list *entry;
1982
1983         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1984                 return hci_blacklist_clear(hdev);
1985
1986         entry = hci_blacklist_lookup(hdev, bdaddr);
1987         if (!entry)
1988                 return -ENOENT;
1989
1990         list_del(&entry->list);
1991         kfree(entry);
1992
1993         return mgmt_device_unblocked(hdev, bdaddr, type);
1994 }
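
/* Usage sketch for the blacklist helpers above (illustrative; callers such
 * as the mgmt code hold hci_dev_lock() around these, and BDADDR_BREDR is
 * assumed as the address type):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 *
 * Passing BDADDR_ANY to hci_blacklist_del() clears the whole list instead
 * of removing a single entry.
 */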
1995
1996 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1997 {
1998         struct le_scan_params *param = (struct le_scan_params *) opt;
1999         struct hci_cp_le_set_scan_param cp;
2000
2001         memset(&cp, 0, sizeof(cp));
2002         cp.type = param->type;
2003         cp.interval = cpu_to_le16(param->interval);
2004         cp.window = cpu_to_le16(param->window);
2005
2006         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2007 }
2008
2009 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2010 {
2011         struct hci_cp_le_set_scan_enable cp;
2012
2013         memset(&cp, 0, sizeof(cp));
2014         cp.enable = LE_SCAN_ENABLE;
2015         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2016
2017         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2018 }
2019
2020 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2021                           u16 window, int timeout)
2022 {
2023         long timeo = msecs_to_jiffies(3000);
2024         struct le_scan_params param;
2025         int err;
2026
2027         BT_DBG("%s", hdev->name);
2028
2029         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2030                 return -EINPROGRESS;
2031
2032         param.type = type;
2033         param.interval = interval;
2034         param.window = window;
2035
2036         hci_req_lock(hdev);
2037
2038         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2039                              timeo);
2040         if (!err)
2041                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2042
2043         hci_req_unlock(hdev);
2044
2045         if (err < 0)
2046                 return err;
2047
2048         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2049                            timeout);
2050
2051         return 0;
2052 }
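
/* The interval and window arguments are in units of 0.625 ms, as defined
 * for the LE Set Scan Parameters command; the timeout is in jiffies. An
 * illustrative call (the values are examples, not defaults used anywhere
 * in this file):
 *
 *	// listen 11.25 ms out of every 60 ms, for 10.24 seconds:
 *	err = hci_do_le_scan(hdev, LE_SCAN_ACTIVE, 0x0060, 0x0012,
 *			     msecs_to_jiffies(10240));
 *
 * 0x0060 * 0.625 ms = 60 ms interval; 0x0012 * 0.625 ms = 11.25 ms window.
 */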
2053
2054 int hci_cancel_le_scan(struct hci_dev *hdev)
2055 {
2056         BT_DBG("%s", hdev->name);
2057
2058         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2059                 return -EALREADY;
2060
2061         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2062                 struct hci_cp_le_set_scan_enable cp;
2063
2064                 /* Send HCI command to disable LE Scan */
2065                 memset(&cp, 0, sizeof(cp));
2066                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2067         }
2068
2069         return 0;
2070 }
2071
2072 static void le_scan_disable_work(struct work_struct *work)
2073 {
2074         struct hci_dev *hdev = container_of(work, struct hci_dev,
2075                                             le_scan_disable.work);
2076         struct hci_cp_le_set_scan_enable cp;
2077
2078         BT_DBG("%s", hdev->name);
2079
2080         memset(&cp, 0, sizeof(cp));
2081
2082         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2083 }
2084
2085 static void le_scan_work(struct work_struct *work)
2086 {
2087         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2088         struct le_scan_params *param = &hdev->le_scan_params;
2089
2090         BT_DBG("%s", hdev->name);
2091
2092         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2093                        param->timeout);
2094 }
2095
2096 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2097                 int timeout)
2098 {
2099         struct le_scan_params *param = &hdev->le_scan_params;
2100
2101         BT_DBG("%s", hdev->name);
2102
2103         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2104                 return -ENOTSUPP;
2105
2106         if (work_busy(&hdev->le_scan))
2107                 return -EINPROGRESS;
2108
2109         param->type = type;
2110         param->interval = interval;
2111         param->window = window;
2112         param->timeout = timeout;
2113
2114         queue_work(system_long_wq, &hdev->le_scan);
2115
2116         return 0;
2117 }
2118
2119 /* Alloc HCI device */
2120 struct hci_dev *hci_alloc_dev(void)
2121 {
2122         struct hci_dev *hdev;
2123
2124         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2125         if (!hdev)
2126                 return NULL;
2127
2128         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2129         hdev->esco_type = (ESCO_HV1);
2130         hdev->link_mode = (HCI_LM_ACCEPT);
2131         hdev->io_capability = 0x03; /* No Input No Output */
2132         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2133         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2134
2135         hdev->sniff_max_interval = 800;
2136         hdev->sniff_min_interval = 80;
2137
2138         mutex_init(&hdev->lock);
2139         mutex_init(&hdev->req_lock);
2140
2141         INIT_LIST_HEAD(&hdev->mgmt_pending);
2142         INIT_LIST_HEAD(&hdev->blacklist);
2143         INIT_LIST_HEAD(&hdev->uuids);
2144         INIT_LIST_HEAD(&hdev->link_keys);
2145         INIT_LIST_HEAD(&hdev->long_term_keys);
2146         INIT_LIST_HEAD(&hdev->remote_oob_data);
2147         INIT_LIST_HEAD(&hdev->conn_hash.list);
2148
2149         INIT_WORK(&hdev->rx_work, hci_rx_work);
2150         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2151         INIT_WORK(&hdev->tx_work, hci_tx_work);
2152         INIT_WORK(&hdev->power_on, hci_power_on);
2153         INIT_WORK(&hdev->le_scan, le_scan_work);
2154
2155         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2156         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2157         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2158
2159         skb_queue_head_init(&hdev->rx_q);
2160         skb_queue_head_init(&hdev->cmd_q);
2161         skb_queue_head_init(&hdev->raw_q);
2162
2163         init_waitqueue_head(&hdev->req_wait_q);
2164
2165         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2166
2167         hci_init_sysfs(hdev);
2168         discovery_init(hdev);
2169
2170         return hdev;
2171 }
2172 EXPORT_SYMBOL(hci_alloc_dev);
2173
2174 /* Free HCI device */
2175 void hci_free_dev(struct hci_dev *hdev)
2176 {
2177         /* will free via device release */
2178         put_device(&hdev->dev);
2179 }
2180 EXPORT_SYMBOL(hci_free_dev);
2181
2182 /* Register HCI device */
2183 int hci_register_dev(struct hci_dev *hdev)
2184 {
2185         int id, error;
2186
2187         if (!hdev->open || !hdev->close)
2188                 return -EINVAL;
2189
2190         /* Do not allow HCI_AMP devices to register at index 0,
2191          * so the index can be used as the AMP controller ID.
2192          */
2193         switch (hdev->dev_type) {
2194         case HCI_BREDR:
2195                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2196                 break;
2197         case HCI_AMP:
2198                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2199                 break;
2200         default:
2201                 return -EINVAL;
2202         }
2203
2204         if (id < 0)
2205                 return id;
2206
2207         sprintf(hdev->name, "hci%d", id);
2208         hdev->id = id;
2209
2210         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2211
2212         write_lock(&hci_dev_list_lock);
2213         list_add(&hdev->list, &hci_dev_list);
2214         write_unlock(&hci_dev_list_lock);
2215
2216         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2217                                           WQ_MEM_RECLAIM, 1);
2218         if (!hdev->workqueue) {
2219                 error = -ENOMEM;
2220                 goto err;
2221         }
2222
2223         hdev->req_workqueue = alloc_workqueue(hdev->name,
2224                                               WQ_HIGHPRI | WQ_UNBOUND |
2225                                               WQ_MEM_RECLAIM, 1);
2226         if (!hdev->req_workqueue) {
2227                 destroy_workqueue(hdev->workqueue);
2228                 error = -ENOMEM;
2229                 goto err;
2230         }
2231
2232         error = hci_add_sysfs(hdev);
2233         if (error < 0)
2234                 goto err_wqueue;
2235
2236         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2237                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2238                                     hdev);
2239         if (hdev->rfkill) {
2240                 if (rfkill_register(hdev->rfkill) < 0) {
2241                         rfkill_destroy(hdev->rfkill);
2242                         hdev->rfkill = NULL;
2243                 }
2244         }
2245
2246         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2247                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2248
2249         set_bit(HCI_SETUP, &hdev->dev_flags);
2250
2251         if (hdev->dev_type != HCI_AMP)
2252                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2253
2254         hci_notify(hdev, HCI_DEV_REG);
2255         hci_dev_hold(hdev);
2256
2257         queue_work(hdev->req_workqueue, &hdev->power_on);
2258
2259         return id;
2260
2261 err_wqueue:
2262         destroy_workqueue(hdev->workqueue);
2263         destroy_workqueue(hdev->req_workqueue);
2264 err:
2265         ida_simple_remove(&hci_index_ida, hdev->id);
2266         write_lock(&hci_dev_list_lock);
2267         list_del(&hdev->list);
2268         write_unlock(&hci_dev_list_lock);
2269
2270         return error;
2271 }
2272 EXPORT_SYMBOL(hci_register_dev);
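
/* A minimal driver-side sketch of the alloc/register lifecycle (illustrative
 * only; my_open, my_close and my_send are hypothetical driver callbacks, and
 * error handling is trimmed):
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;   // called from hci_dev_open()
 *	hdev->close = my_close;  // called from hci_dev_close()
 *	hdev->send  = my_send;   // hands each skb to the transport
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Unregistering reverses this: hci_unregister_dev(hdev) followed by
 * hci_free_dev(hdev).
 */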
2273
2274 /* Unregister HCI device */
2275 void hci_unregister_dev(struct hci_dev *hdev)
2276 {
2277         int i, id;
2278
2279         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2280
2281         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2282
2283         id = hdev->id;
2284
2285         write_lock(&hci_dev_list_lock);
2286         list_del(&hdev->list);
2287         write_unlock(&hci_dev_list_lock);
2288
2289         hci_dev_do_close(hdev);
2290
2291         for (i = 0; i < NUM_REASSEMBLY; i++)
2292                 kfree_skb(hdev->reassembly[i]);
2293
2294         cancel_work_sync(&hdev->power_on);
2295
2296         if (!test_bit(HCI_INIT, &hdev->flags) &&
2297             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2298                 hci_dev_lock(hdev);
2299                 mgmt_index_removed(hdev);
2300                 hci_dev_unlock(hdev);
2301         }
2302
2303         /* mgmt_index_removed should take care of emptying the
2304          * pending list */
2305         BUG_ON(!list_empty(&hdev->mgmt_pending));
2306
2307         hci_notify(hdev, HCI_DEV_UNREG);
2308
2309         if (hdev->rfkill) {
2310                 rfkill_unregister(hdev->rfkill);
2311                 rfkill_destroy(hdev->rfkill);
2312         }
2313
2314         hci_del_sysfs(hdev);
2315
2316         destroy_workqueue(hdev->workqueue);
2317         destroy_workqueue(hdev->req_workqueue);
2318
2319         hci_dev_lock(hdev);
2320         hci_blacklist_clear(hdev);
2321         hci_uuids_clear(hdev);
2322         hci_link_keys_clear(hdev);
2323         hci_smp_ltks_clear(hdev);
2324         hci_remote_oob_data_clear(hdev);
2325         hci_dev_unlock(hdev);
2326
2327         hci_dev_put(hdev);
2328
2329         ida_simple_remove(&hci_index_ida, id);
2330 }
2331 EXPORT_SYMBOL(hci_unregister_dev);
2332
2333 /* Suspend HCI device */
2334 int hci_suspend_dev(struct hci_dev *hdev)
2335 {
2336         hci_notify(hdev, HCI_DEV_SUSPEND);
2337         return 0;
2338 }
2339 EXPORT_SYMBOL(hci_suspend_dev);
2340
2341 /* Resume HCI device */
2342 int hci_resume_dev(struct hci_dev *hdev)
2343 {
2344         hci_notify(hdev, HCI_DEV_RESUME);
2345         return 0;
2346 }
2347 EXPORT_SYMBOL(hci_resume_dev);
2348
2349 /* Receive frame from HCI drivers */
2350 int hci_recv_frame(struct sk_buff *skb)
2351 {
2352         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2353         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2354                       && !test_bit(HCI_INIT, &hdev->flags))) {
2355                 kfree_skb(skb);
2356                 return -ENXIO;
2357         }
2358
2359         /* Incoming skb */
2360         bt_cb(skb)->incoming = 1;
2361
2362         /* Time stamp */
2363         __net_timestamp(skb);
2364
2365         skb_queue_tail(&hdev->rx_q, skb);
2366         queue_work(hdev->workqueue, &hdev->rx_work);
2367
2368         return 0;
2369 }
2370 EXPORT_SYMBOL(hci_recv_frame);
2371
2372 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2373                           int count, __u8 index)
2374 {
2375         int len = 0;
2376         int hlen = 0;
2377         int remain = count;
2378         struct sk_buff *skb;
2379         struct bt_skb_cb *scb;
2380
2381         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2382             index >= NUM_REASSEMBLY)
2383                 return -EILSEQ;
2384
2385         skb = hdev->reassembly[index];
2386
2387         if (!skb) {
2388                 switch (type) {
2389                 case HCI_ACLDATA_PKT:
2390                         len = HCI_MAX_FRAME_SIZE;
2391                         hlen = HCI_ACL_HDR_SIZE;
2392                         break;
2393                 case HCI_EVENT_PKT:
2394                         len = HCI_MAX_EVENT_SIZE;
2395                         hlen = HCI_EVENT_HDR_SIZE;
2396                         break;
2397                 case HCI_SCODATA_PKT:
2398                         len = HCI_MAX_SCO_SIZE;
2399                         hlen = HCI_SCO_HDR_SIZE;
2400                         break;
2401                 }
2402
2403                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2404                 if (!skb)
2405                         return -ENOMEM;
2406
2407                 scb = (void *) skb->cb;
2408                 scb->expect = hlen;
2409                 scb->pkt_type = type;
2410
2411                 skb->dev = (void *) hdev;
2412                 hdev->reassembly[index] = skb;
2413         }
2414
2415         while (count) {
2416                 scb = (void *) skb->cb;
2417                 len = min_t(uint, scb->expect, count);
2418
2419                 memcpy(skb_put(skb, len), data, len);
2420
2421                 count -= len;
2422                 data += len;
2423                 scb->expect -= len;
2424                 remain = count;
2425
2426                 switch (type) {
2427                 case HCI_EVENT_PKT:
2428                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2429                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2430                                 scb->expect = h->plen;
2431
2432                                 if (skb_tailroom(skb) < scb->expect) {
2433                                         kfree_skb(skb);
2434                                         hdev->reassembly[index] = NULL;
2435                                         return -ENOMEM;
2436                                 }
2437                         }
2438                         break;
2439
2440                 case HCI_ACLDATA_PKT:
2441                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2442                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2443                                 scb->expect = __le16_to_cpu(h->dlen);
2444
2445                                 if (skb_tailroom(skb) < scb->expect) {
2446                                         kfree_skb(skb);
2447                                         hdev->reassembly[index] = NULL;
2448                                         return -ENOMEM;
2449                                 }
2450                         }
2451                         break;
2452
2453                 case HCI_SCODATA_PKT:
2454                         if (skb->len == HCI_SCO_HDR_SIZE) {
2455                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2456                                 scb->expect = h->dlen;
2457
2458                                 if (skb_tailroom(skb) < scb->expect) {
2459                                         kfree_skb(skb);
2460                                         hdev->reassembly[index] = NULL;
2461                                         return -ENOMEM;
2462                                 }
2463                         }
2464                         break;
2465                 }
2466
2467                 if (scb->expect == 0) {
2468                         /* Complete frame */
2469
2470                         bt_cb(skb)->pkt_type = type;
2471                         hci_recv_frame(skb);
2472
2473                         hdev->reassembly[index] = NULL;
2474                         return remain;
2475                 }
2476         }
2477
2478         return remain;
2479 }
2480
2481 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2482 {
2483         int rem = 0;
2484
2485         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2486                 return -EILSEQ;
2487
2488         while (count) {
2489                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2490                 if (rem < 0)
2491                         return rem;
2492
2493                 data += (count - rem);
2494                 count = rem;
2495         }
2496
2497         return rem;
2498 }
2499 EXPORT_SYMBOL(hci_recv_fragment);
2500
2501 #define STREAM_REASSEMBLY 0
2502
2503 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2504 {
2505         int type;
2506         int rem = 0;
2507
2508         while (count) {
2509                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2510
2511                 if (!skb) {
2512                         struct { char type; } *pkt;
2513
2514                         /* Start of the frame */
2515                         pkt = data;
2516                         type = pkt->type;
2517
2518                         data++;
2519                         count--;
2520                 } else
2521                         type = bt_cb(skb)->pkt_type;
2522
2523                 rem = hci_reassembly(hdev, type, data, count,
2524                                      STREAM_REASSEMBLY);
2525                 if (rem < 0)
2526                         return rem;
2527
2528                 data += (count - rem);
2529                 count = rem;
2530         }
2531
2532         return rem;
2533 }
2534 EXPORT_SYMBOL(hci_recv_stream_fragment);
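
/* Sketch of how a line-discipline or UART driver might feed raw H4 bytes
 * into the stream reassembler above (illustrative; buf and len stand for
 * whatever chunk the transport delivered, with the packet-type byte
 * carried inline at each frame start):
 *
 *	int err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 *
 * Drivers that already know each fragment's packet type can call
 * hci_recv_fragment(hdev, type, buf, len) instead and omit the inline
 * type byte.
 */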
2535
2536 /* ---- Interface to upper protocols ---- */
2537
2538 int hci_register_cb(struct hci_cb *cb)
2539 {
2540         BT_DBG("%p name %s", cb, cb->name);
2541
2542         write_lock(&hci_cb_list_lock);
2543         list_add(&cb->list, &hci_cb_list);
2544         write_unlock(&hci_cb_list_lock);
2545
2546         return 0;
2547 }
2548 EXPORT_SYMBOL(hci_register_cb);
2549
2550 int hci_unregister_cb(struct hci_cb *cb)
2551 {
2552         BT_DBG("%p name %s", cb, cb->name);
2553
2554         write_lock(&hci_cb_list_lock);
2555         list_del(&cb->list);
2556         write_unlock(&hci_cb_list_lock);
2557
2558         return 0;
2559 }
2560 EXPORT_SYMBOL(hci_unregister_cb);
2561
2562 static int hci_send_frame(struct sk_buff *skb)
2563 {
2564         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2565
2566         if (!hdev) {
2567                 kfree_skb(skb);
2568                 return -ENODEV;
2569         }
2570
2571         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2572
2573         /* Time stamp */
2574         __net_timestamp(skb);
2575
2576         /* Send copy to monitor */
2577         hci_send_to_monitor(hdev, skb);
2578
2579         if (atomic_read(&hdev->promisc)) {
2580                 /* Send copy to the sockets */
2581                 hci_send_to_sock(hdev, skb);
2582         }
2583
2584         /* Get rid of skb owner, prior to sending to the driver. */
2585         skb_orphan(skb);
2586
2587         return hdev->send(skb);
2588 }
2589
2590 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2591 {
2592         skb_queue_head_init(&req->cmd_q);
2593         req->hdev = hdev;
2594         req->err = 0;
2595 }
2596
2597 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2598 {
2599         struct hci_dev *hdev = req->hdev;
2600         struct sk_buff *skb;
2601         unsigned long flags;
2602
2603         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2604
2605         /* If an error occurred during request building, remove all HCI
2606          * commands queued on the HCI request queue.
2607          */
2608         if (req->err) {
2609                 skb_queue_purge(&req->cmd_q);
2610                 return req->err;
2611         }
2612
2613         /* Do not allow empty requests */
2614         if (skb_queue_empty(&req->cmd_q))
2615                 return -ENODATA;
2616
2617         skb = skb_peek_tail(&req->cmd_q);
2618         bt_cb(skb)->req.complete = complete;
2619
2620         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2621         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2622         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2623
2624         queue_work(hdev->workqueue, &hdev->cmd_work);
2625
2626         return 0;
2627 }
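
/* Putting the request API together (an illustrative sketch; the callback
 * name and the use of HCI_OP_READ_LOCAL_VERSION are examples, not taken
 * from this file):
 *
 *	static void my_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_req_complete);
 *
 * All queued commands are spliced onto hdev->cmd_q in one go, and the
 * callback runs once the last of them completes (or any of them fails).
 */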
2628
2629 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2630                                        u32 plen, const void *param)
2631 {
2632         int len = HCI_COMMAND_HDR_SIZE + plen;
2633         struct hci_command_hdr *hdr;
2634         struct sk_buff *skb;
2635
2636         skb = bt_skb_alloc(len, GFP_ATOMIC);
2637         if (!skb)
2638                 return NULL;
2639
2640         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2641         hdr->opcode = cpu_to_le16(opcode);
2642         hdr->plen   = plen;
2643
2644         if (plen)
2645                 memcpy(skb_put(skb, plen), param, plen);
2646
2647         BT_DBG("skb len %d", skb->len);
2648
2649         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2650         skb->dev = (void *) hdev;
2651
2652         return skb;
2653 }
2654
2655 /* Send HCI command */
2656 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2657                  const void *param)
2658 {
2659         struct sk_buff *skb;
2660
2661         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2662
2663         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2664         if (!skb) {
2665                 BT_ERR("%s no memory for command", hdev->name);
2666                 return -ENOMEM;
2667         }
2668
2669         /* Stand-alone HCI commands must be flagged as
2670          * single-command requests.
2671          */
2672         bt_cb(skb)->req.start = true;
2673
2674         skb_queue_tail(&hdev->cmd_q, skb);
2675         queue_work(hdev->workqueue, &hdev->cmd_work);
2676
2677         return 0;
2678 }
2679
2680 /* Queue a command to an asynchronous HCI request */
2681 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2682                     const void *param, u8 event)
2683 {
2684         struct hci_dev *hdev = req->hdev;
2685         struct sk_buff *skb;
2686
2687         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2688
2689         /* If an error occurred during request building, there is no point in
2690          * queueing the HCI command. We can simply return.
2691          */
2692         if (req->err)
2693                 return;
2694
2695         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2696         if (!skb) {
2697                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2698                        hdev->name, opcode);
2699                 req->err = -ENOMEM;
2700                 return;
2701         }
2702
2703         if (skb_queue_empty(&req->cmd_q))
2704                 bt_cb(skb)->req.start = true;
2705
2706         bt_cb(skb)->req.event = event;
2707
2708         skb_queue_tail(&req->cmd_q, skb);
2709 }
2710
2711 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2712                  const void *param)
2713 {
2714         hci_req_add_ev(req, opcode, plen, param, 0);
2715 }
2716
2717 /* Get data from the previously sent command */
2718 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2719 {
2720         struct hci_command_hdr *hdr;
2721
2722         if (!hdev->sent_cmd)
2723                 return NULL;
2724
2725         hdr = (void *) hdev->sent_cmd->data;
2726
2727         if (hdr->opcode != cpu_to_le16(opcode))
2728                 return NULL;
2729
2730         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2731
2732         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2733 }
2734
2735 /* Send ACL data */
2736 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2737 {
2738         struct hci_acl_hdr *hdr;
2739         int len = skb->len;
2740
2741         skb_push(skb, HCI_ACL_HDR_SIZE);
2742         skb_reset_transport_header(skb);
2743         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2744         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2745         hdr->dlen   = cpu_to_le16(len);
2746 }
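
/* hci_handle_pack() folds the 12-bit connection handle and the 4-bit
 * packet boundary/broadcast flags into a single 16-bit field:
 * (handle & 0x0fff) | (flags << 12). A worked example with illustrative
 * values:
 *
 *	handle = 0x002a, flags = ACL_START (0x02)
 *	packed = 0x002a | (0x02 << 12) = 0x202a
 *
 * hci_handle() and hci_flags() on the receive path (see
 * hci_acldata_packet() below) undo exactly this packing.
 */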
2747
2748 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2749                           struct sk_buff *skb, __u16 flags)
2750 {
2751         struct hci_conn *conn = chan->conn;
2752         struct hci_dev *hdev = conn->hdev;
2753         struct sk_buff *list;
2754
2755         skb->len = skb_headlen(skb);
2756         skb->data_len = 0;
2757
2758         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2759
2760         switch (hdev->dev_type) {
2761         case HCI_BREDR:
2762                 hci_add_acl_hdr(skb, conn->handle, flags);
2763                 break;
2764         case HCI_AMP:
2765                 hci_add_acl_hdr(skb, chan->handle, flags);
2766                 break;
2767         default:
2768                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2769                 return;
2770         }
2771
2772         list = skb_shinfo(skb)->frag_list;
2773         if (!list) {
2774                 /* Non-fragmented */
2775                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2776
2777                 skb_queue_tail(queue, skb);
2778         } else {
2779                 /* Fragmented */
2780                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2781
2782                 skb_shinfo(skb)->frag_list = NULL;
2783
2784                 /* Queue all fragments atomically */
2785                 spin_lock(&queue->lock);
2786
2787                 __skb_queue_tail(queue, skb);
2788
2789                 flags &= ~ACL_START;
2790                 flags |= ACL_CONT;
2791                 do {
2792                         skb = list; list = list->next;
2793
2794                         skb->dev = (void *) hdev;
2795                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2796                         hci_add_acl_hdr(skb, conn->handle, flags);
2797
2798                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2799
2800                         __skb_queue_tail(queue, skb);
2801                 } while (list);
2802
2803                 spin_unlock(&queue->lock);
2804         }
2805 }
2806
2807 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2808 {
2809         struct hci_dev *hdev = chan->conn->hdev;
2810
2811         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2812
2813         skb->dev = (void *) hdev;
2814
2815         hci_queue_acl(chan, &chan->data_q, skb, flags);
2816
2817         queue_work(hdev->workqueue, &hdev->tx_work);
2818 }
2819
2820 /* Send SCO data */
2821 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2822 {
2823         struct hci_dev *hdev = conn->hdev;
2824         struct hci_sco_hdr hdr;
2825
2826         BT_DBG("%s len %d", hdev->name, skb->len);
2827
2828         hdr.handle = cpu_to_le16(conn->handle);
2829         hdr.dlen   = skb->len;
2830
2831         skb_push(skb, HCI_SCO_HDR_SIZE);
2832         skb_reset_transport_header(skb);
2833         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2834
2835         skb->dev = (void *) hdev;
2836         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2837
2838         skb_queue_tail(&conn->data_q, skb);
2839         queue_work(hdev->workqueue, &hdev->tx_work);
2840 }
2841
2842 /* ---- HCI TX task (outgoing data) ---- */
2843
2844 /* HCI Connection scheduler */
2845 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2846                                      int *quote)
2847 {
2848         struct hci_conn_hash *h = &hdev->conn_hash;
2849         struct hci_conn *conn = NULL, *c;
2850         unsigned int num = 0, min = ~0;
2851
2852         /* We don't have to lock the device here. Connections are always
2853          * added and removed with TX task disabled. */
2854
2855         rcu_read_lock();
2856
2857         list_for_each_entry_rcu(c, &h->list, list) {
2858                 if (c->type != type || skb_queue_empty(&c->data_q))
2859                         continue;
2860
2861                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2862                         continue;
2863
2864                 num++;
2865
2866                 if (c->sent < min) {
2867                         min  = c->sent;
2868                         conn = c;
2869                 }
2870
2871                 if (hci_conn_num(hdev, type) == num)
2872                         break;
2873         }
2874
2875         rcu_read_unlock();
2876
2877         if (conn) {
2878                 int cnt, q;
2879
2880                 switch (conn->type) {
2881                 case ACL_LINK:
2882                         cnt = hdev->acl_cnt;
2883                         break;
2884                 case SCO_LINK:
2885                 case ESCO_LINK:
2886                         cnt = hdev->sco_cnt;
2887                         break;
2888                 case LE_LINK:
2889                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2890                         break;
2891                 default:
2892                         cnt = 0;
2893                         BT_ERR("Unknown link type");
2894                 }
2895
2896                 q = cnt / num;
2897                 *quote = q ? q : 1;
2898         } else
2899                 *quote = 0;
2900
2901         BT_DBG("conn %p quote %d", conn, *quote);
2902         return conn;
2903 }
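
/* The quote computed above is a fair share of the controller's free
 * buffers. Worked example (numbers illustrative): with cnt = 8 free SCO
 * buffers and num = 3 SCO connections holding queued data, each pick is
 * allowed q = 8 / 3 = 2 packets; with cnt = 2 and num = 3, q = 0 and the
 * "q ? q : 1" fallback still grants a single packet, so the least-sent
 * connection is never starved outright.
 */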
2904
2905 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2906 {
2907         struct hci_conn_hash *h = &hdev->conn_hash;
2908         struct hci_conn *c;
2909
2910         BT_ERR("%s link tx timeout", hdev->name);
2911
2912         rcu_read_lock();
2913
2914         /* Kill stalled connections */
2915         list_for_each_entry_rcu(c, &h->list, list) {
2916                 if (c->type == type && c->sent) {
2917                         BT_ERR("%s killing stalled connection %pMR",
2918                                hdev->name, &c->dst);
2919                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2920                 }
2921         }
2922
2923         rcu_read_unlock();
2924 }
2925
2926 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2927                                       int *quote)
2928 {
2929         struct hci_conn_hash *h = &hdev->conn_hash;
2930         struct hci_chan *chan = NULL;
2931         unsigned int num = 0, min = ~0, cur_prio = 0;
2932         struct hci_conn *conn;
2933         int cnt, q, conn_num = 0;
2934
2935         BT_DBG("%s", hdev->name);
2936
2937         rcu_read_lock();
2938
2939         list_for_each_entry_rcu(conn, &h->list, list) {
2940                 struct hci_chan *tmp;
2941
2942                 if (conn->type != type)
2943                         continue;
2944
2945                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2946                         continue;
2947
2948                 conn_num++;
2949
2950                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2951                         struct sk_buff *skb;
2952
2953                         if (skb_queue_empty(&tmp->data_q))
2954                                 continue;
2955
2956                         skb = skb_peek(&tmp->data_q);
2957                         if (skb->priority < cur_prio)
2958                                 continue;
2959
2960                         if (skb->priority > cur_prio) {
2961                                 num = 0;
2962                                 min = ~0;
2963                                 cur_prio = skb->priority;
2964                         }
2965
2966                         num++;
2967
2968                         if (conn->sent < min) {
2969                                 min  = conn->sent;
2970                                 chan = tmp;
2971                         }
2972                 }
2973
2974                 if (hci_conn_num(hdev, type) == conn_num)
2975                         break;
2976         }
2977
2978         rcu_read_unlock();
2979
2980         if (!chan)
2981                 return NULL;
2982
2983         switch (chan->conn->type) {
2984         case ACL_LINK:
2985                 cnt = hdev->acl_cnt;
2986                 break;
2987         case AMP_LINK:
2988                 cnt = hdev->block_cnt;
2989                 break;
2990         case SCO_LINK:
2991         case ESCO_LINK:
2992                 cnt = hdev->sco_cnt;
2993                 break;
2994         case LE_LINK:
2995                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2996                 break;
2997         default:
2998                 cnt = 0;
2999                 BT_ERR("Unknown link type");
3000         }
3001
3002         q = cnt / num;
3003         *quote = q ? q : 1;
3004         BT_DBG("chan %p quote %d", chan, *quote);
3005         return chan;
3006 }
3007
3008 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3009 {
3010         struct hci_conn_hash *h = &hdev->conn_hash;
3011         struct hci_conn *conn;
3012         int num = 0;
3013
3014         BT_DBG("%s", hdev->name);
3015
3016         rcu_read_lock();
3017
3018         list_for_each_entry_rcu(conn, &h->list, list) {
3019                 struct hci_chan *chan;
3020
3021                 if (conn->type != type)
3022                         continue;
3023
3024                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3025                         continue;
3026
3027                 num++;
3028
3029                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3030                         struct sk_buff *skb;
3031
3032                         if (chan->sent) {
3033                                 chan->sent = 0;
3034                                 continue;
3035                         }
3036
3037                         if (skb_queue_empty(&chan->data_q))
3038                                 continue;
3039
3040                         skb = skb_peek(&chan->data_q);
3041                         if (skb->priority >= HCI_PRIO_MAX - 1)
3042                                 continue;
3043
3044                         skb->priority = HCI_PRIO_MAX - 1;
3045
3046                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3047                                skb->priority);
3048                 }
3049
3050                 if (hci_conn_num(hdev, type) == num)
3051                         break;
3052         }
3053
3054         rcu_read_unlock();
3055
3056 }
3057
3058 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3059 {
3060         /* Calculate count of blocks used by this packet */
3061         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3062 }
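
/* Worked example for the block math above (numbers illustrative): with
 * hdev->block_len = 64 and an ACL frame of skb->len = 304 bytes
 * (HCI_ACL_HDR_SIZE = 4 of header plus 300 of payload), the packet
 * consumes DIV_ROUND_UP(300, 64) = 5 blocks of the controller's
 * block-based buffer pool.
 */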
3063
3064 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3065 {
3066         if (!test_bit(HCI_RAW, &hdev->flags)) {
3067                 /* ACL tx timeout must be longer than maximum
3068                  * link supervision timeout (40.9 seconds) */
3069                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3070                                        HCI_ACL_TX_TIMEOUT))
3071                         hci_link_tx_to(hdev, ACL_LINK);
3072         }
3073 }
3074
3075 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3076 {
3077         unsigned int cnt = hdev->acl_cnt;
3078         struct hci_chan *chan;
3079         struct sk_buff *skb;
3080         int quote;
3081
3082         __check_timeout(hdev, cnt);
3083
3084         while (hdev->acl_cnt &&
3085                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3086                 u32 priority = (skb_peek(&chan->data_q))->priority;
3087                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3088                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3089                                skb->len, skb->priority);
3090
3091                         /* Stop if priority has changed */
3092                         if (skb->priority < priority)
3093                                 break;
3094
3095                         skb = skb_dequeue(&chan->data_q);
3096
3097                         hci_conn_enter_active_mode(chan->conn,
3098                                                    bt_cb(skb)->force_active);
3099
3100                         hci_send_frame(skb);
3101                         hdev->acl_last_tx = jiffies;
3102
3103                         hdev->acl_cnt--;
3104                         chan->sent++;
3105                         chan->conn->sent++;
3106                 }
3107         }
3108
3109         if (cnt != hdev->acl_cnt)
3110                 hci_prio_recalculate(hdev, ACL_LINK);
3111 }
3112
3113 static void hci_sched_acl_blk(struct hci_dev *hdev)
3114 {
3115         unsigned int cnt = hdev->block_cnt;
3116         struct hci_chan *chan;
3117         struct sk_buff *skb;
3118         int quote;
3119         u8 type;
3120
3121         __check_timeout(hdev, cnt);
3122
3123         BT_DBG("%s", hdev->name);
3124
3125         if (hdev->dev_type == HCI_AMP)
3126                 type = AMP_LINK;
3127         else
3128                 type = ACL_LINK;
3129
3130         while (hdev->block_cnt > 0 &&
3131                (chan = hci_chan_sent(hdev, type, &quote))) {
3132                 u32 priority = (skb_peek(&chan->data_q))->priority;
3133                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3134                         int blocks;
3135
3136                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3137                                skb->len, skb->priority);
3138
3139                         /* Stop if priority has changed */
3140                         if (skb->priority < priority)
3141                                 break;
3142
3143                         skb = skb_dequeue(&chan->data_q);
3144
3145                         blocks = __get_blocks(hdev, skb);
3146                         if (blocks > hdev->block_cnt)
3147                                 return;
3148
3149                         hci_conn_enter_active_mode(chan->conn,
3150                                                    bt_cb(skb)->force_active);
3151
3152                         hci_send_frame(skb);
3153                         hdev->acl_last_tx = jiffies;
3154
3155                         hdev->block_cnt -= blocks;
3156                         quote -= blocks;
3157
3158                         chan->sent += blocks;
3159                         chan->conn->sent += blocks;
3160                 }
3161         }
3162
3163         if (cnt != hdev->block_cnt)
3164                 hci_prio_recalculate(hdev, type);
3165 }
3166
3167 static void hci_sched_acl(struct hci_dev *hdev)
3168 {
3169         BT_DBG("%s", hdev->name);
3170
3171         /* No ACL link over BR/EDR controller */
3172         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3173                 return;
3174
3175         /* No AMP link over AMP controller */
3176         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3177                 return;
3178
3179         switch (hdev->flow_ctl_mode) {
3180         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3181                 hci_sched_acl_pkt(hdev);
3182                 break;
3183
3184         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3185                 hci_sched_acl_blk(hdev);
3186                 break;
3187         }
3188 }
3189
3190 /* Schedule SCO */
3191 static void hci_sched_sco(struct hci_dev *hdev)
3192 {
3193         struct hci_conn *conn;
3194         struct sk_buff *skb;
3195         int quote;
3196
3197         BT_DBG("%s", hdev->name);
3198
3199         if (!hci_conn_num(hdev, SCO_LINK))
3200                 return;
3201
3202         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3203                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3204                         BT_DBG("skb %p len %d", skb, skb->len);
3205                         hci_send_frame(skb);
3206
3207                         conn->sent++;
3208                         if (conn->sent == ~0)
3209                                 conn->sent = 0;
3210                 }
3211         }
3212 }
3213
3214 static void hci_sched_esco(struct hci_dev *hdev)
3215 {
3216         struct hci_conn *conn;
3217         struct sk_buff *skb;
3218         int quote;
3219
3220         BT_DBG("%s", hdev->name);
3221
3222         if (!hci_conn_num(hdev, ESCO_LINK))
3223                 return;
3224
3225         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3226                                                      &quote))) {
3227                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3228                         BT_DBG("skb %p len %d", skb, skb->len);
3229                         hci_send_frame(skb);
3230
3231                         conn->sent++;
3232                         if (conn->sent == ~0)
3233                                 conn->sent = 0;
3234                 }
3235         }
3236 }
3237
3238 static void hci_sched_le(struct hci_dev *hdev)
3239 {
3240         struct hci_chan *chan;
3241         struct sk_buff *skb;
3242         int quote, cnt, tmp;
3243
3244         BT_DBG("%s", hdev->name);
3245
3246         if (!hci_conn_num(hdev, LE_LINK))
3247                 return;
3248
3249         if (!test_bit(HCI_RAW, &hdev->flags)) {
3250                 /* LE tx timeout must be longer than maximum
3251                  * link supervision timeout (40.9 seconds) */
3252                 if (!hdev->le_cnt && hdev->le_pkts &&
3253                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3254                         hci_link_tx_to(hdev, LE_LINK);
3255         }
3256
3257         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3258         tmp = cnt;
3259         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3260                 u32 priority = (skb_peek(&chan->data_q))->priority;
3261                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3262                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3263                                skb->len, skb->priority);
3264
3265                         /* Stop if priority has changed */
3266                         if (skb->priority < priority)
3267                                 break;
3268
3269                         skb = skb_dequeue(&chan->data_q);
3270
3271                         hci_send_frame(skb);
3272                         hdev->le_last_tx = jiffies;
3273
3274                         cnt--;
3275                         chan->sent++;
3276                         chan->conn->sent++;
3277                 }
3278         }
3279
3280         if (hdev->le_pkts)
3281                 hdev->le_cnt = cnt;
3282         else
3283                 hdev->acl_cnt = cnt;
3284
3285         if (cnt != tmp)
3286                 hci_prio_recalculate(hdev, LE_LINK);
3287 }
3288
3289 static void hci_tx_work(struct work_struct *work)
3290 {
3291         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3292         struct sk_buff *skb;
3293
3294         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3295                hdev->sco_cnt, hdev->le_cnt);
3296
3297         /* Schedule queues and send stuff to HCI driver */
3298
3299         hci_sched_acl(hdev);
3300
3301         hci_sched_sco(hdev);
3302
3303         hci_sched_esco(hdev);
3304
3305         hci_sched_le(hdev);
3306
3307         /* Send next queued raw (unknown type) packet */
3308         while ((skb = skb_dequeue(&hdev->raw_q)))
3309                 hci_send_frame(skb);
3310 }
3311
3312 /* ----- HCI RX task (incoming data processing) ----- */
3313
3314 /* ACL data packet */
3315 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3316 {
3317         struct hci_acl_hdr *hdr = (void *) skb->data;
3318         struct hci_conn *conn;
3319         __u16 handle, flags;
3320
3321         skb_pull(skb, HCI_ACL_HDR_SIZE);
3322
3323         handle = __le16_to_cpu(hdr->handle);
3324         flags  = hci_flags(handle);
3325         handle = hci_handle(handle);
3326
3327         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3328                handle, flags);
3329
3330         hdev->stat.acl_rx++;
3331
3332         hci_dev_lock(hdev);
3333         conn = hci_conn_hash_lookup_handle(hdev, handle);
3334         hci_dev_unlock(hdev);
3335
3336         if (conn) {
3337                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3338
3339                 /* Send to upper protocol */
3340                 l2cap_recv_acldata(conn, skb, flags);
3341                 return;
3342         } else {
3343                 BT_ERR("%s ACL packet for unknown connection handle %d",
3344                        hdev->name, handle);
3345         }
3346
3347         kfree_skb(skb);
3348 }
3349
3350 /* SCO data packet */
3351 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3352 {
3353         struct hci_sco_hdr *hdr = (void *) skb->data;
3354         struct hci_conn *conn;
3355         __u16 handle;
3356
3357         skb_pull(skb, HCI_SCO_HDR_SIZE);
3358
3359         handle = __le16_to_cpu(hdr->handle);
3360
3361         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3362
3363         hdev->stat.sco_rx++;
3364
3365         hci_dev_lock(hdev);
3366         conn = hci_conn_hash_lookup_handle(hdev, handle);
3367         hci_dev_unlock(hdev);
3368
3369         if (conn) {
3370                 /* Send to upper protocol */
3371                 sco_recv_scodata(conn, skb);
3372                 return;
3373         } else {
3374                 BT_ERR("%s SCO packet for unknown connection handle %d",
3375                        hdev->name, handle);
3376         }
3377
3378         kfree_skb(skb);
3379 }
3380
3381 static bool hci_req_is_complete(struct hci_dev *hdev)
3382 {
3383         struct sk_buff *skb;
3384
3385         skb = skb_peek(&hdev->cmd_q);
3386         if (!skb)
3387                 return true;
3388
3389         return bt_cb(skb)->req.start;
3390 }
3391
3392 static void hci_resend_last(struct hci_dev *hdev)
3393 {
3394         struct hci_command_hdr *sent;
3395         struct sk_buff *skb;
3396         u16 opcode;
3397
3398         if (!hdev->sent_cmd)
3399                 return;
3400
3401         sent = (void *) hdev->sent_cmd->data;
3402         opcode = __le16_to_cpu(sent->opcode);
3403         if (opcode == HCI_OP_RESET)
3404                 return;
3405
3406         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3407         if (!skb)
3408                 return;
3409
3410         skb_queue_head(&hdev->cmd_q, skb);
3411         queue_work(hdev->workqueue, &hdev->cmd_work);
3412 }
3413
3414 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3415 {
3416         hci_req_complete_t req_complete = NULL;
3417         struct sk_buff *skb;
3418         unsigned long flags;
3419
3420         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3421
3422         /* If the completed command doesn't match the last one that was
3423          * sent we need to do special handling of it.
3424          */
3425         if (!hci_sent_cmd_data(hdev, opcode)) {
3426                 /* Some CSR-based controllers generate a spontaneous
3427                  * reset complete event during init, and any pending
3428                  * command will never be completed. In such a case
3429                  * we need to resend whatever the last sent command
3430                  * was.
3431                  */
3432                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3433                         hci_resend_last(hdev);
3434
3435                 return;
3436         }
3437
3438         /* If the command succeeded and there are still more commands
3439          * in this request, the request is not yet complete.
3440          */
3441         if (!status && !hci_req_is_complete(hdev))
3442                 return;
3443
3444         /* If this was the last command in a request, the complete
3445          * callback would be found in hdev->sent_cmd instead of the
3446          * command queue (hdev->cmd_q).
3447          */
3448         if (hdev->sent_cmd) {
3449                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3450                 if (req_complete)
3451                         goto call_complete;
3452         }
3453
3454         /* Remove all pending commands belonging to this request */
3455         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3456         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3457                 if (bt_cb(skb)->req.start) {
3458                         __skb_queue_head(&hdev->cmd_q, skb);
3459                         break;
3460                 }
3461
3462                 req_complete = bt_cb(skb)->req.complete;
3463                 kfree_skb(skb);
3464         }
3465         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3466
3467 call_complete:
3468         if (req_complete)
3469                 req_complete(hdev, status);
3470 }
3471
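     /* RX work: drain hdev->rx_q. Every frame is mirrored to the monitor
      * socket, and to raw sockets when the device is in promiscuous mode.
      * In raw mode frames are freed without further processing; during
      * init, ACL and SCO data packets are dropped. Everything else is
      * dispatched by packet type below.
      */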
3472 static void hci_rx_work(struct work_struct *work)
3473 {
3474         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3475         struct sk_buff *skb;
3476
3477         BT_DBG("%s", hdev->name);
3478
3479         while ((skb = skb_dequeue(&hdev->rx_q))) {
3480                 /* Send copy to monitor */
3481                 hci_send_to_monitor(hdev, skb);
3482
3483                 if (atomic_read(&hdev->promisc)) {
3484                         /* Send copy to the sockets */
3485                         hci_send_to_sock(hdev, skb);
3486                 }
3487
3488                 if (test_bit(HCI_RAW, &hdev->flags)) {
3489                         kfree_skb(skb);
3490                         continue;
3491                 }
3492
3493                 if (test_bit(HCI_INIT, &hdev->flags)) {
3494                         /* Don't process data packets in this state. */
3495                         switch (bt_cb(skb)->pkt_type) {
3496                         case HCI_ACLDATA_PKT:
3497                         case HCI_SCODATA_PKT:
3498                                 kfree_skb(skb);
3499                                 continue;
3500                         }
3501                 }
3502
3503                 /* Process frame */
3504                 switch (bt_cb(skb)->pkt_type) {
3505                 case HCI_EVENT_PKT:
3506                         BT_DBG("%s Event packet", hdev->name);
3507                         hci_event_packet(hdev, skb);
3508                         break;
3509
3510                 case HCI_ACLDATA_PKT:
3511                         BT_DBG("%s ACL data packet", hdev->name);
3512                         hci_acldata_packet(hdev, skb);
3513                         break;
3514
3515                 case HCI_SCODATA_PKT:
3516                         BT_DBG("%s SCO data packet", hdev->name);
3517                         hci_scodata_packet(hdev, skb);
3518                         break;
3519
3520                 default:
3521                         kfree_skb(skb);
3522                         break;
3523                 }
3524         }
3525 }
3526
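     /* CMD work: hdev->cmd_cnt tracks how many command packets the
      * controller is currently willing to accept (Num_HCI_Command_Packets
      * in the HCI flow control scheme). A command is dequeued and sent
      * only when the count is non-zero; a clone is kept in hdev->sent_cmd
      * so hci_req_cmd_complete() can match the completion, and cmd_timer
      * fires if the controller stops answering.
      */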
3527 static void hci_cmd_work(struct work_struct *work)
3528 {
3529         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3530         struct sk_buff *skb;
3531
3532         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3533                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3534
3535         /* Send queued commands */
3536         if (atomic_read(&hdev->cmd_cnt)) {
3537                 skb = skb_dequeue(&hdev->cmd_q);
3538                 if (!skb)
3539                         return;
3540
3541                 kfree_skb(hdev->sent_cmd);
3542
3543                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3544                 if (hdev->sent_cmd) {
3545                         atomic_dec(&hdev->cmd_cnt);
3546                         hci_send_frame(skb);
3547                         if (test_bit(HCI_RESET, &hdev->flags))
3548                                 del_timer(&hdev->cmd_timer);
3549                         else
3550                                 mod_timer(&hdev->cmd_timer,
3551                                           jiffies + HCI_CMD_TIMEOUT);
3552                 } else {
3553                         skb_queue_head(&hdev->cmd_q, skb);
3554                         queue_work(hdev->workqueue, &hdev->cmd_work);
3555                 }
3556         }
3557 }
3558
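     /* Start a general inquiry. The LAP below is the General Inquiry
      * Access Code (GIAC, 0x9e8b33) in little-endian byte order, and per
      * the Bluetooth specification the length parameter is in units of
      * 1.28 seconds.
      */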
3559 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3560 {
3561         /* General inquiry access code (GIAC) */
3562         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3563         struct hci_cp_inquiry cp;
3564
3565         BT_DBG("%s", hdev->name);
3566
3567         if (test_bit(HCI_INQUIRY, &hdev->flags))
3568                 return -EINPROGRESS;
3569
3570         inquiry_cache_flush(hdev);
3571
3572         memset(&cp, 0, sizeof(cp));
3573         memcpy(&cp.lap, lap, sizeof(cp.lap));
3574         cp.length  = length;
3575
3576         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3577 }
3578
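     /* Cancel a running inquiry; -EALREADY means none was in progress. */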
3579 int hci_cancel_inquiry(struct hci_dev *hdev)
3580 {
3581         BT_DBG("%s", hdev->name);
3582
3583         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3584                 return -EALREADY;
3585
3586         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3587 }
3588
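     /* Map an exported bdaddr type (BDADDR_LE_*) to the core's internal
      * LE address type; anything that is not explicitly public is treated
      * as a random address.
      */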
3589 u8 bdaddr_to_le(u8 bdaddr_type)
3590 {
3591         switch (bdaddr_type) {
3592         case BDADDR_LE_PUBLIC:
3593                 return ADDR_LE_DEV_PUBLIC;
3594
3595         default:
3596                 /* Fall back to the LE Random address type */
3597                 return ADDR_LE_DEV_RANDOM;
3598         }
3599 }