Bluetooth: Return ENODATA in hci_req_run
[firefly-linux-kernel-4.4.55.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* -ENODATA means the request did not add any commands
                 * to the queue, something that can happen when a request
                 * with conditionals doesn't trigger any commands to be
                 * sent. This is normal behavior and should not trigger
                 * an error return; any other failure is a real error.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
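
/* Illustrative sketch (not part of the original file): a typical caller
 * pairs a small builder callback with hci_req_sync(). The builder only
 * queues commands with hci_req_add(); hci_req_sync() runs the queue and
 * sleeps until the controller answers or the timeout fires:
 *
 *	static void example_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, example_scan_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */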

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

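/* Pick the inquiry mode for HCI_OP_WRITE_INQUIRY_MODE: 0x02 is inquiry
 * result with RSSI or extended inquiry result, 0x01 is inquiry result
 * with RSSI, and 0x00 is the standard inquiry result format. The
 * manufacturer/revision checks below handle controllers known to support
 * RSSI results without setting the corresponding feature bit.
 */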
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
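                /* 0x1f enables the five LE meta events defined in the
                 * 4.0 specification: LE Connection Complete, LE
                 * Advertising Report, LE Connection Update Complete,
                 * LE Read Remote Used Features Complete and LE Long
                 * Term Key Request.
                 */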
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);
}

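/* Controller initialization runs as three synchronous stages; each stage
 * must complete (or time out) before the next one is issued.
 */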
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

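/* Re-insert the entry so that the resolve list stays sorted by signal
 * strength (smallest |RSSI| first): remote names are then resolved for
 * the closest devices first. Entries already in NAME_PENDING state keep
 * their position.
 */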
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

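/* Add or refresh a discovered device in the inquiry cache. Every entry
 * lives on the "all" list; entries whose remote name still has to be
 * resolved are also linked on the "unknown" or "resolve" lists. Returns
 * false when the entry's remote name is not known (callers may then
 * issue a name request), true otherwise.
 */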
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary
         * buffer and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

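/* Build the LE advertising data in place at @ptr. Each AD structure is
 * encoded as <length, type, data>, where the length byte covers the type
 * byte plus the data, and the buffer holds at most HCI_MAX_AD_LENGTH
 * bytes in total. Returns the number of bytes written.
 */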
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

int hci_update_ad(struct hci_dev *hdev)
{
        struct hci_cp_le_set_adv_data cp;
        u8 len;
        int err;

        hci_dev_lock(hdev);

        if (!lmp_le_capable(hdev)) {
                err = -EINVAL;
                goto unlock;
        }

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0) {
                err = 0;
                goto unlock;
        }

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;
        err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
        hci_dev_unlock(hdev);

        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

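/* Decide whether a link key should be stored permanently or flushed when
 * the connection drops. Legacy (pre-SSP) key types 0x00-0x02 are always
 * kept; for SSP keys the decision depends on the authentication
 * requirements of both sides.
 */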
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function: fires when the controller failed to answer
 * the last sent command in time.
 */
1699 static void hci_cmd_timeout(unsigned long arg)
1700 {
1701         struct hci_dev *hdev = (void *) arg;
1702
1703         if (hdev->sent_cmd) {
1704                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1705                 u16 opcode = __le16_to_cpu(sent->opcode);
1706
1707                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1708         } else {
1709                 BT_ERR("%s command tx timeout", hdev->name);
1710         }
1711
1712         atomic_set(&hdev->cmd_cnt, 1);
1713         queue_work(hdev->workqueue, &hdev->cmd_work);
1714 }
1715
1716 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1717                                           bdaddr_t *bdaddr)
1718 {
1719         struct oob_data *data;
1720
1721         list_for_each_entry(data, &hdev->remote_oob_data, list)
1722                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1723                         return data;
1724
1725         return NULL;
1726 }
1727
1728 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1729 {
1730         struct oob_data *data;
1731
1732         data = hci_find_remote_oob_data(hdev, bdaddr);
1733         if (!data)
1734                 return -ENOENT;
1735
1736         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1737
1738         list_del(&data->list);
1739         kfree(data);
1740
1741         return 0;
1742 }
1743
1744 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1745 {
1746         struct oob_data *data, *n;
1747
1748         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1749                 list_del(&data->list);
1750                 kfree(data);
1751         }
1752
1753         return 0;
1754 }
1755
1756 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1757                             u8 *randomizer)
1758 {
1759         struct oob_data *data;
1760
1761         data = hci_find_remote_oob_data(hdev, bdaddr);
1762
1763         if (!data) {
1764                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1765                 if (!data)
1766                         return -ENOMEM;
1767
1768                 bacpy(&data->bdaddr, bdaddr);
1769                 list_add(&data->list, &hdev->remote_oob_data);
1770         }
1771
1772         memcpy(data->hash, hash, sizeof(data->hash));
1773         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1774
1775         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1776
1777         return 0;
1778 }
1779
1780 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1781 {
1782         struct bdaddr_list *b;
1783
1784         list_for_each_entry(b, &hdev->blacklist, list)
1785                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1786                         return b;
1787
1788         return NULL;
1789 }
1790
1791 int hci_blacklist_clear(struct hci_dev *hdev)
1792 {
1793         struct list_head *p, *n;
1794
1795         list_for_each_safe(p, n, &hdev->blacklist) {
1796                 struct bdaddr_list *b;
1797
1798                 b = list_entry(p, struct bdaddr_list, list);
1799
1800                 list_del(p);
1801                 kfree(b);
1802         }
1803
1804         return 0;
1805 }
1806
1807 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1808 {
1809         struct bdaddr_list *entry;
1810
1811         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1812                 return -EBADF;
1813
1814         if (hci_blacklist_lookup(hdev, bdaddr))
1815                 return -EEXIST;
1816
1817         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1818         if (!entry)
1819                 return -ENOMEM;
1820
1821         bacpy(&entry->bdaddr, bdaddr);
1822
1823         list_add(&entry->list, &hdev->blacklist);
1824
1825         return mgmt_device_blocked(hdev, bdaddr, type);
1826 }
1827
1828 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1829 {
1830         struct bdaddr_list *entry;
1831
1832         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1833                 return hci_blacklist_clear(hdev);
1834
1835         entry = hci_blacklist_lookup(hdev, bdaddr);
1836         if (!entry)
1837                 return -ENOENT;
1838
1839         list_del(&entry->list);
1840         kfree(entry);
1841
1842         return mgmt_device_unblocked(hdev, bdaddr, type);
1843 }
1844
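/* Request builders for LE scanning: first program the scan parameters,
 * then enable scanning with duplicate filtering turned on.
 */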
1845 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1846 {
1847         struct le_scan_params *param = (struct le_scan_params *) opt;
1848         struct hci_cp_le_set_scan_param cp;
1849
1850         memset(&cp, 0, sizeof(cp));
1851         cp.type = param->type;
1852         cp.interval = cpu_to_le16(param->interval);
1853         cp.window = cpu_to_le16(param->window);
1854
1855         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1856 }
1857
1858 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1859 {
1860         struct hci_cp_le_set_scan_enable cp;
1861
1862         memset(&cp, 0, sizeof(cp));
1863         cp.enable = 1;
1864         cp.filter_dup = 1;
1865
1866         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1867 }
1868
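/* Run an LE scan synchronously: set the scan parameters, enable
 * scanning, and arm the delayed work that disables the scan again once
 * the requested timeout expires.
 */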
1869 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1870                           u16 window, int timeout)
1871 {
1872         long timeo = msecs_to_jiffies(3000);
1873         struct le_scan_params param;
1874         int err;
1875
1876         BT_DBG("%s", hdev->name);
1877
1878         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1879                 return -EINPROGRESS;
1880
1881         param.type = type;
1882         param.interval = interval;
1883         param.window = window;
1884
1885         hci_req_lock(hdev);
1886
1887         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1888                              timeo);
1889         if (!err)
1890                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1891
1892         hci_req_unlock(hdev);
1893
1894         if (err < 0)
1895                 return err;
1896
1897         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1898                            msecs_to_jiffies(timeout));
1899
1900         return 0;
1901 }
1902
1903 int hci_cancel_le_scan(struct hci_dev *hdev)
1904 {
1905         BT_DBG("%s", hdev->name);
1906
1907         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1908                 return -EALREADY;
1909
1910         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1911                 struct hci_cp_le_set_scan_enable cp;
1912
1913                 /* Send HCI command to disable LE Scan */
1914                 memset(&cp, 0, sizeof(cp));
1915                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1916         }
1917
1918         return 0;
1919 }
1920
1921 static void le_scan_disable_work(struct work_struct *work)
1922 {
1923         struct hci_dev *hdev = container_of(work, struct hci_dev,
1924                                             le_scan_disable.work);
1925         struct hci_cp_le_set_scan_enable cp;
1926
1927         BT_DBG("%s", hdev->name);
1928
1929         memset(&cp, 0, sizeof(cp));
1930
1931         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1932 }
1933
1934 static void le_scan_work(struct work_struct *work)
1935 {
1936         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1937         struct le_scan_params *param = &hdev->le_scan_params;
1938
1939         BT_DBG("%s", hdev->name);
1940
1941         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1942                        param->timeout);
1943 }
1944
1945 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1946                 int timeout)
1947 {
1948         struct le_scan_params *param = &hdev->le_scan_params;
1949
1950         BT_DBG("%s", hdev->name);
1951
1952         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1953                 return -ENOTSUPP;
1954
1955         if (work_busy(&hdev->le_scan))
1956                 return -EINPROGRESS;
1957
1958         param->type = type;
1959         param->interval = interval;
1960         param->window = window;
1961         param->timeout = timeout;
1962
1963         queue_work(system_long_wq, &hdev->le_scan);
1964
1965         return 0;
1966 }
1967
1968 /* Alloc HCI device */
1969 struct hci_dev *hci_alloc_dev(void)
1970 {
1971         struct hci_dev *hdev;
1972
1973         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1974         if (!hdev)
1975                 return NULL;
1976
1977         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1978         hdev->esco_type = (ESCO_HV1);
1979         hdev->link_mode = (HCI_LM_ACCEPT);
1980         hdev->io_capability = 0x03; /* No Input No Output */
1981         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1982         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1983
1984         hdev->sniff_max_interval = 800;
1985         hdev->sniff_min_interval = 80;
1986
1987         mutex_init(&hdev->lock);
1988         mutex_init(&hdev->req_lock);
1989
1990         INIT_LIST_HEAD(&hdev->mgmt_pending);
1991         INIT_LIST_HEAD(&hdev->blacklist);
1992         INIT_LIST_HEAD(&hdev->uuids);
1993         INIT_LIST_HEAD(&hdev->link_keys);
1994         INIT_LIST_HEAD(&hdev->long_term_keys);
1995         INIT_LIST_HEAD(&hdev->remote_oob_data);
1996         INIT_LIST_HEAD(&hdev->conn_hash.list);
1997
1998         INIT_WORK(&hdev->rx_work, hci_rx_work);
1999         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2000         INIT_WORK(&hdev->tx_work, hci_tx_work);
2001         INIT_WORK(&hdev->power_on, hci_power_on);
2002         INIT_WORK(&hdev->le_scan, le_scan_work);
2003
2004         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2005         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2006         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2007
2008         skb_queue_head_init(&hdev->driver_init);
2009         skb_queue_head_init(&hdev->rx_q);
2010         skb_queue_head_init(&hdev->cmd_q);
2011         skb_queue_head_init(&hdev->raw_q);
2012
2013         init_waitqueue_head(&hdev->req_wait_q);
2014
2015         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2016
2017         hci_init_sysfs(hdev);
2018         discovery_init(hdev);
2019
2020         return hdev;
2021 }
2022 EXPORT_SYMBOL(hci_alloc_dev);
2023
2024 /* Free HCI device */
2025 void hci_free_dev(struct hci_dev *hdev)
2026 {
2027         skb_queue_purge(&hdev->driver_init);
2028
2029         /* will free via device release */
2030         put_device(&hdev->dev);
2031 }
2032 EXPORT_SYMBOL(hci_free_dev);
2033
2034 /* Register HCI device */
2035 int hci_register_dev(struct hci_dev *hdev)
2036 {
2037         int id, error;
2038
2039         if (!hdev->open || !hdev->close)
2040                 return -EINVAL;
2041
2042         /* Do not allow HCI_AMP devices to register at index 0,
2043          * so the index can be used as the AMP controller ID.
2044          */
2045         switch (hdev->dev_type) {
2046         case HCI_BREDR:
2047                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2048                 break;
2049         case HCI_AMP:
2050                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2051                 break;
2052         default:
2053                 return -EINVAL;
2054         }
2055
2056         if (id < 0)
2057                 return id;
2058
2059         sprintf(hdev->name, "hci%d", id);
2060         hdev->id = id;
2061
2062         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2063
2064         write_lock(&hci_dev_list_lock);
2065         list_add(&hdev->list, &hci_dev_list);
2066         write_unlock(&hci_dev_list_lock);
2067
2068         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2069                                           WQ_MEM_RECLAIM, 1);
2070         if (!hdev->workqueue) {
2071                 error = -ENOMEM;
2072                 goto err;
2073         }
2074
2075         hdev->req_workqueue = alloc_workqueue(hdev->name,
2076                                               WQ_HIGHPRI | WQ_UNBOUND |
2077                                               WQ_MEM_RECLAIM, 1);
2078         if (!hdev->req_workqueue) {
2079                 destroy_workqueue(hdev->workqueue);
2080                 error = -ENOMEM;
2081                 goto err;
2082         }
2083
2084         error = hci_add_sysfs(hdev);
2085         if (error < 0)
2086                 goto err_wqueue;
2087
2088         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2089                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2090                                     hdev);
2091         if (hdev->rfkill) {
2092                 if (rfkill_register(hdev->rfkill) < 0) {
2093                         rfkill_destroy(hdev->rfkill);
2094                         hdev->rfkill = NULL;
2095                 }
2096         }
2097
2098         set_bit(HCI_SETUP, &hdev->dev_flags);
2099
2100         if (hdev->dev_type != HCI_AMP)
2101                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2102
2103         hci_notify(hdev, HCI_DEV_REG);
2104         hci_dev_hold(hdev);
2105
2106         queue_work(hdev->req_workqueue, &hdev->power_on);
2107
2108         return id;
2109
2110 err_wqueue:
2111         destroy_workqueue(hdev->workqueue);
2112         destroy_workqueue(hdev->req_workqueue);
2113 err:
2114         ida_simple_remove(&hci_index_ida, hdev->id);
2115         write_lock(&hci_dev_list_lock);
2116         list_del(&hdev->list);
2117         write_unlock(&hci_dev_list_lock);
2118
2119         return error;
2120 }
2121 EXPORT_SYMBOL(hci_register_dev);
2122
2123 /* Unregister HCI device */
2124 void hci_unregister_dev(struct hci_dev *hdev)
2125 {
2126         int i, id;
2127
2128         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2129
2130         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2131
2132         id = hdev->id;
2133
2134         write_lock(&hci_dev_list_lock);
2135         list_del(&hdev->list);
2136         write_unlock(&hci_dev_list_lock);
2137
2138         hci_dev_do_close(hdev);
2139
2140         for (i = 0; i < NUM_REASSEMBLY; i++)
2141                 kfree_skb(hdev->reassembly[i]);
2142
2143         cancel_work_sync(&hdev->power_on);
2144
2145         if (!test_bit(HCI_INIT, &hdev->flags) &&
2146             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2147                 hci_dev_lock(hdev);
2148                 mgmt_index_removed(hdev);
2149                 hci_dev_unlock(hdev);
2150         }
2151
2152         /* mgmt_index_removed should take care of emptying the
2153          * pending list */
2154         BUG_ON(!list_empty(&hdev->mgmt_pending));
2155
2156         hci_notify(hdev, HCI_DEV_UNREG);
2157
2158         if (hdev->rfkill) {
2159                 rfkill_unregister(hdev->rfkill);
2160                 rfkill_destroy(hdev->rfkill);
2161         }
2162
2163         hci_del_sysfs(hdev);
2164
2165         destroy_workqueue(hdev->workqueue);
2166         destroy_workqueue(hdev->req_workqueue);
2167
2168         hci_dev_lock(hdev);
2169         hci_blacklist_clear(hdev);
2170         hci_uuids_clear(hdev);
2171         hci_link_keys_clear(hdev);
2172         hci_smp_ltks_clear(hdev);
2173         hci_remote_oob_data_clear(hdev);
2174         hci_dev_unlock(hdev);
2175
2176         hci_dev_put(hdev);
2177
2178         ida_simple_remove(&hci_index_ida, id);
2179 }
2180 EXPORT_SYMBOL(hci_unregister_dev);
2181
2182 /* Suspend HCI device */
2183 int hci_suspend_dev(struct hci_dev *hdev)
2184 {
2185         hci_notify(hdev, HCI_DEV_SUSPEND);
2186         return 0;
2187 }
2188 EXPORT_SYMBOL(hci_suspend_dev);
2189
2190 /* Resume HCI device */
2191 int hci_resume_dev(struct hci_dev *hdev)
2192 {
2193         hci_notify(hdev, HCI_DEV_RESUME);
2194         return 0;
2195 }
2196 EXPORT_SYMBOL(hci_resume_dev);
2197
2198 /* Receive frame from HCI drivers */
2199 int hci_recv_frame(struct sk_buff *skb)
2200 {
2201         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2202         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2203                       !test_bit(HCI_INIT, &hdev->flags))) {
2204                 kfree_skb(skb);
2205                 return -ENXIO;
2206         }
2207
2208         /* Incoming skb */
2209         bt_cb(skb)->incoming = 1;
2210
2211         /* Time stamp */
2212         __net_timestamp(skb);
2213
2214         skb_queue_tail(&hdev->rx_q, skb);
2215         queue_work(hdev->workqueue, &hdev->rx_work);
2216
2217         return 0;
2218 }
2219 EXPORT_SYMBOL(hci_recv_frame);
2220
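/* Reassemble a complete HCI packet from driver-supplied fragments. A
 * partially received packet is parked in hdev->reassembly[index]; once
 * the header is complete the expected payload length is read from it,
 * and a finished frame is passed on via hci_recv_frame(). Returns the
 * number of unconsumed input bytes, or a negative error.
 */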
2221 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2222                           int count, __u8 index)
2223 {
2224         int len = 0;
2225         int hlen = 0;
2226         int remain = count;
2227         struct sk_buff *skb;
2228         struct bt_skb_cb *scb;
2229
2230         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2231             index >= NUM_REASSEMBLY)
2232                 return -EILSEQ;
2233
2234         skb = hdev->reassembly[index];
2235
2236         if (!skb) {
2237                 switch (type) {
2238                 case HCI_ACLDATA_PKT:
2239                         len = HCI_MAX_FRAME_SIZE;
2240                         hlen = HCI_ACL_HDR_SIZE;
2241                         break;
2242                 case HCI_EVENT_PKT:
2243                         len = HCI_MAX_EVENT_SIZE;
2244                         hlen = HCI_EVENT_HDR_SIZE;
2245                         break;
2246                 case HCI_SCODATA_PKT:
2247                         len = HCI_MAX_SCO_SIZE;
2248                         hlen = HCI_SCO_HDR_SIZE;
2249                         break;
2250                 }
2251
2252                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2253                 if (!skb)
2254                         return -ENOMEM;
2255
2256                 scb = (void *) skb->cb;
2257                 scb->expect = hlen;
2258                 scb->pkt_type = type;
2259
2260                 skb->dev = (void *) hdev;
2261                 hdev->reassembly[index] = skb;
2262         }
2263
2264         while (count) {
2265                 scb = (void *) skb->cb;
2266                 len = min_t(uint, scb->expect, count);
2267
2268                 memcpy(skb_put(skb, len), data, len);
2269
2270                 count -= len;
2271                 data += len;
2272                 scb->expect -= len;
2273                 remain = count;
2274
2275                 switch (type) {
2276                 case HCI_EVENT_PKT:
2277                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2278                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2279                                 scb->expect = h->plen;
2280
2281                                 if (skb_tailroom(skb) < scb->expect) {
2282                                         kfree_skb(skb);
2283                                         hdev->reassembly[index] = NULL;
2284                                         return -ENOMEM;
2285                                 }
2286                         }
2287                         break;
2288
2289                 case HCI_ACLDATA_PKT:
2290                         if (skb->len == HCI_ACL_HDR_SIZE) {
2291                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2292                                 scb->expect = __le16_to_cpu(h->dlen);
2293
2294                                 if (skb_tailroom(skb) < scb->expect) {
2295                                         kfree_skb(skb);
2296                                         hdev->reassembly[index] = NULL;
2297                                         return -ENOMEM;
2298                                 }
2299                         }
2300                         break;
2301
2302                 case HCI_SCODATA_PKT:
2303                         if (skb->len == HCI_SCO_HDR_SIZE) {
2304                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2305                                 scb->expect = h->dlen;
2306
2307                                 if (skb_tailroom(skb) < scb->expect) {
2308                                         kfree_skb(skb);
2309                                         hdev->reassembly[index] = NULL;
2310                                         return -ENOMEM;
2311                                 }
2312                         }
2313                         break;
2314                 }
2315
2316                 if (scb->expect == 0) {
2317                         /* Complete frame */
2318
2319                         bt_cb(skb)->pkt_type = type;
2320                         hci_recv_frame(skb);
2321
2322                         hdev->reassembly[index] = NULL;
2323                         return remain;
2324                 }
2325         }
2326
2327         return remain;
2328 }
2329
2330 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2331 {
2332         int rem = 0;
2333
2334         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2335                 return -EILSEQ;
2336
2337         while (count) {
2338                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2339                 if (rem < 0)
2340                         return rem;
2341
2342                 data += (count - rem);
2343                 count = rem;
2344         }
2345
2346         return rem;
2347 }
2348 EXPORT_SYMBOL(hci_recv_fragment);
2349
2350 #define STREAM_REASSEMBLY 0
2351
2352 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2353 {
2354         int type;
2355         int rem = 0;
2356
2357         while (count) {
2358                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2359
2360                 if (!skb) {
2361                         struct { char type; } *pkt;
2362
2363                         /* Start of the frame */
2364                         pkt = data;
2365                         type = pkt->type;
2366
2367                         data++;
2368                         count--;
2369                 } else {
2370                         type = bt_cb(skb)->pkt_type;
                     }
2371
2372                 rem = hci_reassembly(hdev, type, data, count,
2373                                      STREAM_REASSEMBLY);
2374                 if (rem < 0)
2375                         return rem;
2376
2377                 data += (count - rem);
2378                 count = rem;
2379         }
2380
2381         return rem;
2382 }
2383 EXPORT_SYMBOL(hci_recv_stream_fragment);
2384
2385 /* ---- Interface to upper protocols ---- */
2386
2387 int hci_register_cb(struct hci_cb *cb)
2388 {
2389         BT_DBG("%p name %s", cb, cb->name);
2390
2391         write_lock(&hci_cb_list_lock);
2392         list_add(&cb->list, &hci_cb_list);
2393         write_unlock(&hci_cb_list_lock);
2394
2395         return 0;
2396 }
2397 EXPORT_SYMBOL(hci_register_cb);
2398
2399 int hci_unregister_cb(struct hci_cb *cb)
2400 {
2401         BT_DBG("%p name %s", cb, cb->name);
2402
2403         write_lock(&hci_cb_list_lock);
2404         list_del(&cb->list);
2405         write_unlock(&hci_cb_list_lock);
2406
2407         return 0;
2408 }
2409 EXPORT_SYMBOL(hci_unregister_cb);
2410
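/* Hand one frame to the driver, time-stamping it and sending copies to
 * the monitor channel and, in promiscuous mode, to raw HCI sockets.
 */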
2411 static int hci_send_frame(struct sk_buff *skb)
2412 {
2413         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2414
2415         if (!hdev) {
2416                 kfree_skb(skb);
2417                 return -ENODEV;
2418         }
2419
2420         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2421
2422         /* Time stamp */
2423         __net_timestamp(skb);
2424
2425         /* Send copy to monitor */
2426         hci_send_to_monitor(hdev, skb);
2427
2428         if (atomic_read(&hdev->promisc)) {
2429                 /* Send copy to the sockets */
2430                 hci_send_to_sock(hdev, skb);
2431         }
2432
2433         /* Get rid of skb owner, prior to sending to the driver. */
2434         skb_orphan(skb);
2435
2436         return hdev->send(skb);
2437 }
2438
2439 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2440 {
2441         skb_queue_head_init(&req->cmd_q);
2442         req->hdev = hdev;
2443 }
2444
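/* Commit a request: splice its queued commands onto hdev->cmd_q and
 * kick the command work. The complete callback is attached to the last
 * command so that it fires only after the whole sequence has run. An
 * empty request is rejected with -ENODATA.
 */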
2445 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2446 {
2447         struct hci_dev *hdev = req->hdev;
2448         struct sk_buff *skb;
2449         unsigned long flags;
2450
2451         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2452
2453         /* Do not allow empty requests */
2454         if (skb_queue_empty(&req->cmd_q))
2455                 return -ENODATA;
2456
2457         skb = skb_peek_tail(&req->cmd_q);
2458         bt_cb(skb)->req.complete = complete;
2459
2460         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2461         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2462         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2463
2464         queue_work(hdev->workqueue, &hdev->cmd_work);
2465
2466         return 0;
2467 }
2468
2469 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2470                                        u32 plen, void *param)
2471 {
2472         int len = HCI_COMMAND_HDR_SIZE + plen;
2473         struct hci_command_hdr *hdr;
2474         struct sk_buff *skb;
2475
2476         skb = bt_skb_alloc(len, GFP_ATOMIC);
2477         if (!skb)
2478                 return NULL;
2479
2480         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2481         hdr->opcode = cpu_to_le16(opcode);
2482         hdr->plen   = plen;
2483
2484         if (plen)
2485                 memcpy(skb_put(skb, plen), param, plen);
2486
2487         BT_DBG("skb len %d", skb->len);
2488
2489         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2490         skb->dev = (void *) hdev;
2491
2492         return skb;
2493 }
2494
2495 /* Send HCI command */
2496 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2497 {
2498         struct sk_buff *skb;
2499
2500         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2501
2502         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2503         if (!skb) {
2504                 BT_ERR("%s no memory for command", hdev->name);
2505                 return -ENOMEM;
2506         }
2507
2508         /* Stand-alone HCI commands must be flagged as
2509          * single-command requests.
2510          */
2511         bt_cb(skb)->req.start = true;
2512
2513         skb_queue_tail(&hdev->cmd_q, skb);
2514         queue_work(hdev->workqueue, &hdev->cmd_work);
2515
2516         return 0;
2517 }
2518
2519 /* Queue a command to an asynchronous HCI request */
2520 int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2521 {
2522         struct hci_dev *hdev = req->hdev;
2523         struct sk_buff *skb;
2524
2525         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2526
2527         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2528         if (!skb) {
2529                 BT_ERR("%s no memory for command", hdev->name);
2530                 return -ENOMEM;
2531         }
2532
2533         if (skb_queue_empty(&req->cmd_q))
2534                 bt_cb(skb)->req.start = true;
2535
2536         skb_queue_tail(&req->cmd_q, skb);
2537
2538         return 0;
2539 }
2540
2541 /* Get data from the previously sent command */
2542 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2543 {
2544         struct hci_command_hdr *hdr;
2545
2546         if (!hdev->sent_cmd)
2547                 return NULL;
2548
2549         hdr = (void *) hdev->sent_cmd->data;
2550
2551         if (hdr->opcode != cpu_to_le16(opcode))
2552                 return NULL;
2553
2554         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2555
2556         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2557 }
2558
2559 /* Send ACL data */
2560 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2561 {
2562         struct hci_acl_hdr *hdr;
2563         int len = skb->len;
2564
2565         skb_push(skb, HCI_ACL_HDR_SIZE);
2566         skb_reset_transport_header(skb);
2567         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2568         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2569         hdr->dlen   = cpu_to_le16(len);
2570 }
2571
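/* Prepend the ACL header and queue the skb for transmission. If the skb
 * carries a frag_list, all fragments are queued atomically, with the
 * first keeping the caller's flags and the rest re-flagged as ACL_CONT.
 */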
2572 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2573                           struct sk_buff *skb, __u16 flags)
2574 {
2575         struct hci_conn *conn = chan->conn;
2576         struct hci_dev *hdev = conn->hdev;
2577         struct sk_buff *list;
2578
2579         skb->len = skb_headlen(skb);
2580         skb->data_len = 0;
2581
2582         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2583
2584         switch (hdev->dev_type) {
2585         case HCI_BREDR:
2586                 hci_add_acl_hdr(skb, conn->handle, flags);
2587                 break;
2588         case HCI_AMP:
2589                 hci_add_acl_hdr(skb, chan->handle, flags);
2590                 break;
2591         default:
2592                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2593                 return;
2594         }
2595
2596         list = skb_shinfo(skb)->frag_list;
2597         if (!list) {
2598                 /* Non-fragmented */
2599                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2600
2601                 skb_queue_tail(queue, skb);
2602         } else {
2603                 /* Fragmented */
2604                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2605
2606                 skb_shinfo(skb)->frag_list = NULL;
2607
2608                 /* Queue all fragments atomically */
2609                 spin_lock(&queue->lock);
2610
2611                 __skb_queue_tail(queue, skb);
2612
2613                 flags &= ~ACL_START;
2614                 flags |= ACL_CONT;
2615                 do {
2616                         skb = list;
                             list = list->next;
2617
2618                         skb->dev = (void *) hdev;
2619                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2620                         hci_add_acl_hdr(skb, conn->handle, flags);
2621
2622                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2623
2624                         __skb_queue_tail(queue, skb);
2625                 } while (list);
2626
2627                 spin_unlock(&queue->lock);
2628         }
2629 }
2630
2631 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2632 {
2633         struct hci_dev *hdev = chan->conn->hdev;
2634
2635         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2636
2637         skb->dev = (void *) hdev;
2638
2639         hci_queue_acl(chan, &chan->data_q, skb, flags);
2640
2641         queue_work(hdev->workqueue, &hdev->tx_work);
2642 }
2643
2644 /* Send SCO data */
2645 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2646 {
2647         struct hci_dev *hdev = conn->hdev;
2648         struct hci_sco_hdr hdr;
2649
2650         BT_DBG("%s len %d", hdev->name, skb->len);
2651
2652         hdr.handle = cpu_to_le16(conn->handle);
2653         hdr.dlen   = skb->len;
2654
2655         skb_push(skb, HCI_SCO_HDR_SIZE);
2656         skb_reset_transport_header(skb);
2657         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2658
2659         skb->dev = (void *) hdev;
2660         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2661
2662         skb_queue_tail(&conn->data_q, skb);
2663         queue_work(hdev->workqueue, &hdev->tx_work);
2664 }
2665
2666 /* ---- HCI TX task (outgoing data) ---- */
2667
2668 /* HCI Connection scheduler */
2669 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2670                                      int *quote)
2671 {
2672         struct hci_conn_hash *h = &hdev->conn_hash;
2673         struct hci_conn *conn = NULL, *c;
2674         unsigned int num = 0, min = ~0;
2675
2676         /* We don't have to lock device here. Connections are always
2677          * added and removed with TX task disabled. */
2678
2679         rcu_read_lock();
2680
2681         list_for_each_entry_rcu(c, &h->list, list) {
2682                 if (c->type != type || skb_queue_empty(&c->data_q))
2683                         continue;
2684
2685                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2686                         continue;
2687
2688                 num++;
2689
2690                 if (c->sent < min) {
2691                         min  = c->sent;
2692                         conn = c;
2693                 }
2694
2695                 if (hci_conn_num(hdev, type) == num)
2696                         break;
2697         }
2698
2699         rcu_read_unlock();
2700
2701         if (conn) {
2702                 int cnt, q;
2703
2704                 switch (conn->type) {
2705                 case ACL_LINK:
2706                         cnt = hdev->acl_cnt;
2707                         break;
2708                 case SCO_LINK:
2709                 case ESCO_LINK:
2710                         cnt = hdev->sco_cnt;
2711                         break;
2712                 case LE_LINK:
2713                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2714                         break;
2715                 default:
2716                         cnt = 0;
2717                         BT_ERR("Unknown link type");
2718                 }
2719
2720                 q = cnt / num;
2721                 *quote = q ? q : 1;
2722         } else {
2723                 *quote = 0;
             }
2724
2725         BT_DBG("conn %p quote %d", conn, *quote);
2726         return conn;
2727 }
2728
2729 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2730 {
2731         struct hci_conn_hash *h = &hdev->conn_hash;
2732         struct hci_conn *c;
2733
2734         BT_ERR("%s link tx timeout", hdev->name);
2735
2736         rcu_read_lock();
2737
2738         /* Kill stalled connections */
2739         list_for_each_entry_rcu(c, &h->list, list) {
2740                 if (c->type == type && c->sent) {
2741                         BT_ERR("%s killing stalled connection %pMR",
2742                                hdev->name, &c->dst);
2743                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2744                 }
2745         }
2746
2747         rcu_read_unlock();
2748 }
2749
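/* Channel scheduler: among channels with queued data, pick one whose
 * head skb has the highest priority, breaking ties in favour of the
 * connection with the fewest unacknowledged packets, and compute a fair
 * per-round quote from the available controller credits.
 */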
2750 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2751                                       int *quote)
2752 {
2753         struct hci_conn_hash *h = &hdev->conn_hash;
2754         struct hci_chan *chan = NULL;
2755         unsigned int num = 0, min = ~0, cur_prio = 0;
2756         struct hci_conn *conn;
2757         int cnt, q, conn_num = 0;
2758
2759         BT_DBG("%s", hdev->name);
2760
2761         rcu_read_lock();
2762
2763         list_for_each_entry_rcu(conn, &h->list, list) {
2764                 struct hci_chan *tmp;
2765
2766                 if (conn->type != type)
2767                         continue;
2768
2769                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2770                         continue;
2771
2772                 conn_num++;
2773
2774                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2775                         struct sk_buff *skb;
2776
2777                         if (skb_queue_empty(&tmp->data_q))
2778                                 continue;
2779
2780                         skb = skb_peek(&tmp->data_q);
2781                         if (skb->priority < cur_prio)
2782                                 continue;
2783
2784                         if (skb->priority > cur_prio) {
2785                                 num = 0;
2786                                 min = ~0;
2787                                 cur_prio = skb->priority;
2788                         }
2789
2790                         num++;
2791
2792                         if (conn->sent < min) {
2793                                 min  = conn->sent;
2794                                 chan = tmp;
2795                         }
2796                 }
2797
2798                 if (hci_conn_num(hdev, type) == conn_num)
2799                         break;
2800         }
2801
2802         rcu_read_unlock();
2803
2804         if (!chan)
2805                 return NULL;
2806
2807         switch (chan->conn->type) {
2808         case ACL_LINK:
2809                 cnt = hdev->acl_cnt;
2810                 break;
2811         case AMP_LINK:
2812                 cnt = hdev->block_cnt;
2813                 break;
2814         case SCO_LINK:
2815         case ESCO_LINK:
2816                 cnt = hdev->sco_cnt;
2817                 break;
2818         case LE_LINK:
2819                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2820                 break;
2821         default:
2822                 cnt = 0;
2823                 BT_ERR("Unknown link type");
2824         }
2825
2826         q = cnt / num;
2827         *quote = q ? q : 1;
2828         BT_DBG("chan %p quote %d", chan, *quote);
2829         return chan;
2830 }
2831
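/* Anti-starvation pass: channels that transmitted in the last round get
 * their sent counter reset, while channels that sent nothing have the
 * head of their queue promoted to priority HCI_PRIO_MAX - 1.
 */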
2832 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2833 {
2834         struct hci_conn_hash *h = &hdev->conn_hash;
2835         struct hci_conn *conn;
2836         int num = 0;
2837
2838         BT_DBG("%s", hdev->name);
2839
2840         rcu_read_lock();
2841
2842         list_for_each_entry_rcu(conn, &h->list, list) {
2843                 struct hci_chan *chan;
2844
2845                 if (conn->type != type)
2846                         continue;
2847
2848                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2849                         continue;
2850
2851                 num++;
2852
2853                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2854                         struct sk_buff *skb;
2855
2856                         if (chan->sent) {
2857                                 chan->sent = 0;
2858                                 continue;
2859                         }
2860
2861                         if (skb_queue_empty(&chan->data_q))
2862                                 continue;
2863
2864                         skb = skb_peek(&chan->data_q);
2865                         if (skb->priority >= HCI_PRIO_MAX - 1)
2866                                 continue;
2867
2868                         skb->priority = HCI_PRIO_MAX - 1;
2869
2870                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2871                                skb->priority);
2872                 }
2873
2874                 if (hci_conn_num(hdev, type) == num)
2875                         break;
2876         }
2877
2878         rcu_read_unlock();
2880 }
2881
2882 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2883 {
2884         /* Calculate count of blocks used by this packet */
2885         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2886 }
2887
2888 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2889 {
2890         if (!test_bit(HCI_RAW, &hdev->flags)) {
2891                 /* ACL tx timeout must be longer than maximum
2892                  * link supervision timeout (40.9 seconds) */
2893                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2894                                        HCI_ACL_TX_TIMEOUT))
2895                         hci_link_tx_to(hdev, ACL_LINK);
2896         }
2897 }
2898
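/* Packet-based ACL scheduling: drain channels in priority order until
 * the controller's packet credits (acl_cnt) are used up.
 */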
2899 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2900 {
2901         unsigned int cnt = hdev->acl_cnt;
2902         struct hci_chan *chan;
2903         struct sk_buff *skb;
2904         int quote;
2905
2906         __check_timeout(hdev, cnt);
2907
2908         while (hdev->acl_cnt &&
2909                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2910                 u32 priority = (skb_peek(&chan->data_q))->priority;
2911                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2912                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2913                                skb->len, skb->priority);
2914
2915                         /* Stop if priority has changed */
2916                         if (skb->priority < priority)
2917                                 break;
2918
2919                         skb = skb_dequeue(&chan->data_q);
2920
2921                         hci_conn_enter_active_mode(chan->conn,
2922                                                    bt_cb(skb)->force_active);
2923
2924                         hci_send_frame(skb);
2925                         hdev->acl_last_tx = jiffies;
2926
2927                         hdev->acl_cnt--;
2928                         chan->sent++;
2929                         chan->conn->sent++;
2930                 }
2931         }
2932
2933         if (cnt != hdev->acl_cnt)
2934                 hci_prio_recalculate(hdev, ACL_LINK);
2935 }
2936
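/* Block-based ACL scheduling: used with data block flow control, where
 * controller credits are accounted in buffer blocks (block_cnt) rather
 * than in packets.
 */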
2937 static void hci_sched_acl_blk(struct hci_dev *hdev)
2938 {
2939         unsigned int cnt = hdev->block_cnt;
2940         struct hci_chan *chan;
2941         struct sk_buff *skb;
2942         int quote;
2943         u8 type;
2944
2945         __check_timeout(hdev, cnt);
2946
2947         BT_DBG("%s", hdev->name);
2948
2949         if (hdev->dev_type == HCI_AMP)
2950                 type = AMP_LINK;
2951         else
2952                 type = ACL_LINK;
2953
2954         while (hdev->block_cnt > 0 &&
2955                (chan = hci_chan_sent(hdev, type, &quote))) {
2956                 u32 priority = (skb_peek(&chan->data_q))->priority;
2957                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2958                         int blocks;
2959
2960                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2961                                skb->len, skb->priority);
2962
2963                         /* Stop if priority has changed */
2964                         if (skb->priority < priority)
2965                                 break;
2966
2967                         blocks = __get_blocks(hdev, skb);
2968                         if (blocks > hdev->block_cnt)
2969                                 return; /* leave skb queued */
2970
2971                         skb = skb_dequeue(&chan->data_q);
2972
2973                         hci_conn_enter_active_mode(chan->conn,
2974                                                    bt_cb(skb)->force_active);
2975
2976                         hci_send_frame(skb);
2977                         hdev->acl_last_tx = jiffies;
2978
2979                         hdev->block_cnt -= blocks;
2980                         quote -= blocks;
2981
2982                         chan->sent += blocks;
2983                         chan->conn->sent += blocks;
2984                 }
2985         }
2986
2987         if (cnt != hdev->block_cnt)
2988                 hci_prio_recalculate(hdev, type);
2989 }
2990
2991 static void hci_sched_acl(struct hci_dev *hdev)
2992 {
2993         BT_DBG("%s", hdev->name);
2994
2995         /* No ACL link over BR/EDR controller */
2996         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2997                 return;
2998
2999         /* No AMP link over AMP controller */
3000         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3001                 return;
3002
3003         switch (hdev->flow_ctl_mode) {
3004         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3005                 hci_sched_acl_pkt(hdev);
3006                 break;
3007
3008         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3009                 hci_sched_acl_blk(hdev);
3010                 break;
3011         }
3012 }
3013
3014 /* Schedule SCO */
3015 static void hci_sched_sco(struct hci_dev *hdev)
3016 {
3017         struct hci_conn *conn;
3018         struct sk_buff *skb;
3019         int quote;
3020
3021         BT_DBG("%s", hdev->name);
3022
3023         if (!hci_conn_num(hdev, SCO_LINK))
3024                 return;
3025
3026         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3027                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3028                         BT_DBG("skb %p len %d", skb, skb->len);
3029                         hci_send_frame(skb);
3030
3031                         conn->sent++;
3032                         if (conn->sent == ~0)
3033                                 conn->sent = 0;
3034                 }
3035         }
3036 }
3037
3038 static void hci_sched_esco(struct hci_dev *hdev)
3039 {
3040         struct hci_conn *conn;
3041         struct sk_buff *skb;
3042         int quote;
3043
3044         BT_DBG("%s", hdev->name);
3045
3046         if (!hci_conn_num(hdev, ESCO_LINK))
3047                 return;
3048
3049         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3050                                                      &quote))) {
3051                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3052                         BT_DBG("skb %p len %d", skb, skb->len);
3053                         hci_send_frame(skb);
3054
3055                         conn->sent++;
3056                         if (conn->sent == ~0)
3057                                 conn->sent = 0;
3058                 }
3059         }
3060 }
3061
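/* LE scheduling: credits come from the dedicated LE buffer pool
 * (le_cnt) when the controller has one, otherwise the shared ACL
 * credits are used.
 */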
3062 static void hci_sched_le(struct hci_dev *hdev)
3063 {
3064         struct hci_chan *chan;
3065         struct sk_buff *skb;
3066         int quote, cnt, tmp;
3067
3068         BT_DBG("%s", hdev->name);
3069
3070         if (!hci_conn_num(hdev, LE_LINK))
3071                 return;
3072
3073         if (!test_bit(HCI_RAW, &hdev->flags)) {
3074                 /* LE tx timeout must be longer than maximum
3075                  * link supervision timeout (40.9 seconds) */
3076                 if (!hdev->le_cnt && hdev->le_pkts &&
3077                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3078                         hci_link_tx_to(hdev, LE_LINK);
3079         }
3080
3081         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3082         tmp = cnt;
3083         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3084                 u32 priority = (skb_peek(&chan->data_q))->priority;
3085                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3086                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3087                                skb->len, skb->priority);
3088
3089                         /* Stop if priority has changed */
3090                         if (skb->priority < priority)
3091                                 break;
3092
3093                         skb = skb_dequeue(&chan->data_q);
3094
3095                         hci_send_frame(skb);
3096                         hdev->le_last_tx = jiffies;
3097
3098                         cnt--;
3099                         chan->sent++;
3100                         chan->conn->sent++;
3101                 }
3102         }
3103
3104         if (hdev->le_pkts)
3105                 hdev->le_cnt = cnt;
3106         else
3107                 hdev->acl_cnt = cnt;
3108
3109         if (cnt != tmp)
3110                 hci_prio_recalculate(hdev, LE_LINK);
3111 }
3112
3113 static void hci_tx_work(struct work_struct *work)
3114 {
3115         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3116         struct sk_buff *skb;
3117
3118         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3119                hdev->sco_cnt, hdev->le_cnt);
3120
3121         /* Schedule queues and send stuff to HCI driver */
3122
3123         hci_sched_acl(hdev);
3124
3125         hci_sched_sco(hdev);
3126
3127         hci_sched_esco(hdev);
3128
3129         hci_sched_le(hdev);
3130
3131         /* Send next queued raw (unknown type) packet */
3132         while ((skb = skb_dequeue(&hdev->raw_q)))
3133                 hci_send_frame(skb);
3134 }
3135
3136 /* ----- HCI RX task (incoming data processing) ----- */
3137
3138 /* ACL data packet */
3139 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3140 {
3141         struct hci_acl_hdr *hdr = (void *) skb->data;
3142         struct hci_conn *conn;
3143         __u16 handle, flags;
3144
3145         skb_pull(skb, HCI_ACL_HDR_SIZE);
3146
3147         handle = __le16_to_cpu(hdr->handle);
3148         flags  = hci_flags(handle);
3149         handle = hci_handle(handle);
3150
3151         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3152                handle, flags);
3153
3154         hdev->stat.acl_rx++;
3155
3156         hci_dev_lock(hdev);
3157         conn = hci_conn_hash_lookup_handle(hdev, handle);
3158         hci_dev_unlock(hdev);
3159
3160         if (conn) {
3161                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3162
3163                 /* Send to upper protocol */
3164                 l2cap_recv_acldata(conn, skb, flags);
3165                 return;
3166         } else {
3167                 BT_ERR("%s ACL packet for unknown connection handle %d",
3168                        hdev->name, handle);
3169         }
3170
3171         kfree_skb(skb);
3172 }
3173
3174 /* SCO data packet */
3175 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3176 {
3177         struct hci_sco_hdr *hdr = (void *) skb->data;
3178         struct hci_conn *conn;
3179         __u16 handle;
3180
3181         skb_pull(skb, HCI_SCO_HDR_SIZE);
3182
3183         handle = __le16_to_cpu(hdr->handle);
3184
3185         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3186
3187         hdev->stat.sco_rx++;
3188
3189         hci_dev_lock(hdev);
3190         conn = hci_conn_hash_lookup_handle(hdev, handle);
3191         hci_dev_unlock(hdev);
3192
3193         if (conn) {
3194                 /* Send to upper protocol */
3195                 sco_recv_scodata(conn, skb);
3196                 return;
3197         } else {
3198                 BT_ERR("%s SCO packet for unknown connection handle %d",
3199                        hdev->name, handle);
3200         }
3201
3202         kfree_skb(skb);
3203 }
3204
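/* A request is complete when the command at the head of cmd_q starts a
 * new request, or when the queue is empty.
 */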
3205 static bool hci_req_is_complete(struct hci_dev *hdev)
3206 {
3207         struct sk_buff *skb;
3208
3209         skb = skb_peek(&hdev->cmd_q);
3210         if (!skb)
3211                 return true;
3212
3213         return bt_cb(skb)->req.start;
3214 }
3215
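/* Requeue a clone of the last sent command, unless it was HCI_Reset.
 * See the spontaneous-reset handling in hci_req_cmd_complete().
 */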
3216 static void hci_resend_last(struct hci_dev *hdev)
3217 {
3218         struct hci_command_hdr *sent;
3219         struct sk_buff *skb;
3220         u16 opcode;
3221
3222         if (!hdev->sent_cmd)
3223                 return;
3224
3225         sent = (void *) hdev->sent_cmd->data;
3226         opcode = __le16_to_cpu(sent->opcode);
3227         if (opcode == HCI_OP_RESET)
3228                 return;
3229
3230         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3231         if (!skb)
3232                 return;
3233
3234         skb_queue_head(&hdev->cmd_q, skb);
3235         queue_work(hdev->workqueue, &hdev->cmd_work);
3236 }
3237
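/* Handle a Command Complete event: if it finishes the current request,
 * run the request's complete callback and discard any remaining queued
 * commands that belonged to the same request.
 */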
3238 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3239 {
3240         hci_req_complete_t req_complete = NULL;
3241         struct sk_buff *skb;
3242         unsigned long flags;
3243
3244         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3245
3246         /* If the completed command doesn't match the last one that was
3247          * sent we need to do special handling of it.
3248          */
3249         if (!hci_sent_cmd_data(hdev, opcode)) {
3250                 /* Some CSR based controllers generate a spontaneous
3251                  * reset complete event during init and any pending
3252                  * command will never be completed. In such a case we
3253                  * need to resend whatever was the last sent
3254                  * command.
3255                  */
3256                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3257                         hci_resend_last(hdev);
3258
3259                 return;
3260         }
3261
3262         /* If the command succeeded and there's still more commands in
3263          * this request the request is not yet complete.
3264          */
3265         if (!status && !hci_req_is_complete(hdev))
3266                 return;
3267
3268         /* If this was the last command in a request the complete
3269          * callback would be found in hdev->sent_cmd instead of the
3270          * command queue (hdev->cmd_q).
3271          */
3272         if (hdev->sent_cmd) {
3273                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3274                 if (req_complete)
3275                         goto call_complete;
3276         }
3277
3278         /* Remove all pending commands belonging to this request */
3279         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3280         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3281                 if (bt_cb(skb)->req.start) {
3282                         __skb_queue_head(&hdev->cmd_q, skb);
3283                         break;
3284                 }
3285
3286                 req_complete = bt_cb(skb)->req.complete;
3287                 kfree_skb(skb);
3288         }
3289         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3290
3291 call_complete:
3292         if (req_complete)
3293                 req_complete(hdev, status);
3294 }
3295
3296 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3297 {
3298         hci_req_complete_t req_complete = NULL;
3299
3300         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3301
3302         if (status) {
3303                 hci_req_cmd_complete(hdev, opcode, status);
3304                 return;
3305         }
3306
3307         /* No need to handle success status if there are more commands */
3308         if (!hci_req_is_complete(hdev))
3309                 return;
3310
3311         if (hdev->sent_cmd)
3312                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3313
3314         /* If the request doesn't have a complete callback or there
3315          * are other commands/requests in the hdev queue we consider
3316          * this request as completed.
3317          */
3318         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3319                 hci_req_cmd_complete(hdev, opcode, status);
3320 }
3321
3322 static void hci_rx_work(struct work_struct *work)
3323 {
3324         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3325         struct sk_buff *skb;
3326
3327         BT_DBG("%s", hdev->name);
3328
3329         while ((skb = skb_dequeue(&hdev->rx_q))) {
3330                 /* Send copy to monitor */
3331                 hci_send_to_monitor(hdev, skb);
3332
3333                 if (atomic_read(&hdev->promisc)) {
3334                         /* Send copy to the sockets */
3335                         hci_send_to_sock(hdev, skb);
3336                 }
3337
3338                 if (test_bit(HCI_RAW, &hdev->flags)) {
3339                         kfree_skb(skb);
3340                         continue;
3341                 }
3342
3343                 if (test_bit(HCI_INIT, &hdev->flags)) {
3344                         /* Don't process data packets in this state. */
3345                         switch (bt_cb(skb)->pkt_type) {
3346                         case HCI_ACLDATA_PKT:
3347                         case HCI_SCODATA_PKT:
3348                                 kfree_skb(skb);
3349                                 continue;
3350                         }
3351                 }
3352
3353                 /* Process frame */
3354                 switch (bt_cb(skb)->pkt_type) {
3355                 case HCI_EVENT_PKT:
3356                         BT_DBG("%s Event packet", hdev->name);
3357                         hci_event_packet(hdev, skb);
3358                         break;
3359
3360                 case HCI_ACLDATA_PKT:
3361                         BT_DBG("%s ACL data packet", hdev->name);
3362                         hci_acldata_packet(hdev, skb);
3363                         break;
3364
3365                 case HCI_SCODATA_PKT:
3366                         BT_DBG("%s SCO data packet", hdev->name);
3367                         hci_scodata_packet(hdev, skb);
3368                         break;
3369
3370                 default:
3371                         kfree_skb(skb);
3372                         break;
3373                 }
3374         }
3375 }
3376
3377 static void hci_cmd_work(struct work_struct *work)
3378 {
3379         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3380         struct sk_buff *skb;
3381
3382         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3383                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3384
3385         /* Send queued commands */
3386         if (atomic_read(&hdev->cmd_cnt)) {
3387                 skb = skb_dequeue(&hdev->cmd_q);
3388                 if (!skb)
3389                         return;
3390
3391                 kfree_skb(hdev->sent_cmd);
3392
3393                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3394                 if (hdev->sent_cmd) {
3395                         atomic_dec(&hdev->cmd_cnt);
3396                         hci_send_frame(skb);
3397                         if (test_bit(HCI_RESET, &hdev->flags))
3398                                 del_timer(&hdev->cmd_timer);
3399                         else
3400                                 mod_timer(&hdev->cmd_timer,
3401                                           jiffies + HCI_CMD_TIMEOUT);
3402                 } else {
3403                         skb_queue_head(&hdev->cmd_q, skb);
3404                         queue_work(hdev->workqueue, &hdev->cmd_work);
3405                 }
3406         }
3407 }
3408
3409 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3410 {
3411         /* General inquiry access code (GIAC) */
3412         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3413         struct hci_cp_inquiry cp;
3414
3415         BT_DBG("%s", hdev->name);
3416
3417         if (test_bit(HCI_INQUIRY, &hdev->flags))
3418                 return -EINPROGRESS;
3419
3420         inquiry_cache_flush(hdev);
3421
3422         memset(&cp, 0, sizeof(cp));
3423         memcpy(&cp.lap, lap, sizeof(cp.lap));
3424         cp.length  = length;
3425
3426         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3427 }
3428
3429 int hci_cancel_inquiry(struct hci_dev *hdev)
3430 {
3431         BT_DBG("%s", hdev->name);
3432
3433         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3434                 return -EALREADY;
3435
3436         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3437 }
3438
3439 u8 bdaddr_to_le(u8 bdaddr_type)
3440 {
3441         switch (bdaddr_type) {
3442         case BDADDR_LE_PUBLIC:
3443                 return ADDR_LE_DEV_PUBLIC;
3444
3445         default:
3446                 /* Fallback to LE Random address type */
3447                 return ADDR_LE_DEV_RANDOM;
3448         }
3449 }