Bluetooth: Use async requests internally in hci_req_sync
net/bluetooth/hci_core.c (firefly-linux-kernel-4.4.55.git)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55         hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62         BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64         if (hdev->req_status == HCI_REQ_PEND) {
65                 hdev->req_result = result;
66                 hdev->req_status = HCI_REQ_DONE;
67                 wake_up_interruptible(&hdev->req_wait_q);
68         }
69 }
70
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73         BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75         if (hdev->req_status == HCI_REQ_PEND) {
76                 hdev->req_result = err;
77                 hdev->req_status = HCI_REQ_CANCELED;
78                 wake_up_interruptible(&hdev->req_wait_q);
79         }
80 }
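/* The two helpers above are the completion side of the synchronous
 * request machinery: __hci_req_sync() below sets req_status to
 * HCI_REQ_PEND and sleeps on req_wait_q; the event path completes the
 * request via hci_req_sync_complete() with the final command's HCI
 * status, while hci_req_cancel() aborts the wait (e.g. on device
 * close) with a plain errno in req_result instead of an HCI status.
 */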
81
82 /* Execute request and wait for completion. */
83 static int __hci_req_sync(struct hci_dev *hdev,
84                           void (*func)(struct hci_request *req,
85                                       unsigned long opt),
86                           unsigned long opt, __u32 timeout)
87 {
88         struct hci_request req;
89         DECLARE_WAITQUEUE(wait, current);
90         int err = 0;
91
92         BT_DBG("%s start", hdev->name);
93
94         hci_req_init(&req, hdev);
95
96         hdev->req_status = HCI_REQ_PEND;
97
98         add_wait_queue(&hdev->req_wait_q, &wait);
99         set_current_state(TASK_INTERRUPTIBLE);
100
101         func(&req, opt);
102
103         err = hci_req_run(&req, hci_req_sync_complete);
104         if (err < 0) {
105                 hdev->req_status = 0;
106                 remove_wait_queue(&hdev->req_wait_q, &wait);
107                 /* req_run will fail if the request did not add any
108                  * commands to the queue, something that can happen when
109                  * a request with conditionals doesn't trigger any
110                  * commands to be sent. This is normal behavior and
111                  * should not trigger an error return.
112                  */
113                 return 0;
114         }
115
116         schedule_timeout(timeout);
117
118         remove_wait_queue(&hdev->req_wait_q, &wait);
119
120         if (signal_pending(current))
121                 return -EINTR;
122
123         switch (hdev->req_status) {
124         case HCI_REQ_DONE:
125                 err = -bt_to_errno(hdev->req_result);
126                 break;
127
128         case HCI_REQ_CANCELED:
129                 err = -hdev->req_result;
130                 break;
131
132         default:
133                 err = -ETIMEDOUT;
134                 break;
135         }
136
137         hdev->req_status = hdev->req_result = 0;
138
139         BT_DBG("%s end: err %d", hdev->name, err);
140
141         return err;
142 }
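/* Usage sketch (the caller must already hold the request lock, as in
 * __hci_init() below): the builder callback only queues commands; they
 * are then run as one asynchronous request and this function sleeps
 * until hci_req_sync_complete() fires or the timeout expires. The
 * builder below is illustrative only; see hci_scan_req() further down
 * for a real one of this shape.
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = __hci_req_sync(hdev, example_req, SCAN_DISABLED,
 *			     HCI_CMD_TIMEOUT);
 */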
143
144 static int hci_req_sync(struct hci_dev *hdev,
145                         void (*req)(struct hci_request *req,
146                                     unsigned long opt),
147                         unsigned long opt, __u32 timeout)
148 {
149         int ret;
150
151         if (!test_bit(HCI_UP, &hdev->flags))
152                 return -ENETDOWN;
153
154         /* Serialize all requests */
155         hci_req_lock(hdev);
156         ret = __hci_req_sync(hdev, req, opt, timeout);
157         hci_req_unlock(hdev);
158
159         return ret;
160 }
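/* Example call, mirroring the HCISETSCAN handling in hci_dev_cmd()
 * below:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE,
 *			   HCI_INIT_TIMEOUT);
 */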
161
162 static void hci_reset_req(struct hci_request *req, unsigned long opt)
163 {
164         BT_DBG("%s %ld", req->hdev->name, opt);
165
166         /* Reset device */
167         set_bit(HCI_RESET, &req->hdev->flags);
168         hci_req_add(req, HCI_OP_RESET, 0, NULL);
169 }
170
171 static void bredr_init(struct hci_request *req)
172 {
173         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
174
175         /* Read Local Supported Features */
176         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
177
178         /* Read Local Version */
179         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
180
181         /* Read BD Address */
182         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
183 }
184
185 static void amp_init(struct hci_request *req)
186 {
187         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
188
189         /* Read Local Version */
190         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
191
192         /* Read Local AMP Info */
193         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
194
195         /* Read Data Block Size */
196         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
197 }
198
199 static void hci_init1_req(struct hci_request *req, unsigned long opt)
200 {
201         struct hci_dev *hdev = req->hdev;
202         struct hci_request init_req;
203         struct sk_buff *skb;
204
205         BT_DBG("%s %ld", hdev->name, opt);
206
207         /* Driver initialization */
208
209         hci_req_init(&init_req, hdev);
210
211         /* Special commands */
212         while ((skb = skb_dequeue(&hdev->driver_init))) {
213                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214                 skb->dev = (void *) hdev;
215
216                 if (skb_queue_empty(&init_req.cmd_q))
217                         bt_cb(skb)->req.start = true;
218
219                 skb_queue_tail(&init_req.cmd_q, skb);
220         }
221         skb_queue_purge(&hdev->driver_init);
222
223         hci_req_run(&init_req, NULL);
224
225         /* Reset */
226         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
227                 hci_reset_req(req, 0);
228
229         switch (hdev->dev_type) {
230         case HCI_BREDR:
231                 bredr_init(req);
232                 break;
233
234         case HCI_AMP:
235                 amp_init(req);
236                 break;
237
238         default:
239                 BT_ERR("Unknown device type %d", hdev->dev_type);
240                 break;
241         }
242 }
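/* The loop above replays any skbs a driver queued on hdev->driver_init
 * as a request of their own, ahead of the standard reset/read sequence:
 * each skb is tagged as an HCI command packet and the first one is
 * marked as the start of a request so the tracking in hci_req_run()
 * stays consistent.
 */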
243
244 static void bredr_setup(struct hci_request *req)
245 {
246         struct hci_cp_delete_stored_link_key cp;
247         __le16 param;
248         __u8 flt_type;
249
250         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
251         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
252
253         /* Read Class of Device */
254         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
255
256         /* Read Local Name */
257         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
258
259         /* Read Voice Setting */
260         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
261
262         /* Clear Event Filters */
263         flt_type = HCI_FLT_CLEAR_ALL;
264         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
265
266         /* Connection accept timeout ~20 secs */
267         param = __constant_cpu_to_le16(0x7d00);
268         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
269
270         bacpy(&cp.bdaddr, BDADDR_ANY);
271         cp.delete_all = 0x01;
272         hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
273 }
274
275 static void le_setup(struct hci_request *req)
276 {
277         /* Read LE Buffer Size */
278         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
279
280         /* Read LE Local Supported Features */
281         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
282
283         /* Read LE Advertising Channel TX Power */
284         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
285
286         /* Read LE White List Size */
287         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
288
289         /* Read LE Supported States */
290         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
291 }
292
293 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
294 {
295         if (lmp_ext_inq_capable(hdev))
296                 return 0x02;
297
298         if (lmp_inq_rssi_capable(hdev))
299                 return 0x01;
300
301         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
302             hdev->lmp_subver == 0x0757)
303                 return 0x01;
304
305         if (hdev->manufacturer == 15) {
306                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
307                         return 0x01;
308                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
309                         return 0x01;
310                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
311                         return 0x01;
312         }
313
314         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
315             hdev->lmp_subver == 0x1805)
316                 return 0x01;
317
318         return 0x00;
319 }
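/* The manufacturer/revision checks above whitelist controllers that
 * support Inquiry Result with RSSI (mode 0x01) without advertising it
 * in their LMP features; 11, 15 and 31 are the company IDs of Silicon
 * Wave, Broadcom and AVM Berlin respectively. Mode 0x02 selects the
 * Extended Inquiry Result format and 0x00 the standard one.
 */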
320
321 static void hci_setup_inquiry_mode(struct hci_request *req)
322 {
323         u8 mode;
324
325         mode = hci_get_inquiry_mode(req->hdev);
326
327         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
328 }
329
330 static void hci_setup_event_mask(struct hci_request *req)
331 {
332         struct hci_dev *hdev = req->hdev;
333
334         /* The second byte is 0xff instead of 0x9f (two reserved bits
335          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
336          * command otherwise.
337          */
338         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
339
340         /* CSR 1.1 dongles do not accept any bitfield, so don't try to
341          * set an event mask on pre-1.2 devices.
342          */
343         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
344                 return;
345
346         if (lmp_bredr_capable(hdev)) {
347                 events[4] |= 0x01; /* Flow Specification Complete */
348                 events[4] |= 0x02; /* Inquiry Result with RSSI */
349                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
350                 events[5] |= 0x08; /* Synchronous Connection Complete */
351                 events[5] |= 0x10; /* Synchronous Connection Changed */
352         }
353
354         if (lmp_inq_rssi_capable(hdev))
355                 events[4] |= 0x02; /* Inquiry Result with RSSI */
356
357         if (lmp_sniffsubr_capable(hdev))
358                 events[5] |= 0x20; /* Sniff Subrating */
359
360         if (lmp_pause_enc_capable(hdev))
361                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
362
363         if (lmp_ext_inq_capable(hdev))
364                 events[5] |= 0x40; /* Extended Inquiry Result */
365
366         if (lmp_no_flush_capable(hdev))
367                 events[7] |= 0x01; /* Enhanced Flush Complete */
368
369         if (lmp_lsto_capable(hdev))
370                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
371
372         if (lmp_ssp_capable(hdev)) {
373                 events[6] |= 0x01;      /* IO Capability Request */
374                 events[6] |= 0x02;      /* IO Capability Response */
375                 events[6] |= 0x04;      /* User Confirmation Request */
376                 events[6] |= 0x08;      /* User Passkey Request */
377                 events[6] |= 0x10;      /* Remote OOB Data Request */
378                 events[6] |= 0x20;      /* Simple Pairing Complete */
379                 events[7] |= 0x04;      /* User Passkey Notification */
380                 events[7] |= 0x08;      /* Keypress Notification */
381                 events[7] |= 0x10;      /* Remote Host Supported
382                                          * Features Notification
383                                          */
384         }
385
386         if (lmp_le_capable(hdev))
387                 events[7] |= 0x20;      /* LE Meta-Event */
388
389         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
390
391         if (lmp_le_capable(hdev)) {
392                 memset(events, 0, sizeof(events));
393                 events[0] = 0x1f;
394                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
395                             sizeof(events), events);
396         }
397 }
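/* The event mask is a 64-bit little-endian bitfield in which bit N
 * enables the event with code N + 1. For example, events[4] |= 0x02
 * sets bit 33 and thus enables Inquiry Result with RSSI (0x22), and
 * events[7] |= 0x20 sets bit 61 for the LE Meta event (0x3E). The LE
 * event mask works the same way for subevent codes, so 0x1f enables
 * LE subevents 0x01 through 0x05.
 */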
398
399 static void hci_init2_req(struct hci_request *req, unsigned long opt)
400 {
401         struct hci_dev *hdev = req->hdev;
402
403         if (lmp_bredr_capable(hdev))
404                 bredr_setup(req);
405
406         if (lmp_le_capable(hdev))
407                 le_setup(req);
408
409         hci_setup_event_mask(req);
410
411         if (hdev->hci_ver > BLUETOOTH_VER_1_1)
412                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
413
414         if (lmp_ssp_capable(hdev)) {
415                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
416                         u8 mode = 0x01;
417                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
418                                     sizeof(mode), &mode);
419                 } else {
420                         struct hci_cp_write_eir cp;
421
422                         memset(hdev->eir, 0, sizeof(hdev->eir));
423                         memset(&cp, 0, sizeof(cp));
424
425                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
426                 }
427         }
428
429         if (lmp_inq_rssi_capable(hdev))
430                 hci_setup_inquiry_mode(req);
431
432         if (lmp_inq_tx_pwr_capable(hdev))
433                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
434
435         if (lmp_ext_feat_capable(hdev)) {
436                 struct hci_cp_read_local_ext_features cp;
437
438                 cp.page = 0x01;
439                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
440                             sizeof(cp), &cp);
441         }
442
443         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
444                 u8 enable = 1;
445                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
446                             &enable);
447         }
448 }
449
450 static void hci_setup_link_policy(struct hci_request *req)
451 {
452         struct hci_dev *hdev = req->hdev;
453         struct hci_cp_write_def_link_policy cp;
454         u16 link_policy = 0;
455
456         if (lmp_rswitch_capable(hdev))
457                 link_policy |= HCI_LP_RSWITCH;
458         if (lmp_hold_capable(hdev))
459                 link_policy |= HCI_LP_HOLD;
460         if (lmp_sniff_capable(hdev))
461                 link_policy |= HCI_LP_SNIFF;
462         if (lmp_park_capable(hdev))
463                 link_policy |= HCI_LP_PARK;
464
465         cp.policy = cpu_to_le16(link_policy);
466         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
467 }
468
469 static void hci_set_le_support(struct hci_request *req)
470 {
471         struct hci_dev *hdev = req->hdev;
472         struct hci_cp_write_le_host_supported cp;
473
474         memset(&cp, 0, sizeof(cp));
475
476         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
477                 cp.le = 0x01;
478                 cp.simul = lmp_le_br_capable(hdev);
479         }
480
481         if (cp.le != lmp_host_le_capable(hdev))
482                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
483                             &cp);
484 }
485
486 static void hci_init3_req(struct hci_request *req, unsigned long opt)
487 {
488         struct hci_dev *hdev = req->hdev;
489
490         if (hdev->commands[5] & 0x10)
491                 hci_setup_link_policy(req);
492
493         if (lmp_le_capable(hdev))
494                 hci_set_le_support(req);
495 }
496
497 static int __hci_init(struct hci_dev *hdev)
498 {
499         int err;
500
501         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
502         if (err < 0)
503                 return err;
504
505         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
506          * dual-mode BR/EDR/LE controllers. AMP controllers only need
507          * the first stage of init.
508          */
509         if (hdev->dev_type != HCI_BREDR)
510                 return 0;
511
512         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
513         if (err < 0)
514                 return err;
515
516         return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
517 }
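/* Controller bring-up is thus split into three synchronous requests:
 * hci_init1_req() replays driver commands, resets the controller and
 * reads the basic identity; hci_init2_req() performs setup that
 * depends on the reported capabilities (event mask, SSP, EIR); and
 * hci_init3_req() applies settings that need stage-two results, such
 * as the default link policy and LE host support.
 */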
518
519 static void hci_scan_req(struct hci_request *req, unsigned long opt)
520 {
521         __u8 scan = opt;
522
523         BT_DBG("%s %x", req->hdev->name, scan);
524
525         /* Inquiry and Page scans */
526         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
527 }
528
529 static void hci_auth_req(struct hci_request *req, unsigned long opt)
530 {
531         __u8 auth = opt;
532
533         BT_DBG("%s %x", req->hdev->name, auth);
534
535         /* Authentication */
536         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
537 }
538
539 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
540 {
541         __u8 encrypt = opt;
542
543         BT_DBG("%s %x", req->hdev->name, encrypt);
544
545         /* Encryption */
546         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
547 }
548
549 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
550 {
551         __le16 policy = cpu_to_le16(opt);
552
553         BT_DBG("%s %x", req->hdev->name, policy);
554
555         /* Default link policy */
556         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
557 }
558
559 /* Get HCI device by index.
560  * Device is held on return. */
561 struct hci_dev *hci_dev_get(int index)
562 {
563         struct hci_dev *hdev = NULL, *d;
564
565         BT_DBG("%d", index);
566
567         if (index < 0)
568                 return NULL;
569
570         read_lock(&hci_dev_list_lock);
571         list_for_each_entry(d, &hci_dev_list, list) {
572                 if (d->id == index) {
573                         hdev = hci_dev_hold(d);
574                         break;
575                 }
576         }
577         read_unlock(&hci_dev_list_lock);
578         return hdev;
579 }
580
581 /* ---- Inquiry support ---- */
582
583 bool hci_discovery_active(struct hci_dev *hdev)
584 {
585         struct discovery_state *discov = &hdev->discovery;
586
587         switch (discov->state) {
588         case DISCOVERY_FINDING:
589         case DISCOVERY_RESOLVING:
590                 return true;
591
592         default:
593                 return false;
594         }
595 }
596
597 void hci_discovery_set_state(struct hci_dev *hdev, int state)
598 {
599         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
600
601         if (hdev->discovery.state == state)
602                 return;
603
604         switch (state) {
605         case DISCOVERY_STOPPED:
606                 if (hdev->discovery.state != DISCOVERY_STARTING)
607                         mgmt_discovering(hdev, 0);
608                 break;
609         case DISCOVERY_STARTING:
610                 break;
611         case DISCOVERY_FINDING:
612                 mgmt_discovering(hdev, 1);
613                 break;
614         case DISCOVERY_RESOLVING:
615                 break;
616         case DISCOVERY_STOPPING:
617                 break;
618         }
619
620         hdev->discovery.state = state;
621 }
622
623 static void inquiry_cache_flush(struct hci_dev *hdev)
624 {
625         struct discovery_state *cache = &hdev->discovery;
626         struct inquiry_entry *p, *n;
627
628         list_for_each_entry_safe(p, n, &cache->all, all) {
629                 list_del(&p->all);
630                 kfree(p);
631         }
632
633         INIT_LIST_HEAD(&cache->unknown);
634         INIT_LIST_HEAD(&cache->resolve);
635 }
636
637 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
638                                                bdaddr_t *bdaddr)
639 {
640         struct discovery_state *cache = &hdev->discovery;
641         struct inquiry_entry *e;
642
643         BT_DBG("cache %p, %pMR", cache, bdaddr);
644
645         list_for_each_entry(e, &cache->all, all) {
646                 if (!bacmp(&e->data.bdaddr, bdaddr))
647                         return e;
648         }
649
650         return NULL;
651 }
652
653 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
654                                                        bdaddr_t *bdaddr)
655 {
656         struct discovery_state *cache = &hdev->discovery;
657         struct inquiry_entry *e;
658
659         BT_DBG("cache %p, %pMR", cache, bdaddr);
660
661         list_for_each_entry(e, &cache->unknown, list) {
662                 if (!bacmp(&e->data.bdaddr, bdaddr))
663                         return e;
664         }
665
666         return NULL;
667 }
668
669 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
670                                                        bdaddr_t *bdaddr,
671                                                        int state)
672 {
673         struct discovery_state *cache = &hdev->discovery;
674         struct inquiry_entry *e;
675
676         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
677
678         list_for_each_entry(e, &cache->resolve, list) {
679                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
680                         return e;
681                 if (!bacmp(&e->data.bdaddr, bdaddr))
682                         return e;
683         }
684
685         return NULL;
686 }
687
688 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
689                                       struct inquiry_entry *ie)
690 {
691         struct discovery_state *cache = &hdev->discovery;
692         struct list_head *pos = &cache->resolve;
693         struct inquiry_entry *p;
694
695         list_del(&ie->list);
696
697         list_for_each_entry(p, &cache->resolve, list) {
698                 if (p->name_state != NAME_PENDING &&
699                     abs(p->data.rssi) >= abs(ie->data.rssi))
700                         break;
701                 pos = &p->list;
702         }
703
704         list_add(&ie->list, pos);
705 }
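/* The re-insertion above keeps cache->resolve ordered by signal
 * strength (a smaller |rssi| means a stronger signal), so name
 * resolution starts with the closest devices; entries whose name
 * request is already pending keep their place at the head.
 */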
706
707 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
708                               bool name_known, bool *ssp)
709 {
710         struct discovery_state *cache = &hdev->discovery;
711         struct inquiry_entry *ie;
712
713         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
714
715         hci_remove_remote_oob_data(hdev, &data->bdaddr);
716
717         if (ssp)
718                 *ssp = data->ssp_mode;
719
720         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
721         if (ie) {
722                 if (ie->data.ssp_mode && ssp)
723                         *ssp = true;
724
725                 if (ie->name_state == NAME_NEEDED &&
726                     data->rssi != ie->data.rssi) {
727                         ie->data.rssi = data->rssi;
728                         hci_inquiry_cache_update_resolve(hdev, ie);
729                 }
730
731                 goto update;
732         }
733
734         /* Entry not in the cache. Add new one. */
735         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
736         if (!ie)
737                 return false;
738
739         list_add(&ie->all, &cache->all);
740
741         if (name_known) {
742                 ie->name_state = NAME_KNOWN;
743         } else {
744                 ie->name_state = NAME_NOT_KNOWN;
745                 list_add(&ie->list, &cache->unknown);
746         }
747
748 update:
749         if (name_known && ie->name_state != NAME_KNOWN &&
750             ie->name_state != NAME_PENDING) {
751                 ie->name_state = NAME_KNOWN;
752                 list_del(&ie->list);
753         }
754
755         memcpy(&ie->data, data, sizeof(*data));
756         ie->timestamp = jiffies;
757         cache->timestamp = jiffies;
758
759         if (ie->name_state == NAME_NOT_KNOWN)
760                 return false;
761
762         return true;
763 }
764
765 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
766 {
767         struct discovery_state *cache = &hdev->discovery;
768         struct inquiry_info *info = (struct inquiry_info *) buf;
769         struct inquiry_entry *e;
770         int copied = 0;
771
772         list_for_each_entry(e, &cache->all, all) {
773                 struct inquiry_data *data = &e->data;
774
775                 if (copied >= num)
776                         break;
777
778                 bacpy(&info->bdaddr, &data->bdaddr);
779                 info->pscan_rep_mode    = data->pscan_rep_mode;
780                 info->pscan_period_mode = data->pscan_period_mode;
781                 info->pscan_mode        = data->pscan_mode;
782                 memcpy(info->dev_class, data->dev_class, 3);
783                 info->clock_offset      = data->clock_offset;
784
785                 info++;
786                 copied++;
787         }
788
789         BT_DBG("cache %p, copied %d", cache, copied);
790         return copied;
791 }
792
793 static void hci_inq_req(struct hci_request *req, unsigned long opt)
794 {
795         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
796         struct hci_dev *hdev = req->hdev;
797         struct hci_cp_inquiry cp;
798
799         BT_DBG("%s", hdev->name);
800
801         if (test_bit(HCI_INQUIRY, &hdev->flags))
802                 return;
803
804         /* Start Inquiry */
805         memcpy(&cp.lap, &ir->lap, 3);
806         cp.length  = ir->length;
807         cp.num_rsp = ir->num_rsp;
808         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
809 }
810
811 int hci_inquiry(void __user *arg)
812 {
813         __u8 __user *ptr = arg;
814         struct hci_inquiry_req ir;
815         struct hci_dev *hdev;
816         int err = 0, do_inquiry = 0, max_rsp;
817         long timeo;
818         __u8 *buf;
819
820         if (copy_from_user(&ir, ptr, sizeof(ir)))
821                 return -EFAULT;
822
823         hdev = hci_dev_get(ir.dev_id);
824         if (!hdev)
825                 return -ENODEV;
826
827         hci_dev_lock(hdev);
828         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
829             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
830                 inquiry_cache_flush(hdev);
831                 do_inquiry = 1;
832         }
833         hci_dev_unlock(hdev);
834
835         timeo = ir.length * msecs_to_jiffies(2000);
836
837         if (do_inquiry) {
838                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
839                                    timeo);
840                 if (err < 0)
841                         goto done;
842         }
843
844         /* For an unlimited number of responses we use a buffer with
845          * 255 entries.
846          */
847         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
848
849         /* inquiry_cache_dump() can't sleep, so we allocate a temporary
850          * buffer and then copy it to user space.
851          */
852         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
853         if (!buf) {
854                 err = -ENOMEM;
855                 goto done;
856         }
857
858         hci_dev_lock(hdev);
859         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
860         hci_dev_unlock(hdev);
861
862         BT_DBG("num_rsp %d", ir.num_rsp);
863
864         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
865                 ptr += sizeof(ir);
866                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
867                                  ir.num_rsp))
868                         err = -EFAULT;
869         } else
870                 err = -EFAULT;
871
872         kfree(buf);
873
874 done:
875         hci_dev_put(hdev);
876         return err;
877 }
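/* Userspace reaches this through the HCIINQUIRY ioctl on a raw HCI
 * socket. A minimal sketch, error handling omitted (the GIAC LAP
 * 0x9e8b33 and a length of 8 are just example parameters):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id  = 0,
 *			  .flags   = IREQ_CACHE_FLUSH,
 *			  .lap     = { 0x33, 0x8b, 0x9e },
 *			  .length  = 8 } };
 *
 *	ioctl(sock, HCIINQUIRY, &buf);
 */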
878
879 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
880 {
881         u8 ad_len = 0, flags = 0;
882         size_t name_len;
883
884         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
885                 flags |= LE_AD_GENERAL;
886
887         if (!lmp_bredr_capable(hdev))
888                 flags |= LE_AD_NO_BREDR;
889
890         if (lmp_le_br_capable(hdev))
891                 flags |= LE_AD_SIM_LE_BREDR_CTRL;
892
893         if (lmp_host_le_br_capable(hdev))
894                 flags |= LE_AD_SIM_LE_BREDR_HOST;
895
896         if (flags) {
897                 BT_DBG("adv flags 0x%02x", flags);
898
899                 ptr[0] = 2;
900                 ptr[1] = EIR_FLAGS;
901                 ptr[2] = flags;
902
903                 ad_len += 3;
904                 ptr += 3;
905         }
906
907         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
908                 ptr[0] = 2;
909                 ptr[1] = EIR_TX_POWER;
910                 ptr[2] = (u8) hdev->adv_tx_power;
911
912                 ad_len += 3;
913                 ptr += 3;
914         }
915
916         name_len = strlen(hdev->dev_name);
917         if (name_len > 0) {
918                 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
919
920                 if (name_len > max_len) {
921                         name_len = max_len;
922                         ptr[1] = EIR_NAME_SHORT;
923                 } else
924                         ptr[1] = EIR_NAME_COMPLETE;
925
926                 ptr[0] = name_len + 1;
927
928                 memcpy(ptr + 2, hdev->dev_name, name_len);
929
930                 ad_len += (name_len + 2);
931                 ptr += (name_len + 2);
932         }
933
934         return ad_len;
935 }
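/* Each AD element produced above is encoded as { length, type, data }.
 * For an LE-only peripheral named "dev" advertising at 0 dBm this
 * would yield, for example:
 *
 *	02 01 06	Flags: LE General Discoverable, BR/EDR not supported
 *	02 0a 00	TX Power Level: 0 dBm
 *	04 09 64 65 76	Complete Local Name: "dev"
 */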
936
937 int hci_update_ad(struct hci_dev *hdev)
938 {
939         struct hci_cp_le_set_adv_data cp;
940         u8 len;
941         int err;
942
943         hci_dev_lock(hdev);
944
945         if (!lmp_le_capable(hdev)) {
946                 err = -EINVAL;
947                 goto unlock;
948         }
949
950         memset(&cp, 0, sizeof(cp));
951
952         len = create_ad(hdev, cp.data);
953
954         if (hdev->adv_data_len == len &&
955             memcmp(cp.data, hdev->adv_data, len) == 0) {
956                 err = 0;
957                 goto unlock;
958         }
959
960         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
961         hdev->adv_data_len = len;
962
963         cp.length = len;
964         err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
965
966 unlock:
967         hci_dev_unlock(hdev);
968
969         return err;
970 }
971
972 /* ---- HCI ioctl helpers ---- */
973
974 int hci_dev_open(__u16 dev)
975 {
976         struct hci_dev *hdev;
977         int ret = 0;
978
979         hdev = hci_dev_get(dev);
980         if (!hdev)
981                 return -ENODEV;
982
983         BT_DBG("%s %p", hdev->name, hdev);
984
985         hci_req_lock(hdev);
986
987         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
988                 ret = -ENODEV;
989                 goto done;
990         }
991
992         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
993                 ret = -ERFKILL;
994                 goto done;
995         }
996
997         if (test_bit(HCI_UP, &hdev->flags)) {
998                 ret = -EALREADY;
999                 goto done;
1000         }
1001
1002         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1003                 set_bit(HCI_RAW, &hdev->flags);
1004
1005         /* Treat all non-BR/EDR controllers as raw devices if
1006            enable_hs is not set */
1007         if (hdev->dev_type != HCI_BREDR && !enable_hs)
1008                 set_bit(HCI_RAW, &hdev->flags);
1009
1010         if (hdev->open(hdev)) {
1011                 ret = -EIO;
1012                 goto done;
1013         }
1014
1015         if (!test_bit(HCI_RAW, &hdev->flags)) {
1016                 atomic_set(&hdev->cmd_cnt, 1);
1017                 set_bit(HCI_INIT, &hdev->flags);
1018                 hdev->init_last_cmd = 0;
1019
1020                 ret = __hci_init(hdev);
1021
1022                 clear_bit(HCI_INIT, &hdev->flags);
1023         }
1024
1025         if (!ret) {
1026                 hci_dev_hold(hdev);
1027                 set_bit(HCI_UP, &hdev->flags);
1028                 hci_notify(hdev, HCI_DEV_UP);
1029                 hci_update_ad(hdev);
1030                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1031                     mgmt_valid_hdev(hdev)) {
1032                         hci_dev_lock(hdev);
1033                         mgmt_powered(hdev, 1);
1034                         hci_dev_unlock(hdev);
1035                 }
1036         } else {
1037                 /* Init failed, cleanup */
1038                 flush_work(&hdev->tx_work);
1039                 flush_work(&hdev->cmd_work);
1040                 flush_work(&hdev->rx_work);
1041
1042                 skb_queue_purge(&hdev->cmd_q);
1043                 skb_queue_purge(&hdev->rx_q);
1044
1045                 if (hdev->flush)
1046                         hdev->flush(hdev);
1047
1048                 if (hdev->sent_cmd) {
1049                         kfree_skb(hdev->sent_cmd);
1050                         hdev->sent_cmd = NULL;
1051                 }
1052
1053                 hdev->close(hdev);
1054                 hdev->flags = 0;
1055         }
1056
1057 done:
1058         hci_req_unlock(hdev);
1059         hci_dev_put(hdev);
1060         return ret;
1061 }
1062
1063 static int hci_dev_do_close(struct hci_dev *hdev)
1064 {
1065         BT_DBG("%s %p", hdev->name, hdev);
1066
1067         cancel_work_sync(&hdev->le_scan);
1068
1069         cancel_delayed_work(&hdev->power_off);
1070
1071         hci_req_cancel(hdev, ENODEV);
1072         hci_req_lock(hdev);
1073
1074         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1075                 del_timer_sync(&hdev->cmd_timer);
1076                 hci_req_unlock(hdev);
1077                 return 0;
1078         }
1079
1080         /* Flush RX and TX works */
1081         flush_work(&hdev->tx_work);
1082         flush_work(&hdev->rx_work);
1083
1084         if (hdev->discov_timeout > 0) {
1085                 cancel_delayed_work(&hdev->discov_off);
1086                 hdev->discov_timeout = 0;
1087                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1088         }
1089
1090         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1091                 cancel_delayed_work(&hdev->service_cache);
1092
1093         cancel_delayed_work_sync(&hdev->le_scan_disable);
1094
1095         hci_dev_lock(hdev);
1096         inquiry_cache_flush(hdev);
1097         hci_conn_hash_flush(hdev);
1098         hci_dev_unlock(hdev);
1099
1100         hci_notify(hdev, HCI_DEV_DOWN);
1101
1102         if (hdev->flush)
1103                 hdev->flush(hdev);
1104
1105         /* Reset device */
1106         skb_queue_purge(&hdev->cmd_q);
1107         atomic_set(&hdev->cmd_cnt, 1);
1108         if (!test_bit(HCI_RAW, &hdev->flags) &&
1109             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1110                 set_bit(HCI_INIT, &hdev->flags);
1111                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1112                 clear_bit(HCI_INIT, &hdev->flags);
1113         }
1114
1115         /* Flush cmd work */
1116         flush_work(&hdev->cmd_work);
1117
1118         /* Drop queues */
1119         skb_queue_purge(&hdev->rx_q);
1120         skb_queue_purge(&hdev->cmd_q);
1121         skb_queue_purge(&hdev->raw_q);
1122
1123         /* Drop last sent command */
1124         if (hdev->sent_cmd) {
1125                 del_timer_sync(&hdev->cmd_timer);
1126                 kfree_skb(hdev->sent_cmd);
1127                 hdev->sent_cmd = NULL;
1128         }
1129
1130         /* After this point our queues are empty
1131          * and no tasks are scheduled. */
1132         hdev->close(hdev);
1133
1134         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1135             mgmt_valid_hdev(hdev)) {
1136                 hci_dev_lock(hdev);
1137                 mgmt_powered(hdev, 0);
1138                 hci_dev_unlock(hdev);
1139         }
1140
1141         /* Clear flags */
1142         hdev->flags = 0;
1143
1144         /* Controller radio is available but is currently powered down */
1145         hdev->amp_status = 0;
1146
1147         memset(hdev->eir, 0, sizeof(hdev->eir));
1148         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1149
1150         hci_req_unlock(hdev);
1151
1152         hci_dev_put(hdev);
1153         return 0;
1154 }
1155
1156 int hci_dev_close(__u16 dev)
1157 {
1158         struct hci_dev *hdev;
1159         int err;
1160
1161         hdev = hci_dev_get(dev);
1162         if (!hdev)
1163                 return -ENODEV;
1164
1165         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1166                 cancel_delayed_work(&hdev->power_off);
1167
1168         err = hci_dev_do_close(hdev);
1169
1170         hci_dev_put(hdev);
1171         return err;
1172 }
1173
1174 int hci_dev_reset(__u16 dev)
1175 {
1176         struct hci_dev *hdev;
1177         int ret = 0;
1178
1179         hdev = hci_dev_get(dev);
1180         if (!hdev)
1181                 return -ENODEV;
1182
1183         hci_req_lock(hdev);
1184
1185         if (!test_bit(HCI_UP, &hdev->flags))
1186                 goto done;
1187
1188         /* Drop queues */
1189         skb_queue_purge(&hdev->rx_q);
1190         skb_queue_purge(&hdev->cmd_q);
1191
1192         hci_dev_lock(hdev);
1193         inquiry_cache_flush(hdev);
1194         hci_conn_hash_flush(hdev);
1195         hci_dev_unlock(hdev);
1196
1197         if (hdev->flush)
1198                 hdev->flush(hdev);
1199
1200         atomic_set(&hdev->cmd_cnt, 1);
1201         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1202
1203         if (!test_bit(HCI_RAW, &hdev->flags))
1204                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1205
1206 done:
1207         hci_req_unlock(hdev);
1208         hci_dev_put(hdev);
1209         return ret;
1210 }
1211
1212 int hci_dev_reset_stat(__u16 dev)
1213 {
1214         struct hci_dev *hdev;
1215         int ret = 0;
1216
1217         hdev = hci_dev_get(dev);
1218         if (!hdev)
1219                 return -ENODEV;
1220
1221         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1222
1223         hci_dev_put(hdev);
1224
1225         return ret;
1226 }
1227
1228 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1229 {
1230         struct hci_dev *hdev;
1231         struct hci_dev_req dr;
1232         int err = 0;
1233
1234         if (copy_from_user(&dr, arg, sizeof(dr)))
1235                 return -EFAULT;
1236
1237         hdev = hci_dev_get(dr.dev_id);
1238         if (!hdev)
1239                 return -ENODEV;
1240
1241         switch (cmd) {
1242         case HCISETAUTH:
1243                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1244                                    HCI_INIT_TIMEOUT);
1245                 break;
1246
1247         case HCISETENCRYPT:
1248                 if (!lmp_encrypt_capable(hdev)) {
1249                         err = -EOPNOTSUPP;
1250                         break;
1251                 }
1252
1253                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1254                         /* Auth must be enabled first */
1255                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1256                                            HCI_INIT_TIMEOUT);
1257                         if (err)
1258                                 break;
1259                 }
1260
1261                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1262                                    HCI_INIT_TIMEOUT);
1263                 break;
1264
1265         case HCISETSCAN:
1266                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1267                                    HCI_INIT_TIMEOUT);
1268                 break;
1269
1270         case HCISETLINKPOL:
1271                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1272                                    HCI_INIT_TIMEOUT);
1273                 break;
1274
1275         case HCISETLINKMODE:
1276                 hdev->link_mode = ((__u16) dr.dev_opt) &
1277                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1278                 break;
1279
1280         case HCISETPTYPE:
1281                 hdev->pkt_type = (__u16) dr.dev_opt;
1282                 break;
1283
1284         case HCISETACLMTU:
1285                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1286                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1287                 break;
1288
1289         case HCISETSCOMTU:
1290                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1291                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1292                 break;
1293
1294         default:
1295                 err = -EINVAL;
1296                 break;
1297         }
1298
1299         hci_dev_put(hdev);
1300         return err;
1301 }
1302
1303 int hci_get_dev_list(void __user *arg)
1304 {
1305         struct hci_dev *hdev;
1306         struct hci_dev_list_req *dl;
1307         struct hci_dev_req *dr;
1308         int n = 0, size, err;
1309         __u16 dev_num;
1310
1311         if (get_user(dev_num, (__u16 __user *) arg))
1312                 return -EFAULT;
1313
1314         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1315                 return -EINVAL;
1316
1317         size = sizeof(*dl) + dev_num * sizeof(*dr);
1318
1319         dl = kzalloc(size, GFP_KERNEL);
1320         if (!dl)
1321                 return -ENOMEM;
1322
1323         dr = dl->dev_req;
1324
1325         read_lock(&hci_dev_list_lock);
1326         list_for_each_entry(hdev, &hci_dev_list, list) {
1327                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1328                         cancel_delayed_work(&hdev->power_off);
1329
1330                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1331                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1332
1333                 (dr + n)->dev_id  = hdev->id;
1334                 (dr + n)->dev_opt = hdev->flags;
1335
1336                 if (++n >= dev_num)
1337                         break;
1338         }
1339         read_unlock(&hci_dev_list_lock);
1340
1341         dl->dev_num = n;
1342         size = sizeof(*dl) + n * sizeof(*dr);
1343
1344         err = copy_to_user(arg, dl, size);
1345         kfree(dl);
1346
1347         return err ? -EFAULT : 0;
1348 }
1349
1350 int hci_get_dev_info(void __user *arg)
1351 {
1352         struct hci_dev *hdev;
1353         struct hci_dev_info di;
1354         int err = 0;
1355
1356         if (copy_from_user(&di, arg, sizeof(di)))
1357                 return -EFAULT;
1358
1359         hdev = hci_dev_get(di.dev_id);
1360         if (!hdev)
1361                 return -ENODEV;
1362
1363         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1364                 cancel_delayed_work_sync(&hdev->power_off);
1365
1366         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1367                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1368
1369         strcpy(di.name, hdev->name);
1370         di.bdaddr   = hdev->bdaddr;
1371         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1372         di.flags    = hdev->flags;
1373         di.pkt_type = hdev->pkt_type;
1374         if (lmp_bredr_capable(hdev)) {
1375                 di.acl_mtu  = hdev->acl_mtu;
1376                 di.acl_pkts = hdev->acl_pkts;
1377                 di.sco_mtu  = hdev->sco_mtu;
1378                 di.sco_pkts = hdev->sco_pkts;
1379         } else {
1380                 di.acl_mtu  = hdev->le_mtu;
1381                 di.acl_pkts = hdev->le_pkts;
1382                 di.sco_mtu  = 0;
1383                 di.sco_pkts = 0;
1384         }
1385         di.link_policy = hdev->link_policy;
1386         di.link_mode   = hdev->link_mode;
1387
1388         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1389         memcpy(&di.features, &hdev->features, sizeof(di.features));
1390
1391         if (copy_to_user(arg, &di, sizeof(di)))
1392                 err = -EFAULT;
1393
1394         hci_dev_put(hdev);
1395
1396         return err;
1397 }
1398
1399 /* ---- Interface to HCI drivers ---- */
1400
1401 static int hci_rfkill_set_block(void *data, bool blocked)
1402 {
1403         struct hci_dev *hdev = data;
1404
1405         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1406
1407         if (!blocked)
1408                 return 0;
1409
1410         hci_dev_do_close(hdev);
1411
1412         return 0;
1413 }
1414
1415 static const struct rfkill_ops hci_rfkill_ops = {
1416         .set_block = hci_rfkill_set_block,
1417 };
1418
1419 static void hci_power_on(struct work_struct *work)
1420 {
1421         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1422
1423         BT_DBG("%s", hdev->name);
1424
1425         if (hci_dev_open(hdev->id) < 0)
1426                 return;
1427
1428         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1429                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1430                                    HCI_AUTO_OFF_TIMEOUT);
1431
1432         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1433                 mgmt_index_added(hdev);
1434 }
1435
1436 static void hci_power_off(struct work_struct *work)
1437 {
1438         struct hci_dev *hdev = container_of(work, struct hci_dev,
1439                                             power_off.work);
1440
1441         BT_DBG("%s", hdev->name);
1442
1443         hci_dev_do_close(hdev);
1444 }
1445
1446 static void hci_discov_off(struct work_struct *work)
1447 {
1448         struct hci_dev *hdev;
1449         u8 scan = SCAN_PAGE;
1450
1451         hdev = container_of(work, struct hci_dev, discov_off.work);
1452
1453         BT_DBG("%s", hdev->name);
1454
1455         hci_dev_lock(hdev);
1456
1457         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1458
1459         hdev->discov_timeout = 0;
1460
1461         hci_dev_unlock(hdev);
1462 }
1463
1464 int hci_uuids_clear(struct hci_dev *hdev)
1465 {
1466         struct bt_uuid *uuid, *tmp;
1467
1468         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1469                 list_del(&uuid->list);
1470                 kfree(uuid);
1471         }
1472
1473         return 0;
1474 }
1475
1476 int hci_link_keys_clear(struct hci_dev *hdev)
1477 {
1478         struct list_head *p, *n;
1479
1480         list_for_each_safe(p, n, &hdev->link_keys) {
1481                 struct link_key *key;
1482
1483                 key = list_entry(p, struct link_key, list);
1484
1485                 list_del(p);
1486                 kfree(key);
1487         }
1488
1489         return 0;
1490 }
1491
1492 int hci_smp_ltks_clear(struct hci_dev *hdev)
1493 {
1494         struct smp_ltk *k, *tmp;
1495
1496         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1497                 list_del(&k->list);
1498                 kfree(k);
1499         }
1500
1501         return 0;
1502 }
1503
1504 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1505 {
1506         struct link_key *k;
1507
1508         list_for_each_entry(k, &hdev->link_keys, list)
1509                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1510                         return k;
1511
1512         return NULL;
1513 }
1514
1515 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1516                                u8 key_type, u8 old_key_type)
1517 {
1518         /* Legacy key */
1519         if (key_type < 0x03)
1520                 return true;
1521
1522         /* Debug keys are insecure so don't store them persistently */
1523         if (key_type == HCI_LK_DEBUG_COMBINATION)
1524                 return false;
1525
1526         /* Changed combination key and there's no previous one */
1527         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1528                 return false;
1529
1530         /* Security mode 3 case */
1531         if (!conn)
1532                 return true;
1533
1534         /* Neither local nor remote side set no-bonding as a requirement */
1535         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1536                 return true;
1537
1538         /* Local side had dedicated bonding as requirement */
1539         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1540                 return true;
1541
1542         /* Remote side had dedicated bonding as requirement */
1543         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1544                 return true;
1545
1546         /* If none of the above criteria match, then don't store the key
1547          * persistently */
1548         return false;
1549 }
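/* The auth_type values tested above are the SSP authentication
 * requirements: 0x00/0x01 mean No Bonding (without/with MITM),
 * 0x02/0x03 Dedicated Bonding and 0x04/0x05 General Bonding, so
 * "> 0x01" means the side asked for some form of bonding.
 */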
1550
1551 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1552 {
1553         struct smp_ltk *k;
1554
1555         list_for_each_entry(k, &hdev->long_term_keys, list) {
1556                 if (k->ediv != ediv ||
1557                     memcmp(rand, k->rand, sizeof(k->rand)))
1558                         continue;
1559
1560                 return k;
1561         }
1562
1563         return NULL;
1564 }
1565
1566 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1567                                      u8 addr_type)
1568 {
1569         struct smp_ltk *k;
1570
1571         list_for_each_entry(k, &hdev->long_term_keys, list)
1572                 if (addr_type == k->bdaddr_type &&
1573                     bacmp(bdaddr, &k->bdaddr) == 0)
1574                         return k;
1575
1576         return NULL;
1577 }
1578
1579 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1580                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1581 {
1582         struct link_key *key, *old_key;
1583         u8 old_key_type;
1584         bool persistent;
1585
1586         old_key = hci_find_link_key(hdev, bdaddr);
1587         if (old_key) {
1588                 old_key_type = old_key->type;
1589                 key = old_key;
1590         } else {
1591                 old_key_type = conn ? conn->key_type : 0xff;
1592                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1593                 if (!key)
1594                         return -ENOMEM;
1595                 list_add(&key->list, &hdev->link_keys);
1596         }
1597
1598         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1599
1600         /* Some buggy controller combinations generate a changed
1601          * combination key for legacy pairing even when there's no
1602          * previous key */
1603         if (type == HCI_LK_CHANGED_COMBINATION &&
1604             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1605                 type = HCI_LK_COMBINATION;
1606                 if (conn)
1607                         conn->key_type = type;
1608         }
1609
1610         bacpy(&key->bdaddr, bdaddr);
1611         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1612         key->pin_len = pin_len;
1613
1614         if (type == HCI_LK_CHANGED_COMBINATION)
1615                 key->type = old_key_type;
1616         else
1617                 key->type = type;
1618
1619         if (!new_key)
1620                 return 0;
1621
1622         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1623
1624         mgmt_new_link_key(hdev, key, persistent);
1625
1626         if (conn)
1627                 conn->flush_key = !persistent;
1628
1629         return 0;
1630 }
1631
1632 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1633                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1634                 ediv, u8 rand[8])
1635 {
1636         struct smp_ltk *key, *old_key;
1637
1638         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1639                 return 0;
1640
1641         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1642         if (old_key)
1643                 key = old_key;
1644         else {
1645                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1646                 if (!key)
1647                         return -ENOMEM;
1648                 list_add(&key->list, &hdev->long_term_keys);
1649         }
1650
1651         bacpy(&key->bdaddr, bdaddr);
1652         key->bdaddr_type = addr_type;
1653         memcpy(key->val, tk, sizeof(key->val));
1654         key->authenticated = authenticated;
1655         key->ediv = ediv;
1656         key->enc_size = enc_size;
1657         key->type = type;
1658         memcpy(key->rand, rand, sizeof(key->rand));
1659
1660         if (!new_key)
1661                 return 0;
1662
1663         if (type & HCI_SMP_LTK)
1664                 mgmt_new_ltk(hdev, key, 1);
1665
1666         return 0;
1667 }
1668
1669 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1670 {
1671         struct link_key *key;
1672
1673         key = hci_find_link_key(hdev, bdaddr);
1674         if (!key)
1675                 return -ENOENT;
1676
1677         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1678
1679         list_del(&key->list);
1680         kfree(key);
1681
1682         return 0;
1683 }
1684
1685 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1686 {
1687         struct smp_ltk *k, *tmp;
1688
1689         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1690                 if (bacmp(bdaddr, &k->bdaddr))
1691                         continue;
1692
1693                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1694
1695                 list_del(&k->list);
1696                 kfree(k);
1697         }
1698
1699         return 0;
1700 }
1701
1702 /* HCI command timer function */
1703 static void hci_cmd_timeout(unsigned long arg)
1704 {
1705         struct hci_dev *hdev = (void *) arg;
1706
1707         if (hdev->sent_cmd) {
1708                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1709                 u16 opcode = __le16_to_cpu(sent->opcode);
1710
1711                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1712         } else {
1713                 BT_ERR("%s command tx timeout", hdev->name);
1714         }
1715
1716         atomic_set(&hdev->cmd_cnt, 1);
1717         queue_work(hdev->workqueue, &hdev->cmd_work);
1718 }
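/* When a command gets no Command Complete/Status event within
 * HCI_CMD_TIMEOUT, this timer logs the stuck opcode and restores
 * cmd_cnt to 1 so hci_cmd_work() can dispatch the next queued
 * command.
 */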
1719
1720 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1721                                           bdaddr_t *bdaddr)
1722 {
1723         struct oob_data *data;
1724
1725         list_for_each_entry(data, &hdev->remote_oob_data, list)
1726                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1727                         return data;
1728
1729         return NULL;
1730 }
1731
1732 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1733 {
1734         struct oob_data *data;
1735
1736         data = hci_find_remote_oob_data(hdev, bdaddr);
1737         if (!data)
1738                 return -ENOENT;
1739
1740         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1741
1742         list_del(&data->list);
1743         kfree(data);
1744
1745         return 0;
1746 }
1747
1748 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1749 {
1750         struct oob_data *data, *n;
1751
1752         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1753                 list_del(&data->list);
1754                 kfree(data);
1755         }
1756
1757         return 0;
1758 }
1759
1760 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1761                             u8 *randomizer)
1762 {
1763         struct oob_data *data;
1764
1765         data = hci_find_remote_oob_data(hdev, bdaddr);
1766
1767         if (!data) {
1768                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1769                 if (!data)
1770                         return -ENOMEM;
1771
1772                 bacpy(&data->bdaddr, bdaddr);
1773                 list_add(&data->list, &hdev->remote_oob_data);
1774         }
1775
1776         memcpy(data->hash, hash, sizeof(data->hash));
1777         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1778
1779         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1780
1781         return 0;
1782 }
1783
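/* Illustrative sketch: caching and then dropping out-of-band pairing
 * data received through an external channel (e.g. NFC).  Both hash and
 * randomizer are 16 byte buffers; the mgmt handlers make these calls
 * under hci_dev_lock(hdev).
 */
#if 0
        int err;

        err = hci_add_remote_oob_data(hdev, &bdaddr, hash, randomizer);
        if (!err)
                err = hci_remove_remote_oob_data(hdev, &bdaddr);
#endif
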
1784 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785 {
1786         struct bdaddr_list *b;
1787
1788         list_for_each_entry(b, &hdev->blacklist, list)
1789                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1790                         return b;
1791
1792         return NULL;
1793 }
1794
1795 int hci_blacklist_clear(struct hci_dev *hdev)
1796 {
1797         struct list_head *p, *n;
1798
1799         list_for_each_safe(p, n, &hdev->blacklist) {
1800                 struct bdaddr_list *b;
1801
1802                 b = list_entry(p, struct bdaddr_list, list);
1803
1804                 list_del(p);
1805                 kfree(b);
1806         }
1807
1808         return 0;
1809 }
1810
1811 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1812 {
1813         struct bdaddr_list *entry;
1814
1815         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1816                 return -EBADF;
1817
1818         if (hci_blacklist_lookup(hdev, bdaddr))
1819                 return -EEXIST;
1820
1821         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1822         if (!entry)
1823                 return -ENOMEM;
1824
1825         bacpy(&entry->bdaddr, bdaddr);
1826
1827         list_add(&entry->list, &hdev->blacklist);
1828
1829         return mgmt_device_blocked(hdev, bdaddr, type);
1830 }
1831
1832 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1833 {
1834         struct bdaddr_list *entry;
1835
1836         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1837                 return hci_blacklist_clear(hdev);
1838
1839         entry = hci_blacklist_lookup(hdev, bdaddr);
1840         if (!entry)
1841                 return -ENOENT;
1842
1843         list_del(&entry->list);
1844         kfree(entry);
1845
1846         return mgmt_device_unblocked(hdev, bdaddr, type);
1847 }
1848
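/* Illustrative sketch: blocking and unblocking one remote device.  As
 * with the mgmt block/unblock handlers, hci_dev_lock(hdev) is expected
 * to be held around both calls; "bdaddr" is a hypothetical address.
 */
#if 0
        int err;

        err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
        if (err == -EEXIST)
                BT_DBG("%s already blacklisted", hdev->name);

        err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
#endif
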
1849 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1850 {
1851         struct le_scan_params *param = (struct le_scan_params *) opt;
1852         struct hci_cp_le_set_scan_param cp;
1853
1854         memset(&cp, 0, sizeof(cp));
1855         cp.type = param->type;
1856         cp.interval = cpu_to_le16(param->interval);
1857         cp.window = cpu_to_le16(param->window);
1858
1859         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1860 }
1861
1862 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1863 {
1864         struct hci_cp_le_set_scan_enable cp;
1865
1866         memset(&cp, 0, sizeof(cp));
1867         cp.enable = 1;
1868         cp.filter_dup = 1;
1869
1870         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1871 }
1872
1873 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1874                           u16 window, int timeout)
1875 {
1876         long timeo = msecs_to_jiffies(3000);
1877         struct le_scan_params param;
1878         int err;
1879
1880         BT_DBG("%s", hdev->name);
1881
1882         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1883                 return -EINPROGRESS;
1884
1885         param.type = type;
1886         param.interval = interval;
1887         param.window = window;
1888
1889         hci_req_lock(hdev);
1890
1891         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1892                              timeo);
1893         if (!err)
1894                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1895
1896         hci_req_unlock(hdev);
1897
1898         if (err < 0)
1899                 return err;
1900
1901         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1902                            msecs_to_jiffies(timeout));
1903
1904         return 0;
1905 }
1906
1907 int hci_cancel_le_scan(struct hci_dev *hdev)
1908 {
1909         BT_DBG("%s", hdev->name);
1910
1911         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1912                 return -EALREADY;
1913
1914         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1915                 struct hci_cp_le_set_scan_enable cp;
1916
1917                 /* Send HCI command to disable LE Scan */
1918                 memset(&cp, 0, sizeof(cp));
1919                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1920         }
1921
1922         return 0;
1923 }
1924
1925 static void le_scan_disable_work(struct work_struct *work)
1926 {
1927         struct hci_dev *hdev = container_of(work, struct hci_dev,
1928                                             le_scan_disable.work);
1929         struct hci_cp_le_set_scan_enable cp;
1930
1931         BT_DBG("%s", hdev->name);
1932
1933         memset(&cp, 0, sizeof(cp));
1934
1935         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1936 }
1937
1938 static void le_scan_work(struct work_struct *work)
1939 {
1940         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1941         struct le_scan_params *param = &hdev->le_scan_params;
1942
1943         BT_DBG("%s", hdev->name);
1944
1945         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1946                        param->timeout);
1947 }
1948
1949 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1950                 int timeout)
1951 {
1952         struct le_scan_params *param = &hdev->le_scan_params;
1953
1954         BT_DBG("%s", hdev->name);
1955
1956         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1957                 return -ENOTSUPP;
1958
1959         if (work_busy(&hdev->le_scan))
1960                 return -EINPROGRESS;
1961
1962         param->type = type;
1963         param->interval = interval;
1964         param->window = window;
1965         param->timeout = timeout;
1966
1967         queue_work(system_long_wq, &hdev->le_scan);
1968
1969         return 0;
1970 }
1971
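/* Illustrative sketch: starting and cancelling an LE scan.  Interval
 * and window are in 0.625 ms units (0x0010 = 10 ms), the timeout is in
 * milliseconds, and scan type 0x01 requests an active scan; all values
 * here are examples only.
 */
#if 0
        int err;

        err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10240);
        if (!err)
                err = hci_cancel_le_scan(hdev);
#endif
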
1972 /* Alloc HCI device */
1973 struct hci_dev *hci_alloc_dev(void)
1974 {
1975         struct hci_dev *hdev;
1976
1977         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1978         if (!hdev)
1979                 return NULL;
1980
1981         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1982         hdev->esco_type = (ESCO_HV1);
1983         hdev->link_mode = (HCI_LM_ACCEPT);
1984         hdev->io_capability = 0x03; /* No Input No Output */
1985         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1986         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1987
1988         hdev->sniff_max_interval = 800;
1989         hdev->sniff_min_interval = 80;
1990
1991         mutex_init(&hdev->lock);
1992         mutex_init(&hdev->req_lock);
1993
1994         INIT_LIST_HEAD(&hdev->mgmt_pending);
1995         INIT_LIST_HEAD(&hdev->blacklist);
1996         INIT_LIST_HEAD(&hdev->uuids);
1997         INIT_LIST_HEAD(&hdev->link_keys);
1998         INIT_LIST_HEAD(&hdev->long_term_keys);
1999         INIT_LIST_HEAD(&hdev->remote_oob_data);
2000         INIT_LIST_HEAD(&hdev->conn_hash.list);
2001
2002         INIT_WORK(&hdev->rx_work, hci_rx_work);
2003         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2004         INIT_WORK(&hdev->tx_work, hci_tx_work);
2005         INIT_WORK(&hdev->power_on, hci_power_on);
2006         INIT_WORK(&hdev->le_scan, le_scan_work);
2007
2008         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2009         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2010         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2011
2012         skb_queue_head_init(&hdev->driver_init);
2013         skb_queue_head_init(&hdev->rx_q);
2014         skb_queue_head_init(&hdev->cmd_q);
2015         skb_queue_head_init(&hdev->raw_q);
2016
2017         init_waitqueue_head(&hdev->req_wait_q);
2018
2019         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2020
2021         hci_init_sysfs(hdev);
2022         discovery_init(hdev);
2023
2024         return hdev;
2025 }
2026 EXPORT_SYMBOL(hci_alloc_dev);
2027
2028 /* Free HCI device */
2029 void hci_free_dev(struct hci_dev *hdev)
2030 {
2031         skb_queue_purge(&hdev->driver_init);
2032
2033         /* Will be freed via the device release callback */
2034         put_device(&hdev->dev);
2035 }
2036 EXPORT_SYMBOL(hci_free_dev);
2037
2038 /* Register HCI device */
2039 int hci_register_dev(struct hci_dev *hdev)
2040 {
2041         int id, error;
2042
2043         if (!hdev->open || !hdev->close)
2044                 return -EINVAL;
2045
2046         /* Do not allow HCI_AMP devices to register at index 0,
2047          * so the index can be used as the AMP controller ID.
2048          */
2049         switch (hdev->dev_type) {
2050         case HCI_BREDR:
2051                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2052                 break;
2053         case HCI_AMP:
2054                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2055                 break;
2056         default:
2057                 return -EINVAL;
2058         }
2059
2060         if (id < 0)
2061                 return id;
2062
2063         sprintf(hdev->name, "hci%d", id);
2064         hdev->id = id;
2065
2066         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2067
2068         write_lock(&hci_dev_list_lock);
2069         list_add(&hdev->list, &hci_dev_list);
2070         write_unlock(&hci_dev_list_lock);
2071
2072         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2073                                           WQ_MEM_RECLAIM, 1);
2074         if (!hdev->workqueue) {
2075                 error = -ENOMEM;
2076                 goto err;
2077         }
2078
2079         hdev->req_workqueue = alloc_workqueue(hdev->name,
2080                                               WQ_HIGHPRI | WQ_UNBOUND |
2081                                               WQ_MEM_RECLAIM, 1);
2082         if (!hdev->req_workqueue) {
2083                 destroy_workqueue(hdev->workqueue);
2084                 error = -ENOMEM;
2085                 goto err;
2086         }
2087
2088         error = hci_add_sysfs(hdev);
2089         if (error < 0)
2090                 goto err_wqueue;
2091
2092         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2093                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2094                                     hdev);
2095         if (hdev->rfkill) {
2096                 if (rfkill_register(hdev->rfkill) < 0) {
2097                         rfkill_destroy(hdev->rfkill);
2098                         hdev->rfkill = NULL;
2099                 }
2100         }
2101
2102         set_bit(HCI_SETUP, &hdev->dev_flags);
2103
2104         if (hdev->dev_type != HCI_AMP)
2105                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2106
2107         hci_notify(hdev, HCI_DEV_REG);
2108         hci_dev_hold(hdev);
2109
2110         queue_work(hdev->req_workqueue, &hdev->power_on);
2111
2112         return id;
2113
2114 err_wqueue:
2115         destroy_workqueue(hdev->workqueue);
2116         destroy_workqueue(hdev->req_workqueue);
2117 err:
2118         ida_simple_remove(&hci_index_ida, hdev->id);
2119         write_lock(&hci_dev_list_lock);
2120         list_del(&hdev->list);
2121         write_unlock(&hci_dev_list_lock);
2122
2123         return error;
2124 }
2125 EXPORT_SYMBOL(hci_register_dev);
2126
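/* Illustrative driver-side sketch: allocating, wiring up and
 * registering a controller.  All my_* symbols are hypothetical; open()
 * and close() are mandatory, send() pushes frames to the hardware.
 */
#if 0
static int my_probe(struct my_device *dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_USB;
        hdev->dev_type = HCI_BREDR;
        hdev->open = my_open;
        hdev->close = my_close;
        hdev->send = my_send;
        hci_set_drvdata(hdev, dev);

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
#endif
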
2127 /* Unregister HCI device */
2128 void hci_unregister_dev(struct hci_dev *hdev)
2129 {
2130         int i, id;
2131
2132         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2133
2134         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2135
2136         id = hdev->id;
2137
2138         write_lock(&hci_dev_list_lock);
2139         list_del(&hdev->list);
2140         write_unlock(&hci_dev_list_lock);
2141
2142         hci_dev_do_close(hdev);
2143
2144         for (i = 0; i < NUM_REASSEMBLY; i++)
2145                 kfree_skb(hdev->reassembly[i]);
2146
2147         cancel_work_sync(&hdev->power_on);
2148
2149         if (!test_bit(HCI_INIT, &hdev->flags) &&
2150             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2151                 hci_dev_lock(hdev);
2152                 mgmt_index_removed(hdev);
2153                 hci_dev_unlock(hdev);
2154         }
2155
2156         /* mgmt_index_removed should take care of emptying the
2157          * pending list */
2158         BUG_ON(!list_empty(&hdev->mgmt_pending));
2159
2160         hci_notify(hdev, HCI_DEV_UNREG);
2161
2162         if (hdev->rfkill) {
2163                 rfkill_unregister(hdev->rfkill);
2164                 rfkill_destroy(hdev->rfkill);
2165         }
2166
2167         hci_del_sysfs(hdev);
2168
2169         destroy_workqueue(hdev->workqueue);
2170         destroy_workqueue(hdev->req_workqueue);
2171
2172         hci_dev_lock(hdev);
2173         hci_blacklist_clear(hdev);
2174         hci_uuids_clear(hdev);
2175         hci_link_keys_clear(hdev);
2176         hci_smp_ltks_clear(hdev);
2177         hci_remote_oob_data_clear(hdev);
2178         hci_dev_unlock(hdev);
2179
2180         hci_dev_put(hdev);
2181
2182         ida_simple_remove(&hci_index_ida, id);
2183 }
2184 EXPORT_SYMBOL(hci_unregister_dev);
2185
2186 /* Suspend HCI device */
2187 int hci_suspend_dev(struct hci_dev *hdev)
2188 {
2189         hci_notify(hdev, HCI_DEV_SUSPEND);
2190         return 0;
2191 }
2192 EXPORT_SYMBOL(hci_suspend_dev);
2193
2194 /* Resume HCI device */
2195 int hci_resume_dev(struct hci_dev *hdev)
2196 {
2197         hci_notify(hdev, HCI_DEV_RESUME);
2198         return 0;
2199 }
2200 EXPORT_SYMBOL(hci_resume_dev);
2201
2202 /* Receive frame from HCI drivers */
2203 int hci_recv_frame(struct sk_buff *skb)
2204 {
2205         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2206         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2207                       !test_bit(HCI_INIT, &hdev->flags))) {
2208                 kfree_skb(skb);
2209                 return -ENXIO;
2210         }
2211
2212         /* Incoming skb */
2213         bt_cb(skb)->incoming = 1;
2214
2215         /* Time stamp */
2216         __net_timestamp(skb);
2217
2218         skb_queue_tail(&hdev->rx_q, skb);
2219         queue_work(hdev->workqueue, &hdev->rx_work);
2220
2221         return 0;
2222 }
2223 EXPORT_SYMBOL(hci_recv_frame);
2224
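/* Illustrative driver RX sketch: tag the skb with its owning device
 * and packet type before handing it to the core; HCI_EVENT_PKT is just
 * an example type.
 */
#if 0
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        err = hci_recv_frame(skb);
#endif
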
2225 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2226                           int count, __u8 index)
2227 {
2228         int len = 0;
2229         int hlen = 0;
2230         int remain = count;
2231         struct sk_buff *skb;
2232         struct bt_skb_cb *scb;
2233
2234         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2235             index >= NUM_REASSEMBLY)
2236                 return -EILSEQ;
2237
2238         skb = hdev->reassembly[index];
2239
2240         if (!skb) {
2241                 switch (type) {
2242                 case HCI_ACLDATA_PKT:
2243                         len = HCI_MAX_FRAME_SIZE;
2244                         hlen = HCI_ACL_HDR_SIZE;
2245                         break;
2246                 case HCI_EVENT_PKT:
2247                         len = HCI_MAX_EVENT_SIZE;
2248                         hlen = HCI_EVENT_HDR_SIZE;
2249                         break;
2250                 case HCI_SCODATA_PKT:
2251                         len = HCI_MAX_SCO_SIZE;
2252                         hlen = HCI_SCO_HDR_SIZE;
2253                         break;
2254                 }
2255
2256                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2257                 if (!skb)
2258                         return -ENOMEM;
2259
2260                 scb = (void *) skb->cb;
2261                 scb->expect = hlen;
2262                 scb->pkt_type = type;
2263
2264                 skb->dev = (void *) hdev;
2265                 hdev->reassembly[index] = skb;
2266         }
2267
2268         while (count) {
2269                 scb = (void *) skb->cb;
2270                 len = min_t(uint, scb->expect, count);
2271
2272                 memcpy(skb_put(skb, len), data, len);
2273
2274                 count -= len;
2275                 data += len;
2276                 scb->expect -= len;
2277                 remain = count;
2278
2279                 switch (type) {
2280                 case HCI_EVENT_PKT:
2281                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2282                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2283                                 scb->expect = h->plen;
2284
2285                                 if (skb_tailroom(skb) < scb->expect) {
2286                                         kfree_skb(skb);
2287                                         hdev->reassembly[index] = NULL;
2288                                         return -ENOMEM;
2289                                 }
2290                         }
2291                         break;
2292
2293                 case HCI_ACLDATA_PKT:
2294                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2295                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2296                                 scb->expect = __le16_to_cpu(h->dlen);
2297
2298                                 if (skb_tailroom(skb) < scb->expect) {
2299                                         kfree_skb(skb);
2300                                         hdev->reassembly[index] = NULL;
2301                                         return -ENOMEM;
2302                                 }
2303                         }
2304                         break;
2305
2306                 case HCI_SCODATA_PKT:
2307                         if (skb->len == HCI_SCO_HDR_SIZE) {
2308                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2309                                 scb->expect = h->dlen;
2310
2311                                 if (skb_tailroom(skb) < scb->expect) {
2312                                         kfree_skb(skb);
2313                                         hdev->reassembly[index] = NULL;
2314                                         return -ENOMEM;
2315                                 }
2316                         }
2317                         break;
2318                 }
2319
2320                 if (scb->expect == 0) {
2321                         /* Complete frame */
2322
2323                         bt_cb(skb)->pkt_type = type;
2324                         hci_recv_frame(skb);
2325
2326                         hdev->reassembly[index] = NULL;
2327                         return remain;
2328                 }
2329         }
2330
2331         return remain;
2332 }
2333
2334 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2335 {
2336         int rem = 0;
2337
2338         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2339                 return -EILSEQ;
2340
2341         while (count) {
2342                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2343                 if (rem < 0)
2344                         return rem;
2345
2346                 data += (count - rem);
2347                 count = rem;
2348         }
2349
2350         return rem;
2351 }
2352 EXPORT_SYMBOL(hci_recv_fragment);
2353
2354 #define STREAM_REASSEMBLY 0
2355
2356 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2357 {
2358         int type;
2359         int rem = 0;
2360
2361         while (count) {
2362                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2363
2364                 if (!skb) {
2365                         struct { char type; } *pkt;
2366
2367                         /* Start of the frame */
2368                         pkt = data;
2369                         type = pkt->type;
2370
2371                         data++;
2372                         count--;
2373                 } else
2374                         type = bt_cb(skb)->pkt_type;
2375
2376                 rem = hci_reassembly(hdev, type, data, count,
2377                                      STREAM_REASSEMBLY);
2378                 if (rem < 0)
2379                         return rem;
2380
2381                 data += (count - rem);
2382                 count = rem;
2383         }
2384
2385         return rem;
2386 }
2387 EXPORT_SYMBOL(hci_recv_stream_fragment);
2388
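/* Illustrative sketch for line-discipline style drivers: raw UART
 * bytes are fed in as they arrive, and the first byte of every frame
 * is the packet type indicator (H4 framing).
 */
#if 0
        int err;

        err = hci_recv_stream_fragment(hdev, buf, count);
        if (err < 0)
                BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
#endif
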
2389 /* ---- Interface to upper protocols ---- */
2390
2391 int hci_register_cb(struct hci_cb *cb)
2392 {
2393         BT_DBG("%p name %s", cb, cb->name);
2394
2395         write_lock(&hci_cb_list_lock);
2396         list_add(&cb->list, &hci_cb_list);
2397         write_unlock(&hci_cb_list_lock);
2398
2399         return 0;
2400 }
2401 EXPORT_SYMBOL(hci_register_cb);
2402
2403 int hci_unregister_cb(struct hci_cb *cb)
2404 {
2405         BT_DBG("%p name %s", cb, cb->name);
2406
2407         write_lock(&hci_cb_list_lock);
2408         list_del(&cb->list);
2409         write_unlock(&hci_cb_list_lock);
2410
2411         return 0;
2412 }
2413 EXPORT_SYMBOL(hci_unregister_cb);
2414
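/* Illustrative sketch: an upper protocol registering a callback block
 * to hear about link security changes.  Field names follow struct
 * hci_cb in hci_core.h; the my_* symbols are hypothetical.
 */
#if 0
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
        /* react to a change in link security */
}

static struct hci_cb my_cb = {
        .name         = "my_proto",
        .security_cfm = my_security_cfm,
};

/* hci_register_cb(&my_cb) on module init,
 * hci_unregister_cb(&my_cb) on module exit.
 */
#endif
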
2415 static int hci_send_frame(struct sk_buff *skb)
2416 {
2417         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2418
2419         if (!hdev) {
2420                 kfree_skb(skb);
2421                 return -ENODEV;
2422         }
2423
2424         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2425
2426         /* Time stamp */
2427         __net_timestamp(skb);
2428
2429         /* Send copy to monitor */
2430         hci_send_to_monitor(hdev, skb);
2431
2432         if (atomic_read(&hdev->promisc)) {
2433                 /* Send copy to the sockets */
2434                 hci_send_to_sock(hdev, skb);
2435         }
2436
2437         /* Get rid of skb owner, prior to sending to the driver. */
2438         skb_orphan(skb);
2439
2440         return hdev->send(skb);
2441 }
2442
2443 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2444 {
2445         skb_queue_head_init(&req->cmd_q);
2446         req->hdev = hdev;
2447 }
2448
2449 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2450 {
2451         struct hci_dev *hdev = req->hdev;
2452         struct sk_buff *skb;
2453         unsigned long flags;
2454
2455         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2456
2457         /* Do not allow empty requests */
2458         if (skb_queue_empty(&req->cmd_q))
2459                 return -EINVAL;
2460
2461         skb = skb_peek_tail(&req->cmd_q);
2462         bt_cb(skb)->req.complete = complete;
2463
2464         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2465         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2466         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2467
2468         queue_work(hdev->workqueue, &hdev->cmd_work);
2469
2470         return 0;
2471 }
2472
2473 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2474                                        u32 plen, void *param)
2475 {
2476         int len = HCI_COMMAND_HDR_SIZE + plen;
2477         struct hci_command_hdr *hdr;
2478         struct sk_buff *skb;
2479
2480         skb = bt_skb_alloc(len, GFP_ATOMIC);
2481         if (!skb)
2482                 return NULL;
2483
2484         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2485         hdr->opcode = cpu_to_le16(opcode);
2486         hdr->plen   = plen;
2487
2488         if (plen)
2489                 memcpy(skb_put(skb, plen), param, plen);
2490
2491         BT_DBG("skb len %d", skb->len);
2492
2493         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2494         skb->dev = (void *) hdev;
2495
2496         return skb;
2497 }
2498
2499 /* Send HCI command */
2500 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2501 {
2502         struct sk_buff *skb;
2503
2504         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2505
2506         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2507         if (!skb) {
2508                 BT_ERR("%s no memory for command", hdev->name);
2509                 return -ENOMEM;
2510         }
2511
2512         if (test_bit(HCI_INIT, &hdev->flags))
2513                 hdev->init_last_cmd = opcode;
2514
2515         /* Stand-alone HCI commands must be flagged as
2516          * single-command requests.
2517          */
2518         bt_cb(skb)->req.start = true;
2519
2520         skb_queue_tail(&hdev->cmd_q, skb);
2521         queue_work(hdev->workqueue, &hdev->cmd_work);
2522
2523         return 0;
2524 }
2525
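/* Illustrative sketch: sending one stand-alone command, here enabling
 * page and inquiry scan.  The reply is delivered asynchronously
 * through the Command Complete handlers in hci_event.c.
 */
#if 0
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;
        int err;

        err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
#endif
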
2526 /* Queue a command to an asynchronous HCI request */
2527 int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2528 {
2529         struct hci_dev *hdev = req->hdev;
2530         struct sk_buff *skb;
2531
2532         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2533
2534         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2535         if (!skb) {
2536                 BT_ERR("%s no memory for command", hdev->name);
2537                 return -ENOMEM;
2538         }
2539
2540         if (skb_queue_empty(&req->cmd_q))
2541                 bt_cb(skb)->req.start = true;
2542
2543         skb_queue_tail(&req->cmd_q, skb);
2544
2545         return 0;
2546 }
2547
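/* Illustrative sketch of the asynchronous request API: several
 * commands are queued and run as one unit, and the completion callback
 * fires once after the last command; my_req_complete is hypothetical.
 */
#if 0
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

        /* ... then, from some function: */
        struct hci_request req;
        u8 scan = SCAN_PAGE;
        int err;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
        err = hci_req_run(&req, my_req_complete);
#endif
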
2548 /* Get data from the previously sent command */
2549 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2550 {
2551         struct hci_command_hdr *hdr;
2552
2553         if (!hdev->sent_cmd)
2554                 return NULL;
2555
2556         hdr = (void *) hdev->sent_cmd->data;
2557
2558         if (hdr->opcode != cpu_to_le16(opcode))
2559                 return NULL;
2560
2561         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2562
2563         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2564 }
2565
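/* Illustrative sketch: a Command Complete handler recovering the
 * parameters of the command it answers, a pattern used throughout
 * hci_event.c.
 */
#if 0
        struct hci_cp_write_le_host_supported *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
        if (!sent)
                return;

        BT_DBG("le 0x%2.2x simul 0x%2.2x", sent->le, sent->simul);
#endif
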
2566 /* Send ACL data */
2567 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2568 {
2569         struct hci_acl_hdr *hdr;
2570         int len = skb->len;
2571
2572         skb_push(skb, HCI_ACL_HDR_SIZE);
2573         skb_reset_transport_header(skb);
2574         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2575         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2576         hdr->dlen   = cpu_to_le16(len);
2577 }
2578
2579 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2580                           struct sk_buff *skb, __u16 flags)
2581 {
2582         struct hci_conn *conn = chan->conn;
2583         struct hci_dev *hdev = conn->hdev;
2584         struct sk_buff *list;
2585
2586         skb->len = skb_headlen(skb);
2587         skb->data_len = 0;
2588
2589         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2590
2591         switch (hdev->dev_type) {
2592         case HCI_BREDR:
2593                 hci_add_acl_hdr(skb, conn->handle, flags);
2594                 break;
2595         case HCI_AMP:
2596                 hci_add_acl_hdr(skb, chan->handle, flags);
2597                 break;
2598         default:
2599                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2600                 return;
2601         }
2602
2603         list = skb_shinfo(skb)->frag_list;
2604         if (!list) {
2605                 /* Non-fragmented */
2606                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2607
2608                 skb_queue_tail(queue, skb);
2609         } else {
2610                 /* Fragmented */
2611                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2612
2613                 skb_shinfo(skb)->frag_list = NULL;
2614
2615                 /* Queue all fragments atomically */
2616                 spin_lock(&queue->lock);
2617
2618                 __skb_queue_tail(queue, skb);
2619
2620                 flags &= ~ACL_START;
2621                 flags |= ACL_CONT;
2622                 do {
2623                         skb = list; list = list->next;
2624
2625                         skb->dev = (void *) hdev;
2626                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2627                         hci_add_acl_hdr(skb, conn->handle, flags);
2628
2629                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2630
2631                         __skb_queue_tail(queue, skb);
2632                 } while (list);
2633
2634                 spin_unlock(&queue->lock);
2635         }
2636 }
2637
2638 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2639 {
2640         struct hci_dev *hdev = chan->conn->hdev;
2641
2642         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2643
2644         skb->dev = (void *) hdev;
2645
2646         hci_queue_acl(chan, &chan->data_q, skb, flags);
2647
2648         queue_work(hdev->workqueue, &hdev->tx_work);
2649 }
2650
2651 /* Send SCO data */
2652 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2653 {
2654         struct hci_dev *hdev = conn->hdev;
2655         struct hci_sco_hdr hdr;
2656
2657         BT_DBG("%s len %d", hdev->name, skb->len);
2658
2659         hdr.handle = cpu_to_le16(conn->handle);
2660         hdr.dlen   = skb->len;
2661
2662         skb_push(skb, HCI_SCO_HDR_SIZE);
2663         skb_reset_transport_header(skb);
2664         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2665
2666         skb->dev = (void *) hdev;
2667         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2668
2669         skb_queue_tail(&conn->data_q, skb);
2670         queue_work(hdev->workqueue, &hdev->tx_work);
2671 }
2672
2673 /* ---- HCI TX task (outgoing data) ---- */
2674
2675 /* HCI Connection scheduler */
2676 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2677                                      int *quote)
2678 {
2679         struct hci_conn_hash *h = &hdev->conn_hash;
2680         struct hci_conn *conn = NULL, *c;
2681         unsigned int num = 0, min = ~0;
2682
2683         /* We don't have to lock the device here. Connections are always
2684          * added and removed with TX task disabled. */
2685
2686         rcu_read_lock();
2687
2688         list_for_each_entry_rcu(c, &h->list, list) {
2689                 if (c->type != type || skb_queue_empty(&c->data_q))
2690                         continue;
2691
2692                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2693                         continue;
2694
2695                 num++;
2696
2697                 if (c->sent < min) {
2698                         min  = c->sent;
2699                         conn = c;
2700                 }
2701
2702                 if (hci_conn_num(hdev, type) == num)
2703                         break;
2704         }
2705
2706         rcu_read_unlock();
2707
2708         if (conn) {
2709                 int cnt, q;
2710
2711                 switch (conn->type) {
2712                 case ACL_LINK:
2713                         cnt = hdev->acl_cnt;
2714                         break;
2715                 case SCO_LINK:
2716                 case ESCO_LINK:
2717                         cnt = hdev->sco_cnt;
2718                         break;
2719                 case LE_LINK:
2720                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2721                         break;
2722                 default:
2723                         cnt = 0;
2724                         BT_ERR("Unknown link type");
2725                 }
2726
2727                 q = cnt / num;
2728                 *quote = q ? q : 1;
2729         } else
2730                 *quote = 0;
2731
2732         BT_DBG("conn %p quote %d", conn, *quote);
2733         return conn;
2734 }
2735
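/* Worked example: with three SCO connections that have sent 2, 5 and 7
 * frames, the connection with sent == 2 is picked, and with
 * hdev->sco_cnt == 9 the quote becomes 9 / 3 = 3 frames for this round.
 */
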
2736 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2737 {
2738         struct hci_conn_hash *h = &hdev->conn_hash;
2739         struct hci_conn *c;
2740
2741         BT_ERR("%s link tx timeout", hdev->name);
2742
2743         rcu_read_lock();
2744
2745         /* Kill stalled connections */
2746         list_for_each_entry_rcu(c, &h->list, list) {
2747                 if (c->type == type && c->sent) {
2748                         BT_ERR("%s killing stalled connection %pMR",
2749                                hdev->name, &c->dst);
2750                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2751                 }
2752         }
2753
2754         rcu_read_unlock();
2755 }
2756
2757 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2758                                       int *quote)
2759 {
2760         struct hci_conn_hash *h = &hdev->conn_hash;
2761         struct hci_chan *chan = NULL;
2762         unsigned int num = 0, min = ~0, cur_prio = 0;
2763         struct hci_conn *conn;
2764         int cnt, q, conn_num = 0;
2765
2766         BT_DBG("%s", hdev->name);
2767
2768         rcu_read_lock();
2769
2770         list_for_each_entry_rcu(conn, &h->list, list) {
2771                 struct hci_chan *tmp;
2772
2773                 if (conn->type != type)
2774                         continue;
2775
2776                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2777                         continue;
2778
2779                 conn_num++;
2780
2781                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2782                         struct sk_buff *skb;
2783
2784                         if (skb_queue_empty(&tmp->data_q))
2785                                 continue;
2786
2787                         skb = skb_peek(&tmp->data_q);
2788                         if (skb->priority < cur_prio)
2789                                 continue;
2790
2791                         if (skb->priority > cur_prio) {
2792                                 num = 0;
2793                                 min = ~0;
2794                                 cur_prio = skb->priority;
2795                         }
2796
2797                         num++;
2798
2799                         if (conn->sent < min) {
2800                                 min  = conn->sent;
2801                                 chan = tmp;
2802                         }
2803                 }
2804
2805                 if (hci_conn_num(hdev, type) == conn_num)
2806                         break;
2807         }
2808
2809         rcu_read_unlock();
2810
2811         if (!chan)
2812                 return NULL;
2813
2814         switch (chan->conn->type) {
2815         case ACL_LINK:
2816                 cnt = hdev->acl_cnt;
2817                 break;
2818         case AMP_LINK:
2819                 cnt = hdev->block_cnt;
2820                 break;
2821         case SCO_LINK:
2822         case ESCO_LINK:
2823                 cnt = hdev->sco_cnt;
2824                 break;
2825         case LE_LINK:
2826                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2827                 break;
2828         default:
2829                 cnt = 0;
2830                 BT_ERR("Unknown link type");
2831         }
2832
2833         q = cnt / num;
2834         *quote = q ? q : 1;
2835         BT_DBG("chan %p quote %d", chan, *quote);
2836         return chan;
2837 }
2838
2839 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2840 {
2841         struct hci_conn_hash *h = &hdev->conn_hash;
2842         struct hci_conn *conn;
2843         int num = 0;
2844
2845         BT_DBG("%s", hdev->name);
2846
2847         rcu_read_lock();
2848
2849         list_for_each_entry_rcu(conn, &h->list, list) {
2850                 struct hci_chan *chan;
2851
2852                 if (conn->type != type)
2853                         continue;
2854
2855                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2856                         continue;
2857
2858                 num++;
2859
2860                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2861                         struct sk_buff *skb;
2862
2863                         if (chan->sent) {
2864                                 chan->sent = 0;
2865                                 continue;
2866                         }
2867
2868                         if (skb_queue_empty(&chan->data_q))
2869                                 continue;
2870
2871                         skb = skb_peek(&chan->data_q);
2872                         if (skb->priority >= HCI_PRIO_MAX - 1)
2873                                 continue;
2874
2875                         skb->priority = HCI_PRIO_MAX - 1;
2876
2877                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2878                                skb->priority);
2879                 }
2880
2881                 if (hci_conn_num(hdev, type) == num)
2882                         break;
2883         }
2884
2885         rcu_read_unlock();
2886
2887 }
2888
2889 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2890 {
2891         /* Calculate count of blocks used by this packet */
2892         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2893 }
2894
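/* Worked example: HCI_ACL_HDR_SIZE is 4, so a 343 byte ACL packet on a
 * controller with 64 byte data blocks occupies
 * DIV_ROUND_UP(343 - 4, 64) = 6 blocks.
 */
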
2895 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2896 {
2897         if (!test_bit(HCI_RAW, &hdev->flags)) {
2898                 /* ACL tx timeout must be longer than maximum
2899                  * link supervision timeout (40.9 seconds) */
2900                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2901                                        HCI_ACL_TX_TIMEOUT))
2902                         hci_link_tx_to(hdev, ACL_LINK);
2903         }
2904 }
2905
2906 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2907 {
2908         unsigned int cnt = hdev->acl_cnt;
2909         struct hci_chan *chan;
2910         struct sk_buff *skb;
2911         int quote;
2912
2913         __check_timeout(hdev, cnt);
2914
2915         while (hdev->acl_cnt &&
2916                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2917                 u32 priority = (skb_peek(&chan->data_q))->priority;
2918                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2919                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2920                                skb->len, skb->priority);
2921
2922                         /* Stop if priority has changed */
2923                         if (skb->priority < priority)
2924                                 break;
2925
2926                         skb = skb_dequeue(&chan->data_q);
2927
2928                         hci_conn_enter_active_mode(chan->conn,
2929                                                    bt_cb(skb)->force_active);
2930
2931                         hci_send_frame(skb);
2932                         hdev->acl_last_tx = jiffies;
2933
2934                         hdev->acl_cnt--;
2935                         chan->sent++;
2936                         chan->conn->sent++;
2937                 }
2938         }
2939
2940         if (cnt != hdev->acl_cnt)
2941                 hci_prio_recalculate(hdev, ACL_LINK);
2942 }
2943
2944 static void hci_sched_acl_blk(struct hci_dev *hdev)
2945 {
2946         unsigned int cnt = hdev->block_cnt;
2947         struct hci_chan *chan;
2948         struct sk_buff *skb;
2949         int quote;
2950         u8 type;
2951
2952         __check_timeout(hdev, cnt);
2953
2954         BT_DBG("%s", hdev->name);
2955
2956         if (hdev->dev_type == HCI_AMP)
2957                 type = AMP_LINK;
2958         else
2959                 type = ACL_LINK;
2960
2961         while (hdev->block_cnt > 0 &&
2962                (chan = hci_chan_sent(hdev, type, &quote))) {
2963                 u32 priority = (skb_peek(&chan->data_q))->priority;
2964                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2965                         int blocks;
2966
2967                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2968                                skb->len, skb->priority);
2969
2970                         /* Stop if priority has changed */
2971                         if (skb->priority < priority)
2972                                 break;
2973
2974                         skb = skb_dequeue(&chan->data_q);
2975
2976                         blocks = __get_blocks(hdev, skb);
2977                         if (blocks > hdev->block_cnt)
2978                                 return;
2979
2980                         hci_conn_enter_active_mode(chan->conn,
2981                                                    bt_cb(skb)->force_active);
2982
2983                         hci_send_frame(skb);
2984                         hdev->acl_last_tx = jiffies;
2985
2986                         hdev->block_cnt -= blocks;
2987                         quote -= blocks;
2988
2989                         chan->sent += blocks;
2990                         chan->conn->sent += blocks;
2991                 }
2992         }
2993
2994         if (cnt != hdev->block_cnt)
2995                 hci_prio_recalculate(hdev, type);
2996 }
2997
2998 static void hci_sched_acl(struct hci_dev *hdev)
2999 {
3000         BT_DBG("%s", hdev->name);
3001
3002         /* Nothing to do if a BR/EDR controller has no ACL links */
3003         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3004                 return;
3005
3006         /* Nothing to do if an AMP controller has no AMP links */
3007         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3008                 return;
3009
3010         switch (hdev->flow_ctl_mode) {
3011         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3012                 hci_sched_acl_pkt(hdev);
3013                 break;
3014
3015         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3016                 hci_sched_acl_blk(hdev);
3017                 break;
3018         }
3019 }
3020
3021 /* Schedule SCO */
3022 static void hci_sched_sco(struct hci_dev *hdev)
3023 {
3024         struct hci_conn *conn;
3025         struct sk_buff *skb;
3026         int quote;
3027
3028         BT_DBG("%s", hdev->name);
3029
3030         if (!hci_conn_num(hdev, SCO_LINK))
3031                 return;
3032
3033         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3034                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3035                         BT_DBG("skb %p len %d", skb, skb->len);
3036                         hci_send_frame(skb);
3037
3038                         conn->sent++;
3039                         if (conn->sent == ~0)
3040                                 conn->sent = 0;
3041                 }
3042         }
3043 }
3044
3045 static void hci_sched_esco(struct hci_dev *hdev)
3046 {
3047         struct hci_conn *conn;
3048         struct sk_buff *skb;
3049         int quote;
3050
3051         BT_DBG("%s", hdev->name);
3052
3053         if (!hci_conn_num(hdev, ESCO_LINK))
3054                 return;
3055
3056         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3057                                                      &quote))) {
3058                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3059                         BT_DBG("skb %p len %d", skb, skb->len);
3060                         hci_send_frame(skb);
3061
3062                         conn->sent++;
3063                         if (conn->sent == ~0)
3064                                 conn->sent = 0;
3065                 }
3066         }
3067 }
3068
3069 static void hci_sched_le(struct hci_dev *hdev)
3070 {
3071         struct hci_chan *chan;
3072         struct sk_buff *skb;
3073         int quote, cnt, tmp;
3074
3075         BT_DBG("%s", hdev->name);
3076
3077         if (!hci_conn_num(hdev, LE_LINK))
3078                 return;
3079
3080         if (!test_bit(HCI_RAW, &hdev->flags)) {
3081                 /* LE tx timeout must be longer than maximum
3082                  * link supervision timeout (40.9 seconds) */
3083                 if (!hdev->le_cnt && hdev->le_pkts &&
3084                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3085                         hci_link_tx_to(hdev, LE_LINK);
3086         }
3087
3088         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3089         tmp = cnt;
3090         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3091                 u32 priority = (skb_peek(&chan->data_q))->priority;
3092                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3093                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3094                                skb->len, skb->priority);
3095
3096                         /* Stop if priority has changed */
3097                         if (skb->priority < priority)
3098                                 break;
3099
3100                         skb = skb_dequeue(&chan->data_q);
3101
3102                         hci_send_frame(skb);
3103                         hdev->le_last_tx = jiffies;
3104
3105                         cnt--;
3106                         chan->sent++;
3107                         chan->conn->sent++;
3108                 }
3109         }
3110
3111         if (hdev->le_pkts)
3112                 hdev->le_cnt = cnt;
3113         else
3114                 hdev->acl_cnt = cnt;
3115
3116         if (cnt != tmp)
3117                 hci_prio_recalculate(hdev, LE_LINK);
3118 }
3119
3120 static void hci_tx_work(struct work_struct *work)
3121 {
3122         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3123         struct sk_buff *skb;
3124
3125         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3126                hdev->sco_cnt, hdev->le_cnt);
3127
3128         /* Schedule queues and send stuff to HCI driver */
3129
3130         hci_sched_acl(hdev);
3131
3132         hci_sched_sco(hdev);
3133
3134         hci_sched_esco(hdev);
3135
3136         hci_sched_le(hdev);
3137
3138         /* Send next queued raw (unknown type) packet */
3139         while ((skb = skb_dequeue(&hdev->raw_q)))
3140                 hci_send_frame(skb);
3141 }
3142
3143 /* ----- HCI RX task (incoming data processing) ----- */
3144
3145 /* ACL data packet */
3146 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3147 {
3148         struct hci_acl_hdr *hdr = (void *) skb->data;
3149         struct hci_conn *conn;
3150         __u16 handle, flags;
3151
3152         skb_pull(skb, HCI_ACL_HDR_SIZE);
3153
3154         handle = __le16_to_cpu(hdr->handle);
3155         flags  = hci_flags(handle);
3156         handle = hci_handle(handle);
3157
3158         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3159                handle, flags);
3160
3161         hdev->stat.acl_rx++;
3162
3163         hci_dev_lock(hdev);
3164         conn = hci_conn_hash_lookup_handle(hdev, handle);
3165         hci_dev_unlock(hdev);
3166
3167         if (conn) {
3168                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3169
3170                 /* Send to upper protocol */
3171                 l2cap_recv_acldata(conn, skb, flags);
3172                 return;
3173         } else {
3174                 BT_ERR("%s ACL packet for unknown connection handle %d",
3175                        hdev->name, handle);
3176         }
3177
3178         kfree_skb(skb);
3179 }
3180
3181 /* SCO data packet */
3182 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3183 {
3184         struct hci_sco_hdr *hdr = (void *) skb->data;
3185         struct hci_conn *conn;
3186         __u16 handle;
3187
3188         skb_pull(skb, HCI_SCO_HDR_SIZE);
3189
3190         handle = __le16_to_cpu(hdr->handle);
3191
3192         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3193
3194         hdev->stat.sco_rx++;
3195
3196         hci_dev_lock(hdev);
3197         conn = hci_conn_hash_lookup_handle(hdev, handle);
3198         hci_dev_unlock(hdev);
3199
3200         if (conn) {
3201                 /* Send to upper protocol */
3202                 sco_recv_scodata(conn, skb);
3203                 return;
3204         } else {
3205                 BT_ERR("%s SCO packet for unknown connection handle %d",
3206                        hdev->name, handle);
3207         }
3208
3209         kfree_skb(skb);
3210 }
3211
3212 static bool hci_req_is_complete(struct hci_dev *hdev)
3213 {
3214         struct sk_buff *skb;
3215
3216         skb = skb_peek(&hdev->cmd_q);
3217         if (!skb)
3218                 return true;
3219
3220         return bt_cb(skb)->req.start;
3221 }
3222
3223 static void hci_resend_last(struct hci_dev *hdev)
3224 {
3225         struct hci_command_hdr *sent;
3226         struct sk_buff *skb;
3227         u16 opcode;
3228
3229         if (!hdev->sent_cmd)
3230                 return;
3231
3232         sent = (void *) hdev->sent_cmd->data;
3233         opcode = __le16_to_cpu(sent->opcode);
3234         if (opcode == HCI_OP_RESET)
3235                 return;
3236
3237         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3238         if (!skb)
3239                 return;
3240
3241         skb_queue_head(&hdev->cmd_q, skb);
3242         queue_work(hdev->workqueue, &hdev->cmd_work);
3243 }
3244
3245 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3246 {
3247         hci_req_complete_t req_complete = NULL;
3248         struct sk_buff *skb;
3249         unsigned long flags;
3250
3251         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3252
3253         /* If the completed command doesn't match the last one that was
3254          * sent we need to do special handling of it.
3255          */
3256         if (!hci_sent_cmd_data(hdev, opcode)) {
3257                 /* Some CSR based controllers generate a spontaneous
3258                  * reset complete event during init and any pending
3259                  * command will never be completed. In such a case we
3260                  * need to resend whatever was the last sent
3261                  * command.
3262                  */
3263                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3264                         hci_resend_last(hdev);
3265
3266                 return;
3267         }
3268
3269         /* If the command succeeded and there's still more commands in
3270          * this request the request is not yet complete.
3271          */
3272         if (!status && !hci_req_is_complete(hdev))
3273                 return;
3274
3275         /* If this was the last command in a request the complete
3276          * callback would be found in hdev->sent_cmd instead of the
3277          * command queue (hdev->cmd_q).
3278          */
3279         if (hdev->sent_cmd) {
3280                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3281                 if (req_complete)
3282                         goto call_complete;
3283         }
3284
3285         /* Remove all pending commands belonging to this request */
3286         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3287         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3288                 if (bt_cb(skb)->req.start) {
3289                         __skb_queue_head(&hdev->cmd_q, skb);
3290                         break;
3291                 }
3292
3293                 req_complete = bt_cb(skb)->req.complete;
3294                 kfree_skb(skb);
3295         }
3296         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3297
3298 call_complete:
3299         if (req_complete)
3300                 req_complete(hdev, status);
3301 }
3302
3303 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3304 {
3305         hci_req_complete_t req_complete = NULL;
3306
3307         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3308
3309         if (status) {
3310                 hci_req_cmd_complete(hdev, opcode, status);
3311                 return;
3312         }
3313
3314         /* No need to handle success status if there are more commands */
3315         if (!hci_req_is_complete(hdev))
3316                 return;
3317
3318         if (hdev->sent_cmd)
3319                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3320
3321         /* If the request doesn't have a complete callback or there
3322          * are other commands/requests in the hdev queue we consider
3323          * this request as completed.
3324          */
3325         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3326                 hci_req_cmd_complete(hdev, opcode, status);
3327 }
3328
3329 static void hci_rx_work(struct work_struct *work)
3330 {
3331         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3332         struct sk_buff *skb;
3333
3334         BT_DBG("%s", hdev->name);
3335
3336         while ((skb = skb_dequeue(&hdev->rx_q))) {
3337                 /* Send copy to monitor */
3338                 hci_send_to_monitor(hdev, skb);
3339
3340                 if (atomic_read(&hdev->promisc)) {
3341                         /* Send copy to the sockets */
3342                         hci_send_to_sock(hdev, skb);
3343                 }
3344
3345                 if (test_bit(HCI_RAW, &hdev->flags)) {
3346                         kfree_skb(skb);
3347                         continue;
3348                 }
3349
3350                 if (test_bit(HCI_INIT, &hdev->flags)) {
3351                         /* Don't process data packets in this state. */
3352                         switch (bt_cb(skb)->pkt_type) {
3353                         case HCI_ACLDATA_PKT:
3354                         case HCI_SCODATA_PKT:
3355                                 kfree_skb(skb);
3356                                 continue;
3357                         }
3358                 }
3359
3360                 /* Process frame */
3361                 switch (bt_cb(skb)->pkt_type) {
3362                 case HCI_EVENT_PKT:
3363                         BT_DBG("%s Event packet", hdev->name);
3364                         hci_event_packet(hdev, skb);
3365                         break;
3366
3367                 case HCI_ACLDATA_PKT:
3368                         BT_DBG("%s ACL data packet", hdev->name);
3369                         hci_acldata_packet(hdev, skb);
3370                         break;
3371
3372                 case HCI_SCODATA_PKT:
3373                         BT_DBG("%s SCO data packet", hdev->name);
3374                         hci_scodata_packet(hdev, skb);
3375                         break;
3376
3377                 default:
3378                         kfree_skb(skb);
3379                         break;
3380                 }
3381         }
3382 }
3383
3384 static void hci_cmd_work(struct work_struct *work)
3385 {
3386         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3387         struct sk_buff *skb;
3388
3389         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3390                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3391
3392         /* Send queued commands */
3393         if (atomic_read(&hdev->cmd_cnt)) {
3394                 skb = skb_dequeue(&hdev->cmd_q);
3395                 if (!skb)
3396                         return;
3397
3398                 kfree_skb(hdev->sent_cmd);
3399
3400                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3401                 if (hdev->sent_cmd) {
3402                         atomic_dec(&hdev->cmd_cnt);
3403                         hci_send_frame(skb);
3404                         if (test_bit(HCI_RESET, &hdev->flags))
3405                                 del_timer(&hdev->cmd_timer);
3406                         else
3407                                 mod_timer(&hdev->cmd_timer,
3408                                           jiffies + HCI_CMD_TIMEOUT);
3409                 } else {
3410                         skb_queue_head(&hdev->cmd_q, skb);
3411                         queue_work(hdev->workqueue, &hdev->cmd_work);
3412                 }
3413         }
3414 }
3415
3416 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3417 {
3418         /* General inquiry access code (GIAC) */
3419         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3420         struct hci_cp_inquiry cp;
3421
3422         BT_DBG("%s", hdev->name);
3423
3424         if (test_bit(HCI_INQUIRY, &hdev->flags))
3425                 return -EINPROGRESS;
3426
3427         inquiry_cache_flush(hdev);
3428
3429         memset(&cp, 0, sizeof(cp));
3430         memcpy(&cp.lap, lap, sizeof(cp.lap));
3431         cp.length  = length;
3432
3433         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3434 }
3435
3436 int hci_cancel_inquiry(struct hci_dev *hdev)
3437 {
3438         BT_DBG("%s", hdev->name);
3439
3440         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3441                 return -EALREADY;
3442
3443         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3444 }
3445
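/* Illustrative sketch: running a general inquiry.  The length is in
 * units of 1.28 s, so 0x08 scans for roughly 10.24 seconds; results
 * arrive as Inquiry Result events.  "user_aborted" is hypothetical.
 */
#if 0
        int err;

        err = hci_do_inquiry(hdev, 0x08);
        if (!err && user_aborted)
                err = hci_cancel_inquiry(hdev);
#endif
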
3446 u8 bdaddr_to_le(u8 bdaddr_type)
3447 {
3448         switch (bdaddr_type) {
3449         case BDADDR_LE_PUBLIC:
3450                 return ADDR_LE_DEV_PUBLIC;
3451
3452         default:
3453                 /* Fallback to LE Random address type */
3454                 return ADDR_LE_DEV_RANDOM;
3455         }
3456 }