Bluetooth: Make hci_req_add return void
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

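/* Illustrative sketch (not part of this file): a request builder passed
 * to hci_req_sync() queues one or more commands with hci_req_add(), and
 * the framework runs them and waits for the last one to complete, e.g.:
 *
 *	static void example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 mode = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 *
 * The builders below all follow this pattern.
 */
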
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

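/* Note: Write_Connection_Accept_Timeout takes its value in 0.625 ms
 * baseband slots, so the parameter used above works out to exactly
 * 0x7d00 * 0.625 ms = 32000 * 0.625 ms = 20 seconds.
 */
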
static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

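/* The values returned above are the Write_Inquiry_Mode parameters:
 * 0x00 = standard inquiry results, 0x01 = inquiry results with RSSI,
 * 0x02 = inquiry results with RSSI or extended inquiry results. Since
 * the manufacturer/revision checks are only reached when the RSSI
 * feature bit is absent, they effectively whitelist controllers that
 * support RSSI mode without advertising it in their LMP features.
 */
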
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

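/* For reference: the event mask is a little-endian 64-bit bitfield in
 * which, for the events set above, bit (event_code - 1) enables the
 * event; byte = bit / 8 and bit-in-byte = bit % 8 give the constants
 * used. For example:
 *
 *	Inquiry Result with RSSI (event 0x22) -> bit 33 -> events[4] |= 0x02
 */
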
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode BR/EDR, single-mode LE and
         * dual-mode BR/EDR/LE type controllers. AMP controllers only
         * need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

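/* Typical usage sketch: every successful hci_dev_get() must be balanced
 * by hci_dev_put() once the caller is done with the device, as the ioctl
 * helpers later in this file do:
 *
 *	hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
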
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

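/* The resolve list is thus kept sorted by signal strength: RSSI values
 * are negative dBm figures, so a smaller absolute value means a stronger
 * signal, and re-inserting entries in ascending abs(rssi) order means
 * name resolution works through the closest devices first.
 */
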
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

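/* Illustrative userspace sketch (assumes a raw HCI socket fd "sock" and
 * the BlueZ headers): the HCIINQUIRY ioctl that lands here expects a
 * hci_inquiry_req immediately followed by room for the results, e.g.:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	// 8 * 1.28 s inquiry
 *	buf.ir.num_rsp = 8;
 *	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3);	// GIAC
 *
 *	if (ioctl(sock, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */
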
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

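/* Each advertising data element built above follows the standard
 * [length][AD type][data...] layout, where length counts the type byte
 * plus the data. For example, an LE-only controller acting as a
 * general-discoverable peripheral emits the flags element:
 *
 *	02 01 06	(len 2, EIR_FLAGS, LE_AD_GENERAL | LE_AD_NO_BREDR)
 */
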
int hci_update_ad(struct hci_dev *hdev)
{
        struct hci_cp_le_set_adv_data cp;
        u8 len;
        int err;

        hci_dev_lock(hdev);

        if (!lmp_le_capable(hdev)) {
                err = -EINVAL;
                goto unlock;
        }

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0) {
                err = 0;
                goto unlock;
        }

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;
        err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
        hci_dev_unlock(hdev);

        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set.
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

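/* Illustrative userspace sketch: these ioctls take a hci_dev_req, e.g.
 * enabling page and inquiry scan on hci0 (assumes a raw HCI socket fd
 * "sock"):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	if (ioctl(sock, HCISETSCAN, &dr) < 0)
 *		perror("HCISETSCAN");
 */
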
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently.
         */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key.
         */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

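/* Note: the cmd_timer firing this handler is armed when a command is
 * handed to the driver and deleted again once the controller answers;
 * if it ever expires, a command went unanswered, so resetting cmd_cnt
 * to 1 and requeueing cmd_work lets command processing make progress
 * again instead of stalling forever.
 */
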
1719 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1720                                           bdaddr_t *bdaddr)
1721 {
1722         struct oob_data *data;
1723
1724         list_for_each_entry(data, &hdev->remote_oob_data, list)
1725                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1726                         return data;
1727
1728         return NULL;
1729 }
1730
1731 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1732 {
1733         struct oob_data *data;
1734
1735         data = hci_find_remote_oob_data(hdev, bdaddr);
1736         if (!data)
1737                 return -ENOENT;
1738
1739         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1740
1741         list_del(&data->list);
1742         kfree(data);
1743
1744         return 0;
1745 }
1746
1747 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1748 {
1749         struct oob_data *data, *n;
1750
1751         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1752                 list_del(&data->list);
1753                 kfree(data);
1754         }
1755
1756         return 0;
1757 }
1758
1759 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1760                             u8 *randomizer)
1761 {
1762         struct oob_data *data;
1763
1764         data = hci_find_remote_oob_data(hdev, bdaddr);
1765
1766         if (!data) {
1767                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1768                 if (!data)
1769                         return -ENOMEM;
1770
1771                 bacpy(&data->bdaddr, bdaddr);
1772                 list_add(&data->list, &hdev->remote_oob_data);
1773         }
1774
1775         memcpy(data->hash, hash, sizeof(data->hash));
1776         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1777
1778         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1779
1780         return 0;
1781 }
1782
1783 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1784 {
1785         struct bdaddr_list *b;
1786
1787         list_for_each_entry(b, &hdev->blacklist, list)
1788                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1789                         return b;
1790
1791         return NULL;
1792 }
1793
1794 int hci_blacklist_clear(struct hci_dev *hdev)
1795 {
1796         struct list_head *p, *n;
1797
1798         list_for_each_safe(p, n, &hdev->blacklist) {
1799                 struct bdaddr_list *b;
1800
1801                 b = list_entry(p, struct bdaddr_list, list);
1802
1803                 list_del(p);
1804                 kfree(b);
1805         }
1806
1807         return 0;
1808 }
1809
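/* Block a remote device. BDADDR_ANY is rejected with -EBADF and a
 * duplicate entry with -EEXIST. Illustrative call (the address type
 * is an example only):
 *
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 */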
1810 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1811 {
1812         struct bdaddr_list *entry;
1813
1814         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1815                 return -EBADF;
1816
1817         if (hci_blacklist_lookup(hdev, bdaddr))
1818                 return -EEXIST;
1819
1820         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1821         if (!entry)
1822                 return -ENOMEM;
1823
1824         bacpy(&entry->bdaddr, bdaddr);
1825
1826         list_add(&entry->list, &hdev->blacklist);
1827
1828         return mgmt_device_blocked(hdev, bdaddr, type);
1829 }
1830
1831 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1832 {
1833         struct bdaddr_list *entry;
1834
1835         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1836                 return hci_blacklist_clear(hdev);
1837
1838         entry = hci_blacklist_lookup(hdev, bdaddr);
1839         if (!entry)
1840                 return -ENOENT;
1841
1842         list_del(&entry->list);
1843         kfree(entry);
1844
1845         return mgmt_device_unblocked(hdev, bdaddr, type);
1846 }
1847
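/* The two request builders below queue the HCI commands for an LE
 * scan: first the scan parameters, then scan enable. Both are run as
 * synchronous requests from hci_do_le_scan().
 */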
1848 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1849 {
1850         struct le_scan_params *param = (struct le_scan_params *) opt;
1851         struct hci_cp_le_set_scan_param cp;
1852
1853         memset(&cp, 0, sizeof(cp));
1854         cp.type = param->type;
1855         cp.interval = cpu_to_le16(param->interval);
1856         cp.window = cpu_to_le16(param->window);
1857
1858         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1859 }
1860
1861 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1862 {
1863         struct hci_cp_le_set_scan_enable cp;
1864
1865         memset(&cp, 0, sizeof(cp));
1866         cp.enable = 1;
1867         cp.filter_dup = 1;
1868
1869         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1870 }
1871
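/* Start an LE scan and arm delayed work to disable it again after
 * 'timeout' milliseconds. Returns -EINPROGRESS if a scan is already
 * active. Each of the two requests is run synchronously with a three
 * second timeout.
 */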
1872 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1873                           u16 window, int timeout)
1874 {
1875         long timeo = msecs_to_jiffies(3000);
1876         struct le_scan_params param;
1877         int err;
1878
1879         BT_DBG("%s", hdev->name);
1880
1881         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1882                 return -EINPROGRESS;
1883
1884         param.type = type;
1885         param.interval = interval;
1886         param.window = window;
1887
1888         hci_req_lock(hdev);
1889
1890         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1891                              timeo);
1892         if (!err)
1893                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1894
1895         hci_req_unlock(hdev);
1896
1897         if (err < 0)
1898                 return err;
1899
1900         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1901                            msecs_to_jiffies(timeout));
1902
1903         return 0;
1904 }
1905
1906 int hci_cancel_le_scan(struct hci_dev *hdev)
1907 {
1908         BT_DBG("%s", hdev->name);
1909
1910         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1911                 return -EALREADY;
1912
1913         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1914                 struct hci_cp_le_set_scan_enable cp;
1915
1916                 /* Send HCI command to disable LE Scan */
1917                 memset(&cp, 0, sizeof(cp));
1918                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1919         }
1920
1921         return 0;
1922 }
1923
1924 static void le_scan_disable_work(struct work_struct *work)
1925 {
1926         struct hci_dev *hdev = container_of(work, struct hci_dev,
1927                                             le_scan_disable.work);
1928         struct hci_cp_le_set_scan_enable cp;
1929
1930         BT_DBG("%s", hdev->name);
1931
1932         memset(&cp, 0, sizeof(cp));
1933
1934         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1935 }
1936
1937 static void le_scan_work(struct work_struct *work)
1938 {
1939         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1940         struct le_scan_params *param = &hdev->le_scan_params;
1941
1942         BT_DBG("%s", hdev->name);
1943
1944         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1945                        param->timeout);
1946 }
1947
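/* Entry point for starting an LE scan; the actual scan setup runs
 * from le_scan_work() on system_long_wq. Illustrative call (parameter
 * values are examples only):
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0010, 0x0010, 10000);
 */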
1948 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1949                 int timeout)
1950 {
1951         struct le_scan_params *param = &hdev->le_scan_params;
1952
1953         BT_DBG("%s", hdev->name);
1954
1955         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1956                 return -ENOTSUPP;
1957
1958         if (work_busy(&hdev->le_scan))
1959                 return -EINPROGRESS;
1960
1961         param->type = type;
1962         param->interval = interval;
1963         param->window = window;
1964         param->timeout = timeout;
1965
1966         queue_work(system_long_wq, &hdev->le_scan);
1967
1968         return 0;
1969 }
1970
1971 /* Alloc HCI device */
1972 struct hci_dev *hci_alloc_dev(void)
1973 {
1974         struct hci_dev *hdev;
1975
1976         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1977         if (!hdev)
1978                 return NULL;
1979
1980         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1981         hdev->esco_type = (ESCO_HV1);
1982         hdev->link_mode = (HCI_LM_ACCEPT);
1983         hdev->io_capability = 0x03; /* No Input No Output */
1984         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1985         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1986
1987         hdev->sniff_max_interval = 800;
1988         hdev->sniff_min_interval = 80;
1989
1990         mutex_init(&hdev->lock);
1991         mutex_init(&hdev->req_lock);
1992
1993         INIT_LIST_HEAD(&hdev->mgmt_pending);
1994         INIT_LIST_HEAD(&hdev->blacklist);
1995         INIT_LIST_HEAD(&hdev->uuids);
1996         INIT_LIST_HEAD(&hdev->link_keys);
1997         INIT_LIST_HEAD(&hdev->long_term_keys);
1998         INIT_LIST_HEAD(&hdev->remote_oob_data);
1999         INIT_LIST_HEAD(&hdev->conn_hash.list);
2000
2001         INIT_WORK(&hdev->rx_work, hci_rx_work);
2002         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2003         INIT_WORK(&hdev->tx_work, hci_tx_work);
2004         INIT_WORK(&hdev->power_on, hci_power_on);
2005         INIT_WORK(&hdev->le_scan, le_scan_work);
2006
2007         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2008         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2009         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2010
2011         skb_queue_head_init(&hdev->driver_init);
2012         skb_queue_head_init(&hdev->rx_q);
2013         skb_queue_head_init(&hdev->cmd_q);
2014         skb_queue_head_init(&hdev->raw_q);
2015
2016         init_waitqueue_head(&hdev->req_wait_q);
2017
2018         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2019
2020         hci_init_sysfs(hdev);
2021         discovery_init(hdev);
2022
2023         return hdev;
2024 }
2025 EXPORT_SYMBOL(hci_alloc_dev);
2026
2027 /* Free HCI device */
2028 void hci_free_dev(struct hci_dev *hdev)
2029 {
2030         skb_queue_purge(&hdev->driver_init);
2031
2032         /* will free via device release */
2033         put_device(&hdev->dev);
2034 }
2035 EXPORT_SYMBOL(hci_free_dev);
2036
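/* Illustrative driver usage of the alloc/register/unregister/free
 * lifecycle (callback names are examples, error handling omitted):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */
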
2037 /* Register HCI device */
2038 int hci_register_dev(struct hci_dev *hdev)
2039 {
2040         int id, error;
2041
2042         if (!hdev->open || !hdev->close)
2043                 return -EINVAL;
2044
2045         /* Do not allow HCI_AMP devices to register at index 0,
2046          * so the index can be used as the AMP controller ID.
2047          */
2048         switch (hdev->dev_type) {
2049         case HCI_BREDR:
2050                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2051                 break;
2052         case HCI_AMP:
2053                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2054                 break;
2055         default:
2056                 return -EINVAL;
2057         }
2058
2059         if (id < 0)
2060                 return id;
2061
2062         sprintf(hdev->name, "hci%d", id);
2063         hdev->id = id;
2064
2065         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2066
2067         write_lock(&hci_dev_list_lock);
2068         list_add(&hdev->list, &hci_dev_list);
2069         write_unlock(&hci_dev_list_lock);
2070
2071         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2072                                           WQ_MEM_RECLAIM, 1);
2073         if (!hdev->workqueue) {
2074                 error = -ENOMEM;
2075                 goto err;
2076         }
2077
2078         hdev->req_workqueue = alloc_workqueue(hdev->name,
2079                                               WQ_HIGHPRI | WQ_UNBOUND |
2080                                               WQ_MEM_RECLAIM, 1);
2081         if (!hdev->req_workqueue) {
2082                 destroy_workqueue(hdev->workqueue);
2083                 error = -ENOMEM;
2084                 goto err;
2085         }
2086
2087         error = hci_add_sysfs(hdev);
2088         if (error < 0)
2089                 goto err_wqueue;
2090
2091         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2092                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2093                                     hdev);
2094         if (hdev->rfkill) {
2095                 if (rfkill_register(hdev->rfkill) < 0) {
2096                         rfkill_destroy(hdev->rfkill);
2097                         hdev->rfkill = NULL;
2098                 }
2099         }
2100
2101         set_bit(HCI_SETUP, &hdev->dev_flags);
2102
2103         if (hdev->dev_type != HCI_AMP)
2104                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2105
2106         hci_notify(hdev, HCI_DEV_REG);
2107         hci_dev_hold(hdev);
2108
2109         queue_work(hdev->req_workqueue, &hdev->power_on);
2110
2111         return id;
2112
2113 err_wqueue:
2114         destroy_workqueue(hdev->workqueue);
2115         destroy_workqueue(hdev->req_workqueue);
2116 err:
2117         ida_simple_remove(&hci_index_ida, hdev->id);
2118         write_lock(&hci_dev_list_lock);
2119         list_del(&hdev->list);
2120         write_unlock(&hci_dev_list_lock);
2121
2122         return error;
2123 }
2124 EXPORT_SYMBOL(hci_register_dev);
2125
2126 /* Unregister HCI device */
2127 void hci_unregister_dev(struct hci_dev *hdev)
2128 {
2129         int i, id;
2130
2131         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2132
2133         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2134
2135         id = hdev->id;
2136
2137         write_lock(&hci_dev_list_lock);
2138         list_del(&hdev->list);
2139         write_unlock(&hci_dev_list_lock);
2140
2141         hci_dev_do_close(hdev);
2142
2143         for (i = 0; i < NUM_REASSEMBLY; i++)
2144                 kfree_skb(hdev->reassembly[i]);
2145
2146         cancel_work_sync(&hdev->power_on);
2147
2148         if (!test_bit(HCI_INIT, &hdev->flags) &&
2149             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2150                 hci_dev_lock(hdev);
2151                 mgmt_index_removed(hdev);
2152                 hci_dev_unlock(hdev);
2153         }
2154
2155         /* mgmt_index_removed should take care of emptying the
2156          * pending list */
2157         BUG_ON(!list_empty(&hdev->mgmt_pending));
2158
2159         hci_notify(hdev, HCI_DEV_UNREG);
2160
2161         if (hdev->rfkill) {
2162                 rfkill_unregister(hdev->rfkill);
2163                 rfkill_destroy(hdev->rfkill);
2164         }
2165
2166         hci_del_sysfs(hdev);
2167
2168         destroy_workqueue(hdev->workqueue);
2169         destroy_workqueue(hdev->req_workqueue);
2170
2171         hci_dev_lock(hdev);
2172         hci_blacklist_clear(hdev);
2173         hci_uuids_clear(hdev);
2174         hci_link_keys_clear(hdev);
2175         hci_smp_ltks_clear(hdev);
2176         hci_remote_oob_data_clear(hdev);
2177         hci_dev_unlock(hdev);
2178
2179         hci_dev_put(hdev);
2180
2181         ida_simple_remove(&hci_index_ida, id);
2182 }
2183 EXPORT_SYMBOL(hci_unregister_dev);
2184
2185 /* Suspend HCI device */
2186 int hci_suspend_dev(struct hci_dev *hdev)
2187 {
2188         hci_notify(hdev, HCI_DEV_SUSPEND);
2189         return 0;
2190 }
2191 EXPORT_SYMBOL(hci_suspend_dev);
2192
2193 /* Resume HCI device */
2194 int hci_resume_dev(struct hci_dev *hdev)
2195 {
2196         hci_notify(hdev, HCI_DEV_RESUME);
2197         return 0;
2198 }
2199 EXPORT_SYMBOL(hci_resume_dev);
2200
2201 /* Receive frame from HCI drivers */
2202 int hci_recv_frame(struct sk_buff *skb)
2203 {
2204         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2205         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2206                       !test_bit(HCI_INIT, &hdev->flags))) {
2207                 kfree_skb(skb);
2208                 return -ENXIO;
2209         }
2210
2211         /* Incoming skb */
2212         bt_cb(skb)->incoming = 1;
2213
2214         /* Time stamp */
2215         __net_timestamp(skb);
2216
2217         skb_queue_tail(&hdev->rx_q, skb);
2218         queue_work(hdev->workqueue, &hdev->rx_work);
2219
2220         return 0;
2221 }
2222 EXPORT_SYMBOL(hci_recv_frame);
2223
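/* Incrementally reassemble one HCI frame from a byte stream. Returns
 * the number of bytes left unconsumed, or a negative error. Partial
 * state is kept in hdev->reassembly[index] across calls; a completed
 * frame is handed to hci_recv_frame().
 */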
2224 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2225                           int count, __u8 index)
2226 {
2227         int len = 0;
2228         int hlen = 0;
2229         int remain = count;
2230         struct sk_buff *skb;
2231         struct bt_skb_cb *scb;
2232
2233         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2234             index >= NUM_REASSEMBLY)
2235                 return -EILSEQ;
2236
2237         skb = hdev->reassembly[index];
2238
2239         if (!skb) {
2240                 switch (type) {
2241                 case HCI_ACLDATA_PKT:
2242                         len = HCI_MAX_FRAME_SIZE;
2243                         hlen = HCI_ACL_HDR_SIZE;
2244                         break;
2245                 case HCI_EVENT_PKT:
2246                         len = HCI_MAX_EVENT_SIZE;
2247                         hlen = HCI_EVENT_HDR_SIZE;
2248                         break;
2249                 case HCI_SCODATA_PKT:
2250                         len = HCI_MAX_SCO_SIZE;
2251                         hlen = HCI_SCO_HDR_SIZE;
2252                         break;
2253                 }
2254
2255                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2256                 if (!skb)
2257                         return -ENOMEM;
2258
2259                 scb = (void *) skb->cb;
2260                 scb->expect = hlen;
2261                 scb->pkt_type = type;
2262
2263                 skb->dev = (void *) hdev;
2264                 hdev->reassembly[index] = skb;
2265         }
2266
2267         while (count) {
2268                 scb = (void *) skb->cb;
2269                 len = min_t(uint, scb->expect, count);
2270
2271                 memcpy(skb_put(skb, len), data, len);
2272
2273                 count -= len;
2274                 data += len;
2275                 scb->expect -= len;
2276                 remain = count;
2277
2278                 switch (type) {
2279                 case HCI_EVENT_PKT:
2280                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2281                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2282                                 scb->expect = h->plen;
2283
2284                                 if (skb_tailroom(skb) < scb->expect) {
2285                                         kfree_skb(skb);
2286                                         hdev->reassembly[index] = NULL;
2287                                         return -ENOMEM;
2288                                 }
2289                         }
2290                         break;
2291
2292                 case HCI_ACLDATA_PKT:
2293                         if (skb->len == HCI_ACL_HDR_SIZE) {
2294                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2295                                 scb->expect = __le16_to_cpu(h->dlen);
2296
2297                                 if (skb_tailroom(skb) < scb->expect) {
2298                                         kfree_skb(skb);
2299                                         hdev->reassembly[index] = NULL;
2300                                         return -ENOMEM;
2301                                 }
2302                         }
2303                         break;
2304
2305                 case HCI_SCODATA_PKT:
2306                         if (skb->len == HCI_SCO_HDR_SIZE) {
2307                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2308                                 scb->expect = h->dlen;
2309
2310                                 if (skb_tailroom(skb) < scb->expect) {
2311                                         kfree_skb(skb);
2312                                         hdev->reassembly[index] = NULL;
2313                                         return -ENOMEM;
2314                                 }
2315                         }
2316                         break;
2317                 }
2318
2319                 if (scb->expect == 0) {
2320                         /* Complete frame */
2321
2322                         bt_cb(skb)->pkt_type = type;
2323                         hci_recv_frame(skb);
2324
2325                         hdev->reassembly[index] = NULL;
2326                         return remain;
2327                 }
2328         }
2329
2330         return remain;
2331 }
2332
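/* Feed received bytes of a known packet type to the reassembler.
 * Illustrative driver call (buf and len are whatever the transport
 * delivered):
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 */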
2333 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2334 {
2335         int rem = 0;
2336
2337         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2338                 return -EILSEQ;
2339
2340         while (count) {
2341                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2342                 if (rem < 0)
2343                         return rem;
2344
2345                 data += (count - rem);
2346                 count = rem;
2347         }
2348
2349         return rem;
2350 }
2351 EXPORT_SYMBOL(hci_recv_fragment);
2352
2353 #define STREAM_REASSEMBLY 0
2354
2355 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2356 {
2357         int type;
2358         int rem = 0;
2359
2360         while (count) {
2361                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2362
2363                 if (!skb) {
2364                         struct { char type; } *pkt;
2365
2366                         /* Start of the frame */
2367                         pkt = data;
2368                         type = pkt->type;
2369
2370                         data++;
2371                         count--;
2372                 } else
2373                         type = bt_cb(skb)->pkt_type;
2374
2375                 rem = hci_reassembly(hdev, type, data, count,
2376                                      STREAM_REASSEMBLY);
2377                 if (rem < 0)
2378                         return rem;
2379
2380                 data += (count - rem);
2381                 count = rem;
2382         }
2383
2384         return rem;
2385 }
2386 EXPORT_SYMBOL(hci_recv_stream_fragment);
2387
2388 /* ---- Interface to upper protocols ---- */
2389
2390 int hci_register_cb(struct hci_cb *cb)
2391 {
2392         BT_DBG("%p name %s", cb, cb->name);
2393
2394         write_lock(&hci_cb_list_lock);
2395         list_add(&cb->list, &hci_cb_list);
2396         write_unlock(&hci_cb_list_lock);
2397
2398         return 0;
2399 }
2400 EXPORT_SYMBOL(hci_register_cb);
2401
2402 int hci_unregister_cb(struct hci_cb *cb)
2403 {
2404         BT_DBG("%p name %s", cb, cb->name);
2405
2406         write_lock(&hci_cb_list_lock);
2407         list_del(&cb->list);
2408         write_unlock(&hci_cb_list_lock);
2409
2410         return 0;
2411 }
2412 EXPORT_SYMBOL(hci_unregister_cb);
2413
2414 static int hci_send_frame(struct sk_buff *skb)
2415 {
2416         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2417
2418         if (!hdev) {
2419                 kfree_skb(skb);
2420                 return -ENODEV;
2421         }
2422
2423         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2424
2425         /* Time stamp */
2426         __net_timestamp(skb);
2427
2428         /* Send copy to monitor */
2429         hci_send_to_monitor(hdev, skb);
2430
2431         if (atomic_read(&hdev->promisc)) {
2432                 /* Send copy to the sockets */
2433                 hci_send_to_sock(hdev, skb);
2434         }
2435
2436         /* Get rid of skb owner, prior to sending to the driver. */
2437         skb_orphan(skb);
2438
2439         return hdev->send(skb);
2440 }
2441
2442 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2443 {
2444         skb_queue_head_init(&req->cmd_q);
2445         req->hdev = hdev;
2446         req->err = 0;
2447 }
2448
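/* Run a request built with hci_req_init() and hci_req_add(). Note
 * that hci_req_add() returns void: an allocation failure is recorded
 * in req->err and only reported here, and an empty request fails with
 * -ENODATA. Illustrative usage (opcode and callback name are examples
 * only):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */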
2449 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2450 {
2451         struct hci_dev *hdev = req->hdev;
2452         struct sk_buff *skb;
2453         unsigned long flags;
2454
2455         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2456
2457         /* If an error occurred during request building, remove all HCI
2458          * commands queued on the HCI request queue.
2459          */
2460         if (req->err) {
2461                 skb_queue_purge(&req->cmd_q);
2462                 return req->err;
2463         }
2464
2465         /* Do not allow empty requests */
2466         if (skb_queue_empty(&req->cmd_q))
2467                 return -ENODATA;
2468
2469         skb = skb_peek_tail(&req->cmd_q);
2470         bt_cb(skb)->req.complete = complete;
2471
2472         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2473         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2474         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2475
2476         queue_work(hdev->workqueue, &hdev->cmd_work);
2477
2478         return 0;
2479 }
2480
2481 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2482                                        u32 plen, void *param)
2483 {
2484         int len = HCI_COMMAND_HDR_SIZE + plen;
2485         struct hci_command_hdr *hdr;
2486         struct sk_buff *skb;
2487
2488         skb = bt_skb_alloc(len, GFP_ATOMIC);
2489         if (!skb)
2490                 return NULL;
2491
2492         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2493         hdr->opcode = cpu_to_le16(opcode);
2494         hdr->plen   = plen;
2495
2496         if (plen)
2497                 memcpy(skb_put(skb, plen), param, plen);
2498
2499         BT_DBG("skb len %d", skb->len);
2500
2501         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2502         skb->dev = (void *) hdev;
2503
2504         return skb;
2505 }
2506
2507 /* Send HCI command */
2508 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2509 {
2510         struct sk_buff *skb;
2511
2512         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2513
2514         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2515         if (!skb) {
2516                 BT_ERR("%s no memory for command", hdev->name);
2517                 return -ENOMEM;
2518         }
2519
2520         /* Stand-alone HCI commands must be flagged as
2521          * single-command requests.
2522          */
2523         bt_cb(skb)->req.start = true;
2524
2525         skb_queue_tail(&hdev->cmd_q, skb);
2526         queue_work(hdev->workqueue, &hdev->cmd_work);
2527
2528         return 0;
2529 }
2530
2531 /* Queue a command to an asynchronous HCI request */
2532 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2533 {
2534         struct hci_dev *hdev = req->hdev;
2535         struct sk_buff *skb;
2536
2537         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2538
2539         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2540         if (!skb) {
2541                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2542                        hdev->name, opcode);
2543                 req->err = -ENOMEM;
2544                 return;
2545         }
2546
2547         if (skb_queue_empty(&req->cmd_q))
2548                 bt_cb(skb)->req.start = true;
2549
2550         skb_queue_tail(&req->cmd_q, skb);
2551 }
2552
2553 /* Get data from the previously sent command */
2554 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2555 {
2556         struct hci_command_hdr *hdr;
2557
2558         if (!hdev->sent_cmd)
2559                 return NULL;
2560
2561         hdr = (void *) hdev->sent_cmd->data;
2562
2563         if (hdr->opcode != cpu_to_le16(opcode))
2564                 return NULL;
2565
2566         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2567
2568         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2569 }
2570
2571 /* Send ACL data */
2572 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2573 {
2574         struct hci_acl_hdr *hdr;
2575         int len = skb->len;
2576
2577         skb_push(skb, HCI_ACL_HDR_SIZE);
2578         skb_reset_transport_header(skb);
2579         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2580         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2581         hdr->dlen   = cpu_to_le16(len);
2582 }
2583
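/* Queue an ACL skb, and any fragments on its frag_list, for one HCI
 * channel. The first fragment keeps the caller's flags (normally
 * ACL_START); continuations are re-flagged ACL_CONT, and the whole
 * set is queued atomically under the queue lock.
 */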
2584 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2585                           struct sk_buff *skb, __u16 flags)
2586 {
2587         struct hci_conn *conn = chan->conn;
2588         struct hci_dev *hdev = conn->hdev;
2589         struct sk_buff *list;
2590
2591         skb->len = skb_headlen(skb);
2592         skb->data_len = 0;
2593
2594         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2595
2596         switch (hdev->dev_type) {
2597         case HCI_BREDR:
2598                 hci_add_acl_hdr(skb, conn->handle, flags);
2599                 break;
2600         case HCI_AMP:
2601                 hci_add_acl_hdr(skb, chan->handle, flags);
2602                 break;
2603         default:
2604                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2605                 return;
2606         }
2607
2608         list = skb_shinfo(skb)->frag_list;
2609         if (!list) {
2610                 /* Non-fragmented */
2611                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2612
2613                 skb_queue_tail(queue, skb);
2614         } else {
2615                 /* Fragmented */
2616                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2617
2618                 skb_shinfo(skb)->frag_list = NULL;
2619
2620                 /* Queue all fragments atomically */
2621                 spin_lock(&queue->lock);
2622
2623                 __skb_queue_tail(queue, skb);
2624
2625                 flags &= ~ACL_START;
2626                 flags |= ACL_CONT;
2627                 do {
2628                         skb = list; list = list->next;
2629
2630                         skb->dev = (void *) hdev;
2631                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2632                         hci_add_acl_hdr(skb, conn->handle, flags);
2633
2634                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2635
2636                         __skb_queue_tail(queue, skb);
2637                 } while (list);
2638
2639                 spin_unlock(&queue->lock);
2640         }
2641 }
2642
2643 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2644 {
2645         struct hci_dev *hdev = chan->conn->hdev;
2646
2647         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2648
2649         skb->dev = (void *) hdev;
2650
2651         hci_queue_acl(chan, &chan->data_q, skb, flags);
2652
2653         queue_work(hdev->workqueue, &hdev->tx_work);
2654 }
2655
2656 /* Send SCO data */
2657 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2658 {
2659         struct hci_dev *hdev = conn->hdev;
2660         struct hci_sco_hdr hdr;
2661
2662         BT_DBG("%s len %d", hdev->name, skb->len);
2663
2664         hdr.handle = cpu_to_le16(conn->handle);
2665         hdr.dlen   = skb->len;
2666
2667         skb_push(skb, HCI_SCO_HDR_SIZE);
2668         skb_reset_transport_header(skb);
2669         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2670
2671         skb->dev = (void *) hdev;
2672         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2673
2674         skb_queue_tail(&conn->data_q, skb);
2675         queue_work(hdev->workqueue, &hdev->tx_work);
2676 }
2677
2678 /* ---- HCI TX task (outgoing data) ---- */
2679
2680 /* HCI Connection scheduler */
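/* Pick the connection of the given link type that has queued data and
 * the fewest outstanding packets, and hand back a fair share of the
 * controller's free buffer slots (at least one) through *quote.
 */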
2681 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2682                                      int *quote)
2683 {
2684         struct hci_conn_hash *h = &hdev->conn_hash;
2685         struct hci_conn *conn = NULL, *c;
2686         unsigned int num = 0, min = ~0;
2687
2688         /* We don't have to lock device here. Connections are always
2689          * added and removed with TX task disabled. */
2690
2691         rcu_read_lock();
2692
2693         list_for_each_entry_rcu(c, &h->list, list) {
2694                 if (c->type != type || skb_queue_empty(&c->data_q))
2695                         continue;
2696
2697                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2698                         continue;
2699
2700                 num++;
2701
2702                 if (c->sent < min) {
2703                         min  = c->sent;
2704                         conn = c;
2705                 }
2706
2707                 if (hci_conn_num(hdev, type) == num)
2708                         break;
2709         }
2710
2711         rcu_read_unlock();
2712
2713         if (conn) {
2714                 int cnt, q;
2715
2716                 switch (conn->type) {
2717                 case ACL_LINK:
2718                         cnt = hdev->acl_cnt;
2719                         break;
2720                 case SCO_LINK:
2721                 case ESCO_LINK:
2722                         cnt = hdev->sco_cnt;
2723                         break;
2724                 case LE_LINK:
2725                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2726                         break;
2727                 default:
2728                         cnt = 0;
2729                         BT_ERR("Unknown link type");
2730                 }
2731
2732                 q = cnt / num;
2733                 *quote = q ? q : 1;
2734         } else
2735                 *quote = 0;
2736
2737         BT_DBG("conn %p quote %d", conn, *quote);
2738         return conn;
2739 }
2740
2741 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2742 {
2743         struct hci_conn_hash *h = &hdev->conn_hash;
2744         struct hci_conn *c;
2745
2746         BT_ERR("%s link tx timeout", hdev->name);
2747
2748         rcu_read_lock();
2749
2750         /* Kill stalled connections */
2751         list_for_each_entry_rcu(c, &h->list, list) {
2752                 if (c->type == type && c->sent) {
2753                         BT_ERR("%s killing stalled connection %pMR",
2754                                hdev->name, &c->dst);
2755                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2756                 }
2757         }
2758
2759         rcu_read_unlock();
2760 }
2761
2762 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2763                                       int *quote)
2764 {
2765         struct hci_conn_hash *h = &hdev->conn_hash;
2766         struct hci_chan *chan = NULL;
2767         unsigned int num = 0, min = ~0, cur_prio = 0;
2768         struct hci_conn *conn;
2769         int cnt, q, conn_num = 0;
2770
2771         BT_DBG("%s", hdev->name);
2772
2773         rcu_read_lock();
2774
2775         list_for_each_entry_rcu(conn, &h->list, list) {
2776                 struct hci_chan *tmp;
2777
2778                 if (conn->type != type)
2779                         continue;
2780
2781                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2782                         continue;
2783
2784                 conn_num++;
2785
2786                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2787                         struct sk_buff *skb;
2788
2789                         if (skb_queue_empty(&tmp->data_q))
2790                                 continue;
2791
2792                         skb = skb_peek(&tmp->data_q);
2793                         if (skb->priority < cur_prio)
2794                                 continue;
2795
2796                         if (skb->priority > cur_prio) {
2797                                 num = 0;
2798                                 min = ~0;
2799                                 cur_prio = skb->priority;
2800                         }
2801
2802                         num++;
2803
2804                         if (conn->sent < min) {
2805                                 min  = conn->sent;
2806                                 chan = tmp;
2807                         }
2808                 }
2809
2810                 if (hci_conn_num(hdev, type) == conn_num)
2811                         break;
2812         }
2813
2814         rcu_read_unlock();
2815
2816         if (!chan)
2817                 return NULL;
2818
2819         switch (chan->conn->type) {
2820         case ACL_LINK:
2821                 cnt = hdev->acl_cnt;
2822                 break;
2823         case AMP_LINK:
2824                 cnt = hdev->block_cnt;
2825                 break;
2826         case SCO_LINK:
2827         case ESCO_LINK:
2828                 cnt = hdev->sco_cnt;
2829                 break;
2830         case LE_LINK:
2831                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2832                 break;
2833         default:
2834                 cnt = 0;
2835                 BT_ERR("Unknown link type");
2836         }
2837
2838         q = cnt / num;
2839         *quote = q ? q : 1;
2840         BT_DBG("chan %p quote %d", chan, *quote);
2841         return chan;
2842 }
2843
2844 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2845 {
2846         struct hci_conn_hash *h = &hdev->conn_hash;
2847         struct hci_conn *conn;
2848         int num = 0;
2849
2850         BT_DBG("%s", hdev->name);
2851
2852         rcu_read_lock();
2853
2854         list_for_each_entry_rcu(conn, &h->list, list) {
2855                 struct hci_chan *chan;
2856
2857                 if (conn->type != type)
2858                         continue;
2859
2860                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2861                         continue;
2862
2863                 num++;
2864
2865                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2866                         struct sk_buff *skb;
2867
2868                         if (chan->sent) {
2869                                 chan->sent = 0;
2870                                 continue;
2871                         }
2872
2873                         if (skb_queue_empty(&chan->data_q))
2874                                 continue;
2875
2876                         skb = skb_peek(&chan->data_q);
2877                         if (skb->priority >= HCI_PRIO_MAX - 1)
2878                                 continue;
2879
2880                         skb->priority = HCI_PRIO_MAX - 1;
2881
2882                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2883                                skb->priority);
2884                 }
2885
2886                 if (hci_conn_num(hdev, type) == num)
2887                         break;
2888         }
2889
2890         rcu_read_unlock();
2892 }
2893
2894 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2895 {
2896         /* Calculate count of blocks used by this packet */
2897         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2898 }
2899
2900 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2901 {
2902         if (!test_bit(HCI_RAW, &hdev->flags)) {
2903                 /* ACL tx timeout must be longer than maximum
2904                  * link supervision timeout (40.9 seconds) */
2905                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2906                                        HCI_ACL_TX_TIMEOUT))
2907                         hci_link_tx_to(hdev, ACL_LINK);
2908         }
2909 }
2910
2911 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2912 {
2913         unsigned int cnt = hdev->acl_cnt;
2914         struct hci_chan *chan;
2915         struct sk_buff *skb;
2916         int quote;
2917
2918         __check_timeout(hdev, cnt);
2919
2920         while (hdev->acl_cnt &&
2921                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2922                 u32 priority = (skb_peek(&chan->data_q))->priority;
2923                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2924                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2925                                skb->len, skb->priority);
2926
2927                         /* Stop if priority has changed */
2928                         if (skb->priority < priority)
2929                                 break;
2930
2931                         skb = skb_dequeue(&chan->data_q);
2932
2933                         hci_conn_enter_active_mode(chan->conn,
2934                                                    bt_cb(skb)->force_active);
2935
2936                         hci_send_frame(skb);
2937                         hdev->acl_last_tx = jiffies;
2938
2939                         hdev->acl_cnt--;
2940                         chan->sent++;
2941                         chan->conn->sent++;
2942                 }
2943         }
2944
2945         if (cnt != hdev->acl_cnt)
2946                 hci_prio_recalculate(hdev, ACL_LINK);
2947 }
2948
2949 static void hci_sched_acl_blk(struct hci_dev *hdev)
2950 {
2951         unsigned int cnt = hdev->block_cnt;
2952         struct hci_chan *chan;
2953         struct sk_buff *skb;
2954         int quote;
2955         u8 type;
2956
2957         __check_timeout(hdev, cnt);
2958
2959         BT_DBG("%s", hdev->name);
2960
2961         if (hdev->dev_type == HCI_AMP)
2962                 type = AMP_LINK;
2963         else
2964                 type = ACL_LINK;
2965
2966         while (hdev->block_cnt > 0 &&
2967                (chan = hci_chan_sent(hdev, type, &quote))) {
2968                 u32 priority = (skb_peek(&chan->data_q))->priority;
2969                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2970                         int blocks;
2971
2972                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2973                                skb->len, skb->priority);
2974
2975                         /* Stop if priority has changed */
2976                         if (skb->priority < priority)
2977                                 break;
2978
2979                         skb = skb_dequeue(&chan->data_q);
2980
2981                         blocks = __get_blocks(hdev, skb);
2982                         if (blocks > hdev->block_cnt)
2983                                 return;
2984
2985                         hci_conn_enter_active_mode(chan->conn,
2986                                                    bt_cb(skb)->force_active);
2987
2988                         hci_send_frame(skb);
2989                         hdev->acl_last_tx = jiffies;
2990
2991                         hdev->block_cnt -= blocks;
2992                         quote -= blocks;
2993
2994                         chan->sent += blocks;
2995                         chan->conn->sent += blocks;
2996                 }
2997         }
2998
2999         if (cnt != hdev->block_cnt)
3000                 hci_prio_recalculate(hdev, type);
3001 }
3002
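/* Dispatch queued ACL data using whichever flow control mode the
 * controller operates in: packet-based (counting packets) or
 * block-based (counting data blocks, as used by AMP controllers).
 */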
3003 static void hci_sched_acl(struct hci_dev *hdev)
3004 {
3005         BT_DBG("%s", hdev->name);
3006
3007         /* No ACL link over BR/EDR controller */
3008         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3009                 return;
3010
3011         /* No AMP link over AMP controller */
3012         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3013                 return;
3014
3015         switch (hdev->flow_ctl_mode) {
3016         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3017                 hci_sched_acl_pkt(hdev);
3018                 break;
3019
3020         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3021                 hci_sched_acl_blk(hdev);
3022                 break;
3023         }
3024 }
3025
3026 /* Schedule SCO */
3027 static void hci_sched_sco(struct hci_dev *hdev)
3028 {
3029         struct hci_conn *conn;
3030         struct sk_buff *skb;
3031         int quote;
3032
3033         BT_DBG("%s", hdev->name);
3034
3035         if (!hci_conn_num(hdev, SCO_LINK))
3036                 return;
3037
3038         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3039                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3040                         BT_DBG("skb %p len %d", skb, skb->len);
3041                         hci_send_frame(skb);
3042
3043                         conn->sent++;
3044                         if (conn->sent == ~0)
3045                                 conn->sent = 0;
3046                 }
3047         }
3048 }
3049
3050 static void hci_sched_esco(struct hci_dev *hdev)
3051 {
3052         struct hci_conn *conn;
3053         struct sk_buff *skb;
3054         int quote;
3055
3056         BT_DBG("%s", hdev->name);
3057
3058         if (!hci_conn_num(hdev, ESCO_LINK))
3059                 return;
3060
3061         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3062                                                      &quote))) {
3063                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3064                         BT_DBG("skb %p len %d", skb, skb->len);
3065                         hci_send_frame(skb);
3066
3067                         conn->sent++;
3068                         if (conn->sent == ~0)
3069                                 conn->sent = 0;
3070                 }
3071         }
3072 }
3073
3074 static void hci_sched_le(struct hci_dev *hdev)
3075 {
3076         struct hci_chan *chan;
3077         struct sk_buff *skb;
3078         int quote, cnt, tmp;
3079
3080         BT_DBG("%s", hdev->name);
3081
3082         if (!hci_conn_num(hdev, LE_LINK))
3083                 return;
3084
3085         if (!test_bit(HCI_RAW, &hdev->flags)) {
3086                 /* LE tx timeout must be longer than maximum
3087                  * link supervision timeout (40.9 seconds) */
3088                 if (!hdev->le_cnt && hdev->le_pkts &&
3089                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3090                         hci_link_tx_to(hdev, LE_LINK);
3091         }
3092
3093         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3094         tmp = cnt;
3095         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3096                 u32 priority = (skb_peek(&chan->data_q))->priority;
3097                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3098                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3099                                skb->len, skb->priority);
3100
3101                         /* Stop if priority has changed */
3102                         if (skb->priority < priority)
3103                                 break;
3104
3105                         skb = skb_dequeue(&chan->data_q);
3106
3107                         hci_send_frame(skb);
3108                         hdev->le_last_tx = jiffies;
3109
3110                         cnt--;
3111                         chan->sent++;
3112                         chan->conn->sent++;
3113                 }
3114         }
3115
3116         if (hdev->le_pkts)
3117                 hdev->le_cnt = cnt;
3118         else
3119                 hdev->acl_cnt = cnt;
3120
3121         if (cnt != tmp)
3122                 hci_prio_recalculate(hdev, LE_LINK);
3123 }
3124
3125 static void hci_tx_work(struct work_struct *work)
3126 {
3127         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3128         struct sk_buff *skb;
3129
3130         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3131                hdev->sco_cnt, hdev->le_cnt);
3132
3133         /* Schedule queues and send pending data to the HCI driver */
3134
3135         hci_sched_acl(hdev);
3136
3137         hci_sched_sco(hdev);
3138
3139         hci_sched_esco(hdev);
3140
3141         hci_sched_le(hdev);
3142
3143         /* Send next queued raw (unknown type) packet */
3144         while ((skb = skb_dequeue(&hdev->raw_q)))
3145                 hci_send_frame(skb);
3146 }
3147
3148 /* ----- HCI RX task (incoming data processing) ----- */
3149
3150 /* ACL data packet */
3151 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3152 {
3153         struct hci_acl_hdr *hdr = (void *) skb->data;
3154         struct hci_conn *conn;
3155         __u16 handle, flags;
3156
3157         skb_pull(skb, HCI_ACL_HDR_SIZE);
3158
3159         handle = __le16_to_cpu(hdr->handle);
3160         flags  = hci_flags(handle);
3161         handle = hci_handle(handle);
3162
3163         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3164                handle, flags);
3165
3166         hdev->stat.acl_rx++;
3167
3168         hci_dev_lock(hdev);
3169         conn = hci_conn_hash_lookup_handle(hdev, handle);
3170         hci_dev_unlock(hdev);
3171
3172         if (conn) {
3173                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3174
3175                 /* Send to upper protocol */
3176                 l2cap_recv_acldata(conn, skb, flags);
3177                 return;
3178         }
3179
3180         BT_ERR("%s ACL packet for unknown connection handle %d",
3181                hdev->name, handle);
3182
3183         kfree_skb(skb);
3184 }
3185
3186 /* SCO data packet */
3187 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3188 {
3189         struct hci_sco_hdr *hdr = (void *) skb->data;
3190         struct hci_conn *conn;
3191         __u16 handle;
3192
3193         skb_pull(skb, HCI_SCO_HDR_SIZE);
3194
3195         handle = __le16_to_cpu(hdr->handle);
3196
3197         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3198
3199         hdev->stat.sco_rx++;
3200
3201         hci_dev_lock(hdev);
3202         conn = hci_conn_hash_lookup_handle(hdev, handle);
3203         hci_dev_unlock(hdev);
3204
3205         if (conn) {
3206                 /* Send to upper protocol */
3207                 sco_recv_scodata(conn, skb);
3208                 return;
3209         }
3210
3211         BT_ERR("%s SCO packet for unknown connection handle %d",
3212                hdev->name, handle);
3213
3214         kfree_skb(skb);
3215 }
3216
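/* A request is complete when the command at the head of the queue
 * starts a new request, or when the queue is empty. The req.start
 * flag is set by hci_req_add() for the first command of a request and
 * by hci_send_cmd() for stand-alone commands.
 */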
3217 static bool hci_req_is_complete(struct hci_dev *hdev)
3218 {
3219         struct sk_buff *skb;
3220
3221         skb = skb_peek(&hdev->cmd_q);
3222         if (!skb)
3223                 return true;
3224
3225         return bt_cb(skb)->req.start;
3226 }
3227
3228 static void hci_resend_last(struct hci_dev *hdev)
3229 {
3230         struct hci_command_hdr *sent;
3231         struct sk_buff *skb;
3232         u16 opcode;
3233
3234         if (!hdev->sent_cmd)
3235                 return;
3236
3237         sent = (void *) hdev->sent_cmd->data;
3238         opcode = __le16_to_cpu(sent->opcode);
3239         if (opcode == HCI_OP_RESET)
3240                 return;
3241
3242         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3243         if (!skb)
3244                 return;
3245
3246         skb_queue_head(&hdev->cmd_q, skb);
3247         queue_work(hdev->workqueue, &hdev->cmd_work);
3248 }
3249
3250 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3251 {
3252         hci_req_complete_t req_complete = NULL;
3253         struct sk_buff *skb;
3254         unsigned long flags;
3255
3256         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3257
3258         /* If the completed command doesn't match the last one that was
3259          * sent, we need to do special handling of it.
3260          */
3261         if (!hci_sent_cmd_data(hdev, opcode)) {
3262                 /* Some CSR-based controllers generate a spontaneous
3263                  * reset complete event during init and any pending
3264                  * command will never be completed. In such a case we
3265                  * need to resend whatever was the last sent
3266                  * command.
3267                  */
3268                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3269                         hci_resend_last(hdev);
3270
3271                 return;
3272         }
3273
3274         /* If the command succeeded and there are still more commands in
3275          * this request, the request is not yet complete.
3276          */
3277         if (!status && !hci_req_is_complete(hdev))
3278                 return;
3279
3280         /* If this was the last command in a request, the complete
3281          * callback would be found in hdev->sent_cmd instead of the
3282          * command queue (hdev->cmd_q).
3283          */
3284         if (hdev->sent_cmd) {
3285                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3286                 if (req_complete)
3287                         goto call_complete;
3288         }
3289
3290         /* Remove all pending commands belonging to this request */
3291         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3292         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3293                 if (bt_cb(skb)->req.start) {
3294                         __skb_queue_head(&hdev->cmd_q, skb);
3295                         break;
3296                 }
3297
3298                 req_complete = bt_cb(skb)->req.complete;
3299                 kfree_skb(skb);
3300         }
3301         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3302
3303 call_complete:
3304         if (req_complete)
3305                 req_complete(hdev, status);
3306 }
3307
3308 void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3309 {
3310         hci_req_complete_t req_complete = NULL;
3311
3312         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3313
3314         if (status) {
3315                 hci_req_cmd_complete(hdev, opcode, status);
3316                 return;
3317         }
3318
3319         /* No need to handle success status if there are more commands */
3320         if (!hci_req_is_complete(hdev))
3321                 return;
3322
3323         if (hdev->sent_cmd)
3324                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3325
3326         /* If the request doesn't have a complete callback or there
3327          * are other commands/requests in the hdev queue, we consider
3328          * this request completed.
3329          */
3330         if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3331                 hci_req_cmd_complete(hdev, opcode, status);
3332 }
3333
3334 static void hci_rx_work(struct work_struct *work)
3335 {
3336         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3337         struct sk_buff *skb;
3338
3339         BT_DBG("%s", hdev->name);
3340
3341         while ((skb = skb_dequeue(&hdev->rx_q))) {
3342                 /* Send copy to monitor */
3343                 hci_send_to_monitor(hdev, skb);
3344
3345                 if (atomic_read(&hdev->promisc)) {
3346                         /* Send copy to the sockets */
3347                         hci_send_to_sock(hdev, skb);
3348                 }
3349
3350                 if (test_bit(HCI_RAW, &hdev->flags)) {
3351                         kfree_skb(skb);
3352                         continue;
3353                 }
3354
3355                 if (test_bit(HCI_INIT, &hdev->flags)) {
3356                         /* Don't process data packets in this state. */
3357                         switch (bt_cb(skb)->pkt_type) {
3358                         case HCI_ACLDATA_PKT:
3359                         case HCI_SCODATA_PKT:
3360                                 kfree_skb(skb);
3361                                 continue;
3362                         }
3363                 }
3364
3365                 /* Process frame */
3366                 switch (bt_cb(skb)->pkt_type) {
3367                 case HCI_EVENT_PKT:
3368                         BT_DBG("%s Event packet", hdev->name);
3369                         hci_event_packet(hdev, skb);
3370                         break;
3371
3372                 case HCI_ACLDATA_PKT:
3373                         BT_DBG("%s ACL data packet", hdev->name);
3374                         hci_acldata_packet(hdev, skb);
3375                         break;
3376
3377                 case HCI_SCODATA_PKT:
3378                         BT_DBG("%s SCO data packet", hdev->name);
3379                         hci_scodata_packet(hdev, skb);
3380                         break;
3381
3382                 default:
3383                         kfree_skb(skb);
3384                         break;
3385                 }
3386         }
3387 }
3388
3389 static void hci_cmd_work(struct work_struct *work)
3390 {
3391         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3392         struct sk_buff *skb;
3393
3394         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3395                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3396
3397         /* Send queued commands */
3398         if (atomic_read(&hdev->cmd_cnt)) {
3399                 skb = skb_dequeue(&hdev->cmd_q);
3400                 if (!skb)
3401                         return;
3402
3403                 kfree_skb(hdev->sent_cmd);
3404
3405                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3406                 if (hdev->sent_cmd) {
3407                         atomic_dec(&hdev->cmd_cnt);
3408                         hci_send_frame(skb);
3409                         if (test_bit(HCI_RESET, &hdev->flags))
3410                                 del_timer(&hdev->cmd_timer);
3411                         else
3412                                 mod_timer(&hdev->cmd_timer,
3413                                           jiffies + HCI_CMD_TIMEOUT);
3414                 } else {
3415                         skb_queue_head(&hdev->cmd_q, skb);
3416                         queue_work(hdev->workqueue, &hdev->cmd_work);
3417                 }
3418         }
3419 }
3420
3421 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3422 {
3423         /* General inquiry access code (GIAC) */
3424         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3425         struct hci_cp_inquiry cp;
3426
3427         BT_DBG("%s", hdev->name);
3428
3429         if (test_bit(HCI_INQUIRY, &hdev->flags))
3430                 return -EINPROGRESS;
3431
3432         inquiry_cache_flush(hdev);
3433
3434         memset(&cp, 0, sizeof(cp));
3435         memcpy(&cp.lap, lap, sizeof(cp.lap));
3436         cp.length  = length;
3437
3438         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3439 }
3440
3441 int hci_cancel_inquiry(struct hci_dev *hdev)
3442 {
3443         BT_DBG("%s", hdev->name);
3444
3445         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3446                 return -EALREADY;
3447
3448         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3449 }
3450
3451 u8 bdaddr_to_le(u8 bdaddr_type)
3452 {
3453         switch (bdaddr_type) {
3454         case BDADDR_LE_PUBLIC:
3455                 return ADDR_LE_DEV_PUBLIC;
3456
3457         default:
3458                 /* Fallback to LE Random address type */
3459                 return ADDR_LE_DEV_RANDOM;
3460         }
3461 }