Bluetooth: Refactor HCI command skb creation
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check if the completed command matches
         * the last init command; if not, handle the CSR reset quirk below
         * and return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*req)(struct hci_dev *hdev, unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);

        /* If the request didn't send any commands return immediately */
        if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
                hdev->req_status = 0;
                remove_wait_queue(&hdev->req_wait_q, &wait);
                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_dev *hdev, unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

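/* Illustrative sketch (not from the original sources): a request
 * callback only queues HCI commands via hci_send_cmd(), and
 * hci_req_sync() blocks until hci_req_complete() fires or the
 * timeout expires. The callback name and opt encoding here are
 * hypothetical; HCI_OP_WRITE_VOICE_SETTING is a real opcode.
 *
 *      static void example_voice_req(struct hci_dev *hdev,
 *                                    unsigned long opt)
 *      {
 *              __le16 setting = cpu_to_le16(opt);
 *
 *              hci_send_cmd(hdev, HCI_OP_WRITE_VOICE_SETTING,
 *                           sizeof(setting), &setting);
 *      }
 *
 *      err = hci_req_sync(hdev, example_voice_req, 0x0060,
 *                         HCI_INIT_TIMEOUT);
 */
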
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(hdev, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_dev *hdev)
{
        /* Read LE Buffer Size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_send_cmd(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

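/* Note on the returned values (per the Write Inquiry Mode command in
 * the Bluetooth core specification): 0x00 selects standard inquiry
 * results, 0x01 inquiry results with RSSI, and 0x02 inquiry results
 * with RSSI or extended inquiry results. The manufacturer/revision
 * checks above whitelist older controllers known to support RSSI
 * results without advertising the corresponding LMP feature bit.
 */
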
static void hci_setup_inquiry_mode(struct hci_dev *hdev)
{
        u8 mode;

        mode = hci_get_inquiry_mode(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_dev *hdev)
{
        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
                             sizeof(events), events);
        }
}

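/* Note: each bit of the 8-byte mask above enables one HCI event as
 * defined by the Set Event Mask command in the Bluetooth core
 * specification; events[n] |= val sets bit (8 * n + log2(val)) of
 * the little-endian 64-bit mask, so e.g. events[4] |= 0x01 enables
 * the event assigned to bit 32. The LE mask written with
 * HCI_OP_LE_SET_EVENT_MASK works the same way for LE meta events.
 */
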
static void hci_init2_req(struct hci_dev *hdev, unsigned long opt)
{
        if (lmp_bredr_capable(hdev))
                bredr_setup(hdev);

        if (lmp_le_capable(hdev))
                le_setup(hdev);

        hci_setup_event_mask(hdev);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
                                     sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(hdev);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
                             &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                             &enable);
        }
}

static void hci_setup_link_policy(struct hci_dev *hdev)
{
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_dev *hdev)
{
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                             &cp);
}

static void hci_init3_req(struct hci_dev *hdev, unsigned long opt)
{
        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(hdev);

        if (lmp_le_capable(hdev))
                hci_set_le_support(hdev);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

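/* Note: controller bring-up is split into up to three synchronous
 * stages. Stage one (hci_init1_req) resets the controller and reads
 * the basic capabilities that decide what the later stages may send;
 * stages two and three then configure BR/EDR and LE behaviour based
 * on those capabilities. AMP controllers stop after stage one.
 */
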
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

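/* Illustrative usage sketch (hypothetical index 0): every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller
 * is done with the reference:
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *
 *      if (hdev) {
 *              BT_DBG("%s is %s", hdev->name,
 *                     test_bit(HCI_UP, &hdev->flags) ? "up" : "down");
 *              hci_dev_put(hdev);
 *      }
 */
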
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

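/* Note: the resolve list is kept sorted by increasing abs(rssi), so
 * name resolution is attempted for the strongest (closest) devices
 * first; entries whose name request is already NAME_PENDING are
 * never displaced by the insertion.
 */
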
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

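/* Illustrative userspace sketch: hci_inquiry() backs the HCIINQUIRY
 * ioctl on an HCI socket. The caller passes a buffer that starts
 * with a struct hci_inquiry_req and leaves room for the requested
 * number of struct inquiry_info entries right behind it, roughly
 * (hci_sock_fd is a hypothetical, already-open socket):
 *
 *      buf = malloc(sizeof(struct hci_inquiry_req) +
 *                   255 * sizeof(struct inquiry_info));
 *      ... fill in dev_id, lap, length, num_rsp and flags ...
 *      ioctl(hci_sock_fd, HCIINQUIRY, buf);
 */
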
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

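/* Illustrative layout: every advertising-data element built above is
 * a <length, type, payload> triple. For example, a Flags element with
 * LE_AD_GENERAL | LE_AD_NO_BREDR set would be emitted as the three
 * octets:
 *
 *      0x02 0x01 0x06
 *        |    |    `-- flags value (0x02 | 0x04)
 *        |    `------- EIR_FLAGS (0x01)
 *        `------------ length of the two octets that follow
 */
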
int hci_update_ad(struct hci_dev *hdev)
{
        struct hci_cp_le_set_adv_data cp;
        u8 len;
        int err;

        hci_dev_lock(hdev);

        if (!lmp_le_capable(hdev)) {
                err = -EINVAL;
                goto unlock;
        }

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0) {
                err = 0;
                goto unlock;
        }

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;
        err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
        hci_dev_unlock(hdev);

        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
         * enable_hs is not set
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_init(hdev);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                hci_update_ad(hdev);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

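/* Note on HCISETACLMTU/HCISETSCOMTU above: the 32-bit dev_opt is
 * treated as two native-endian 16-bit halves, the first holding the
 * packet count and the second the MTU; on a little-endian host
 * userspace would pack it roughly as:
 *
 *      dr.dev_opt = (mtu << 16) | pkts;
 */
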
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

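/* Link key types referenced below (from hci.h): 0x00 combination,
 * 0x01 local unit and 0x02 remote unit keys are legacy-pairing keys;
 * HCI_LK_DEBUG_COMBINATION (0x03) and HCI_LK_CHANGED_COMBINATION
 * (0x06) are handled specially, and an old_key_type of 0xff means no
 * previous key is known. An auth_type/remote_auth of 0x02 or 0x03
 * corresponds to dedicated bonding (without and with MITM
 * protection).
 */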
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently
         */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key
         */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

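/* Note: hdev->cmd_timer is armed when a command is handed to the
 * driver and deleted when the matching completion event arrives (see
 * the command work and event handling elsewhere in the stack). On
 * timeout, forcing cmd_cnt back to 1 and rescheduling cmd_work lets
 * the command queue make progress instead of stalling forever behind
 * the lost command.
 */
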
1719 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1720                                           bdaddr_t *bdaddr)
1721 {
1722         struct oob_data *data;
1723
1724         list_for_each_entry(data, &hdev->remote_oob_data, list)
1725                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1726                         return data;
1727
1728         return NULL;
1729 }
1730
1731 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1732 {
1733         struct oob_data *data;
1734
1735         data = hci_find_remote_oob_data(hdev, bdaddr);
1736         if (!data)
1737                 return -ENOENT;
1738
1739         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1740
1741         list_del(&data->list);
1742         kfree(data);
1743
1744         return 0;
1745 }
1746
1747 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1748 {
1749         struct oob_data *data, *n;
1750
1751         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1752                 list_del(&data->list);
1753                 kfree(data);
1754         }
1755
1756         return 0;
1757 }
1758
1759 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1760                             u8 *randomizer)
1761 {
1762         struct oob_data *data;
1763
1764         data = hci_find_remote_oob_data(hdev, bdaddr);
1765
1766         if (!data) {
1767                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1768                 if (!data)
1769                         return -ENOMEM;
1770
1771                 bacpy(&data->bdaddr, bdaddr);
1772                 list_add(&data->list, &hdev->remote_oob_data);
1773         }
1774
1775         memcpy(data->hash, hash, sizeof(data->hash));
1776         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1777
1778         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1779
1780         return 0;
1781 }
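
/* Illustrative sketch: storing OOB pairing data received over an external
 * channel such as NFC.  Both hash (the SSP C value) and randomizer (the
 * R value) are 16-byte buffers; peer_bdaddr is a hypothetical name:
 *
 *      u8 hash[16], randomizer[16];
 *
 *      err = hci_add_remote_oob_data(hdev, &peer_bdaddr, hash, randomizer);
 */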
1782
1783 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1784 {
1785         struct bdaddr_list *b;
1786
1787         list_for_each_entry(b, &hdev->blacklist, list)
1788                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1789                         return b;
1790
1791         return NULL;
1792 }
1793
1794 int hci_blacklist_clear(struct hci_dev *hdev)
1795 {
1796         struct list_head *p, *n;
1797
1798         list_for_each_safe(p, n, &hdev->blacklist) {
1799                 struct bdaddr_list *b;
1800
1801                 b = list_entry(p, struct bdaddr_list, list);
1802
1803                 list_del(p);
1804                 kfree(b);
1805         }
1806
1807         return 0;
1808 }
1809
1810 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1811 {
1812         struct bdaddr_list *entry;
1813
1814         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1815                 return -EBADF;
1816
1817         if (hci_blacklist_lookup(hdev, bdaddr))
1818                 return -EEXIST;
1819
1820         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1821         if (!entry)
1822                 return -ENOMEM;
1823
1824         bacpy(&entry->bdaddr, bdaddr);
1825
1826         list_add(&entry->list, &hdev->blacklist);
1827
1828         return mgmt_device_blocked(hdev, bdaddr, type);
1829 }
1830
1831 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1832 {
1833         struct bdaddr_list *entry;
1834
1835         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1836                 return hci_blacklist_clear(hdev);
1837
1838         entry = hci_blacklist_lookup(hdev, bdaddr);
1839         if (!entry)
1840                 return -ENOENT;
1841
1842         list_del(&entry->list);
1843         kfree(entry);
1844
1845         return mgmt_device_unblocked(hdev, bdaddr, type);
1846 }
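
/* Illustrative sketch of the blacklist API as the mgmt layer might use it
 * (cp is a hypothetical mgmt command payload; hdev->lock is assumed to be
 * held).  Note the BDADDR_ANY asymmetry: _add rejects it, while _del
 * treats it as a request to clear the whole list:
 *
 *      err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
 *      ...
 *      err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
 */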
1847
1848 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1849 {
1850         struct le_scan_params *param = (struct le_scan_params *) opt;
1851         struct hci_cp_le_set_scan_param cp;
1852
1853         memset(&cp, 0, sizeof(cp));
1854         cp.type = param->type;
1855         cp.interval = cpu_to_le16(param->interval);
1856         cp.window = cpu_to_le16(param->window);
1857
1858         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1859 }
1860
1861 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1862 {
1863         struct hci_cp_le_set_scan_enable cp;
1864
1865         memset(&cp, 0, sizeof(cp));
1866         cp.enable = 1;
1867         cp.filter_dup = 1;
1868
1869         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1870 }
1871
1872 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1873                           u16 window, int timeout)
1874 {
1875         long timeo = msecs_to_jiffies(3000);
1876         struct le_scan_params param;
1877         int err;
1878
1879         BT_DBG("%s", hdev->name);
1880
1881         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1882                 return -EINPROGRESS;
1883
1884         param.type = type;
1885         param.interval = interval;
1886         param.window = window;
1887
1888         hci_req_lock(hdev);
1889
1890         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1891                              timeo);
1892         if (!err)
1893                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
1894
1895         hci_req_unlock(hdev);
1896
1897         if (err < 0)
1898                 return err;
1899
1900         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1901                            msecs_to_jiffies(timeout));
1902
1903         return 0;
1904 }
1905
1906 int hci_cancel_le_scan(struct hci_dev *hdev)
1907 {
1908         BT_DBG("%s", hdev->name);
1909
1910         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1911                 return -EALREADY;
1912
1913         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1914                 struct hci_cp_le_set_scan_enable cp;
1915
1916                 /* Send HCI command to disable LE Scan */
1917                 memset(&cp, 0, sizeof(cp));
1918                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1919         }
1920
1921         return 0;
1922 }
1923
1924 static void le_scan_disable_work(struct work_struct *work)
1925 {
1926         struct hci_dev *hdev = container_of(work, struct hci_dev,
1927                                             le_scan_disable.work);
1928         struct hci_cp_le_set_scan_enable cp;
1929
1930         BT_DBG("%s", hdev->name);
1931
1932         memset(&cp, 0, sizeof(cp));
1933
1934         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1935 }
1936
1937 static void le_scan_work(struct work_struct *work)
1938 {
1939         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1940         struct le_scan_params *param = &hdev->le_scan_params;
1941
1942         BT_DBG("%s", hdev->name);
1943
1944         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1945                        param->timeout);
1946 }
1947
1948 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1949                 int timeout)
1950 {
1951         struct le_scan_params *param = &hdev->le_scan_params;
1952
1953         BT_DBG("%s", hdev->name);
1954
1955         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1956                 return -ENOTSUPP;
1957
1958         if (work_busy(&hdev->le_scan))
1959                 return -EINPROGRESS;
1960
1961         param->type = type;
1962         param->interval = interval;
1963         param->window = window;
1964         param->timeout = timeout;
1965
1966         queue_work(system_long_wq, &hdev->le_scan);
1967
1968         return 0;
1969 }
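
/* Illustrative example of the parameter units: interval and window are
 * given in 0.625 ms slots and timeout in milliseconds, so an active scan
 * (type 0x01) with a 60 ms interval, 30 ms window and 10.24 s duration
 * would be requested as:
 *
 *      err = hci_le_scan(hdev, 0x01, 0x0060, 0x0030, 10240);
 */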
1970
1971 /* Alloc HCI device */
1972 struct hci_dev *hci_alloc_dev(void)
1973 {
1974         struct hci_dev *hdev;
1975
1976         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1977         if (!hdev)
1978                 return NULL;
1979
1980         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1981         hdev->esco_type = (ESCO_HV1);
1982         hdev->link_mode = (HCI_LM_ACCEPT);
1983         hdev->io_capability = 0x03; /* No Input No Output */
1984         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1985         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
1986
1987         hdev->sniff_max_interval = 800;
1988         hdev->sniff_min_interval = 80;
1989
1990         mutex_init(&hdev->lock);
1991         mutex_init(&hdev->req_lock);
1992
1993         INIT_LIST_HEAD(&hdev->mgmt_pending);
1994         INIT_LIST_HEAD(&hdev->blacklist);
1995         INIT_LIST_HEAD(&hdev->uuids);
1996         INIT_LIST_HEAD(&hdev->link_keys);
1997         INIT_LIST_HEAD(&hdev->long_term_keys);
1998         INIT_LIST_HEAD(&hdev->remote_oob_data);
1999         INIT_LIST_HEAD(&hdev->conn_hash.list);
2000
2001         INIT_WORK(&hdev->rx_work, hci_rx_work);
2002         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2003         INIT_WORK(&hdev->tx_work, hci_tx_work);
2004         INIT_WORK(&hdev->power_on, hci_power_on);
2005         INIT_WORK(&hdev->le_scan, le_scan_work);
2006
2007         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2008         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2009         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2010
2011         skb_queue_head_init(&hdev->driver_init);
2012         skb_queue_head_init(&hdev->rx_q);
2013         skb_queue_head_init(&hdev->cmd_q);
2014         skb_queue_head_init(&hdev->raw_q);
2015
2016         init_waitqueue_head(&hdev->req_wait_q);
2017
2018         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2019
2020         hci_init_sysfs(hdev);
2021         discovery_init(hdev);
2022
2023         return hdev;
2024 }
2025 EXPORT_SYMBOL(hci_alloc_dev);
2026
2027 /* Free HCI device */
2028 void hci_free_dev(struct hci_dev *hdev)
2029 {
2030         skb_queue_purge(&hdev->driver_init);
2031
2032         /* will be freed via device release */
2033         put_device(&hdev->dev);
2034 }
2035 EXPORT_SYMBOL(hci_free_dev);
2036
2037 /* Register HCI device */
2038 int hci_register_dev(struct hci_dev *hdev)
2039 {
2040         int id, error;
2041
2042         if (!hdev->open || !hdev->close)
2043                 return -EINVAL;
2044
2045         /* Do not allow HCI_AMP devices to register at index 0,
2046          * so the index can be used as the AMP controller ID.
2047          */
2048         switch (hdev->dev_type) {
2049         case HCI_BREDR:
2050                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2051                 break;
2052         case HCI_AMP:
2053                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2054                 break;
2055         default:
2056                 return -EINVAL;
2057         }
2058
2059         if (id < 0)
2060                 return id;
2061
2062         sprintf(hdev->name, "hci%d", id);
2063         hdev->id = id;
2064
2065         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2066
2067         write_lock(&hci_dev_list_lock);
2068         list_add(&hdev->list, &hci_dev_list);
2069         write_unlock(&hci_dev_list_lock);
2070
2071         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2072                                           WQ_MEM_RECLAIM, 1);
2073         if (!hdev->workqueue) {
2074                 error = -ENOMEM;
2075                 goto err;
2076         }
2077
2078         hdev->req_workqueue = alloc_workqueue(hdev->name,
2079                                               WQ_HIGHPRI | WQ_UNBOUND |
2080                                               WQ_MEM_RECLAIM, 1);
2081         if (!hdev->req_workqueue) {
2082                 destroy_workqueue(hdev->workqueue);
2083                 error = -ENOMEM;
2084                 goto err;
2085         }
2086
2087         error = hci_add_sysfs(hdev);
2088         if (error < 0)
2089                 goto err_wqueue;
2090
2091         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2092                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2093                                     hdev);
2094         if (hdev->rfkill) {
2095                 if (rfkill_register(hdev->rfkill) < 0) {
2096                         rfkill_destroy(hdev->rfkill);
2097                         hdev->rfkill = NULL;
2098                 }
2099         }
2100
2101         set_bit(HCI_SETUP, &hdev->dev_flags);
2102
2103         if (hdev->dev_type != HCI_AMP)
2104                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2105
2106         hci_notify(hdev, HCI_DEV_REG);
2107         hci_dev_hold(hdev);
2108
2109         queue_work(hdev->req_workqueue, &hdev->power_on);
2110
2111         return id;
2112
2113 err_wqueue:
2114         destroy_workqueue(hdev->workqueue);
2115         destroy_workqueue(hdev->req_workqueue);
2116 err:
2117         ida_simple_remove(&hci_index_ida, hdev->id);
2118         write_lock(&hci_dev_list_lock);
2119         list_del(&hdev->list);
2120         write_unlock(&hci_dev_list_lock);
2121
2122         return error;
2123 }
2124 EXPORT_SYMBOL(hci_register_dev);
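
/* Minimal driver-side sketch of the registration life cycle (xx_open,
 * xx_close, xx_send and xx_data are hypothetical driver callbacks and
 * private data; error handling trimmed):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus = HCI_USB;
 *      hci_set_drvdata(hdev, xx_data);
 *      hdev->open  = xx_open;
 *      hdev->close = xx_close;
 *      hdev->send  = xx_send;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */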
2125
2126 /* Unregister HCI device */
2127 void hci_unregister_dev(struct hci_dev *hdev)
2128 {
2129         int i, id;
2130
2131         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2132
2133         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2134
2135         id = hdev->id;
2136
2137         write_lock(&hci_dev_list_lock);
2138         list_del(&hdev->list);
2139         write_unlock(&hci_dev_list_lock);
2140
2141         hci_dev_do_close(hdev);
2142
2143         for (i = 0; i < NUM_REASSEMBLY; i++)
2144                 kfree_skb(hdev->reassembly[i]);
2145
2146         cancel_work_sync(&hdev->power_on);
2147
2148         if (!test_bit(HCI_INIT, &hdev->flags) &&
2149             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2150                 hci_dev_lock(hdev);
2151                 mgmt_index_removed(hdev);
2152                 hci_dev_unlock(hdev);
2153         }
2154
2155         /* mgmt_index_removed should take care of emptying the
2156          * pending list */
2157         BUG_ON(!list_empty(&hdev->mgmt_pending));
2158
2159         hci_notify(hdev, HCI_DEV_UNREG);
2160
2161         if (hdev->rfkill) {
2162                 rfkill_unregister(hdev->rfkill);
2163                 rfkill_destroy(hdev->rfkill);
2164         }
2165
2166         hci_del_sysfs(hdev);
2167
2168         destroy_workqueue(hdev->workqueue);
2169         destroy_workqueue(hdev->req_workqueue);
2170
2171         hci_dev_lock(hdev);
2172         hci_blacklist_clear(hdev);
2173         hci_uuids_clear(hdev);
2174         hci_link_keys_clear(hdev);
2175         hci_smp_ltks_clear(hdev);
2176         hci_remote_oob_data_clear(hdev);
2177         hci_dev_unlock(hdev);
2178
2179         hci_dev_put(hdev);
2180
2181         ida_simple_remove(&hci_index_ida, id);
2182 }
2183 EXPORT_SYMBOL(hci_unregister_dev);
2184
2185 /* Suspend HCI device */
2186 int hci_suspend_dev(struct hci_dev *hdev)
2187 {
2188         hci_notify(hdev, HCI_DEV_SUSPEND);
2189         return 0;
2190 }
2191 EXPORT_SYMBOL(hci_suspend_dev);
2192
2193 /* Resume HCI device */
2194 int hci_resume_dev(struct hci_dev *hdev)
2195 {
2196         hci_notify(hdev, HCI_DEV_RESUME);
2197         return 0;
2198 }
2199 EXPORT_SYMBOL(hci_resume_dev);
2200
2201 /* Receive frame from HCI drivers */
2202 int hci_recv_frame(struct sk_buff *skb)
2203 {
2204         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2205         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2206                       !test_bit(HCI_INIT, &hdev->flags))) {
2207                 kfree_skb(skb);
2208                 return -ENXIO;
2209         }
2210
2211         /* Incoming skb */
2212         bt_cb(skb)->incoming = 1;
2213
2214         /* Time stamp */
2215         __net_timestamp(skb);
2216
2217         skb_queue_tail(&hdev->rx_q, skb);
2218         queue_work(hdev->workqueue, &hdev->rx_work);
2219
2220         return 0;
2221 }
2222 EXPORT_SYMBOL(hci_recv_frame);
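
/* Illustrative sketch of the driver RX path feeding this function: the
 * driver fills in skb->dev and the packet type before queueing the frame
 * to the core (buffer handling is transport specific):
 *
 *      skb->dev = (void *) hdev;
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      err = hci_recv_frame(skb);
 */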
2223
2224 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2225                           int count, __u8 index)
2226 {
2227         int len = 0;
2228         int hlen = 0;
2229         int remain = count;
2230         struct sk_buff *skb;
2231         struct bt_skb_cb *scb;
2232
2233         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2234             index >= NUM_REASSEMBLY)
2235                 return -EILSEQ;
2236
2237         skb = hdev->reassembly[index];
2238
2239         if (!skb) {
2240                 switch (type) {
2241                 case HCI_ACLDATA_PKT:
2242                         len = HCI_MAX_FRAME_SIZE;
2243                         hlen = HCI_ACL_HDR_SIZE;
2244                         break;
2245                 case HCI_EVENT_PKT:
2246                         len = HCI_MAX_EVENT_SIZE;
2247                         hlen = HCI_EVENT_HDR_SIZE;
2248                         break;
2249                 case HCI_SCODATA_PKT:
2250                         len = HCI_MAX_SCO_SIZE;
2251                         hlen = HCI_SCO_HDR_SIZE;
2252                         break;
2253                 }
2254
2255                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2256                 if (!skb)
2257                         return -ENOMEM;
2258
2259                 scb = (void *) skb->cb;
2260                 scb->expect = hlen;
2261                 scb->pkt_type = type;
2262
2263                 skb->dev = (void *) hdev;
2264                 hdev->reassembly[index] = skb;
2265         }
2266
2267         while (count) {
2268                 scb = (void *) skb->cb;
2269                 len = min_t(uint, scb->expect, count);
2270
2271                 memcpy(skb_put(skb, len), data, len);
2272
2273                 count -= len;
2274                 data += len;
2275                 scb->expect -= len;
2276                 remain = count;
2277
2278                 switch (type) {
2279                 case HCI_EVENT_PKT:
2280                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2281                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2282                                 scb->expect = h->plen;
2283
2284                                 if (skb_tailroom(skb) < scb->expect) {
2285                                         kfree_skb(skb);
2286                                         hdev->reassembly[index] = NULL;
2287                                         return -ENOMEM;
2288                                 }
2289                         }
2290                         break;
2291
2292                 case HCI_ACLDATA_PKT:
2293                         if (skb->len == HCI_ACL_HDR_SIZE) {
2294                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2295                                 scb->expect = __le16_to_cpu(h->dlen);
2296
2297                                 if (skb_tailroom(skb) < scb->expect) {
2298                                         kfree_skb(skb);
2299                                         hdev->reassembly[index] = NULL;
2300                                         return -ENOMEM;
2301                                 }
2302                         }
2303                         break;
2304
2305                 case HCI_SCODATA_PKT:
2306                         if (skb->len == HCI_SCO_HDR_SIZE) {
2307                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2308                                 scb->expect = h->dlen;
2309
2310                                 if (skb_tailroom(skb) < scb->expect) {
2311                                         kfree_skb(skb);
2312                                         hdev->reassembly[index] = NULL;
2313                                         return -ENOMEM;
2314                                 }
2315                         }
2316                         break;
2317                 }
2318
2319                 if (scb->expect == 0) {
2320                         /* Complete frame */
2321
2322                         bt_cb(skb)->pkt_type = type;
2323                         hci_recv_frame(skb);
2324
2325                         hdev->reassembly[index] = NULL;
2326                         return remain;
2327                 }
2328         }
2329
2330         return remain;
2331 }
2332
2333 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2334 {
2335         int rem = 0;
2336
2337         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2338                 return -EILSEQ;
2339
2340         while (count) {
2341                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2342                 if (rem < 0)
2343                         return rem;
2344
2345                 data += (count - rem);
2346                 count = rem;
2347         }
2348
2349         return rem;
2350 }
2351 EXPORT_SYMBOL(hci_recv_fragment);
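
/* Illustrative sketch: a transport that already knows the packet type of
 * each transfer (e.g. separate USB endpoints) can feed partial buffers
 * here and let the core reassemble full frames; a negative return means
 * the fragment stream could not be parsed:
 *
 *      err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *      if (err < 0)
 *              BT_ERR("%s ACL reassembly failed", hdev->name);
 */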
2352
2353 #define STREAM_REASSEMBLY 0
2354
2355 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2356 {
2357         int type;
2358         int rem = 0;
2359
2360         while (count) {
2361                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2362
2363                 if (!skb) {
2364                         struct { char type; } *pkt;
2365
2366                         /* Start of the frame */
2367                         pkt = data;
2368                         type = pkt->type;
2369
2370                         data++;
2371                         count--;
2372                 } else
2373                         type = bt_cb(skb)->pkt_type;
2374
2375                 rem = hci_reassembly(hdev, type, data, count,
2376                                      STREAM_REASSEMBLY);
2377                 if (rem < 0)
2378                         return rem;
2379
2380                 data += (count - rem);
2381                 count = rem;
2382         }
2383
2384         return rem;
2385 }
2386 EXPORT_SYMBOL(hci_recv_stream_fragment);
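
/* Illustrative sketch (hypothetical UART driver): with H:4 framing the
 * first byte of every frame carries the packet type, which is exactly
 * what the STREAM_REASSEMBLY slot strips before reassembling the body:
 *
 *      static void xx_uart_rx(struct hci_dev *hdev, u8 *data, int count)
 *      {
 *              if (hci_recv_stream_fragment(hdev, data, count) < 0)
 *                      BT_ERR("%s stream corrupted", hdev->name);
 *      }
 */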
2387
2388 /* ---- Interface to upper protocols ---- */
2389
2390 int hci_register_cb(struct hci_cb *cb)
2391 {
2392         BT_DBG("%p name %s", cb, cb->name);
2393
2394         write_lock(&hci_cb_list_lock);
2395         list_add(&cb->list, &hci_cb_list);
2396         write_unlock(&hci_cb_list_lock);
2397
2398         return 0;
2399 }
2400 EXPORT_SYMBOL(hci_register_cb);
2401
2402 int hci_unregister_cb(struct hci_cb *cb)
2403 {
2404         BT_DBG("%p name %s", cb, cb->name);
2405
2406         write_lock(&hci_cb_list_lock);
2407         list_del(&cb->list);
2408         write_unlock(&hci_cb_list_lock);
2409
2410         return 0;
2411 }
2412 EXPORT_SYMBOL(hci_unregister_cb);
2413
2414 static int hci_send_frame(struct sk_buff *skb)
2415 {
2416         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2417
2418         if (!hdev) {
2419                 kfree_skb(skb);
2420                 return -ENODEV;
2421         }
2422
2423         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2424
2425         /* Time stamp */
2426         __net_timestamp(skb);
2427
2428         /* Send copy to monitor */
2429         hci_send_to_monitor(hdev, skb);
2430
2431         if (atomic_read(&hdev->promisc)) {
2432                 /* Send copy to the sockets */
2433                 hci_send_to_sock(hdev, skb);
2434         }
2435
2436         /* Get rid of the skb owner prior to sending it to the driver. */
2437         skb_orphan(skb);
2438
2439         return hdev->send(skb);
2440 }
2441
2442 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2443 {
2444         skb_queue_head_init(&req->cmd_q);
2445         req->hdev = hdev;
2446 }
2447
2448 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2449 {
2450         struct hci_dev *hdev = req->hdev;
2451         struct sk_buff *skb;
2452         unsigned long flags;
2453
2454         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2455
2456         /* Do not allow empty requests */
2457         if (skb_queue_empty(&req->cmd_q))
2458                 return -EINVAL;
2459
2460         skb = skb_peek_tail(&req->cmd_q);
2461         bt_cb(skb)->req.complete = complete;
2462
2463         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2464         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2465         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2466
2467         queue_work(hdev->workqueue, &hdev->cmd_work);
2468
2469         return 0;
2470 }
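
/* Minimal sketch of the request API: queue one or more command skbs on
 * req.cmd_q and run them as a batch with a single completion callback
 * (reset_complete is a hypothetical hci_req_complete_t; the skb is built
 * with hci_prepare_cmd() below, as this file exposes no public queueing
 * helper yet):
 *
 *      struct hci_request req;
 *      struct sk_buff *skb;
 *
 *      hci_req_init(&req, hdev);
 *      skb = hci_prepare_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *      if (skb)
 *              skb_queue_tail(&req.cmd_q, skb);
 *      err = hci_req_run(&req, reset_complete);
 */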
2471
2472 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2473                                        u32 plen, void *param)
2474 {
2475         int len = HCI_COMMAND_HDR_SIZE + plen;
2476         struct hci_command_hdr *hdr;
2477         struct sk_buff *skb;
2478
2479         skb = bt_skb_alloc(len, GFP_ATOMIC);
2480         if (!skb)
2481                 return NULL;
2482
2483         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2484         hdr->opcode = cpu_to_le16(opcode);
2485         hdr->plen   = plen;
2486
2487         if (plen)
2488                 memcpy(skb_put(skb, plen), param, plen);
2489
2490         BT_DBG("skb len %d", skb->len);
2491
2492         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2493         skb->dev = (void *) hdev;
2494
2495         return skb;
2496 }
2497
2498 /* Send HCI command */
2499 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2500 {
2501         struct sk_buff *skb;
2502
2503         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2504
2505         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2506         if (!skb) {
2507                 BT_ERR("%s no memory for command", hdev->name);
2508                 return -ENOMEM;
2509         }
2510
2511         if (test_bit(HCI_INIT, &hdev->flags))
2512                 hdev->init_last_cmd = opcode;
2513
2514         skb_queue_tail(&hdev->cmd_q, skb);
2515         queue_work(hdev->workqueue, &hdev->cmd_work);
2516
2517         return 0;
2518 }
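
/* Illustrative example: plen is the parameter length and param may be
 * NULL when plen is 0.  Enabling page and inquiry scan takes a single
 * byte of parameters:
 *
 *      __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */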
2519
2520 /* Get data from the previously sent command */
2521 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2522 {
2523         struct hci_command_hdr *hdr;
2524
2525         if (!hdev->sent_cmd)
2526                 return NULL;
2527
2528         hdr = (void *) hdev->sent_cmd->data;
2529
2530         if (hdr->opcode != cpu_to_le16(opcode))
2531                 return NULL;
2532
2533         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2534
2535         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2536 }
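
/* Illustrative sketch (event handler side): recovering the parameters of
 * the command that a Command Complete/Status event refers to:
 *
 *      struct hci_cp_remote_name_req *cp;
 *
 *      cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
 *      if (cp)
 *              ... cp->bdaddr identifies the peer being resolved ...
 */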
2537
2538 /* Send ACL data */
2539 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2540 {
2541         struct hci_acl_hdr *hdr;
2542         int len = skb->len;
2543
2544         skb_push(skb, HCI_ACL_HDR_SIZE);
2545         skb_reset_transport_header(skb);
2546         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2547         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2548         hdr->dlen   = cpu_to_le16(len);
2549 }
2550
2551 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2552                           struct sk_buff *skb, __u16 flags)
2553 {
2554         struct hci_conn *conn = chan->conn;
2555         struct hci_dev *hdev = conn->hdev;
2556         struct sk_buff *list;
2557
2558         skb->len = skb_headlen(skb);
2559         skb->data_len = 0;
2560
2561         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2562
2563         switch (hdev->dev_type) {
2564         case HCI_BREDR:
2565                 hci_add_acl_hdr(skb, conn->handle, flags);
2566                 break;
2567         case HCI_AMP:
2568                 hci_add_acl_hdr(skb, chan->handle, flags);
2569                 break;
2570         default:
2571                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2572                 return;
2573         }
2574
2575         list = skb_shinfo(skb)->frag_list;
2576         if (!list) {
2577                 /* Non-fragmented */
2578                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2579
2580                 skb_queue_tail(queue, skb);
2581         } else {
2582                 /* Fragmented */
2583                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2584
2585                 skb_shinfo(skb)->frag_list = NULL;
2586
2587                 /* Queue all fragments atomically */
2588                 spin_lock(&queue->lock);
2589
2590                 __skb_queue_tail(queue, skb);
2591
2592                 flags &= ~ACL_START;
2593                 flags |= ACL_CONT;
2594                 do {
2595                         skb = list; list = list->next;
2596
2597                         skb->dev = (void *) hdev;
2598                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2599                         hci_add_acl_hdr(skb, conn->handle, flags);
2600
2601                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2602
2603                         __skb_queue_tail(queue, skb);
2604                 } while (list);
2605
2606                 spin_unlock(&queue->lock);
2607         }
2608 }
2609
2610 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2611 {
2612         struct hci_dev *hdev = chan->conn->hdev;
2613
2614         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2615
2616         skb->dev = (void *) hdev;
2617
2618         hci_queue_acl(chan, &chan->data_q, skb, flags);
2619
2620         queue_work(hdev->workqueue, &hdev->tx_work);
2621 }
2622
2623 /* Send SCO data */
2624 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2625 {
2626         struct hci_dev *hdev = conn->hdev;
2627         struct hci_sco_hdr hdr;
2628
2629         BT_DBG("%s len %d", hdev->name, skb->len);
2630
2631         hdr.handle = cpu_to_le16(conn->handle);
2632         hdr.dlen   = skb->len;
2633
2634         skb_push(skb, HCI_SCO_HDR_SIZE);
2635         skb_reset_transport_header(skb);
2636         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2637
2638         skb->dev = (void *) hdev;
2639         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2640
2641         skb_queue_tail(&conn->data_q, skb);
2642         queue_work(hdev->workqueue, &hdev->tx_work);
2643 }
2644
2645 /* ---- HCI TX task (outgoing data) ---- */
2646
2647 /* HCI Connection scheduler */
2648 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2649                                      int *quote)
2650 {
2651         struct hci_conn_hash *h = &hdev->conn_hash;
2652         struct hci_conn *conn = NULL, *c;
2653         unsigned int num = 0, min = ~0;
2654
2655         /* We don't have to lock the device here. Connections are always
2656          * added and removed with the TX task disabled. */
2657
2658         rcu_read_lock();
2659
2660         list_for_each_entry_rcu(c, &h->list, list) {
2661                 if (c->type != type || skb_queue_empty(&c->data_q))
2662                         continue;
2663
2664                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2665                         continue;
2666
2667                 num++;
2668
2669                 if (c->sent < min) {
2670                         min  = c->sent;
2671                         conn = c;
2672                 }
2673
2674                 if (hci_conn_num(hdev, type) == num)
2675                         break;
2676         }
2677
2678         rcu_read_unlock();
2679
2680         if (conn) {
2681                 int cnt, q;
2682
2683                 switch (conn->type) {
2684                 case ACL_LINK:
2685                         cnt = hdev->acl_cnt;
2686                         break;
2687                 case SCO_LINK:
2688                 case ESCO_LINK:
2689                         cnt = hdev->sco_cnt;
2690                         break;
2691                 case LE_LINK:
2692                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2693                         break;
2694                 default:
2695                         cnt = 0;
2696                         BT_ERR("Unknown link type");
2697                 }
2698
2699                 q = cnt / num;
2700                 *quote = q ? q : 1;
2701         } else
2702                 *quote = 0;
2703
2704         BT_DBG("conn %p quote %d", conn, *quote);
2705         return conn;
2706 }
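
/* Worked example of the quota above: with 8 free ACL buffers
 * (hdev->acl_cnt == 8) and num == 3 eligible connections, the chosen
 * least-sent connection may send q = 8 / 3 = 2 packets this round; the
 * "q ? q : 1" fallback still guarantees progress when cnt < num.
 */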
2707
2708 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2709 {
2710         struct hci_conn_hash *h = &hdev->conn_hash;
2711         struct hci_conn *c;
2712
2713         BT_ERR("%s link tx timeout", hdev->name);
2714
2715         rcu_read_lock();
2716
2717         /* Kill stalled connections */
2718         list_for_each_entry_rcu(c, &h->list, list) {
2719                 if (c->type == type && c->sent) {
2720                         BT_ERR("%s killing stalled connection %pMR",
2721                                hdev->name, &c->dst);
2722                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2723                 }
2724         }
2725
2726         rcu_read_unlock();
2727 }
2728
2729 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2730                                       int *quote)
2731 {
2732         struct hci_conn_hash *h = &hdev->conn_hash;
2733         struct hci_chan *chan = NULL;
2734         unsigned int num = 0, min = ~0, cur_prio = 0;
2735         struct hci_conn *conn;
2736         int cnt, q, conn_num = 0;
2737
2738         BT_DBG("%s", hdev->name);
2739
2740         rcu_read_lock();
2741
2742         list_for_each_entry_rcu(conn, &h->list, list) {
2743                 struct hci_chan *tmp;
2744
2745                 if (conn->type != type)
2746                         continue;
2747
2748                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2749                         continue;
2750
2751                 conn_num++;
2752
2753                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2754                         struct sk_buff *skb;
2755
2756                         if (skb_queue_empty(&tmp->data_q))
2757                                 continue;
2758
2759                         skb = skb_peek(&tmp->data_q);
2760                         if (skb->priority < cur_prio)
2761                                 continue;
2762
2763                         if (skb->priority > cur_prio) {
2764                                 num = 0;
2765                                 min = ~0;
2766                                 cur_prio = skb->priority;
2767                         }
2768
2769                         num++;
2770
2771                         if (conn->sent < min) {
2772                                 min  = conn->sent;
2773                                 chan = tmp;
2774                         }
2775                 }
2776
2777                 if (hci_conn_num(hdev, type) == conn_num)
2778                         break;
2779         }
2780
2781         rcu_read_unlock();
2782
2783         if (!chan)
2784                 return NULL;
2785
2786         switch (chan->conn->type) {
2787         case ACL_LINK:
2788                 cnt = hdev->acl_cnt;
2789                 break;
2790         case AMP_LINK:
2791                 cnt = hdev->block_cnt;
2792                 break;
2793         case SCO_LINK:
2794         case ESCO_LINK:
2795                 cnt = hdev->sco_cnt;
2796                 break;
2797         case LE_LINK:
2798                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2799                 break;
2800         default:
2801                 cnt = 0;
2802                 BT_ERR("Unknown link type");
2803         }
2804
2805         q = cnt / num;
2806         *quote = q ? q : 1;
2807         BT_DBG("chan %p quote %d", chan, *quote);
2808         return chan;
2809 }
2810
2811 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2812 {
2813         struct hci_conn_hash *h = &hdev->conn_hash;
2814         struct hci_conn *conn;
2815         int num = 0;
2816
2817         BT_DBG("%s", hdev->name);
2818
2819         rcu_read_lock();
2820
2821         list_for_each_entry_rcu(conn, &h->list, list) {
2822                 struct hci_chan *chan;
2823
2824                 if (conn->type != type)
2825                         continue;
2826
2827                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2828                         continue;
2829
2830                 num++;
2831
2832                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2833                         struct sk_buff *skb;
2834
2835                         if (chan->sent) {
2836                                 chan->sent = 0;
2837                                 continue;
2838                         }
2839
2840                         if (skb_queue_empty(&chan->data_q))
2841                                 continue;
2842
2843                         skb = skb_peek(&chan->data_q);
2844                         if (skb->priority >= HCI_PRIO_MAX - 1)
2845                                 continue;
2846
2847                         skb->priority = HCI_PRIO_MAX - 1;
2848
2849                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2850                                skb->priority);
2851                 }
2852
2853                 if (hci_conn_num(hdev, type) == num)
2854                         break;
2855         }
2856
2857         rcu_read_unlock();
2858
2859 }
2860
2861 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2862 {
2863         /* Calculate count of blocks used by this packet */
2864         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2865 }
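
/* Example: with hdev->block_len == 64, an ACL packet of 4 + 300 bytes
 * (header plus payload) occupies DIV_ROUND_UP(300, 64) == 5 controller
 * buffer blocks.
 */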
2866
2867 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2868 {
2869         if (!test_bit(HCI_RAW, &hdev->flags)) {
2870                 /* ACL tx timeout must be longer than the maximum
2871                  * link supervision timeout (40.9 seconds) */
2872                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2873                                        HCI_ACL_TX_TIMEOUT))
2874                         hci_link_tx_to(hdev, ACL_LINK);
2875         }
2876 }
2877
2878 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2879 {
2880         unsigned int cnt = hdev->acl_cnt;
2881         struct hci_chan *chan;
2882         struct sk_buff *skb;
2883         int quote;
2884
2885         __check_timeout(hdev, cnt);
2886
2887         while (hdev->acl_cnt &&
2888                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2889                 u32 priority = (skb_peek(&chan->data_q))->priority;
2890                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2891                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2892                                skb->len, skb->priority);
2893
2894                         /* Stop if priority has changed */
2895                         if (skb->priority < priority)
2896                                 break;
2897
2898                         skb = skb_dequeue(&chan->data_q);
2899
2900                         hci_conn_enter_active_mode(chan->conn,
2901                                                    bt_cb(skb)->force_active);
2902
2903                         hci_send_frame(skb);
2904                         hdev->acl_last_tx = jiffies;
2905
2906                         hdev->acl_cnt--;
2907                         chan->sent++;
2908                         chan->conn->sent++;
2909                 }
2910         }
2911
2912         if (cnt != hdev->acl_cnt)
2913                 hci_prio_recalculate(hdev, ACL_LINK);
2914 }
2915
2916 static void hci_sched_acl_blk(struct hci_dev *hdev)
2917 {
2918         unsigned int cnt = hdev->block_cnt;
2919         struct hci_chan *chan;
2920         struct sk_buff *skb;
2921         int quote;
2922         u8 type;
2923
2924         __check_timeout(hdev, cnt);
2925
2926         BT_DBG("%s", hdev->name);
2927
2928         if (hdev->dev_type == HCI_AMP)
2929                 type = AMP_LINK;
2930         else
2931                 type = ACL_LINK;
2932
2933         while (hdev->block_cnt > 0 &&
2934                (chan = hci_chan_sent(hdev, type, &quote))) {
2935                 u32 priority = (skb_peek(&chan->data_q))->priority;
2936                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2937                         int blocks;
2938
2939                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2940                                skb->len, skb->priority);
2941
2942                         /* Stop if priority has changed */
2943                         if (skb->priority < priority)
2944                                 break;
2945
2946                         skb = skb_dequeue(&chan->data_q);
2947
2948                         blocks = __get_blocks(hdev, skb);
2949                         if (blocks > hdev->block_cnt)
2950                                 return;
2951
2952                         hci_conn_enter_active_mode(chan->conn,
2953                                                    bt_cb(skb)->force_active);
2954
2955                         hci_send_frame(skb);
2956                         hdev->acl_last_tx = jiffies;
2957
2958                         hdev->block_cnt -= blocks;
2959                         quote -= blocks;
2960
2961                         chan->sent += blocks;
2962                         chan->conn->sent += blocks;
2963                 }
2964         }
2965
2966         if (cnt != hdev->block_cnt)
2967                 hci_prio_recalculate(hdev, type);
2968 }
2969
2970 static void hci_sched_acl(struct hci_dev *hdev)
2971 {
2972         BT_DBG("%s", hdev->name);
2973
2974         /* No ACL link over BR/EDR controller */
2975         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2976                 return;
2977
2978         /* No AMP link over AMP controller */
2979         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2980                 return;
2981
2982         switch (hdev->flow_ctl_mode) {
2983         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2984                 hci_sched_acl_pkt(hdev);
2985                 break;
2986
2987         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2988                 hci_sched_acl_blk(hdev);
2989                 break;
2990         }
2991 }
2992
2993 /* Schedule SCO */
2994 static void hci_sched_sco(struct hci_dev *hdev)
2995 {
2996         struct hci_conn *conn;
2997         struct sk_buff *skb;
2998         int quote;
2999
3000         BT_DBG("%s", hdev->name);
3001
3002         if (!hci_conn_num(hdev, SCO_LINK))
3003                 return;
3004
3005         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3006                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3007                         BT_DBG("skb %p len %d", skb, skb->len);
3008                         hci_send_frame(skb);
3009
3010                         conn->sent++;
3011                         if (conn->sent == ~0)
3012                                 conn->sent = 0;
3013                 }
3014         }
3015 }
3016
3017 static void hci_sched_esco(struct hci_dev *hdev)
3018 {
3019         struct hci_conn *conn;
3020         struct sk_buff *skb;
3021         int quote;
3022
3023         BT_DBG("%s", hdev->name);
3024
3025         if (!hci_conn_num(hdev, ESCO_LINK))
3026                 return;
3027
3028         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3029                                                      &quote))) {
3030                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3031                         BT_DBG("skb %p len %d", skb, skb->len);
3032                         hci_send_frame(skb);
3033
3034                         conn->sent++;
3035                         if (conn->sent == ~0)
3036                                 conn->sent = 0;
3037                 }
3038         }
3039 }
3040
3041 static void hci_sched_le(struct hci_dev *hdev)
3042 {
3043         struct hci_chan *chan;
3044         struct sk_buff *skb;
3045         int quote, cnt, tmp;
3046
3047         BT_DBG("%s", hdev->name);
3048
3049         if (!hci_conn_num(hdev, LE_LINK))
3050                 return;
3051
3052         if (!test_bit(HCI_RAW, &hdev->flags)) {
3053                 /* LE tx timeout must be longer than the maximum
3054                  * link supervision timeout (40.9 seconds) */
3055                 if (!hdev->le_cnt && hdev->le_pkts &&
3056                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3057                         hci_link_tx_to(hdev, LE_LINK);
3058         }
3059
3060         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3061         tmp = cnt;
3062         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3063                 u32 priority = (skb_peek(&chan->data_q))->priority;
3064                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3065                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3066                                skb->len, skb->priority);
3067
3068                         /* Stop if priority has changed */
3069                         if (skb->priority < priority)
3070                                 break;
3071
3072                         skb = skb_dequeue(&chan->data_q);
3073
3074                         hci_send_frame(skb);
3075                         hdev->le_last_tx = jiffies;
3076
3077                         cnt--;
3078                         chan->sent++;
3079                         chan->conn->sent++;
3080                 }
3081         }
3082
3083         if (hdev->le_pkts)
3084                 hdev->le_cnt = cnt;
3085         else
3086                 hdev->acl_cnt = cnt;
3087
3088         if (cnt != tmp)
3089                 hci_prio_recalculate(hdev, LE_LINK);
3090 }
3091
3092 static void hci_tx_work(struct work_struct *work)
3093 {
3094         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3095         struct sk_buff *skb;
3096
3097         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3098                hdev->sco_cnt, hdev->le_cnt);
3099
3100         /* Schedule queues and send pending frames to the HCI driver */
3101
3102         hci_sched_acl(hdev);
3103
3104         hci_sched_sco(hdev);
3105
3106         hci_sched_esco(hdev);
3107
3108         hci_sched_le(hdev);
3109
3110         /* Send next queued raw (unknown type) packet */
3111         while ((skb = skb_dequeue(&hdev->raw_q)))
3112                 hci_send_frame(skb);
3113 }
3114
3115 /* ----- HCI RX task (incoming data processing) ----- */
3116
3117 /* ACL data packet */
3118 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3119 {
3120         struct hci_acl_hdr *hdr = (void *) skb->data;
3121         struct hci_conn *conn;
3122         __u16 handle, flags;
3123
3124         skb_pull(skb, HCI_ACL_HDR_SIZE);
3125
3126         handle = __le16_to_cpu(hdr->handle);
3127         flags  = hci_flags(handle);
3128         handle = hci_handle(handle);
3129
3130         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3131                handle, flags);
3132
3133         hdev->stat.acl_rx++;
3134
3135         hci_dev_lock(hdev);
3136         conn = hci_conn_hash_lookup_handle(hdev, handle);
3137         hci_dev_unlock(hdev);
3138
3139         if (conn) {
3140                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3141
3142                 /* Send to upper protocol */
3143                 l2cap_recv_acldata(conn, skb, flags);
3144                 return;
3145         } else {
3146                 BT_ERR("%s ACL packet for unknown connection handle %d",
3147                        hdev->name, handle);
3148         }
3149
3150         kfree_skb(skb);
3151 }
3152
3153 /* SCO data packet */
3154 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3155 {
3156         struct hci_sco_hdr *hdr = (void *) skb->data;
3157         struct hci_conn *conn;
3158         __u16 handle;
3159
3160         skb_pull(skb, HCI_SCO_HDR_SIZE);
3161
3162         handle = __le16_to_cpu(hdr->handle);
3163
3164         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3165
3166         hdev->stat.sco_rx++;
3167
3168         hci_dev_lock(hdev);
3169         conn = hci_conn_hash_lookup_handle(hdev, handle);
3170         hci_dev_unlock(hdev);
3171
3172         if (conn) {
3173                 /* Send to upper protocol */
3174                 sco_recv_scodata(conn, skb);
3175                 return;
3176         } else {
3177                 BT_ERR("%s SCO packet for unknown connection handle %d",
3178                        hdev->name, handle);
3179         }
3180
3181         kfree_skb(skb);
3182 }
3183
3184 static void hci_rx_work(struct work_struct *work)
3185 {
3186         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3187         struct sk_buff *skb;
3188
3189         BT_DBG("%s", hdev->name);
3190
3191         while ((skb = skb_dequeue(&hdev->rx_q))) {
3192                 /* Send copy to monitor */
3193                 hci_send_to_monitor(hdev, skb);
3194
3195                 if (atomic_read(&hdev->promisc)) {
3196                         /* Send copy to the sockets */
3197                         hci_send_to_sock(hdev, skb);
3198                 }
3199
3200                 if (test_bit(HCI_RAW, &hdev->flags)) {
3201                         kfree_skb(skb);
3202                         continue;
3203                 }
3204
3205                 if (test_bit(HCI_INIT, &hdev->flags)) {
3206                         /* Don't process data packets in this state. */
3207                         switch (bt_cb(skb)->pkt_type) {
3208                         case HCI_ACLDATA_PKT:
3209                         case HCI_SCODATA_PKT:
3210                                 kfree_skb(skb);
3211                                 continue;
3212                         }
3213                 }
3214
3215                 /* Process frame */
3216                 switch (bt_cb(skb)->pkt_type) {
3217                 case HCI_EVENT_PKT:
3218                         BT_DBG("%s Event packet", hdev->name);
3219                         hci_event_packet(hdev, skb);
3220                         break;
3221
3222                 case HCI_ACLDATA_PKT:
3223                         BT_DBG("%s ACL data packet", hdev->name);
3224                         hci_acldata_packet(hdev, skb);
3225                         break;
3226
3227                 case HCI_SCODATA_PKT:
3228                         BT_DBG("%s SCO data packet", hdev->name);
3229                         hci_scodata_packet(hdev, skb);
3230                         break;
3231
3232                 default:
3233                         kfree_skb(skb);
3234                         break;
3235                 }
3236         }
3237 }
3238
3239 static void hci_cmd_work(struct work_struct *work)
3240 {
3241         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3242         struct sk_buff *skb;
3243
3244         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3245                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3246
3247         /* Send queued commands */
3248         if (atomic_read(&hdev->cmd_cnt)) {
3249                 skb = skb_dequeue(&hdev->cmd_q);
3250                 if (!skb)
3251                         return;
3252
3253                 kfree_skb(hdev->sent_cmd);
3254
3255                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3256                 if (hdev->sent_cmd) {
3257                         atomic_dec(&hdev->cmd_cnt);
3258                         hci_send_frame(skb);
3259                         if (test_bit(HCI_RESET, &hdev->flags))
3260                                 del_timer(&hdev->cmd_timer);
3261                         else
3262                                 mod_timer(&hdev->cmd_timer,
3263                                           jiffies + HCI_CMD_TIMEOUT);
3264                 } else {
3265                         skb_queue_head(&hdev->cmd_q, skb);
3266                         queue_work(hdev->workqueue, &hdev->cmd_work);
3267                 }
3268         }
3269 }
3270
3271 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3272 {
3273         /* General inquiry access code (GIAC) */
3274         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3275         struct hci_cp_inquiry cp;
3276
3277         BT_DBG("%s", hdev->name);
3278
3279         if (test_bit(HCI_INQUIRY, &hdev->flags))
3280                 return -EINPROGRESS;
3281
3282         inquiry_cache_flush(hdev);
3283
3284         memset(&cp, 0, sizeof(cp));
3285         memcpy(&cp.lap, lap, sizeof(cp.lap));
3286         cp.length  = length;
3287
3288         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3289 }
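
/* Illustrative example: the Inquiry_Length unit is 1.28 s, so requesting
 * length 0x08 scans for roughly 10.24 s:
 *
 *      err = hci_do_inquiry(hdev, 0x08);
 */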
3290
3291 int hci_cancel_inquiry(struct hci_dev *hdev)
3292 {
3293         BT_DBG("%s", hdev->name);
3294
3295         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3296                 return -EALREADY;
3297
3298         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3299 }
3300
3301 u8 bdaddr_to_le(u8 bdaddr_type)
3302 {
3303         switch (bdaddr_type) {
3304         case BDADDR_LE_PUBLIC:
3305                 return ADDR_LE_DEV_PUBLIC;
3306
3307         default:
3308                 /* Fallback to LE Random address type */
3309                 return ADDR_LE_DEV_RANDOM;
3310         }
3311 }