Bluetooth: Read stored link key information when powering on controller
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

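/* Lifecycle of hdev->req_status for the synchronous request machinery
 * below: a request starts out as HCI_REQ_PEND and is moved to
 * HCI_REQ_DONE by hci_req_sync_complete() or to HCI_REQ_CANCELED by
 * hci_req_cancel(), waking up the waiter in either case.
 */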
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
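/* Illustrative usage sketch (assuming debugfs is mounted at the
 * conventional /sys/kernel/debug and the controller is hci0):
 *
 *	# cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	N
 *	# echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" sends HCI_OP_ENABLE_DUT_MODE; writing "N" issues
 * HCI_OP_RESET, since resetting the controller is the only way out of
 * Device Under Test mode.
 */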

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

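/* Send a single HCI command and wait for either the given @event or,
 * when @event is 0, the Command Complete event for @opcode. On success
 * the returned skb has the event header (and, for Command Complete,
 * the hci_ev_cmd_complete header) already pulled; on failure an
 * ERR_PTR is returned. Callers must hold hdev->req_lock, hence the
 * double-underscore prefix.
 */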
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
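/* Illustrative caller sketch (not taken from this file): issue a
 * synchronous command under hdev->req_lock and turn the status byte of
 * the Command Complete parameters into an errno, the way
 * dut_mode_write() above does:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */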

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
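/* Illustrative caller sketch (not taken from this file): run a request
 * builder synchronously, e.g. enabling page and inquiry scan the way
 * the HCISETSCAN ioctl path does:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */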

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout of ~20 secs: 0x7d00 = 32000 slots
         * of 0.625 ms each, i.e. exactly 20 seconds.
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

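/* The event mask is a little-endian 64-bit bit field: events[i] holds
 * mask bits 8*i..8*i+7, and bit n enables the event with code n + 1.
 * For example, events[0] |= 0x10 sets bit 4, which enables the
 * Disconnection Complete event (code 0x05).
 */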
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is
         * supported, enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is
         * supported, enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If it is not supported, assume that
         * the controller does not have actual support for stored link
         * keys, which makes this command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (bredr_sc_enabled(hdev)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

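/* Run the staged controller initialization. Stage 1 is common to all
 * controller types; stages 2-4 only apply to BR/EDR capable (including
 * dual-mode) controllers and build on the capabilities discovered by
 * the earlier stages, e.g. hdev->commands and the feature bits filled
 * in from the stage 1 and 2 responses.
 */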
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev)) {
                hci_debugfs_create_le(hdev);
                smp_register(hdev);
        }

        return 0;
}

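/* Minimal init used for unconfigured controllers: just enough commands
 * to learn the HCI version and, when the driver provides a set_bdaddr
 * callback, the original device address.
 */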
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

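/* Re-insert @ie into the resolve list, keeping the list ordered by
 * signal strength: a smaller |RSSI| means a stronger signal and thus
 * an earlier position, so that name resolution is attempted for the
 * closest devices first. Entries whose resolution is already pending
 * are skipped when picking the insertion point.
 */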
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

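/* Handler for the HCIINQUIRY ioctl: run a new inquiry (or reuse a
 * sufficiently fresh inquiry cache) and copy the cached inquiry_info
 * entries back to user space.
 */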
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

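        /* ir.length is in units of 1.28 seconds of inquiry time, so
         * budgeting 2000 ms per unit gives the synchronous request a
         * comfortable margin over the baseband inquiry duration.
         */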
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

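/* Bring the controller up: power on the transport, run the setup and
 * init stages appropriate for the current configuration state, and
 * notify the management interface. Takes hdev->req_lock internally.
 */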
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                        ret = __hci_unconf_init(hdev);
        }

        if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
                    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
1535         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1536             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1537                 err = -EOPNOTSUPP;
1538                 goto done;
1539         }
1540
1541         /* We need to ensure that no other power on/off work is pending
1542          * before proceeding to call hci_dev_do_open. This is
1543          * particularly important if the setup procedure has not yet
1544          * completed.
1545          */
1546         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1547                 cancel_delayed_work(&hdev->power_off);
1548
1549         /* After this call it is guaranteed that the setup procedure
1550          * has finished. This means that error conditions like RFKILL
1551          * or the lack of a valid public or static random address apply.
1552          */
1553         flush_workqueue(hdev->req_workqueue);
1554
1555         /* For controllers not using the management interface that are
1556          * brought up using the legacy ioctl, set the HCI_BONDABLE bit
1557          * so that pairing works for them. Once the management interface
1558          * is in use this bit will be cleared again and userspace has
1559          * to explicitly enable it.
1560          */
1561         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1562             !test_bit(HCI_MGMT, &hdev->dev_flags))
1563                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1564
1565         err = hci_dev_do_open(hdev);
1566
1567 done:
1568         hci_dev_put(hdev);
1569         return err;
1570 }
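/* Example (illustrative only): userspace typically reaches
 * hci_dev_open() through the HCIDEVUP ioctl on a raw HCI control
 * socket. A minimal sketch, error handling omitted:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0)	// dev id 0 -> hci0
 *		perror("HCIDEVUP");
 *
 * Because no management interface is in use on this path, the
 * HCI_BONDABLE bit is set as described above.
 */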
1571
1572 /* This function requires the caller holds hdev->lock */
1573 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1574 {
1575         struct hci_conn_params *p;
1576
1577         list_for_each_entry(p, &hdev->le_conn_params, list) {
1578                 if (p->conn) {
1579                         hci_conn_drop(p->conn);
1580                         hci_conn_put(p->conn);
1581                         p->conn = NULL;
1582                 }
1583                 list_del_init(&p->action);
1584         }
1585
1586         BT_DBG("All LE pending actions cleared");
1587 }
1588
1589 static int hci_dev_do_close(struct hci_dev *hdev)
1590 {
1591         BT_DBG("%s %p", hdev->name, hdev);
1592
1593         cancel_delayed_work(&hdev->power_off);
1594
1595         hci_req_cancel(hdev, ENODEV);
1596         hci_req_lock(hdev);
1597
1598         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1599                 cancel_delayed_work_sync(&hdev->cmd_timer);
1600                 hci_req_unlock(hdev);
1601                 return 0;
1602         }
1603
1604         /* Flush RX and TX works */
1605         flush_work(&hdev->tx_work);
1606         flush_work(&hdev->rx_work);
1607
1608         if (hdev->discov_timeout > 0) {
1609                 cancel_delayed_work(&hdev->discov_off);
1610                 hdev->discov_timeout = 0;
1611                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1612                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1613         }
1614
1615         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1616                 cancel_delayed_work(&hdev->service_cache);
1617
1618         cancel_delayed_work_sync(&hdev->le_scan_disable);
1619
1620         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1621                 cancel_delayed_work_sync(&hdev->rpa_expired);
1622
1623         /* Avoid potential lockdep warnings from the *_flush() calls by
1624          * ensuring the workqueue is empty up front.
1625          */
1626         drain_workqueue(hdev->workqueue);
1627
1628         hci_dev_lock(hdev);
1629
1630         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1631                 if (hdev->dev_type == HCI_BREDR)
1632                         mgmt_powered(hdev, 0);
1633         }
1634
1635         hci_inquiry_cache_flush(hdev);
1636         hci_pend_le_actions_clear(hdev);
1637         hci_conn_hash_flush(hdev);
1638         hci_dev_unlock(hdev);
1639
1640         hci_notify(hdev, HCI_DEV_DOWN);
1641
1642         if (hdev->flush)
1643                 hdev->flush(hdev);
1644
1645         /* Reset device */
1646         skb_queue_purge(&hdev->cmd_q);
1647         atomic_set(&hdev->cmd_cnt, 1);
1648         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1649             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1650             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1651                 set_bit(HCI_INIT, &hdev->flags);
1652                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1653                 clear_bit(HCI_INIT, &hdev->flags);
1654         }
1655
1656         /* Flush cmd work */
1657         flush_work(&hdev->cmd_work);
1658
1659         /* Drop queues */
1660         skb_queue_purge(&hdev->rx_q);
1661         skb_queue_purge(&hdev->cmd_q);
1662         skb_queue_purge(&hdev->raw_q);
1663
1664         /* Drop last sent command */
1665         if (hdev->sent_cmd) {
1666                 cancel_delayed_work_sync(&hdev->cmd_timer);
1667                 kfree_skb(hdev->sent_cmd);
1668                 hdev->sent_cmd = NULL;
1669         }
1670
1671         kfree_skb(hdev->recv_evt);
1672         hdev->recv_evt = NULL;
1673
1674         /* After this point our queues are empty
1675          * and no tasks are scheduled. */
1676         hdev->close(hdev);
1677
1678         /* Clear flags */
1679         hdev->flags &= BIT(HCI_RAW);
1680         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1681
1682         /* Controller radio is available but is currently powered down */
1683         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1684
1685         memset(hdev->eir, 0, sizeof(hdev->eir));
1686         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1687         bacpy(&hdev->random_addr, BDADDR_ANY);
1688
1689         hci_req_unlock(hdev);
1690
1691         hci_dev_put(hdev);
1692         return 0;
1693 }
1694
1695 int hci_dev_close(__u16 dev)
1696 {
1697         struct hci_dev *hdev;
1698         int err;
1699
1700         hdev = hci_dev_get(dev);
1701         if (!hdev)
1702                 return -ENODEV;
1703
1704         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1705                 err = -EBUSY;
1706                 goto done;
1707         }
1708
1709         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1710                 cancel_delayed_work(&hdev->power_off);
1711
1712         err = hci_dev_do_close(hdev);
1713
1714 done:
1715         hci_dev_put(hdev);
1716         return err;
1717 }
1718
1719 int hci_dev_reset(__u16 dev)
1720 {
1721         struct hci_dev *hdev;
1722         int ret = 0;
1723
1724         hdev = hci_dev_get(dev);
1725         if (!hdev)
1726                 return -ENODEV;
1727
1728         hci_req_lock(hdev);
1729
1730         if (!test_bit(HCI_UP, &hdev->flags)) {
1731                 ret = -ENETDOWN;
1732                 goto done;
1733         }
1734
1735         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1736                 ret = -EBUSY;
1737                 goto done;
1738         }
1739
1740         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1741                 ret = -EOPNOTSUPP;
1742                 goto done;
1743         }
1744
1745         /* Drop queues */
1746         skb_queue_purge(&hdev->rx_q);
1747         skb_queue_purge(&hdev->cmd_q);
1748
1749         /* Avoid potential lockdep warnings from the *_flush() calls by
1750          * ensuring the workqueue is empty up front.
1751          */
1752         drain_workqueue(hdev->workqueue);
1753
1754         hci_dev_lock(hdev);
1755         hci_inquiry_cache_flush(hdev);
1756         hci_conn_hash_flush(hdev);
1757         hci_dev_unlock(hdev);
1758
1759         if (hdev->flush)
1760                 hdev->flush(hdev);
1761
1762         atomic_set(&hdev->cmd_cnt, 1);
1763         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1764
1765         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1766
1767 done:
1768         hci_req_unlock(hdev);
1769         hci_dev_put(hdev);
1770         return ret;
1771 }
1772
1773 int hci_dev_reset_stat(__u16 dev)
1774 {
1775         struct hci_dev *hdev;
1776         int ret = 0;
1777
1778         hdev = hci_dev_get(dev);
1779         if (!hdev)
1780                 return -ENODEV;
1781
1782         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1783                 ret = -EBUSY;
1784                 goto done;
1785         }
1786
1787         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1788                 ret = -EOPNOTSUPP;
1789                 goto done;
1790         }
1791
1792         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1793
1794 done:
1795         hci_dev_put(hdev);
1796         return ret;
1797 }
1798
1799 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1800 {
1801         bool conn_changed, discov_changed;
1802
1803         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1804
1805         if ((scan & SCAN_PAGE))
1806                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1807                                                  &hdev->dev_flags);
1808         else
1809                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1810                                                   &hdev->dev_flags);
1811
1812         if ((scan & SCAN_INQUIRY)) {
1813                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1814                                                    &hdev->dev_flags);
1815         } else {
1816                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1817                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1818                                                     &hdev->dev_flags);
1819         }
1820
1821         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1822                 return;
1823
1824         if (conn_changed || discov_changed) {
1825                 /* In case this was disabled through mgmt */
1826                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1827
1828                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1829                         mgmt_update_adv_data(hdev);
1830
1831                 mgmt_new_settings(hdev);
1832         }
1833 }
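/* For reference, the scan values handled above follow the HCI
 * Write Scan Enable encoding from hci.h:
 *
 *	SCAN_DISABLED             0x00	neither connectable nor discoverable
 *	SCAN_INQUIRY              0x01	discoverable only
 *	SCAN_PAGE                 0x02	connectable only
 *	SCAN_PAGE | SCAN_INQUIRY  0x03	connectable and discoverable
 */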
1834
1835 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1836 {
1837         struct hci_dev *hdev;
1838         struct hci_dev_req dr;
1839         int err = 0;
1840
1841         if (copy_from_user(&dr, arg, sizeof(dr)))
1842                 return -EFAULT;
1843
1844         hdev = hci_dev_get(dr.dev_id);
1845         if (!hdev)
1846                 return -ENODEV;
1847
1848         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1849                 err = -EBUSY;
1850                 goto done;
1851         }
1852
1853         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1854                 err = -EOPNOTSUPP;
1855                 goto done;
1856         }
1857
1858         if (hdev->dev_type != HCI_BREDR) {
1859                 err = -EOPNOTSUPP;
1860                 goto done;
1861         }
1862
1863         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1864                 err = -EOPNOTSUPP;
1865                 goto done;
1866         }
1867
1868         switch (cmd) {
1869         case HCISETAUTH:
1870                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871                                    HCI_INIT_TIMEOUT);
1872                 break;
1873
1874         case HCISETENCRYPT:
1875                 if (!lmp_encrypt_capable(hdev)) {
1876                         err = -EOPNOTSUPP;
1877                         break;
1878                 }
1879
1880                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1881                         /* Auth must be enabled first */
1882                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1883                                            HCI_INIT_TIMEOUT);
1884                         if (err)
1885                                 break;
1886                 }
1887
1888                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1889                                    HCI_INIT_TIMEOUT);
1890                 break;
1891
1892         case HCISETSCAN:
1893                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1894                                    HCI_INIT_TIMEOUT);
1895
1896                 /* Ensure that the connectable and discoverable states
1897                  * get correctly modified as this was a non-mgmt change.
1898                  */
1899                 if (!err)
1900                         hci_update_scan_state(hdev, dr.dev_opt);
1901                 break;
1902
1903         case HCISETLINKPOL:
1904                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1905                                    HCI_INIT_TIMEOUT);
1906                 break;
1907
1908         case HCISETLINKMODE:
1909                 hdev->link_mode = ((__u16) dr.dev_opt) &
1910                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1911                 break;
1912
1913         case HCISETPTYPE:
1914                 hdev->pkt_type = (__u16) dr.dev_opt;
1915                 break;
1916
1917         case HCISETACLMTU:
1918                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1919                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1920                 break;
1921
1922         case HCISETSCOMTU:
1923                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1924                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1925                 break;
1926
1927         default:
1928                 err = -EINVAL;
1929                 break;
1930         }
1931
1932 done:
1933         hci_dev_put(hdev);
1934         return err;
1935 }
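/* Example (illustrative only): the HCISETACLMTU case above reads two
 * 16-bit halves out of dr.dev_opt, so the layout depends on host byte
 * order. A minimal little-endian userspace sketch, with hypothetical
 * values and error handling omitted:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *	__u16 acl_pkts = 8, acl_mtu = 1021;
 *
 *	memcpy((__u16 *)&dr.dev_opt + 0, &acl_pkts, sizeof(__u16));
 *	memcpy((__u16 *)&dr.dev_opt + 1, &acl_mtu, sizeof(__u16));
 *	ioctl(ctl, HCISETACLMTU, &dr);	// ctl: raw HCI control socket
 */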
1936
1937 int hci_get_dev_list(void __user *arg)
1938 {
1939         struct hci_dev *hdev;
1940         struct hci_dev_list_req *dl;
1941         struct hci_dev_req *dr;
1942         int n = 0, size, err;
1943         __u16 dev_num;
1944
1945         if (get_user(dev_num, (__u16 __user *) arg))
1946                 return -EFAULT;
1947
1948         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1949                 return -EINVAL;
1950
1951         size = sizeof(*dl) + dev_num * sizeof(*dr);
1952
1953         dl = kzalloc(size, GFP_KERNEL);
1954         if (!dl)
1955                 return -ENOMEM;
1956
1957         dr = dl->dev_req;
1958
1959         read_lock(&hci_dev_list_lock);
1960         list_for_each_entry(hdev, &hci_dev_list, list) {
1961                 unsigned long flags = hdev->flags;
1962
1963                 /* When the auto-off is configured it means the transport
1964                  * is running, but in that case still indicate that the
1965                  * device is actually down.
1966                  */
1967                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1968                         flags &= ~BIT(HCI_UP);
1969
1970                 (dr + n)->dev_id  = hdev->id;
1971                 (dr + n)->dev_opt = flags;
1972
1973                 if (++n >= dev_num)
1974                         break;
1975         }
1976         read_unlock(&hci_dev_list_lock);
1977
1978         dl->dev_num = n;
1979         size = sizeof(*dl) + n * sizeof(*dr);
1980
1981         err = copy_to_user(arg, dl, size);
1982         kfree(dl);
1983
1984         return err ? -EFAULT : 0;
1985 }
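/* Example (illustrative only): a minimal userspace sketch of the
 * HCIGETDEVLIST ioctl served above, error handling omitted:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		       HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	ioctl(ctl, HCIGETDEVLIST, dl);	// ctl: raw HCI control socket
 *
 *	for (i = 0; i < dl->dev_num; i++)
 *		printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *		       dl->dev_req[i].dev_opt);
 */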
1986
1987 int hci_get_dev_info(void __user *arg)
1988 {
1989         struct hci_dev *hdev;
1990         struct hci_dev_info di;
1991         unsigned long flags;
1992         int err = 0;
1993
1994         if (copy_from_user(&di, arg, sizeof(di)))
1995                 return -EFAULT;
1996
1997         hdev = hci_dev_get(di.dev_id);
1998         if (!hdev)
1999                 return -ENODEV;
2000
2001         /* When the auto-off is configured it means the transport
2002          * is running, but in that case still indicate that the
2003          * device is actually down.
2004          */
2005         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2006                 flags = hdev->flags & ~BIT(HCI_UP);
2007         else
2008                 flags = hdev->flags;
2009
2010         strcpy(di.name, hdev->name);
2011         di.bdaddr   = hdev->bdaddr;
2012         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2013         di.flags    = flags;
2014         di.pkt_type = hdev->pkt_type;
2015         if (lmp_bredr_capable(hdev)) {
2016                 di.acl_mtu  = hdev->acl_mtu;
2017                 di.acl_pkts = hdev->acl_pkts;
2018                 di.sco_mtu  = hdev->sco_mtu;
2019                 di.sco_pkts = hdev->sco_pkts;
2020         } else {
2021                 di.acl_mtu  = hdev->le_mtu;
2022                 di.acl_pkts = hdev->le_pkts;
2023                 di.sco_mtu  = 0;
2024                 di.sco_pkts = 0;
2025         }
2026         di.link_policy = hdev->link_policy;
2027         di.link_mode   = hdev->link_mode;
2028
2029         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2030         memcpy(&di.features, &hdev->features, sizeof(di.features));
2031
2032         if (copy_to_user(arg, &di, sizeof(di)))
2033                 err = -EFAULT;
2034
2035         hci_dev_put(hdev);
2036
2037         return err;
2038 }
2039
2040 /* ---- Interface to HCI drivers ---- */
2041
2042 static int hci_rfkill_set_block(void *data, bool blocked)
2043 {
2044         struct hci_dev *hdev = data;
2045
2046         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2047
2048         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2049                 return -EBUSY;
2050
2051         if (blocked) {
2052                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2053                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2054                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2055                         hci_dev_do_close(hdev);
2056         } else {
2057                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2058         }
2059
2060         return 0;
2061 }
2062
2063 static const struct rfkill_ops hci_rfkill_ops = {
2064         .set_block = hci_rfkill_set_block,
2065 };
2066
2067 static void hci_power_on(struct work_struct *work)
2068 {
2069         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2070         int err;
2071
2072         BT_DBG("%s", hdev->name);
2073
2074         err = hci_dev_do_open(hdev);
2075         if (err < 0) {
2076                 hci_dev_lock(hdev);
2077                 mgmt_set_powered_failed(hdev, err);
2078                 hci_dev_unlock(hdev);
2079                 return;
2080         }
2081
2082         /* During the HCI setup phase, a few error conditions are
2083          * ignored and they need to be checked now. If they are still
2084          * valid, it is important to turn the device back off.
2085          */
2086         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2087             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2088             (hdev->dev_type == HCI_BREDR &&
2089              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2090              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2091                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2092                 hci_dev_do_close(hdev);
2093         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2094                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2095                                    HCI_AUTO_OFF_TIMEOUT);
2096         }
2097
2098         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2099                 /* For unconfigured devices, set the HCI_RAW flag
2100                  * so that userspace can easily identify them.
2101                  */
2102                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2103                         set_bit(HCI_RAW, &hdev->flags);
2104
2105                 /* For fully configured devices, this will send
2106                  * the Index Added event. For unconfigured devices,
2107                  * it will send the Unconfigured Index Added event.
2108                  *
2109                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2110                  * and no event will be sent.
2111                  */
2112                 mgmt_index_added(hdev);
2113         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2114                 /* Now that the controller is configured, it is
2115                  * important to clear the HCI_RAW flag.
2116                  */
2117                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2118                         clear_bit(HCI_RAW, &hdev->flags);
2119
2120                 /* Powering on the controller with HCI_CONFIG set only
2121                  * happens with the transition from unconfigured to
2122                  * configured. This will send the Index Added event.
2123                  */
2124                 mgmt_index_added(hdev);
2125         }
2126 }
2127
2128 static void hci_power_off(struct work_struct *work)
2129 {
2130         struct hci_dev *hdev = container_of(work, struct hci_dev,
2131                                             power_off.work);
2132
2133         BT_DBG("%s", hdev->name);
2134
2135         hci_dev_do_close(hdev);
2136 }
2137
2138 static void hci_discov_off(struct work_struct *work)
2139 {
2140         struct hci_dev *hdev;
2141
2142         hdev = container_of(work, struct hci_dev, discov_off.work);
2143
2144         BT_DBG("%s", hdev->name);
2145
2146         mgmt_discoverable_timeout(hdev);
2147 }
2148
2149 void hci_uuids_clear(struct hci_dev *hdev)
2150 {
2151         struct bt_uuid *uuid, *tmp;
2152
2153         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2154                 list_del(&uuid->list);
2155                 kfree(uuid);
2156         }
2157 }
2158
2159 void hci_link_keys_clear(struct hci_dev *hdev)
2160 {
2161         struct link_key *key;
2162
2163         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2164                 list_del_rcu(&key->list);
2165                 kfree_rcu(key, rcu);
2166         }
2167 }
2168
2169 void hci_smp_ltks_clear(struct hci_dev *hdev)
2170 {
2171         struct smp_ltk *k;
2172
2173         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2174                 list_del_rcu(&k->list);
2175                 kfree_rcu(k, rcu);
2176         }
2177 }
2178
2179 void hci_smp_irks_clear(struct hci_dev *hdev)
2180 {
2181         struct smp_irk *k;
2182
2183         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2184                 list_del_rcu(&k->list);
2185                 kfree_rcu(k, rcu);
2186         }
2187 }
2188
2189 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2190 {
2191         struct link_key *k;
2192
2193         rcu_read_lock();
2194         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2195                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2196                         rcu_read_unlock();
2197                         return k;
2198                 }
2199         }
2200         rcu_read_unlock();
2201
2202         return NULL;
2203 }
2204
2205 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2206                                u8 key_type, u8 old_key_type)
2207 {
2208         /* Legacy key */
2209         if (key_type < 0x03)
2210                 return true;
2211
2212         /* Debug keys are insecure so don't store them persistently */
2213         if (key_type == HCI_LK_DEBUG_COMBINATION)
2214                 return false;
2215
2216         /* Changed combination key and there's no previous one */
2217         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2218                 return false;
2219
2220         /* Security mode 3 case */
2221         if (!conn)
2222                 return true;
2223
2224         /* BR/EDR key derived using SC from an LE link */
2225         if (conn->type == LE_LINK)
2226                 return true;
2227
2228         /* Neither the local nor the remote side requested no-bonding */
2229         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2230                 return true;
2231
2232         /* Local side had dedicated bonding as requirement */
2233         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2234                 return true;
2235
2236         /* Remote side had dedicated bonding as requirement */
2237         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2238                 return true;
2239
2240         /* If none of the above criteria match, then don't store the key
2241          * persistently */
2242         return false;
2243 }
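/* For reference, the auth_type and remote_auth values tested above
 * are the HCI authentication requirements from hci.h:
 *
 *	HCI_AT_NO_BONDING               0x00
 *	HCI_AT_NO_BONDING_MITM          0x01
 *	HCI_AT_DEDICATED_BONDING        0x02
 *	HCI_AT_DEDICATED_BONDING_MITM   0x03
 *	HCI_AT_GENERAL_BONDING          0x04
 *	HCI_AT_GENERAL_BONDING_MITM     0x05
 *
 * so "> 0x01" means some form of bonding was requested, and 0x02/0x03
 * single out dedicated bonding.
 */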
2244
2245 static u8 ltk_role(u8 type)
2246 {
2247         if (type == SMP_LTK)
2248                 return HCI_ROLE_MASTER;
2249
2250         return HCI_ROLE_SLAVE;
2251 }
2252
2253 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2254                              u8 addr_type, u8 role)
2255 {
2256         struct smp_ltk *k;
2257
2258         rcu_read_lock();
2259         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2260                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2261                         continue;
2262
2263                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2264                         rcu_read_unlock();
2265                         return k;
2266                 }
2267         }
2268         rcu_read_unlock();
2269
2270         return NULL;
2271 }
2272
2273 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2274 {
2275         struct smp_irk *irk;
2276
2277         rcu_read_lock();
2278         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2279                 if (!bacmp(&irk->rpa, rpa)) {
2280                         rcu_read_unlock();
2281                         return irk;
2282                 }
2283         }
2284
2285         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2286                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2287                         bacpy(&irk->rpa, rpa);
2288                         rcu_read_unlock();
2289                         return irk;
2290                 }
2291         }
2292         rcu_read_unlock();
2293
2294         return NULL;
2295 }
2296
2297 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2298                                      u8 addr_type)
2299 {
2300         struct smp_irk *irk;
2301
2302         /* Identity Address must be public or static random */
2303         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2304                 return NULL;
2305
2306         rcu_read_lock();
2307         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2308                 if (addr_type == irk->addr_type &&
2309                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2310                         rcu_read_unlock();
2311                         return irk;
2312                 }
2313         }
2314         rcu_read_unlock();
2315
2316         return NULL;
2317 }
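/* Note on the static random address check above: bdaddr_t is stored
 * little-endian, so b[5] is the most significant byte, and a static
 * random address must have its two top bits set to 1 (Core spec,
 * Vol 6, Part B, Section 1.3). For example:
 *
 *	C0:11:22:33:44:55	top bits 11 -> static random, accepted
 *	7B:11:22:33:44:55	top bits 01 -> resolvable private, rejected
 */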
2318
2319 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2320                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2321                                   u8 pin_len, bool *persistent)
2322 {
2323         struct link_key *key, *old_key;
2324         u8 old_key_type;
2325
2326         old_key = hci_find_link_key(hdev, bdaddr);
2327         if (old_key) {
2328                 old_key_type = old_key->type;
2329                 key = old_key;
2330         } else {
2331                 old_key_type = conn ? conn->key_type : 0xff;
2332                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2333                 if (!key)
2334                         return NULL;
2335                 list_add_rcu(&key->list, &hdev->link_keys);
2336         }
2337
2338         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2339
2340         /* Some buggy controller combinations generate a changed
2341          * combination key for legacy pairing even when there's no
2342          * previous key */
2343         if (type == HCI_LK_CHANGED_COMBINATION &&
2344             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2345                 type = HCI_LK_COMBINATION;
2346                 if (conn)
2347                         conn->key_type = type;
2348         }
2349
2350         bacpy(&key->bdaddr, bdaddr);
2351         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2352         key->pin_len = pin_len;
2353
2354         if (type == HCI_LK_CHANGED_COMBINATION)
2355                 key->type = old_key_type;
2356         else
2357                 key->type = type;
2358
2359         if (persistent)
2360                 *persistent = hci_persistent_key(hdev, conn, type,
2361                                                  old_key_type);
2362
2363         return key;
2364 }
2365
2366 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2367                             u8 addr_type, u8 type, u8 authenticated,
2368                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2369 {
2370         struct smp_ltk *key, *old_key;
2371         u8 role = ltk_role(type);
2372
2373         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2374         if (old_key)
2375                 key = old_key;
2376         else {
2377                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2378                 if (!key)
2379                         return NULL;
2380                 list_add_rcu(&key->list, &hdev->long_term_keys);
2381         }
2382
2383         bacpy(&key->bdaddr, bdaddr);
2384         key->bdaddr_type = addr_type;
2385         memcpy(key->val, tk, sizeof(key->val));
2386         key->authenticated = authenticated;
2387         key->ediv = ediv;
2388         key->rand = rand;
2389         key->enc_size = enc_size;
2390         key->type = type;
2391
2392         return key;
2393 }
2394
2395 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2396                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2397 {
2398         struct smp_irk *irk;
2399
2400         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2401         if (!irk) {
2402                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2403                 if (!irk)
2404                         return NULL;
2405
2406                 bacpy(&irk->bdaddr, bdaddr);
2407                 irk->addr_type = addr_type;
2408
2409                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2410         }
2411
2412         memcpy(irk->val, val, 16);
2413         bacpy(&irk->rpa, rpa);
2414
2415         return irk;
2416 }
2417
2418 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2419 {
2420         struct link_key *key;
2421
2422         key = hci_find_link_key(hdev, bdaddr);
2423         if (!key)
2424                 return -ENOENT;
2425
2426         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2427
2428         list_del_rcu(&key->list);
2429         kfree_rcu(key, rcu);
2430
2431         return 0;
2432 }
2433
2434 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2435 {
2436         struct smp_ltk *k;
2437         int removed = 0;
2438
2439         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2440                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2441                         continue;
2442
2443                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444
2445                 list_del_rcu(&k->list);
2446                 kfree_rcu(k, rcu);
2447                 removed++;
2448         }
2449
2450         return removed ? 0 : -ENOENT;
2451 }
2452
2453 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2454 {
2455         struct smp_irk *k;
2456
2457         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2458                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2459                         continue;
2460
2461                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462
2463                 list_del_rcu(&k->list);
2464                 kfree_rcu(k, rcu);
2465         }
2466 }
2467
2468 /* HCI command timer function */
2469 static void hci_cmd_timeout(struct work_struct *work)
2470 {
2471         struct hci_dev *hdev = container_of(work, struct hci_dev,
2472                                             cmd_timer.work);
2473
2474         if (hdev->sent_cmd) {
2475                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2476                 u16 opcode = __le16_to_cpu(sent->opcode);
2477
2478                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2479         } else {
2480                 BT_ERR("%s command tx timeout", hdev->name);
2481         }
2482
2483         atomic_set(&hdev->cmd_cnt, 1);
2484         queue_work(hdev->workqueue, &hdev->cmd_work);
2485 }
2486
2487 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2488                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2489 {
2490         struct oob_data *data;
2491
2492         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2493                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2494                         continue;
2495                 if (data->bdaddr_type != bdaddr_type)
2496                         continue;
2497                 return data;
2498         }
2499
2500         return NULL;
2501 }
2502
2503 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2504                                u8 bdaddr_type)
2505 {
2506         struct oob_data *data;
2507
2508         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2509         if (!data)
2510                 return -ENOENT;
2511
2512         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2513
2514         list_del(&data->list);
2515         kfree(data);
2516
2517         return 0;
2518 }
2519
2520 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2521 {
2522         struct oob_data *data, *n;
2523
2524         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2525                 list_del(&data->list);
2526                 kfree(data);
2527         }
2528 }
2529
2530 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2531                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2532                             u8 *hash256, u8 *rand256)
2533 {
2534         struct oob_data *data;
2535
2536         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2537         if (!data) {
2538                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2539                 if (!data)
2540                         return -ENOMEM;
2541
2542                 bacpy(&data->bdaddr, bdaddr);
2543                 data->bdaddr_type = bdaddr_type;
2544                 list_add(&data->list, &hdev->remote_oob_data);
2545         }
2546
2547         if (hash192 && rand192) {
2548                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2549                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2550         } else {
2551                 memset(data->hash192, 0, sizeof(data->hash192));
2552                 memset(data->rand192, 0, sizeof(data->rand192));
2553         }
2554
2555         if (hash256 && rand256) {
2556                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2557                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2558         } else {
2559                 memset(data->hash256, 0, sizeof(data->hash256));
2560                 memset(data->rand256, 0, sizeof(data->rand256));
2561         }
2562
2563         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2564
2565         return 0;
2566 }
2567
2568 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2569                                          bdaddr_t *bdaddr, u8 type)
2570 {
2571         struct bdaddr_list *b;
2572
2573         list_for_each_entry(b, bdaddr_list, list) {
2574                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2575                         return b;
2576         }
2577
2578         return NULL;
2579 }
2580
2581 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2582 {
2583         struct list_head *p, *n;
2584
2585         list_for_each_safe(p, n, bdaddr_list) {
2586                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2587
2588                 list_del(p);
2589                 kfree(b);
2590         }
2591 }
2592
2593 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2594 {
2595         struct bdaddr_list *entry;
2596
2597         if (!bacmp(bdaddr, BDADDR_ANY))
2598                 return -EBADF;
2599
2600         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2601                 return -EEXIST;
2602
2603         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2604         if (!entry)
2605                 return -ENOMEM;
2606
2607         bacpy(&entry->bdaddr, bdaddr);
2608         entry->bdaddr_type = type;
2609
2610         list_add(&entry->list, list);
2611
2612         return 0;
2613 }
2614
2615 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2616 {
2617         struct bdaddr_list *entry;
2618
2619         if (!bacmp(bdaddr, BDADDR_ANY)) {
2620                 hci_bdaddr_list_clear(list);
2621                 return 0;
2622         }
2623
2624         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2625         if (!entry)
2626                 return -ENOENT;
2627
2628         list_del(&entry->list);
2629         kfree(entry);
2630
2631         return 0;
2632 }
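/* Example (illustrative only): thanks to the BDADDR_ANY special case
 * above, callers can remove one entry or wipe a whole list:
 *
 *	hci_bdaddr_list_del(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	hci_bdaddr_list_del(&hdev->whitelist, BDADDR_ANY, 0);	// clear all
 */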
2633
2634 /* This function requires the caller holds hdev->lock */
2635 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2636                                                bdaddr_t *addr, u8 addr_type)
2637 {
2638         struct hci_conn_params *params;
2639
2640         /* The conn params list only contains identity addresses */
2641         if (!hci_is_identity_address(addr, addr_type))
2642                 return NULL;
2643
2644         list_for_each_entry(params, &hdev->le_conn_params, list) {
2645                 if (bacmp(&params->addr, addr) == 0 &&
2646                     params->addr_type == addr_type) {
2647                         return params;
2648                 }
2649         }
2650
2651         return NULL;
2652 }
2653
2654 /* This function requires the caller holds hdev->lock */
2655 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2656                                                   bdaddr_t *addr, u8 addr_type)
2657 {
2658         struct hci_conn_params *param;
2659
2660         /* The list only contains identity addresses */
2661         if (!hci_is_identity_address(addr, addr_type))
2662                 return NULL;
2663
2664         list_for_each_entry(param, list, action) {
2665                 if (bacmp(&param->addr, addr) == 0 &&
2666                     param->addr_type == addr_type)
2667                         return param;
2668         }
2669
2670         return NULL;
2671 }
2672
2673 /* This function requires the caller holds hdev->lock */
2674 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2675                                             bdaddr_t *addr, u8 addr_type)
2676 {
2677         struct hci_conn_params *params;
2678
2679         if (!hci_is_identity_address(addr, addr_type))
2680                 return NULL;
2681
2682         params = hci_conn_params_lookup(hdev, addr, addr_type);
2683         if (params)
2684                 return params;
2685
2686         params = kzalloc(sizeof(*params), GFP_KERNEL);
2687         if (!params) {
2688                 BT_ERR("Out of memory");
2689                 return NULL;
2690         }
2691
2692         bacpy(&params->addr, addr);
2693         params->addr_type = addr_type;
2694
2695         list_add(&params->list, &hdev->le_conn_params);
2696         INIT_LIST_HEAD(&params->action);
2697
2698         params->conn_min_interval = hdev->le_conn_min_interval;
2699         params->conn_max_interval = hdev->le_conn_max_interval;
2700         params->conn_latency = hdev->le_conn_latency;
2701         params->supervision_timeout = hdev->le_supv_timeout;
2702         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2703
2704         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2705
2706         return params;
2707 }
2708
2709 static void hci_conn_params_free(struct hci_conn_params *params)
2710 {
2711         if (params->conn) {
2712                 hci_conn_drop(params->conn);
2713                 hci_conn_put(params->conn);
2714         }
2715
2716         list_del(&params->action);
2717         list_del(&params->list);
2718         kfree(params);
2719 }
2720
2721 /* This function requires the caller holds hdev->lock */
2722 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2723 {
2724         struct hci_conn_params *params;
2725
2726         params = hci_conn_params_lookup(hdev, addr, addr_type);
2727         if (!params)
2728                 return;
2729
2730         hci_conn_params_free(params);
2731
2732         hci_update_background_scan(hdev);
2733
2734         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2735 }
2736
2737 /* This function requires the caller holds hdev->lock */
2738 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2739 {
2740         struct hci_conn_params *params, *tmp;
2741
2742         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2743                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2744                         continue;
2745                 list_del(&params->list);
2746                 kfree(params);
2747         }
2748
2749         BT_DBG("All LE disabled connection parameters were removed");
2750 }
2751
2752 /* This function requires the caller holds hdev->lock */
2753 void hci_conn_params_clear_all(struct hci_dev *hdev)
2754 {
2755         struct hci_conn_params *params, *tmp;
2756
2757         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2758                 hci_conn_params_free(params);
2759
2760         hci_update_background_scan(hdev);
2761
2762         BT_DBG("All LE connection parameters were removed");
2763 }
2764
2765 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2766 {
2767         if (status) {
2768                 BT_ERR("Failed to start inquiry: status %d", status);
2769
2770                 hci_dev_lock(hdev);
2771                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2772                 hci_dev_unlock(hdev);
2773                 return;
2774         }
2775 }
2776
2777 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2778                                           u16 opcode)
2779 {
2780         /* General inquiry access code (GIAC) */
2781         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2782         struct hci_request req;
2783         struct hci_cp_inquiry cp;
2784         int err;
2785
2786         if (status) {
2787                 BT_ERR("Failed to disable LE scanning: status %d", status);
2788                 return;
2789         }
2790
2791         switch (hdev->discovery.type) {
2792         case DISCOV_TYPE_LE:
2793                 hci_dev_lock(hdev);
2794                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2795                 hci_dev_unlock(hdev);
2796                 break;
2797
2798         case DISCOV_TYPE_INTERLEAVED:
2799                 hci_req_init(&req, hdev);
2800
2801                 memset(&cp, 0, sizeof(cp));
2802                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2803                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2804                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2805
2806                 hci_dev_lock(hdev);
2807
2808                 hci_inquiry_cache_flush(hdev);
2809
2810                 err = hci_req_run(&req, inquiry_complete);
2811                 if (err) {
2812                         BT_ERR("Inquiry request failed: err %d", err);
2813                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2814                 }
2815
2816                 hci_dev_unlock(hdev);
2817                 break;
2818         }
2819 }
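/* Note: the lap[] bytes above are the General Inquiry Access Code
 * 0x9E8B33 in little-endian byte order, the layout HCI_OP_INQUIRY
 * expects on the wire.
 */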
2820
2821 static void le_scan_disable_work(struct work_struct *work)
2822 {
2823         struct hci_dev *hdev = container_of(work, struct hci_dev,
2824                                             le_scan_disable.work);
2825         struct hci_request req;
2826         int err;
2827
2828         BT_DBG("%s", hdev->name);
2829
2830         hci_req_init(&req, hdev);
2831
2832         hci_req_add_le_scan_disable(&req);
2833
2834         err = hci_req_run(&req, le_scan_disable_work_complete);
2835         if (err)
2836                 BT_ERR("Disable LE scanning request failed: err %d", err);
2837 }
2838
2839 /* Copy the Identity Address of the controller.
2840  *
2841  * If the controller has a public BD_ADDR, then by default use that one.
2842  * If this is an LE-only controller without a public address, default to
2843  * the static random address.
2844  *
2845  * For debugging purposes it is possible to force controllers with a
2846  * public address to use the static random address instead.
2847  *
2848  * In case BR/EDR has been disabled on a dual-mode controller and
2849  * userspace has configured a static address, then that address
2850  * becomes the identity address instead of the public BR/EDR address.
2851  */
2852 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2853                                u8 *bdaddr_type)
2854 {
2855         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2856             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2857             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2858              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2859                 bacpy(bdaddr, &hdev->static_addr);
2860                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2861         } else {
2862                 bacpy(bdaddr, &hdev->bdaddr);
2863                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2864         }
2865 }
2866
2867 /* Alloc HCI device */
2868 struct hci_dev *hci_alloc_dev(void)
2869 {
2870         struct hci_dev *hdev;
2871
2872         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2873         if (!hdev)
2874                 return NULL;
2875
2876         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2877         hdev->esco_type = (ESCO_HV1);
2878         hdev->link_mode = (HCI_LM_ACCEPT);
2879         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2880         hdev->io_capability = 0x03;     /* No Input No Output */
2881         hdev->manufacturer = 0xffff;    /* Default to internal use */
2882         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2883         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2884
2885         hdev->sniff_max_interval = 800;
2886         hdev->sniff_min_interval = 80;
2887
2888         hdev->le_adv_channel_map = 0x07;
2889         hdev->le_adv_min_interval = 0x0800;
2890         hdev->le_adv_max_interval = 0x0800;
2891         hdev->le_scan_interval = 0x0060;
2892         hdev->le_scan_window = 0x0030;
2893         hdev->le_conn_min_interval = 0x0028;
2894         hdev->le_conn_max_interval = 0x0038;
2895         hdev->le_conn_latency = 0x0000;
2896         hdev->le_supv_timeout = 0x002a;
2897         hdev->le_def_tx_len = 0x001b;
2898         hdev->le_def_tx_time = 0x0148;
2899         hdev->le_max_tx_len = 0x001b;
2900         hdev->le_max_tx_time = 0x0148;
2901         hdev->le_max_rx_len = 0x001b;
2902         hdev->le_max_rx_time = 0x0148;
2903
2904         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2905         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2906         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2907         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2908
2909         mutex_init(&hdev->lock);
2910         mutex_init(&hdev->req_lock);
2911
2912         INIT_LIST_HEAD(&hdev->mgmt_pending);
2913         INIT_LIST_HEAD(&hdev->blacklist);
2914         INIT_LIST_HEAD(&hdev->whitelist);
2915         INIT_LIST_HEAD(&hdev->uuids);
2916         INIT_LIST_HEAD(&hdev->link_keys);
2917         INIT_LIST_HEAD(&hdev->long_term_keys);
2918         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2919         INIT_LIST_HEAD(&hdev->remote_oob_data);
2920         INIT_LIST_HEAD(&hdev->le_white_list);
2921         INIT_LIST_HEAD(&hdev->le_conn_params);
2922         INIT_LIST_HEAD(&hdev->pend_le_conns);
2923         INIT_LIST_HEAD(&hdev->pend_le_reports);
2924         INIT_LIST_HEAD(&hdev->conn_hash.list);
2925
2926         INIT_WORK(&hdev->rx_work, hci_rx_work);
2927         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2928         INIT_WORK(&hdev->tx_work, hci_tx_work);
2929         INIT_WORK(&hdev->power_on, hci_power_on);
2930
2931         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2932         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2933         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2934
2935         skb_queue_head_init(&hdev->rx_q);
2936         skb_queue_head_init(&hdev->cmd_q);
2937         skb_queue_head_init(&hdev->raw_q);
2938
2939         init_waitqueue_head(&hdev->req_wait_q);
2940
2941         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2942
2943         hci_init_sysfs(hdev);
2944         discovery_init(hdev);
2945
2946         return hdev;
2947 }
2948 EXPORT_SYMBOL(hci_alloc_dev);
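/* For reference, the LE defaults assigned in hci_alloc_dev() follow
 * the usual HCI units:
 *
 *	scan interval/window (0.625 ms units):	0x0060 = 60 ms, 0x0030 = 30 ms
 *	adv min/max interval (0.625 ms units):	0x0800 = 1.28 s
 *	conn min/max interval (1.25 ms units):	0x0028 = 50 ms, 0x0038 = 70 ms
 *	supervision timeout (10 ms units):	0x002a = 420 ms
 */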
2949
2950 /* Free HCI device */
2951 void hci_free_dev(struct hci_dev *hdev)
2952 {
2953         /* will free via device release */
2954         put_device(&hdev->dev);
2955 }
2956 EXPORT_SYMBOL(hci_free_dev);
2957
2958 /* Register HCI device */
2959 int hci_register_dev(struct hci_dev *hdev)
2960 {
2961         int id, error;
2962
2963         if (!hdev->open || !hdev->close || !hdev->send)
2964                 return -EINVAL;
2965
2966         /* Do not allow HCI_AMP devices to register at index 0,
2967          * so the index can be used as the AMP controller ID.
2968          */
2969         switch (hdev->dev_type) {
2970         case HCI_BREDR:
2971                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2972                 break;
2973         case HCI_AMP:
2974                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2975                 break;
2976         default:
2977                 return -EINVAL;
2978         }
2979
2980         if (id < 0)
2981                 return id;
2982
2983         sprintf(hdev->name, "hci%d", id);
2984         hdev->id = id;
2985
2986         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2987
2988         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2989                                           WQ_MEM_RECLAIM, 1, hdev->name);
2990         if (!hdev->workqueue) {
2991                 error = -ENOMEM;
2992                 goto err;
2993         }
2994
2995         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2996                                               WQ_MEM_RECLAIM, 1, hdev->name);
2997         if (!hdev->req_workqueue) {
2998                 destroy_workqueue(hdev->workqueue);
2999                 error = -ENOMEM;
3000                 goto err;
3001         }
3002
3003         if (!IS_ERR_OR_NULL(bt_debugfs))
3004                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3005
3006         dev_set_name(&hdev->dev, "%s", hdev->name);
3007
3008         error = device_add(&hdev->dev);
3009         if (error < 0)
3010                 goto err_wqueue;
3011
3012         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3013                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3014                                     hdev);
3015         if (hdev->rfkill) {
3016                 if (rfkill_register(hdev->rfkill) < 0) {
3017                         rfkill_destroy(hdev->rfkill);
3018                         hdev->rfkill = NULL;
3019                 }
3020         }
3021
3022         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3023                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3024
3025         set_bit(HCI_SETUP, &hdev->dev_flags);
3026         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3027
3028         if (hdev->dev_type == HCI_BREDR) {
3029                 /* Assume BR/EDR support until proven otherwise (such as
3030                  * through reading supported features during init).
3031                  */
3032                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3033         }
3034
3035         write_lock(&hci_dev_list_lock);
3036         list_add(&hdev->list, &hci_dev_list);
3037         write_unlock(&hci_dev_list_lock);
3038
3039         /* Devices that are marked for raw-only usage are unconfigured
3040          * and should not be included in normal operation.
3041          */
3042         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3043                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3044
3045         hci_notify(hdev, HCI_DEV_REG);
3046         hci_dev_hold(hdev);
3047
3048         queue_work(hdev->req_workqueue, &hdev->power_on);
3049
3050         return id;
3051
3052 err_wqueue:
3053         destroy_workqueue(hdev->workqueue);
3054         destroy_workqueue(hdev->req_workqueue);
3055 err:
3056         ida_simple_remove(&hci_index_ida, hdev->id);
3057
3058         return error;
3059 }
3060 EXPORT_SYMBOL(hci_register_dev);
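/* Example (illustrative only): a minimal driver-side registration
 * sketch, assuming hypothetical my_open/my_close/my_send callbacks;
 * error handling beyond the register call is omitted:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	int err;
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */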
3061
3062 /* Unregister HCI device */
3063 void hci_unregister_dev(struct hci_dev *hdev)
3064 {
3065         int i, id;
3066
3067         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3068
3069         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3070
3071         id = hdev->id;
3072
3073         write_lock(&hci_dev_list_lock);
3074         list_del(&hdev->list);
3075         write_unlock(&hci_dev_list_lock);
3076
3077         hci_dev_do_close(hdev);
3078
3079         for (i = 0; i < NUM_REASSEMBLY; i++)
3080                 kfree_skb(hdev->reassembly[i]);
3081
3082         cancel_work_sync(&hdev->power_on);
3083
3084         if (!test_bit(HCI_INIT, &hdev->flags) &&
3085             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3086             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3087                 hci_dev_lock(hdev);
3088                 mgmt_index_removed(hdev);
3089                 hci_dev_unlock(hdev);
3090         }
3091
3092         /* mgmt_index_removed should take care of emptying the
3093          * pending list */
3094         BUG_ON(!list_empty(&hdev->mgmt_pending));
3095
3096         hci_notify(hdev, HCI_DEV_UNREG);
3097
3098         if (hdev->rfkill) {
3099                 rfkill_unregister(hdev->rfkill);
3100                 rfkill_destroy(hdev->rfkill);
3101         }
3102
3103         smp_unregister(hdev);
3104
3105         device_del(&hdev->dev);
3106
3107         debugfs_remove_recursive(hdev->debugfs);
3108
3109         destroy_workqueue(hdev->workqueue);
3110         destroy_workqueue(hdev->req_workqueue);
3111
3112         hci_dev_lock(hdev);
3113         hci_bdaddr_list_clear(&hdev->blacklist);
3114         hci_bdaddr_list_clear(&hdev->whitelist);
3115         hci_uuids_clear(hdev);
3116         hci_link_keys_clear(hdev);
3117         hci_smp_ltks_clear(hdev);
3118         hci_smp_irks_clear(hdev);
3119         hci_remote_oob_data_clear(hdev);
3120         hci_bdaddr_list_clear(&hdev->le_white_list);
3121         hci_conn_params_clear_all(hdev);
3122         hci_discovery_filter_clear(hdev);
3123         hci_dev_unlock(hdev);
3124
3125         hci_dev_put(hdev);
3126
3127         ida_simple_remove(&hci_index_ida, id);
3128 }
3129 EXPORT_SYMBOL(hci_unregister_dev);
3130
3131 /* Suspend HCI device */
3132 int hci_suspend_dev(struct hci_dev *hdev)
3133 {
3134         hci_notify(hdev, HCI_DEV_SUSPEND);
3135         return 0;
3136 }
3137 EXPORT_SYMBOL(hci_suspend_dev);
3138
3139 /* Resume HCI device */
3140 int hci_resume_dev(struct hci_dev *hdev)
3141 {
3142         hci_notify(hdev, HCI_DEV_RESUME);
3143         return 0;
3144 }
3145 EXPORT_SYMBOL(hci_resume_dev);
3146
3147 /* Reset HCI device */
3148 int hci_reset_dev(struct hci_dev *hdev)
3149 {
3150         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3151         struct sk_buff *skb;
3152
3153         skb = bt_skb_alloc(3, GFP_ATOMIC);
3154         if (!skb)
3155                 return -ENOMEM;
3156
3157         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3158         memcpy(skb_put(skb, 3), hw_err, 3);
3159
3160         /* Send Hardware Error to upper stack */
3161         return hci_recv_frame(hdev, skb);
3162 }
3163 EXPORT_SYMBOL(hci_reset_dev);
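/* Note: the 3-byte frame injected by hci_reset_dev() is a complete
 * HCI event packet: event code HCI_EV_HARDWARE_ERROR (0x10), parameter
 * length 0x01, and a single hardware code byte of 0x00.
 */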
3164
3165 /* Receive frame from HCI drivers */
3166 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3167 {
3168         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3169                       !test_bit(HCI_INIT, &hdev->flags))) {
3170                 kfree_skb(skb);
3171                 return -ENXIO;
3172         }
3173
3174         /* Incoming skb */
3175         bt_cb(skb)->incoming = 1;
3176
3177         /* Time stamp */
3178         __net_timestamp(skb);
3179
3180         skb_queue_tail(&hdev->rx_q, skb);
3181         queue_work(hdev->workqueue, &hdev->rx_work);
3182
3183         return 0;
3184 }
3185 EXPORT_SYMBOL(hci_recv_frame);
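
/* Example (sketch, my_* names hypothetical): how a vendor transport
 * hands a fully framed packet to the core once it has been pulled off
 * the hardware.
 *
 *	static int my_driver_deliver_event(struct hci_dev *hdev,
 *					   struct sk_buff *skb)
 *	{
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		return hci_recv_frame(hdev, skb);
 *	}
 */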
3186
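/* Reassemble one packet of the given type from a raw byte stream.
 * Returns the number of input bytes left unconsumed, or a negative
 * errno: -EILSEQ for an invalid type/index, -ENOMEM when allocation
 * fails or the announced payload would overflow the buffer.  A
 * completed packet is handed to hci_recv_frame().
 */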
3187 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3188                           int count, __u8 index)
3189 {
3190         int len = 0;
3191         int hlen = 0;
3192         int remain = count;
3193         struct sk_buff *skb;
3194         struct bt_skb_cb *scb;
3195
3196         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3197             index >= NUM_REASSEMBLY)
3198                 return -EILSEQ;
3199
3200         skb = hdev->reassembly[index];
3201
3202         if (!skb) {
3203                 switch (type) {
3204                 case HCI_ACLDATA_PKT:
3205                         len = HCI_MAX_FRAME_SIZE;
3206                         hlen = HCI_ACL_HDR_SIZE;
3207                         break;
3208                 case HCI_EVENT_PKT:
3209                         len = HCI_MAX_EVENT_SIZE;
3210                         hlen = HCI_EVENT_HDR_SIZE;
3211                         break;
3212                 case HCI_SCODATA_PKT:
3213                         len = HCI_MAX_SCO_SIZE;
3214                         hlen = HCI_SCO_HDR_SIZE;
3215                         break;
3216                 }
3217
3218                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3219                 if (!skb)
3220                         return -ENOMEM;
3221
3222                 scb = (void *) skb->cb;
3223                 scb->expect = hlen;
3224                 scb->pkt_type = type;
3225
3226                 hdev->reassembly[index] = skb;
3227         }
3228
3229         while (count) {
3230                 scb = (void *) skb->cb;
3231                 len = min_t(uint, scb->expect, count);
3232
3233                 memcpy(skb_put(skb, len), data, len);
3234
3235                 count -= len;
3236                 data += len;
3237                 scb->expect -= len;
3238                 remain = count;
3239
3240                 switch (type) {
3241                 case HCI_EVENT_PKT:
3242                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3243                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3244                                 scb->expect = h->plen;
3245
3246                                 if (skb_tailroom(skb) < scb->expect) {
3247                                         kfree_skb(skb);
3248                                         hdev->reassembly[index] = NULL;
3249                                         return -ENOMEM;
3250                                 }
3251                         }
3252                         break;
3253
3254                 case HCI_ACLDATA_PKT:
3255                         if (skb->len == HCI_ACL_HDR_SIZE) {
3256                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3257                                 scb->expect = __le16_to_cpu(h->dlen);
3258
3259                                 if (skb_tailroom(skb) < scb->expect) {
3260                                         kfree_skb(skb);
3261                                         hdev->reassembly[index] = NULL;
3262                                         return -ENOMEM;
3263                                 }
3264                         }
3265                         break;
3266
3267                 case HCI_SCODATA_PKT:
3268                         if (skb->len == HCI_SCO_HDR_SIZE) {
3269                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3270                                 scb->expect = h->dlen;
3271
3272                                 if (skb_tailroom(skb) < scb->expect) {
3273                                         kfree_skb(skb);
3274                                         hdev->reassembly[index] = NULL;
3275                                         return -ENOMEM;
3276                                 }
3277                         }
3278                         break;
3279                 }
3280
3281                 if (scb->expect == 0) {
3282                         /* Complete frame */
3283
3284                         bt_cb(skb)->pkt_type = type;
3285                         hci_recv_frame(hdev, skb);
3286
3287                         hdev->reassembly[index] = NULL;
3288                         return remain;
3289                 }
3290         }
3291
3292         return remain;
3293 }
3294
3295 #define STREAM_REASSEMBLY 0
3296
3297 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3298 {
3299         int type;
3300         int rem = 0;
3301
3302         while (count) {
3303                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3304
3305                 if (!skb) {
3306                         struct { char type; } *pkt;
3307
3308                         /* Start of the frame */
3309                         pkt = data;
3310                         type = pkt->type;
3311
3312                         data++;
3313                         count--;
                } else {
                        type = bt_cb(skb)->pkt_type;
                }
3316
3317                 rem = hci_reassembly(hdev, type, data, count,
3318                                      STREAM_REASSEMBLY);
3319                 if (rem < 0)
3320                         return rem;
3321
3322                 data += (count - rem);
3323                 count = rem;
3324         }
3325
3326         return rem;
3327 }
3328 EXPORT_SYMBOL(hci_recv_stream_fragment);
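
/* Example (sketch): a UART-style driver with no framing of its own can
 * push received bytes straight through; the leading byte of each frame
 * is taken as the packet-type indicator, as in the loop above.  The
 * my_uart_rx name is hypothetical.
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("%s stream reassembly failed", hdev->name);
 *	}
 */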
3329
3330 /* ---- Interface to upper protocols ---- */
3331
3332 int hci_register_cb(struct hci_cb *cb)
3333 {
3334         BT_DBG("%p name %s", cb, cb->name);
3335
3336         write_lock(&hci_cb_list_lock);
3337         list_add(&cb->list, &hci_cb_list);
3338         write_unlock(&hci_cb_list_lock);
3339
3340         return 0;
3341 }
3342 EXPORT_SYMBOL(hci_register_cb);
3343
3344 int hci_unregister_cb(struct hci_cb *cb)
3345 {
3346         BT_DBG("%p name %s", cb, cb->name);
3347
3348         write_lock(&hci_cb_list_lock);
3349         list_del(&cb->list);
3350         write_unlock(&hci_cb_list_lock);
3351
3352         return 0;
3353 }
3354 EXPORT_SYMBOL(hci_unregister_cb);
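
/* Example (sketch): an upper protocol hooks into the core for the
 * lifetime of its module.  Only .name and the list linkage of struct
 * hci_cb are visible in this file; anything else is assumed.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */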
3355
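/* Hand one outgoing frame to the driver: timestamp it, mirror it to the
 * monitor socket and, in promiscuous mode, to raw HCI sockets, then
 * orphan the skb so driver-side queueing cannot pin the sending socket.
 */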
3356 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3357 {
3358         int err;
3359
3360         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3361
3362         /* Time stamp */
3363         __net_timestamp(skb);
3364
3365         /* Send copy to monitor */
3366         hci_send_to_monitor(hdev, skb);
3367
3368         if (atomic_read(&hdev->promisc)) {
3369                 /* Send copy to the sockets */
3370                 hci_send_to_sock(hdev, skb);
3371         }
3372
3373         /* Get rid of the skb owner prior to sending to the driver. */
3374         skb_orphan(skb);
3375
3376         err = hdev->send(hdev, skb);
3377         if (err < 0) {
3378                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3379                 kfree_skb(skb);
3380         }
3381 }
3382
3383 bool hci_req_pending(struct hci_dev *hdev)
3384 {
3385         return hdev->req_status == HCI_REQ_PEND;
3386 }
3387
3388 /* Send HCI command */
3389 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3390                  const void *param)
3391 {
3392         struct sk_buff *skb;
3393
3394         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3395
3396         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3397         if (!skb) {
3398                 BT_ERR("%s no memory for command", hdev->name);
3399                 return -ENOMEM;
3400         }
3401
3402         /* Stand-alone HCI commands must be flagged as
3403          * single-command requests.
3404          */
3405         bt_cb(skb)->req.start = true;
3406
3407         skb_queue_tail(&hdev->cmd_q, skb);
3408         queue_work(hdev->workqueue, &hdev->cmd_work);
3409
3410         return 0;
3411 }
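
/* Example (sketch): queueing a stand-alone command, here the parameter-
 * less HCI_OP_RESET.  Completion is reported asynchronously through the
 * event path; only queueing failures are reported here.
 *
 *	int err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err)
 *		BT_ERR("reset could not be queued (%d)", err);
 */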
3412
3413 /* Get data from the previously sent command */
3414 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3415 {
3416         struct hci_command_hdr *hdr;
3417
3418         if (!hdev->sent_cmd)
3419                 return NULL;
3420
3421         hdr = (void *) hdev->sent_cmd->data;
3422
3423         if (hdr->opcode != cpu_to_le16(opcode))
3424                 return NULL;
3425
3426         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3427
3428         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3429 }
3430
3431 /* Send ACL data */
3432 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3433 {
3434         struct hci_acl_hdr *hdr;
3435         int len = skb->len;
3436
3437         skb_push(skb, HCI_ACL_HDR_SIZE);
3438         skb_reset_transport_header(skb);
3439         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3440         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3441         hdr->dlen   = cpu_to_le16(len);
3442 }
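
/* The 12-bit connection handle and the 4-bit packet-boundary/broadcast
 * flags share the 16-bit handle field: hci_handle_pack() merges them
 * here, and hci_handle()/hci_flags() split them again on the receive
 * path in hci_acldata_packet() below.
 */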
3443
3444 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3445                           struct sk_buff *skb, __u16 flags)
3446 {
3447         struct hci_conn *conn = chan->conn;
3448         struct hci_dev *hdev = conn->hdev;
3449         struct sk_buff *list;
3450
3451         skb->len = skb_headlen(skb);
3452         skb->data_len = 0;
3453
3454         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3455
3456         switch (hdev->dev_type) {
3457         case HCI_BREDR:
3458                 hci_add_acl_hdr(skb, conn->handle, flags);
3459                 break;
3460         case HCI_AMP:
3461                 hci_add_acl_hdr(skb, chan->handle, flags);
3462                 break;
3463         default:
3464                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3465                 return;
3466         }
3467
3468         list = skb_shinfo(skb)->frag_list;
3469         if (!list) {
3470                 /* Non-fragmented */
3471                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3472
3473                 skb_queue_tail(queue, skb);
3474         } else {
3475                 /* Fragmented */
3476                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3477
3478                 skb_shinfo(skb)->frag_list = NULL;
3479
3480                 /* Queue all fragments atomically. We need spin_lock_bh
3481                  * here because with 6LoWPAN links this function can be
3482                  * called from softirq context, where taking the plain
3483                  * spin lock could deadlock.
3484                  */
3485                 spin_lock_bh(&queue->lock);
3486
3487                 __skb_queue_tail(queue, skb);
3488
3489                 flags &= ~ACL_START;
3490                 flags |= ACL_CONT;
3491                 do {
                        skb = list;
                        list = list->next;
3493
3494                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3495                         hci_add_acl_hdr(skb, conn->handle, flags);
3496
3497                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3498
3499                         __skb_queue_tail(queue, skb);
3500                 } while (list);
3501
3502                 spin_unlock_bh(&queue->lock);
3503         }
3504 }
3505
3506 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3507 {
3508         struct hci_dev *hdev = chan->conn->hdev;
3509
3510         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3511
3512         hci_queue_acl(chan, &chan->data_q, skb, flags);
3513
3514         queue_work(hdev->workqueue, &hdev->tx_work);
3515 }
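
/* hci_send_acl() is the entry point used by the upper layers: the frame
 * is queued on the channel's data_q and the actual transmission happens
 * asynchronously from the TX work scheduled here.
 */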
3516
3517 /* Send SCO data */
3518 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3519 {
3520         struct hci_dev *hdev = conn->hdev;
3521         struct hci_sco_hdr hdr;
3522
3523         BT_DBG("%s len %d", hdev->name, skb->len);
3524
3525         hdr.handle = cpu_to_le16(conn->handle);
3526         hdr.dlen   = skb->len;
3527
3528         skb_push(skb, HCI_SCO_HDR_SIZE);
3529         skb_reset_transport_header(skb);
3530         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3531
3532         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3533
3534         skb_queue_tail(&conn->data_q, skb);
3535         queue_work(hdev->workqueue, &hdev->tx_work);
3536 }
3537
3538 /* ---- HCI TX task (outgoing data) ---- */
3539
3540 /* HCI Connection scheduler */
3541 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3542                                      int *quote)
3543 {
3544         struct hci_conn_hash *h = &hdev->conn_hash;
3545         struct hci_conn *conn = NULL, *c;
3546         unsigned int num = 0, min = ~0;
3547
        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled.
         */
3550
3551         rcu_read_lock();
3552
3553         list_for_each_entry_rcu(c, &h->list, list) {
3554                 if (c->type != type || skb_queue_empty(&c->data_q))
3555                         continue;
3556
3557                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3558                         continue;
3559
3560                 num++;
3561
3562                 if (c->sent < min) {
3563                         min  = c->sent;
3564                         conn = c;
3565                 }
3566
3567                 if (hci_conn_num(hdev, type) == num)
3568                         break;
3569         }
3570
3571         rcu_read_unlock();
3572
3573         if (conn) {
3574                 int cnt, q;
3575
3576                 switch (conn->type) {
3577                 case ACL_LINK:
3578                         cnt = hdev->acl_cnt;
3579                         break;
3580                 case SCO_LINK:
3581                 case ESCO_LINK:
3582                         cnt = hdev->sco_cnt;
3583                         break;
3584                 case LE_LINK:
3585                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3586                         break;
3587                 default:
3588                         cnt = 0;
3589                         BT_ERR("Unknown link type");
3590                 }
3591
3592                 q = cnt / num;
3593                 *quote = q ? q : 1;
        } else {
                *quote = 0;
        }
3596
3597         BT_DBG("conn %p quote %d", conn, *quote);
3598         return conn;
3599 }
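
/* Worked example of the quota above: with 8 free controller buffers
 * (cnt) shared by 3 connections that have queued data (num), each
 * scheduling round may send 8 / 3 = 2 packets per connection; the quote
 * never drops below 1, so a busy link cannot fully starve the others.
 */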
3600
3601 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3602 {
3603         struct hci_conn_hash *h = &hdev->conn_hash;
3604         struct hci_conn *c;
3605
3606         BT_ERR("%s link tx timeout", hdev->name);
3607
3608         rcu_read_lock();
3609
3610         /* Kill stalled connections */
3611         list_for_each_entry_rcu(c, &h->list, list) {
3612                 if (c->type == type && c->sent) {
3613                         BT_ERR("%s killing stalled connection %pMR",
3614                                hdev->name, &c->dst);
3615                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3616                 }
3617         }
3618
3619         rcu_read_unlock();
3620 }
3621
3622 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3623                                       int *quote)
3624 {
3625         struct hci_conn_hash *h = &hdev->conn_hash;
3626         struct hci_chan *chan = NULL;
3627         unsigned int num = 0, min = ~0, cur_prio = 0;
3628         struct hci_conn *conn;
3629         int cnt, q, conn_num = 0;
3630
3631         BT_DBG("%s", hdev->name);
3632
3633         rcu_read_lock();
3634
3635         list_for_each_entry_rcu(conn, &h->list, list) {
3636                 struct hci_chan *tmp;
3637
3638                 if (conn->type != type)
3639                         continue;
3640
3641                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3642                         continue;
3643
3644                 conn_num++;
3645
3646                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3647                         struct sk_buff *skb;
3648
3649                         if (skb_queue_empty(&tmp->data_q))
3650                                 continue;
3651
3652                         skb = skb_peek(&tmp->data_q);
3653                         if (skb->priority < cur_prio)
3654                                 continue;
3655
3656                         if (skb->priority > cur_prio) {
3657                                 num = 0;
3658                                 min = ~0;
3659                                 cur_prio = skb->priority;
3660                         }
3661
3662                         num++;
3663
3664                         if (conn->sent < min) {
3665                                 min  = conn->sent;
3666                                 chan = tmp;
3667                         }
3668                 }
3669
3670                 if (hci_conn_num(hdev, type) == conn_num)
3671                         break;
3672         }
3673
3674         rcu_read_unlock();
3675
3676         if (!chan)
3677                 return NULL;
3678
3679         switch (chan->conn->type) {
3680         case ACL_LINK:
3681                 cnt = hdev->acl_cnt;
3682                 break;
3683         case AMP_LINK:
3684                 cnt = hdev->block_cnt;
3685                 break;
3686         case SCO_LINK:
3687         case ESCO_LINK:
3688                 cnt = hdev->sco_cnt;
3689                 break;
3690         case LE_LINK:
3691                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3692                 break;
3693         default:
3694                 cnt = 0;
3695                 BT_ERR("Unknown link type");
3696         }
3697
3698         q = cnt / num;
3699         *quote = q ? q : 1;
3700         BT_DBG("chan %p quote %d", chan, *quote);
3701         return chan;
3702 }
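
/* Unlike hci_low_sent() above, this scheduler picks a channel rather
 * than a connection: only channels whose head-of-queue skb carries the
 * highest priority seen so far compete, and among those the one on the
 * least-busy connection (lowest conn->sent) wins.
 */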
3703
3704 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3705 {
3706         struct hci_conn_hash *h = &hdev->conn_hash;
3707         struct hci_conn *conn;
3708         int num = 0;
3709
3710         BT_DBG("%s", hdev->name);
3711
3712         rcu_read_lock();
3713
3714         list_for_each_entry_rcu(conn, &h->list, list) {
3715                 struct hci_chan *chan;
3716
3717                 if (conn->type != type)
3718                         continue;
3719
3720                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3721                         continue;
3722
3723                 num++;
3724
3725                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3726                         struct sk_buff *skb;
3727
3728                         if (chan->sent) {
3729                                 chan->sent = 0;
3730                                 continue;
3731                         }
3732
3733                         if (skb_queue_empty(&chan->data_q))
3734                                 continue;
3735
3736                         skb = skb_peek(&chan->data_q);
3737                         if (skb->priority >= HCI_PRIO_MAX - 1)
3738                                 continue;
3739
3740                         skb->priority = HCI_PRIO_MAX - 1;
3741
3742                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3743                                skb->priority);
3744                 }
3745
3746                 if (hci_conn_num(hdev, type) == num)
3747                         break;
3748         }
3749
3750         rcu_read_unlock();
3752 }
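
/* The promotion above ages queued traffic: a channel that sent nothing
 * in the previous round (chan->sent still zero) gets its head skb
 * bumped to HCI_PRIO_MAX - 1, so next round it competes even with the
 * highest-priority channels.
 */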
3753
3754 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3755 {
3756         /* Calculate count of blocks used by this packet */
3757         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3758 }
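
/* Worked example (the block_len value is assumed purely for
 * illustration): with hdev->block_len = 27, a 67-byte skb, i.e.
 * HCI_ACL_HDR_SIZE = 4 bytes of header plus 63 bytes of payload, costs
 * DIV_ROUND_UP(63, 27) = 3 data blocks.
 */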
3759
3760 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3761 {
3762         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
3765                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3766                                        HCI_ACL_TX_TIMEOUT))
3767                         hci_link_tx_to(hdev, ACL_LINK);
3768         }
3769 }
3770
3771 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3772 {
3773         unsigned int cnt = hdev->acl_cnt;
3774         struct hci_chan *chan;
3775         struct sk_buff *skb;
3776         int quote;
3777
3778         __check_timeout(hdev, cnt);
3779
3780         while (hdev->acl_cnt &&
3781                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3782                 u32 priority = (skb_peek(&chan->data_q))->priority;
3783                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3784                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3785                                skb->len, skb->priority);
3786
3787                         /* Stop if priority has changed */
3788                         if (skb->priority < priority)
3789                                 break;
3790
3791                         skb = skb_dequeue(&chan->data_q);
3792
3793                         hci_conn_enter_active_mode(chan->conn,
3794                                                    bt_cb(skb)->force_active);
3795
3796                         hci_send_frame(hdev, skb);
3797                         hdev->acl_last_tx = jiffies;
3798
3799                         hdev->acl_cnt--;
3800                         chan->sent++;
3801                         chan->conn->sent++;
3802                 }
3803         }
3804
3805         if (cnt != hdev->acl_cnt)
3806                 hci_prio_recalculate(hdev, ACL_LINK);
3807 }
3808
3809 static void hci_sched_acl_blk(struct hci_dev *hdev)
3810 {
3811         unsigned int cnt = hdev->block_cnt;
3812         struct hci_chan *chan;
3813         struct sk_buff *skb;
3814         int quote;
3815         u8 type;
3816
3817         __check_timeout(hdev, cnt);
3818
3819         BT_DBG("%s", hdev->name);
3820
3821         if (hdev->dev_type == HCI_AMP)
3822                 type = AMP_LINK;
3823         else
3824                 type = ACL_LINK;
3825
3826         while (hdev->block_cnt > 0 &&
3827                (chan = hci_chan_sent(hdev, type, &quote))) {
3828                 u32 priority = (skb_peek(&chan->data_q))->priority;
3829                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3830                         int blocks;
3831
3832                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3833                                skb->len, skb->priority);
3834
3835                         /* Stop if priority has changed */
3836                         if (skb->priority < priority)
3837                                 break;
3838
3839                         skb = skb_dequeue(&chan->data_q);
3840
3841                         blocks = __get_blocks(hdev, skb);
3842                         if (blocks > hdev->block_cnt)
3843                                 return;
3844
3845                         hci_conn_enter_active_mode(chan->conn,
3846                                                    bt_cb(skb)->force_active);
3847
3848                         hci_send_frame(hdev, skb);
3849                         hdev->acl_last_tx = jiffies;
3850
3851                         hdev->block_cnt -= blocks;
3852                         quote -= blocks;
3853
3854                         chan->sent += blocks;
3855                         chan->conn->sent += blocks;
3856                 }
3857         }
3858
3859         if (cnt != hdev->block_cnt)
3860                 hci_prio_recalculate(hdev, type);
3861 }
3862
3863 static void hci_sched_acl(struct hci_dev *hdev)
3864 {
3865         BT_DBG("%s", hdev->name);
3866
3867         /* Nothing to schedule if a BR/EDR controller has no ACL links */
3868         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3869                 return;
3870
3871         /* Nothing to schedule if an AMP controller has no AMP links */
3872         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3873                 return;
3874
3875         switch (hdev->flow_ctl_mode) {
3876         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3877                 hci_sched_acl_pkt(hdev);
3878                 break;
3879
3880         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3881                 hci_sched_acl_blk(hdev);
3882                 break;
3883         }
3884 }
3885
3886 /* Schedule SCO */
3887 static void hci_sched_sco(struct hci_dev *hdev)
3888 {
3889         struct hci_conn *conn;
3890         struct sk_buff *skb;
3891         int quote;
3892
3893         BT_DBG("%s", hdev->name);
3894
3895         if (!hci_conn_num(hdev, SCO_LINK))
3896                 return;
3897
3898         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3899                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3900                         BT_DBG("skb %p len %d", skb, skb->len);
3901                         hci_send_frame(hdev, skb);
3902
3903                         conn->sent++;
3904                         if (conn->sent == ~0)
3905                                 conn->sent = 0;
3906                 }
3907         }
3908 }
3909
3910 static void hci_sched_esco(struct hci_dev *hdev)
3911 {
3912         struct hci_conn *conn;
3913         struct sk_buff *skb;
3914         int quote;
3915
3916         BT_DBG("%s", hdev->name);
3917
3918         if (!hci_conn_num(hdev, ESCO_LINK))
3919                 return;
3920
3921         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3922                                                      &quote))) {
3923                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3924                         BT_DBG("skb %p len %d", skb, skb->len);
3925                         hci_send_frame(hdev, skb);
3926
3927                         conn->sent++;
3928                         if (conn->sent == ~0)
3929                                 conn->sent = 0;
3930                 }
3931         }
3932 }
3933
3934 static void hci_sched_le(struct hci_dev *hdev)
3935 {
3936         struct hci_chan *chan;
3937         struct sk_buff *skb;
3938         int quote, cnt, tmp;
3939
3940         BT_DBG("%s", hdev->name);
3941
3942         if (!hci_conn_num(hdev, LE_LINK))
3943                 return;
3944
3945         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
3948                 if (!hdev->le_cnt && hdev->le_pkts &&
3949                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3950                         hci_link_tx_to(hdev, LE_LINK);
3951         }
3952
3953         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3954         tmp = cnt;
3955         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3956                 u32 priority = (skb_peek(&chan->data_q))->priority;
3957                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3958                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3959                                skb->len, skb->priority);
3960
3961                         /* Stop if priority has changed */
3962                         if (skb->priority < priority)
3963                                 break;
3964
3965                         skb = skb_dequeue(&chan->data_q);
3966
3967                         hci_send_frame(hdev, skb);
3968                         hdev->le_last_tx = jiffies;
3969
3970                         cnt--;
3971                         chan->sent++;
3972                         chan->conn->sent++;
3973                 }
3974         }
3975
3976         if (hdev->le_pkts)
3977                 hdev->le_cnt = cnt;
3978         else
3979                 hdev->acl_cnt = cnt;
3980
3981         if (cnt != tmp)
3982                 hci_prio_recalculate(hdev, LE_LINK);
3983 }
3984
3985 static void hci_tx_work(struct work_struct *work)
3986 {
3987         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3988         struct sk_buff *skb;
3989
3990         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3991                hdev->sco_cnt, hdev->le_cnt);
3992
3993         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3994                 /* Schedule queues and send stuff to HCI driver */
3995                 hci_sched_acl(hdev);
3996                 hci_sched_sco(hdev);
3997                 hci_sched_esco(hdev);
3998                 hci_sched_le(hdev);
3999         }
4000
4001         /* Send next queued raw (unknown type) packet */
4002         while ((skb = skb_dequeue(&hdev->raw_q)))
4003                 hci_send_frame(hdev, skb);
4004 }
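
/* The scheduling order above is fixed per pass: ACL, SCO, eSCO, then
 * LE.  Raw packets bypass the schedulers entirely, which is also how
 * traffic leaves the device in HCI_USER_CHANNEL mode, where the
 * schedulers are skipped.
 */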
4005
4006 /* ----- HCI RX task (incoming data processing) ----- */
4007
4008 /* ACL data packet */
4009 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4010 {
4011         struct hci_acl_hdr *hdr = (void *) skb->data;
4012         struct hci_conn *conn;
4013         __u16 handle, flags;
4014
4015         skb_pull(skb, HCI_ACL_HDR_SIZE);
4016
4017         handle = __le16_to_cpu(hdr->handle);
4018         flags  = hci_flags(handle);
4019         handle = hci_handle(handle);
4020
4021         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4022                handle, flags);
4023
4024         hdev->stat.acl_rx++;
4025
4026         hci_dev_lock(hdev);
4027         conn = hci_conn_hash_lookup_handle(hdev, handle);
4028         hci_dev_unlock(hdev);
4029
4030         if (conn) {
4031                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4032
4033                 /* Send to upper protocol */
4034                 l2cap_recv_acldata(conn, skb, flags);
4035                 return;
4036         }
4037
4038         BT_ERR("%s ACL packet for unknown connection handle %d",
4039                hdev->name, handle);
4040
4041         kfree_skb(skb);
4042 }
4043
4044 /* SCO data packet */
4045 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4046 {
4047         struct hci_sco_hdr *hdr = (void *) skb->data;
4048         struct hci_conn *conn;
4049         __u16 handle;
4050
4051         skb_pull(skb, HCI_SCO_HDR_SIZE);
4052
4053         handle = __le16_to_cpu(hdr->handle);
4054
4055         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4056
4057         hdev->stat.sco_rx++;
4058
4059         hci_dev_lock(hdev);
4060         conn = hci_conn_hash_lookup_handle(hdev, handle);
4061         hci_dev_unlock(hdev);
4062
4063         if (conn) {
4064                 /* Send to upper protocol */
4065                 sco_recv_scodata(conn, skb);
4066                 return;
4067         }
4068
4069         BT_ERR("%s SCO packet for unknown connection handle %d",
4070                hdev->name, handle);
4071
4072         kfree_skb(skb);
4073 }
4074
4075 static bool hci_req_is_complete(struct hci_dev *hdev)
4076 {
4077         struct sk_buff *skb;
4078
4079         skb = skb_peek(&hdev->cmd_q);
4080         if (!skb)
4081                 return true;
4082
4083         return bt_cb(skb)->req.start;
4084 }
4085
4086 static void hci_resend_last(struct hci_dev *hdev)
4087 {
4088         struct hci_command_hdr *sent;
4089         struct sk_buff *skb;
4090         u16 opcode;
4091
4092         if (!hdev->sent_cmd)
4093                 return;
4094
4095         sent = (void *) hdev->sent_cmd->data;
4096         opcode = __le16_to_cpu(sent->opcode);
4097         if (opcode == HCI_OP_RESET)
4098                 return;
4099
4100         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4101         if (!skb)
4102                 return;
4103
4104         skb_queue_head(&hdev->cmd_q, skb);
4105         queue_work(hdev->workqueue, &hdev->cmd_work);
4106 }
4107
4108 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4109 {
4110         hci_req_complete_t req_complete = NULL;
4111         struct sk_buff *skb;
4112         unsigned long flags;
4113
4114         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4115
4116         /* If the completed command doesn't match the last one that was
4117          * sent, we need to do special handling of it.
4118          */
4119         if (!hci_sent_cmd_data(hdev, opcode)) {
4120                 /* Some CSR based controllers generate a spontaneous
4121                  * reset complete event during init and any pending
4122                  * command will never be completed. In such a case we
4123                  * need to resend whatever was the last sent
4124                  * command.
4125                  */
4126                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4127                         hci_resend_last(hdev);
4128
4129                 return;
4130         }
4131
4132         /* If the command succeeded and there are more commands still
4133          * queued for this request, the request is not yet complete.
4134          */
4135         if (!status && !hci_req_is_complete(hdev))
4136                 return;
4137
4138         /* If this was the last command in a request, the complete
4139          * callback is found in hdev->sent_cmd instead of the command
4140          * queue (hdev->cmd_q).
4141          */
4142         if (hdev->sent_cmd) {
4143                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4144
4145                 if (req_complete) {
4146                         /* We must set the complete callback to NULL to
4147                          * avoid calling the callback more than once if
4148                          * this function gets called again.
4149                          */
4150                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4151
4152                         goto call_complete;
4153                 }
4154         }
4155
4156         /* Remove all pending commands belonging to this request */
4157         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4158         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4159                 if (bt_cb(skb)->req.start) {
4160                         __skb_queue_head(&hdev->cmd_q, skb);
4161                         break;
4162                 }
4163
4164                 req_complete = bt_cb(skb)->req.complete;
4165                 kfree_skb(skb);
4166         }
4167         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4168
4169 call_complete:
4170         if (req_complete)
4171                 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4172 }
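
/* In short, hci_req_cmd_complete() either (a) resends the last command
 * when a CSR controller emits a spontaneous reset-complete during init,
 * (b) returns early while a multi-command request is still in flight
 * and error-free, or (c) fires the request's complete callback, also
 * flushing any not-yet-sent commands of an aborted request from cmd_q.
 */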
4173
4174 static void hci_rx_work(struct work_struct *work)
4175 {
4176         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4177         struct sk_buff *skb;
4178
4179         BT_DBG("%s", hdev->name);
4180
4181         while ((skb = skb_dequeue(&hdev->rx_q))) {
4182                 /* Send copy to monitor */
4183                 hci_send_to_monitor(hdev, skb);
4184
4185                 if (atomic_read(&hdev->promisc)) {
4186                         /* Send copy to the sockets */
4187                         hci_send_to_sock(hdev, skb);
4188                 }
4189
4190                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4191                         kfree_skb(skb);
4192                         continue;
4193                 }
4194
4195                 if (test_bit(HCI_INIT, &hdev->flags)) {
4196                         /* Don't process data packets in this state. */
4197                         switch (bt_cb(skb)->pkt_type) {
4198                         case HCI_ACLDATA_PKT:
4199                         case HCI_SCODATA_PKT:
4200                                 kfree_skb(skb);
4201                                 continue;
4202                         }
4203                 }
4204
4205                 /* Process frame */
4206                 switch (bt_cb(skb)->pkt_type) {
4207                 case HCI_EVENT_PKT:
4208                         BT_DBG("%s Event packet", hdev->name);
4209                         hci_event_packet(hdev, skb);
4210                         break;
4211
4212                 case HCI_ACLDATA_PKT:
4213                         BT_DBG("%s ACL data packet", hdev->name);
4214                         hci_acldata_packet(hdev, skb);
4215                         break;
4216
4217                 case HCI_SCODATA_PKT:
4218                         BT_DBG("%s SCO data packet", hdev->name);
4219                         hci_scodata_packet(hdev, skb);
4220                         break;
4221
4222                 default:
4223                         kfree_skb(skb);
4224                         break;
4225                 }
4226         }
4227 }
4228
4229 static void hci_cmd_work(struct work_struct *work)
4230 {
4231         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4232         struct sk_buff *skb;
4233
4234         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4235                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4236
4237         /* Send queued commands */
4238         if (atomic_read(&hdev->cmd_cnt)) {
4239                 skb = skb_dequeue(&hdev->cmd_q);
4240                 if (!skb)
4241                         return;
4242
4243                 kfree_skb(hdev->sent_cmd);
4244
4245                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4246                 if (hdev->sent_cmd) {
4247                         atomic_dec(&hdev->cmd_cnt);
4248                         hci_send_frame(hdev, skb);
4249                         if (test_bit(HCI_RESET, &hdev->flags))
4250                                 cancel_delayed_work(&hdev->cmd_timer);
4251                         else
4252                                 schedule_delayed_work(&hdev->cmd_timer,
4253                                                       HCI_CMD_TIMEOUT);
4254                 } else {
4255                         skb_queue_head(&hdev->cmd_q, skb);
4256                         queue_work(hdev->workqueue, &hdev->cmd_work);
4257                 }
4258         }
4259 }