Bluetooth: Add shutdown callback before closing the device
[firefly-linux-kernel-4.4.55.git] / net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

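/* The req_lock serializes all synchronous request processing on a
 * device. An illustrative usage pattern (every __hci_cmd_sync() caller
 * below follows it):
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 */
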
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
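
/* Usage sketch: with debugfs mounted at its usual location, DUT mode
 * can be toggled from userspace, e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing Y sends HCI_OP_ENABLE_DUT_MODE, writing N resets the
 * controller. The path assumes the default debugfs mount point and an
 * hci0 device.
 */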

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
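
/* Illustrative __hci_cmd_sync() usage from process context, e.g. to
 * fetch the local version information (error handling shortened; the
 * caller must hold hci_req_lock(), as dut_mode_write() above does):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... skb->data now holds struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */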

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				      unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
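
/* Illustrative pattern: callers pass a builder callback that queues one
 * or more commands on the request, e.g. (my_req is hypothetical; see
 * hci_scan_req() below for a real builder):
 *
 *	static void my_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, my_req, 0, HCI_INIT_TIMEOUT);
 */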

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to
	 * set any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
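
/* The event mask maps HCI event code N to bit (N - 1), i.e. octet
 * (N - 1) / 8, bit (N - 1) % 8. For example, Disconnection Complete
 * (event code 0x05) lands in events[0] as 1 << 4 = 0x10, which matches
 * the LE-only defaults set above.
 */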

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However, some controllers report
		 * max_page as 0 as long as SSP has not been enabled.
		 * To get proper debugging output, force max_page to a
		 * minimum of 1.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send it only if the command is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * This means a controller may run through its setup phase and
	 * then discover missing settings. In that case, this function
	 * is not called during setup; it will only be called later,
	 * during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
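
/* Illustrative usage: a successful hci_dev_get() must be balanced with
 * hci_dev_put() once the reference is no longer needed, e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */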

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
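
/* The loop above keeps the resolve list ordered by ascending |RSSI|, so
 * entries with the smallest |RSSI| (typically the strongest signal) are
 * name-resolved first, without displacing entries whose resolution is
 * already pending.
 */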

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
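
/* Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A minimal sketch, assuming BlueZ's <bluetooth/hci.h>
 * userspace definitions:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.num_rsp = 255;
 *	buf.ir.length  = 8;
 *	buf.ir.lap[0]  = 0x33;	(GIAC 0x9e8b33, little endian)
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 */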

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
1530          * possible.
1531          *
1532          * When this function is called for a user channel, the flag
1533          * HCI_USER_CHANNEL will be set first before attempting to
1534          * open the device.
1535          */
1536         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1537             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1538                 err = -EOPNOTSUPP;
1539                 goto done;
1540         }
1541
1542         /* We need to ensure that no other power on/off work is pending
1543          * before proceeding to call hci_dev_do_open. This is
1544          * particularly important if the setup procedure has not yet
1545          * completed.
1546          */
1547         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1548                 cancel_delayed_work(&hdev->power_off);
1549
1550         /* After this call it is guaranteed that the setup procedure
1551          * has finished. This means that error conditions like RFKILL
1552          * or the lack of a valid public or static random address apply.
1553          */
1554         flush_workqueue(hdev->req_workqueue);
1555
1556         /* For controllers not using the management interface and that
1557          * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
1558          * so that pairing works for them. Once the management interface
1559          * is in use this bit will be cleared again and userspace has
1560          * to explicitly enable it.
1561          */
1562         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1563             !test_bit(HCI_MGMT, &hdev->dev_flags))
1564                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1565
1566         err = hci_dev_do_open(hdev);
1567
1568 done:
1569         hci_dev_put(hdev);
1570         return err;
1571 }
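
/* Illustrative userspace counterpart (a sketch, not part of this file:
 * dev_id 0 and the error handling are assumptions; the usual
 * <sys/socket.h>, <sys/ioctl.h>, <stdio.h> and HCI headers are implied).
 * The HCIDEVUP ioctl on a raw HCI socket is what reaches hci_dev_open():
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0)
 *		perror("HCIDEVUP hci0");
 */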
1572
1573 /* This function requires the caller holds hdev->lock */
1574 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1575 {
1576         struct hci_conn_params *p;
1577
1578         list_for_each_entry(p, &hdev->le_conn_params, list) {
1579                 if (p->conn) {
1580                         hci_conn_drop(p->conn);
1581                         hci_conn_put(p->conn);
1582                         p->conn = NULL;
1583                 }
1584                 list_del_init(&p->action);
1585         }
1586
1587         BT_DBG("All LE pending actions cleared");
1588 }
1589
1590 static int hci_dev_do_close(struct hci_dev *hdev)
1591 {
1592         BT_DBG("%s %p", hdev->name, hdev);
1593
1594         if (!test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1595                 /* Execute vendor specific shutdown routine */
1596                 if (hdev->shutdown)
1597                         hdev->shutdown(hdev);
1598         }
1599
1600         cancel_delayed_work(&hdev->power_off);
1601
1602         hci_req_cancel(hdev, ENODEV);
1603         hci_req_lock(hdev);
1604
1605         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1606                 cancel_delayed_work_sync(&hdev->cmd_timer);
1607                 hci_req_unlock(hdev);
1608                 return 0;
1609         }
1610
1611         /* Flush the RX and TX work items */
1612         flush_work(&hdev->tx_work);
1613         flush_work(&hdev->rx_work);
1614
1615         if (hdev->discov_timeout > 0) {
1616                 cancel_delayed_work(&hdev->discov_off);
1617                 hdev->discov_timeout = 0;
1618                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1619                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1620         }
1621
1622         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1623                 cancel_delayed_work(&hdev->service_cache);
1624
1625         cancel_delayed_work_sync(&hdev->le_scan_disable);
1626         cancel_delayed_work_sync(&hdev->le_scan_restart);
1627
1628         if (test_bit(HCI_MGMT, &hdev->dev_flags))
1629                 cancel_delayed_work_sync(&hdev->rpa_expired);
1630
1631         /* Avoid potential lockdep warnings from the *_flush() calls by
1632          * ensuring the workqueue is empty up front.
1633          */
1634         drain_workqueue(hdev->workqueue);
1635
1636         hci_dev_lock(hdev);
1637
1638         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1639
1640         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1641                 if (hdev->dev_type == HCI_BREDR)
1642                         mgmt_powered(hdev, 0);
1643         }
1644
1645         hci_inquiry_cache_flush(hdev);
1646         hci_pend_le_actions_clear(hdev);
1647         hci_conn_hash_flush(hdev);
1648         hci_dev_unlock(hdev);
1649
1650         smp_unregister(hdev);
1651
1652         hci_notify(hdev, HCI_DEV_DOWN);
1653
1654         if (hdev->flush)
1655                 hdev->flush(hdev);
1656
1657         /* Reset device */
1658         skb_queue_purge(&hdev->cmd_q);
1659         atomic_set(&hdev->cmd_cnt, 1);
1660         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1661             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1662             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1663                 set_bit(HCI_INIT, &hdev->flags);
1664                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1665                 clear_bit(HCI_INIT, &hdev->flags);
1666         }
1667
1668         /* Flush the cmd work item */
1669         flush_work(&hdev->cmd_work);
1670
1671         /* Drop queues */
1672         skb_queue_purge(&hdev->rx_q);
1673         skb_queue_purge(&hdev->cmd_q);
1674         skb_queue_purge(&hdev->raw_q);
1675
1676         /* Drop last sent command */
1677         if (hdev->sent_cmd) {
1678                 cancel_delayed_work_sync(&hdev->cmd_timer);
1679                 kfree_skb(hdev->sent_cmd);
1680                 hdev->sent_cmd = NULL;
1681         }
1682
1683         kfree_skb(hdev->recv_evt);
1684         hdev->recv_evt = NULL;
1685
1686         /* After this point our queues are empty
1687          * and no tasks are scheduled. */
1688         hdev->close(hdev);
1689
1690         /* Clear flags */
1691         hdev->flags &= BIT(HCI_RAW);
1692         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1693
1694         /* Controller radio is available but is currently powered down */
1695         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1696
1697         memset(hdev->eir, 0, sizeof(hdev->eir));
1698         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1699         bacpy(&hdev->random_addr, BDADDR_ANY);
1700
1701         hci_req_unlock(hdev);
1702
1703         hci_dev_put(hdev);
1704         return 0;
1705 }
1706
1707 int hci_dev_close(__u16 dev)
1708 {
1709         struct hci_dev *hdev;
1710         int err;
1711
1712         hdev = hci_dev_get(dev);
1713         if (!hdev)
1714                 return -ENODEV;
1715
1716         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1717                 err = -EBUSY;
1718                 goto done;
1719         }
1720
1721         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1722                 cancel_delayed_work(&hdev->power_off);
1723
1724         err = hci_dev_do_close(hdev);
1725
1726 done:
1727         hci_dev_put(hdev);
1728         return err;
1729 }
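
/* The matching teardown path is the HCIDEVDOWN ioctl on the same kind of
 * socket (sketch; ctl is the socket from the sketch above and dev_id 0 is
 * an assumption):
 *
 *	ioctl(ctl, HCIDEVDOWN, 0);
 */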
1730
1731 static int hci_dev_do_reset(struct hci_dev *hdev)
1732 {
1733         int ret;
1734
1735         BT_DBG("%s %p", hdev->name, hdev);
1736
1737         hci_req_lock(hdev);
1738
1739         /* Drop queues */
1740         skb_queue_purge(&hdev->rx_q);
1741         skb_queue_purge(&hdev->cmd_q);
1742
1743         /* Avoid potential lockdep warnings from the *_flush() calls by
1744          * ensuring the workqueue is empty up front.
1745          */
1746         drain_workqueue(hdev->workqueue);
1747
1748         hci_dev_lock(hdev);
1749         hci_inquiry_cache_flush(hdev);
1750         hci_conn_hash_flush(hdev);
1751         hci_dev_unlock(hdev);
1752
1753         if (hdev->flush)
1754                 hdev->flush(hdev);
1755
1756         atomic_set(&hdev->cmd_cnt, 1);
1757         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1758
1759         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1760
1761         hci_req_unlock(hdev);
1762         return ret;
1763 }
1764
1765 int hci_dev_reset(__u16 dev)
1766 {
1767         struct hci_dev *hdev;
1768         int err;
1769
1770         hdev = hci_dev_get(dev);
1771         if (!hdev)
1772                 return -ENODEV;
1773
1774         if (!test_bit(HCI_UP, &hdev->flags)) {
1775                 err = -ENETDOWN;
1776                 goto done;
1777         }
1778
1779         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1780                 err = -EBUSY;
1781                 goto done;
1782         }
1783
1784         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1785                 err = -EOPNOTSUPP;
1786                 goto done;
1787         }
1788
1789         err = hci_dev_do_reset(hdev);
1790
1791 done:
1792         hci_dev_put(hdev);
1793         return err;
1794 }
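
/* Userspace sketch (dev_id 0 assumed): a controller that is up can be
 * reset with
 *
 *	ioctl(ctl, HCIDEVRESET, 0);
 *
 * and the call fails with ENETDOWN, EBUSY or EOPNOTSUPP exactly as
 * encoded above.
 */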
1795
1796 int hci_dev_reset_stat(__u16 dev)
1797 {
1798         struct hci_dev *hdev;
1799         int ret = 0;
1800
1801         hdev = hci_dev_get(dev);
1802         if (!hdev)
1803                 return -ENODEV;
1804
1805         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1806                 ret = -EBUSY;
1807                 goto done;
1808         }
1809
1810         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1811                 ret = -EOPNOTSUPP;
1812                 goto done;
1813         }
1814
1815         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1816
1817 done:
1818         hci_dev_put(hdev);
1819         return ret;
1820 }
1821
1822 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1823 {
1824         bool conn_changed, discov_changed;
1825
1826         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1827
1828         if ((scan & SCAN_PAGE))
1829                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1830                                                  &hdev->dev_flags);
1831         else
1832                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1833                                                   &hdev->dev_flags);
1834
1835         if ((scan & SCAN_INQUIRY)) {
1836                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1837                                                    &hdev->dev_flags);
1838         } else {
1839                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1840                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1841                                                     &hdev->dev_flags);
1842         }
1843
1844         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1845                 return;
1846
1847         if (conn_changed || discov_changed) {
1848                 /* In case this was disabled through mgmt */
1849                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1850
1851                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1852                         mgmt_update_adv_data(hdev);
1853
1854                 mgmt_new_settings(hdev);
1855         }
1856 }
1857
1858 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1859 {
1860         struct hci_dev *hdev;
1861         struct hci_dev_req dr;
1862         int err = 0;
1863
1864         if (copy_from_user(&dr, arg, sizeof(dr)))
1865                 return -EFAULT;
1866
1867         hdev = hci_dev_get(dr.dev_id);
1868         if (!hdev)
1869                 return -ENODEV;
1870
1871         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1872                 err = -EBUSY;
1873                 goto done;
1874         }
1875
1876         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1877                 err = -EOPNOTSUPP;
1878                 goto done;
1879         }
1880
1881         if (hdev->dev_type != HCI_BREDR) {
1882                 err = -EOPNOTSUPP;
1883                 goto done;
1884         }
1885
1886         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1887                 err = -EOPNOTSUPP;
1888                 goto done;
1889         }
1890
1891         switch (cmd) {
1892         case HCISETAUTH:
1893                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1894                                    HCI_INIT_TIMEOUT);
1895                 break;
1896
1897         case HCISETENCRYPT:
1898                 if (!lmp_encrypt_capable(hdev)) {
1899                         err = -EOPNOTSUPP;
1900                         break;
1901                 }
1902
1903                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1904                         /* Auth must be enabled first */
1905                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1906                                            HCI_INIT_TIMEOUT);
1907                         if (err)
1908                                 break;
1909                 }
1910
1911                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1912                                    HCI_INIT_TIMEOUT);
1913                 break;
1914
1915         case HCISETSCAN:
1916                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1917                                    HCI_INIT_TIMEOUT);
1918
1919                 /* Ensure that the connectable and discoverable states
1920                  * get correctly modified as this was a non-mgmt change.
1921                  */
1922                 if (!err)
1923                         hci_update_scan_state(hdev, dr.dev_opt);
1924                 break;
1925
1926         case HCISETLINKPOL:
1927                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1928                                    HCI_INIT_TIMEOUT);
1929                 break;
1930
1931         case HCISETLINKMODE:
1932                 hdev->link_mode = ((__u16) dr.dev_opt) &
1933                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1934                 break;
1935
1936         case HCISETPTYPE:
1937                 hdev->pkt_type = (__u16) dr.dev_opt;
1938                 break;
1939
1940         case HCISETACLMTU:
1941                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1942                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1943                 break;
1944
1945         case HCISETSCOMTU:
1946                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1947                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1948                 break;
1949
1950         default:
1951                 err = -EINVAL;
1952                 break;
1953         }
1954
1955 done:
1956         hci_dev_put(hdev);
1957         return err;
1958 }
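
/* Illustrative HCISETSCAN call that makes a controller both connectable
 * and discoverable; struct hci_dev_req and the SCAN_* constants come from
 * the HCI headers, dev_id 0 is an assumption:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_INQUIRY | SCAN_PAGE,
 *	};
 *
 *	ioctl(ctl, HCISETSCAN, &dr);
 */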
1959
1960 int hci_get_dev_list(void __user *arg)
1961 {
1962         struct hci_dev *hdev;
1963         struct hci_dev_list_req *dl;
1964         struct hci_dev_req *dr;
1965         int n = 0, size, err;
1966         __u16 dev_num;
1967
1968         if (get_user(dev_num, (__u16 __user *) arg))
1969                 return -EFAULT;
1970
1971         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1972                 return -EINVAL;
1973
1974         size = sizeof(*dl) + dev_num * sizeof(*dr);
1975
1976         dl = kzalloc(size, GFP_KERNEL);
1977         if (!dl)
1978                 return -ENOMEM;
1979
1980         dr = dl->dev_req;
1981
1982         read_lock(&hci_dev_list_lock);
1983         list_for_each_entry(hdev, &hci_dev_list, list) {
1984                 unsigned long flags = hdev->flags;
1985
1986                 /* When auto-off is configured the transport is
1987                  * running, but in that case still indicate that
1988                  * the device is actually down.
1989                  */
1990                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1991                         flags &= ~BIT(HCI_UP);
1992
1993                 (dr + n)->dev_id  = hdev->id;
1994                 (dr + n)->dev_opt = flags;
1995
1996                 if (++n >= dev_num)
1997                         break;
1998         }
1999         read_unlock(&hci_dev_list_lock);
2000
2001         dl->dev_num = n;
2002         size = sizeof(*dl) + n * sizeof(*dr);
2003
2004         err = copy_to_user(arg, dl, size);
2005         kfree(dl);
2006
2007         return err ? -EFAULT : 0;
2008 }
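
/* Userspace sketch for HCIGETDEVLIST, mirroring the kernel side above
 * (room for 16 devices is an arbitrary assumption; error checking
 * trimmed):
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
 *		;	// dl->dev_num now holds the number of filled entries
 */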
2009
2010 int hci_get_dev_info(void __user *arg)
2011 {
2012         struct hci_dev *hdev;
2013         struct hci_dev_info di;
2014         unsigned long flags;
2015         int err = 0;
2016
2017         if (copy_from_user(&di, arg, sizeof(di)))
2018                 return -EFAULT;
2019
2020         hdev = hci_dev_get(di.dev_id);
2021         if (!hdev)
2022                 return -ENODEV;
2023
2024         /* When auto-off is configured the transport is running,
2025          * but in that case still indicate that the device is
2026          * actually down.
2027          */
2028         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2029                 flags = hdev->flags & ~BIT(HCI_UP);
2030         else
2031                 flags = hdev->flags;
2032
2033         strcpy(di.name, hdev->name);
2034         di.bdaddr   = hdev->bdaddr;
2035         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2036         di.flags    = flags;
2037         di.pkt_type = hdev->pkt_type;
2038         if (lmp_bredr_capable(hdev)) {
2039                 di.acl_mtu  = hdev->acl_mtu;
2040                 di.acl_pkts = hdev->acl_pkts;
2041                 di.sco_mtu  = hdev->sco_mtu;
2042                 di.sco_pkts = hdev->sco_pkts;
2043         } else {
2044                 di.acl_mtu  = hdev->le_mtu;
2045                 di.acl_pkts = hdev->le_pkts;
2046                 di.sco_mtu  = 0;
2047                 di.sco_pkts = 0;
2048         }
2049         di.link_policy = hdev->link_policy;
2050         di.link_mode   = hdev->link_mode;
2051
2052         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2053         memcpy(&di.features, &hdev->features, sizeof(di.features));
2054
2055         if (copy_to_user(arg, &di, sizeof(di)))
2056                 err = -EFAULT;
2057
2058         hci_dev_put(hdev);
2059
2060         return err;
2061 }
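
/* Userspace sketch for HCIGETDEVINFO (dev_id 0 assumed):
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	if (ioctl(ctl, HCIGETDEVINFO, &di) == 0)
 *		printf("%s acl_mtu %u\n", di.name, di.acl_mtu);
 */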
2062
2063 /* ---- Interface to HCI drivers ---- */
2064
2065 static int hci_rfkill_set_block(void *data, bool blocked)
2066 {
2067         struct hci_dev *hdev = data;
2068
2069         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2070
2071         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2072                 return -EBUSY;
2073
2074         if (blocked) {
2075                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2076                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2077                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
2078                         hci_dev_do_close(hdev);
2079         } else {
2080                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2081         }
2082
2083         return 0;
2084 }
2085
2086 static const struct rfkill_ops hci_rfkill_ops = {
2087         .set_block = hci_rfkill_set_block,
2088 };
2089
2090 static void hci_power_on(struct work_struct *work)
2091 {
2092         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2093         int err;
2094
2095         BT_DBG("%s", hdev->name);
2096
2097         err = hci_dev_do_open(hdev);
2098         if (err < 0) {
2099                 hci_dev_lock(hdev);
2100                 mgmt_set_powered_failed(hdev, err);
2101                 hci_dev_unlock(hdev);
2102                 return;
2103         }
2104
2105         /* During the HCI setup phase, a few error conditions are
2106          * ignored and they need to be checked now. If they are still
2107          * valid, it is important to turn the device back off.
2108          */
2109         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2110             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2111             (hdev->dev_type == HCI_BREDR &&
2112              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2113              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2114                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2115                 hci_dev_do_close(hdev);
2116         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2117                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2118                                    HCI_AUTO_OFF_TIMEOUT);
2119         }
2120
2121         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2122                 /* For unconfigured devices, set the HCI_RAW flag
2123                  * so that userspace can easily identify them.
2124                  */
2125                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2126                         set_bit(HCI_RAW, &hdev->flags);
2127
2128                 /* For fully configured devices, this will send
2129                  * the Index Added event. For unconfigured devices,
2130                  * it will send the Unconfigured Index Added event.
2131                  *
2132                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2133                  * and no event will be sent.
2134                  */
2135                 mgmt_index_added(hdev);
2136         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2137                 /* When the controller is now configured, then it
2138                  * is important to clear the HCI_RAW flag.
2139                  */
2140                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2141                         clear_bit(HCI_RAW, &hdev->flags);
2142
2143                 /* Powering on the controller with HCI_CONFIG set only
2144                  * happens with the transition from unconfigured to
2145                  * configured. This will send the Index Added event.
2146                  */
2147                 mgmt_index_added(hdev);
2148         }
2149 }
2150
2151 static void hci_power_off(struct work_struct *work)
2152 {
2153         struct hci_dev *hdev = container_of(work, struct hci_dev,
2154                                             power_off.work);
2155
2156         BT_DBG("%s", hdev->name);
2157
2158         hci_dev_do_close(hdev);
2159 }
2160
2161 static void hci_error_reset(struct work_struct *work)
2162 {
2163         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2164
2165         BT_DBG("%s", hdev->name);
2166
2167         if (hdev->hw_error)
2168                 hdev->hw_error(hdev, hdev->hw_error_code);
2169         else
2170                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2171                        hdev->hw_error_code);
2172
2173         if (hci_dev_do_close(hdev))
2174                 return;
2175
2176         hci_dev_do_open(hdev);
2177 }
2178
2179 static void hci_discov_off(struct work_struct *work)
2180 {
2181         struct hci_dev *hdev;
2182
2183         hdev = container_of(work, struct hci_dev, discov_off.work);
2184
2185         BT_DBG("%s", hdev->name);
2186
2187         mgmt_discoverable_timeout(hdev);
2188 }
2189
2190 void hci_uuids_clear(struct hci_dev *hdev)
2191 {
2192         struct bt_uuid *uuid, *tmp;
2193
2194         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2195                 list_del(&uuid->list);
2196                 kfree(uuid);
2197         }
2198 }
2199
2200 void hci_link_keys_clear(struct hci_dev *hdev)
2201 {
2202         struct link_key *key;
2203
2204         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2205                 list_del_rcu(&key->list);
2206                 kfree_rcu(key, rcu);
2207         }
2208 }
2209
2210 void hci_smp_ltks_clear(struct hci_dev *hdev)
2211 {
2212         struct smp_ltk *k;
2213
2214         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2215                 list_del_rcu(&k->list);
2216                 kfree_rcu(k, rcu);
2217         }
2218 }
2219
2220 void hci_smp_irks_clear(struct hci_dev *hdev)
2221 {
2222         struct smp_irk *k;
2223
2224         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2225                 list_del_rcu(&k->list);
2226                 kfree_rcu(k, rcu);
2227         }
2228 }
2229
2230 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2231 {
2232         struct link_key *k;
2233
2234         rcu_read_lock();
2235         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2236                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2237                         rcu_read_unlock();
2238                         return k;
2239                 }
2240         }
2241         rcu_read_unlock();
2242
2243         return NULL;
2244 }
2245
2246 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2247                                u8 key_type, u8 old_key_type)
2248 {
2249         /* Legacy key */
2250         if (key_type < 0x03)
2251                 return true;
2252
2253         /* Debug keys are insecure so don't store them persistently */
2254         if (key_type == HCI_LK_DEBUG_COMBINATION)
2255                 return false;
2256
2257         /* Changed combination key and there's no previous one */
2258         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2259                 return false;
2260
2261         /* Security mode 3 case */
2262         if (!conn)
2263                 return true;
2264
2265         /* BR/EDR key derived using SC from an LE link */
2266         if (conn->type == LE_LINK)
2267                 return true;
2268
2269         /* Neither local nor remote side had no-bonding as requirement */
2270         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2271                 return true;
2272
2273         /* Local side had dedicated bonding as requirement */
2274         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2275                 return true;
2276
2277         /* Remote side had dedicated bonding as requirement */
2278         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2279                 return true;
2280
2281         /* If none of the above criteria match, then don't store the key
2282          * persistently */
2283         return false;
2284 }
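
/* Reading the checks above on an example: an SSP pairing that produced an
 * unauthenticated combination key (type 0x04) is stored only if both
 * sides requested some form of bonding, or either side asked for
 * dedicated bonding; a debug combination key is never stored. This is an
 * illustrative reading of the logic above, not additional policy.
 */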
2285
2286 static u8 ltk_role(u8 type)
2287 {
2288         if (type == SMP_LTK)
2289                 return HCI_ROLE_MASTER;
2290
2291         return HCI_ROLE_SLAVE;
2292 }
2293
2294 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2295                              u8 addr_type, u8 role)
2296 {
2297         struct smp_ltk *k;
2298
2299         rcu_read_lock();
2300         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2301                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2302                         continue;
2303
2304                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2305                         rcu_read_unlock();
2306                         return k;
2307                 }
2308         }
2309         rcu_read_unlock();
2310
2311         return NULL;
2312 }
2313
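/* Resolve a Resolvable Private Address to its IRK in two passes: first an
 * exact match against the cached RPA of each IRK, then a cryptographic
 * test of every IRK via smp_irk_matches(), caching the RPA on a hit so
 * the next lookup takes the fast path.
 */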
2314 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2315 {
2316         struct smp_irk *irk;
2317
2318         rcu_read_lock();
2319         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320                 if (!bacmp(&irk->rpa, rpa)) {
2321                         rcu_read_unlock();
2322                         return irk;
2323                 }
2324         }
2325
2326         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2327                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2328                         bacpy(&irk->rpa, rpa);
2329                         rcu_read_unlock();
2330                         return irk;
2331                 }
2332         }
2333         rcu_read_unlock();
2334
2335         return NULL;
2336 }
2337
2338 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2339                                      u8 addr_type)
2340 {
2341         struct smp_irk *irk;
2342
2343         /* Identity Address must be public or static random */
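        /* (A static random address has its two most significant bits set
         * to 1, which is what the 0xc0 mask test below verifies.)
         */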
2344         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2345                 return NULL;
2346
2347         rcu_read_lock();
2348         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2349                 if (addr_type == irk->addr_type &&
2350                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2351                         rcu_read_unlock();
2352                         return irk;
2353                 }
2354         }
2355         rcu_read_unlock();
2356
2357         return NULL;
2358 }
2359
2360 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2361                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2362                                   u8 pin_len, bool *persistent)
2363 {
2364         struct link_key *key, *old_key;
2365         u8 old_key_type;
2366
2367         old_key = hci_find_link_key(hdev, bdaddr);
2368         if (old_key) {
2369                 old_key_type = old_key->type;
2370                 key = old_key;
2371         } else {
2372                 old_key_type = conn ? conn->key_type : 0xff;
2373                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2374                 if (!key)
2375                         return NULL;
2376                 list_add_rcu(&key->list, &hdev->link_keys);
2377         }
2378
2379         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2380
2381         /* Some buggy controller combinations generate a changed
2382          * combination key for legacy pairing even when there's no
2383          * previous key */
2384         if (type == HCI_LK_CHANGED_COMBINATION &&
2385             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2386                 type = HCI_LK_COMBINATION;
2387                 if (conn)
2388                         conn->key_type = type;
2389         }
2390
2391         bacpy(&key->bdaddr, bdaddr);
2392         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2393         key->pin_len = pin_len;
2394
2395         if (type == HCI_LK_CHANGED_COMBINATION)
2396                 key->type = old_key_type;
2397         else
2398                 key->type = type;
2399
2400         if (persistent)
2401                 *persistent = hci_persistent_key(hdev, conn, type,
2402                                                  old_key_type);
2403
2404         return key;
2405 }
2406
2407 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2408                             u8 addr_type, u8 type, u8 authenticated,
2409                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2410 {
2411         struct smp_ltk *key, *old_key;
2412         u8 role = ltk_role(type);
2413
2414         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2415         if (old_key)
2416                 key = old_key;
2417         else {
2418                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2419                 if (!key)
2420                         return NULL;
2421                 list_add_rcu(&key->list, &hdev->long_term_keys);
2422         }
2423
2424         bacpy(&key->bdaddr, bdaddr);
2425         key->bdaddr_type = addr_type;
2426         memcpy(key->val, tk, sizeof(key->val));
2427         key->authenticated = authenticated;
2428         key->ediv = ediv;
2429         key->rand = rand;
2430         key->enc_size = enc_size;
2431         key->type = type;
2432
2433         return key;
2434 }
2435
2436 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2437                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2438 {
2439         struct smp_irk *irk;
2440
2441         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2442         if (!irk) {
2443                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2444                 if (!irk)
2445                         return NULL;
2446
2447                 bacpy(&irk->bdaddr, bdaddr);
2448                 irk->addr_type = addr_type;
2449
2450                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2451         }
2452
2453         memcpy(irk->val, val, 16);
2454         bacpy(&irk->rpa, rpa);
2455
2456         return irk;
2457 }
2458
2459 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2460 {
2461         struct link_key *key;
2462
2463         key = hci_find_link_key(hdev, bdaddr);
2464         if (!key)
2465                 return -ENOENT;
2466
2467         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2468
2469         list_del_rcu(&key->list);
2470         kfree_rcu(key, rcu);
2471
2472         return 0;
2473 }
2474
2475 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2476 {
2477         struct smp_ltk *k;
2478         int removed = 0;
2479
2480         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2481                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2482                         continue;
2483
2484                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2485
2486                 list_del_rcu(&k->list);
2487                 kfree_rcu(k, rcu);
2488                 removed++;
2489         }
2490
2491         return removed ? 0 : -ENOENT;
2492 }
2493
2494 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2495 {
2496         struct smp_irk *k;
2497
2498         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2499                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2500                         continue;
2501
2502                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2503
2504                 list_del_rcu(&k->list);
2505                 kfree_rcu(k, rcu);
2506         }
2507 }
2508
2509 /* HCI command timer function */
2510 static void hci_cmd_timeout(struct work_struct *work)
2511 {
2512         struct hci_dev *hdev = container_of(work, struct hci_dev,
2513                                             cmd_timer.work);
2514
2515         if (hdev->sent_cmd) {
2516                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2517                 u16 opcode = __le16_to_cpu(sent->opcode);
2518
2519                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2520         } else {
2521                 BT_ERR("%s command tx timeout", hdev->name);
2522         }
2523
2524         atomic_set(&hdev->cmd_cnt, 1);
2525         queue_work(hdev->workqueue, &hdev->cmd_work);
2526 }
2527
2528 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2529                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2530 {
2531         struct oob_data *data;
2532
2533         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2534                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2535                         continue;
2536                 if (data->bdaddr_type != bdaddr_type)
2537                         continue;
2538                 return data;
2539         }
2540
2541         return NULL;
2542 }
2543
2544 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2545                                u8 bdaddr_type)
2546 {
2547         struct oob_data *data;
2548
2549         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2550         if (!data)
2551                 return -ENOENT;
2552
2553         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2554
2555         list_del(&data->list);
2556         kfree(data);
2557
2558         return 0;
2559 }
2560
2561 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2562 {
2563         struct oob_data *data, *n;
2564
2565         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2566                 list_del(&data->list);
2567                 kfree(data);
2568         }
2569 }
2570
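/* The data->present bitmask written below records which OOB values are
 * valid: 0x01 = P-192 only, 0x02 = P-256 only, 0x03 = both, 0x00 =
 * neither.
 */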
2571 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2572                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2573                             u8 *hash256, u8 *rand256)
2574 {
2575         struct oob_data *data;
2576
2577         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2578         if (!data) {
2579                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2580                 if (!data)
2581                         return -ENOMEM;
2582
2583                 bacpy(&data->bdaddr, bdaddr);
2584                 data->bdaddr_type = bdaddr_type;
2585                 list_add(&data->list, &hdev->remote_oob_data);
2586         }
2587
2588         if (hash192 && rand192) {
2589                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2590                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2591                 if (hash256 && rand256)
2592                         data->present = 0x03;
2593         } else {
2594                 memset(data->hash192, 0, sizeof(data->hash192));
2595                 memset(data->rand192, 0, sizeof(data->rand192));
2596                 if (hash256 && rand256)
2597                         data->present = 0x02;
2598                 else
2599                         data->present = 0x00;
2600         }
2601
2602         if (hash256 && rand256) {
2603                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2604                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2605         } else {
2606                 memset(data->hash256, 0, sizeof(data->hash256));
2607                 memset(data->rand256, 0, sizeof(data->rand256));
2608                 if (hash192 && rand192)
2609                         data->present = 0x01;
2610         }
2611
2612         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2613
2614         return 0;
2615 }
2616
2617 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2618                                          bdaddr_t *bdaddr, u8 type)
2619 {
2620         struct bdaddr_list *b;
2621
2622         list_for_each_entry(b, bdaddr_list, list) {
2623                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2624                         return b;
2625         }
2626
2627         return NULL;
2628 }
2629
2630 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2631 {
2632         struct list_head *p, *n;
2633
2634         list_for_each_safe(p, n, bdaddr_list) {
2635                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2636
2637                 list_del(p);
2638                 kfree(b);
2639         }
2640 }
2641
2642 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2643 {
2644         struct bdaddr_list *entry;
2645
2646         if (!bacmp(bdaddr, BDADDR_ANY))
2647                 return -EBADF;
2648
2649         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2650                 return -EEXIST;
2651
2652         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2653         if (!entry)
2654                 return -ENOMEM;
2655
2656         bacpy(&entry->bdaddr, bdaddr);
2657         entry->bdaddr_type = type;
2658
2659         list_add(&entry->list, list);
2660
2661         return 0;
2662 }
2663
2664 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2665 {
2666         struct bdaddr_list *entry;
2667
2668         if (!bacmp(bdaddr, BDADDR_ANY)) {
2669                 hci_bdaddr_list_clear(list);
2670                 return 0;
2671         }
2672
2673         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2674         if (!entry)
2675                 return -ENOENT;
2676
2677         list_del(&entry->list);
2678         kfree(entry);
2679
2680         return 0;
2681 }
2682
2683 /* This function requires the caller holds hdev->lock */
2684 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2685                                                bdaddr_t *addr, u8 addr_type)
2686 {
2687         struct hci_conn_params *params;
2688
2689         /* The conn params list only contains identity addresses */
2690         if (!hci_is_identity_address(addr, addr_type))
2691                 return NULL;
2692
2693         list_for_each_entry(params, &hdev->le_conn_params, list) {
2694                 if (bacmp(&params->addr, addr) == 0 &&
2695                     params->addr_type == addr_type) {
2696                         return params;
2697                 }
2698         }
2699
2700         return NULL;
2701 }
2702
2703 /* This function requires the caller holds hdev->lock */
2704 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2705                                                   bdaddr_t *addr, u8 addr_type)
2706 {
2707         struct hci_conn_params *param;
2708
2709         /* The list only contains identity addresses */
2710         if (!hci_is_identity_address(addr, addr_type))
2711                 return NULL;
2712
2713         list_for_each_entry(param, list, action) {
2714                 if (bacmp(&param->addr, addr) == 0 &&
2715                     param->addr_type == addr_type)
2716                         return param;
2717         }
2718
2719         return NULL;
2720 }
2721
2722 /* This function requires the caller holds hdev->lock */
2723 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2724                                             bdaddr_t *addr, u8 addr_type)
2725 {
2726         struct hci_conn_params *params;
2727
2728         if (!hci_is_identity_address(addr, addr_type))
2729                 return NULL;
2730
2731         params = hci_conn_params_lookup(hdev, addr, addr_type);
2732         if (params)
2733                 return params;
2734
2735         params = kzalloc(sizeof(*params), GFP_KERNEL);
2736         if (!params) {
2737                 BT_ERR("Out of memory");
2738                 return NULL;
2739         }
2740
2741         bacpy(&params->addr, addr);
2742         params->addr_type = addr_type;
2743
2744         list_add(&params->list, &hdev->le_conn_params);
2745         INIT_LIST_HEAD(&params->action);
2746
2747         params->conn_min_interval = hdev->le_conn_min_interval;
2748         params->conn_max_interval = hdev->le_conn_max_interval;
2749         params->conn_latency = hdev->le_conn_latency;
2750         params->supervision_timeout = hdev->le_supv_timeout;
2751         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2752
2753         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754
2755         return params;
2756 }
2757
2758 static void hci_conn_params_free(struct hci_conn_params *params)
2759 {
2760         if (params->conn) {
2761                 hci_conn_drop(params->conn);
2762                 hci_conn_put(params->conn);
2763         }
2764
2765         list_del(&params->action);
2766         list_del(&params->list);
2767         kfree(params);
2768 }
2769
2770 /* This function requires the caller holds hdev->lock */
2771 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2772 {
2773         struct hci_conn_params *params;
2774
2775         params = hci_conn_params_lookup(hdev, addr, addr_type);
2776         if (!params)
2777                 return;
2778
2779         hci_conn_params_free(params);
2780
2781         hci_update_background_scan(hdev);
2782
2783         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2788 {
2789         struct hci_conn_params *params, *tmp;
2790
2791         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2792                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2793                         continue;
2794                 list_del(&params->list);
2795                 kfree(params);
2796         }
2797
2798         BT_DBG("All LE disabled connection parameters were removed");
2799 }
2800
2801 /* This function requires the caller holds hdev->lock */
2802 void hci_conn_params_clear_all(struct hci_dev *hdev)
2803 {
2804         struct hci_conn_params *params, *tmp;
2805
2806         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2807                 hci_conn_params_free(params);
2808
2809         hci_update_background_scan(hdev);
2810
2811         BT_DBG("All LE connection parameters were removed");
2812 }
2813
2814 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2815 {
2816         if (status) {
2817                 BT_ERR("Failed to start inquiry: status %d", status);
2818
2819                 hci_dev_lock(hdev);
2820                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2821                 hci_dev_unlock(hdev);
2822                 return;
2823         }
2824 }
2825
2826 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2827                                           u16 opcode)
2828 {
2829         /* General inquiry access code (GIAC) */
2830         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2831         struct hci_request req;
2832         struct hci_cp_inquiry cp;
2833         int err;
2834
2835         if (status) {
2836                 BT_ERR("Failed to disable LE scanning: status %d", status);
2837                 return;
2838         }
2839
2840         hdev->discovery.scan_start = 0;
2841
2842         switch (hdev->discovery.type) {
2843         case DISCOV_TYPE_LE:
2844                 hci_dev_lock(hdev);
2845                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2846                 hci_dev_unlock(hdev);
2847                 break;
2848
2849         case DISCOV_TYPE_INTERLEAVED:
2850                 hci_req_init(&req, hdev);
2851
2852                 memset(&cp, 0, sizeof(cp));
2853                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2854                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2855                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2856
2857                 hci_dev_lock(hdev);
2858
2859                 hci_inquiry_cache_flush(hdev);
2860
2861                 err = hci_req_run(&req, inquiry_complete);
2862                 if (err) {
2863                         BT_ERR("Inquiry request failed: err %d", err);
2864                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2865                 }
2866
2867                 hci_dev_unlock(hdev);
2868                 break;
2869         }
2870 }
2871
2872 static void le_scan_disable_work(struct work_struct *work)
2873 {
2874         struct hci_dev *hdev = container_of(work, struct hci_dev,
2875                                             le_scan_disable.work);
2876         struct hci_request req;
2877         int err;
2878
2879         BT_DBG("%s", hdev->name);
2880
2881         cancel_delayed_work_sync(&hdev->le_scan_restart);
2882
2883         hci_req_init(&req, hdev);
2884
2885         hci_req_add_le_scan_disable(&req);
2886
2887         err = hci_req_run(&req, le_scan_disable_work_complete);
2888         if (err)
2889                 BT_ERR("Disable LE scanning request failed: err %d", err);
2890 }
2891
2892 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2893                                           u16 opcode)
2894 {
2895         unsigned long timeout, duration, scan_start, now;
2896
2897         BT_DBG("%s", hdev->name);
2898
2899         if (status) {
2900                 BT_ERR("Failed to restart LE scan: status %d", status);
2901                 return;
2902         }
2903
2904         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2905             !hdev->discovery.scan_start)
2906                 return;
2907
2908         /* When the scan was started, hdev->le_scan_disable has been queued
2909          * after duration from scan_start. During scan restart this job
2910          * has been canceled, and we need to queue it again after proper
2911          * timeout, to make sure that scan does not run indefinitely.
2912          */
2913         duration = hdev->discovery.scan_duration;
2914         scan_start = hdev->discovery.scan_start;
2915         now = jiffies;
2916         if (now - scan_start <= duration) {
2917                 int elapsed;
2918
2919                 if (now >= scan_start)
2920                         elapsed = now - scan_start;
2921                 else
2922                         elapsed = ULONG_MAX - scan_start + now;
2923
2924                 timeout = duration - elapsed;
2925         } else {
2926                 timeout = 0;
2927         }
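        /* Worked example (assumed numbers): with a scan duration of 10240
         * jiffies of which 4000 have elapsed, the disable work is
         * re-queued 6240 jiffies from now; the ULONG_MAX arm above covers
         * jiffies wrapping between scan_start and now.
         */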
2928         queue_delayed_work(hdev->workqueue,
2929                            &hdev->le_scan_disable, timeout);
2930 }
2931
2932 static void le_scan_restart_work(struct work_struct *work)
2933 {
2934         struct hci_dev *hdev = container_of(work, struct hci_dev,
2935                                             le_scan_restart.work);
2936         struct hci_request req;
2937         struct hci_cp_le_set_scan_enable cp;
2938         int err;
2939
2940         BT_DBG("%s", hdev->name);
2941
2942         /* If the controller is not scanning we are done. */
2943         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2944                 return;
2945
2946         hci_req_init(&req, hdev);
2947
2948         hci_req_add_le_scan_disable(&req);
2949
2950         memset(&cp, 0, sizeof(cp));
2951         cp.enable = LE_SCAN_ENABLE;
2952         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2953         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2954
2955         err = hci_req_run(&req, le_scan_restart_work_complete);
2956         if (err)
2957                 BT_ERR("Restart LE scan request failed: err %d", err);
2958 }
2959
2960 /* Copy the Identity Address of the controller.
2961  *
2962  * If the controller has a public BD_ADDR, then by default use that one.
2963  * If this is an LE only controller without a public address, default to
2964  * the static random address.
2965  *
2966  * For debugging purposes it is possible to force controllers with a
2967  * public address to use the static random address instead.
2968  *
2969  * In case BR/EDR has been disabled on a dual-mode controller and
2970  * userspace has configured a static address, then that address
2971  * becomes the identity address instead of the public BR/EDR address.
2972  */
2973 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2974                                u8 *bdaddr_type)
2975 {
2976         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2977             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2978             (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2979              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2980                 bacpy(bdaddr, &hdev->static_addr);
2981                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2982         } else {
2983                 bacpy(bdaddr, &hdev->bdaddr);
2984                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2985         }
2986 }
2987
2988 /* Alloc HCI device */
2989 struct hci_dev *hci_alloc_dev(void)
2990 {
2991         struct hci_dev *hdev;
2992
2993         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2994         if (!hdev)
2995                 return NULL;
2996
2997         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2998         hdev->esco_type = (ESCO_HV1);
2999         hdev->link_mode = (HCI_LM_ACCEPT);
3000         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3001         hdev->io_capability = 0x03;     /* No Input No Output */
3002         hdev->manufacturer = 0xffff;    /* Default to internal use */
3003         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3004         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3005
3006         hdev->sniff_max_interval = 800;
3007         hdev->sniff_min_interval = 80;
3008
3009         hdev->le_adv_channel_map = 0x07;
3010         hdev->le_adv_min_interval = 0x0800;
3011         hdev->le_adv_max_interval = 0x0800;
3012         hdev->le_scan_interval = 0x0060;
3013         hdev->le_scan_window = 0x0030;
3014         hdev->le_conn_min_interval = 0x0028;
3015         hdev->le_conn_max_interval = 0x0038;
3016         hdev->le_conn_latency = 0x0000;
3017         hdev->le_supv_timeout = 0x002a;
3018         hdev->le_def_tx_len = 0x001b;
3019         hdev->le_def_tx_time = 0x0148;
3020         hdev->le_max_tx_len = 0x001b;
3021         hdev->le_max_tx_time = 0x0148;
3022         hdev->le_max_rx_len = 0x001b;
3023         hdev->le_max_rx_time = 0x0148;
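
        /* The LE defaults above use the standard HCI units: advertising
         * and scan parameters count 0.625 ms slots (0x0800 = 1.28 s,
         * 0x0060 = 60 ms, 0x0030 = 30 ms), connection intervals count
         * 1.25 ms slots (0x0028 = 50 ms, 0x0038 = 70 ms) and the
         * supervision timeout counts 10 ms slots (0x002a = 420 ms).
         */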
3024
3025         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3026         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3027         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3028         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3029
3030         mutex_init(&hdev->lock);
3031         mutex_init(&hdev->req_lock);
3032
3033         INIT_LIST_HEAD(&hdev->mgmt_pending);
3034         INIT_LIST_HEAD(&hdev->blacklist);
3035         INIT_LIST_HEAD(&hdev->whitelist);
3036         INIT_LIST_HEAD(&hdev->uuids);
3037         INIT_LIST_HEAD(&hdev->link_keys);
3038         INIT_LIST_HEAD(&hdev->long_term_keys);
3039         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3040         INIT_LIST_HEAD(&hdev->remote_oob_data);
3041         INIT_LIST_HEAD(&hdev->le_white_list);
3042         INIT_LIST_HEAD(&hdev->le_conn_params);
3043         INIT_LIST_HEAD(&hdev->pend_le_conns);
3044         INIT_LIST_HEAD(&hdev->pend_le_reports);
3045         INIT_LIST_HEAD(&hdev->conn_hash.list);
3046
3047         INIT_WORK(&hdev->rx_work, hci_rx_work);
3048         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3049         INIT_WORK(&hdev->tx_work, hci_tx_work);
3050         INIT_WORK(&hdev->power_on, hci_power_on);
3051         INIT_WORK(&hdev->error_reset, hci_error_reset);
3052
3053         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3054         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3055         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3056         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3057
3058         skb_queue_head_init(&hdev->rx_q);
3059         skb_queue_head_init(&hdev->cmd_q);
3060         skb_queue_head_init(&hdev->raw_q);
3061
3062         init_waitqueue_head(&hdev->req_wait_q);
3063
3064         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3065
3066         hci_init_sysfs(hdev);
3067         discovery_init(hdev);
3068
3069         return hdev;
3070 }
3071 EXPORT_SYMBOL(hci_alloc_dev);
3072
3073 /* Free HCI device */
3074 void hci_free_dev(struct hci_dev *hdev)
3075 {
3076         /* will free via device release */
3077         put_device(&hdev->dev);
3078 }
3079 EXPORT_SYMBOL(hci_free_dev);
3080
3081 /* Register HCI device */
3082 int hci_register_dev(struct hci_dev *hdev)
3083 {
3084         int id, error;
3085
3086         if (!hdev->open || !hdev->close || !hdev->send)
3087                 return -EINVAL;
3088
3089         /* Do not allow HCI_AMP devices to register at index 0,
3090          * so the index can be used as the AMP controller ID.
3091          */
3092         switch (hdev->dev_type) {
3093         case HCI_BREDR:
3094                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3095                 break;
3096         case HCI_AMP:
3097                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3098                 break;
3099         default:
3100                 return -EINVAL;
3101         }
3102
3103         if (id < 0)
3104                 return id;
3105
3106         sprintf(hdev->name, "hci%d", id);
3107         hdev->id = id;
3108
3109         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3110
3111         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3112                                           WQ_MEM_RECLAIM, 1, hdev->name);
3113         if (!hdev->workqueue) {
3114                 error = -ENOMEM;
3115                 goto err;
3116         }
3117
3118         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3119                                               WQ_MEM_RECLAIM, 1, hdev->name);
3120         if (!hdev->req_workqueue) {
3121                 destroy_workqueue(hdev->workqueue);
3122                 error = -ENOMEM;
3123                 goto err;
3124         }
3125
3126         if (!IS_ERR_OR_NULL(bt_debugfs))
3127                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3128
3129         dev_set_name(&hdev->dev, "%s", hdev->name);
3130
3131         error = device_add(&hdev->dev);
3132         if (error < 0)
3133                 goto err_wqueue;
3134
3135         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3136                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3137                                     hdev);
3138         if (hdev->rfkill) {
3139                 if (rfkill_register(hdev->rfkill) < 0) {
3140                         rfkill_destroy(hdev->rfkill);
3141                         hdev->rfkill = NULL;
3142                 }
3143         }
3144
3145         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3146                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3147
3148         set_bit(HCI_SETUP, &hdev->dev_flags);
3149         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3150
3151         if (hdev->dev_type == HCI_BREDR) {
3152                 /* Assume BR/EDR support until proven otherwise (such as
3153                  * through reading supported features during init).
3154                  */
3155                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3156         }
3157
3158         write_lock(&hci_dev_list_lock);
3159         list_add(&hdev->list, &hci_dev_list);
3160         write_unlock(&hci_dev_list_lock);
3161
3162         /* Devices that are marked for raw-only usage are unconfigured
3163          * and should not be included in normal operation.
3164          */
3165         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3166                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3167
3168         hci_notify(hdev, HCI_DEV_REG);
3169         hci_dev_hold(hdev);
3170
3171         queue_work(hdev->req_workqueue, &hdev->power_on);
3172
3173         return id;
3174
3175 err_wqueue:
3176         destroy_workqueue(hdev->workqueue);
3177         destroy_workqueue(hdev->req_workqueue);
3178 err:
3179         ida_simple_remove(&hci_index_ida, hdev->id);
3180
3181         return error;
3182 }
3183 EXPORT_SYMBOL(hci_register_dev);
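
/* Illustrative sketch (not part of this file; all my_* names are
 * hypothetical): a minimal vendor driver pairs hci_alloc_dev() and
 * hci_register_dev() at probe time, and must provide open, close and
 * send callbacks or registration fails with -EINVAL above.
 *
 *      static int my_open(struct hci_dev *hdev)  { return 0; }
 *      static int my_close(struct hci_dev *hdev) { return 0; }
 *
 *      static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
 *      {
 *              kfree_skb(skb);   (a real driver queues this to hardware)
 *              return 0;
 *      }
 *
 *      static int my_probe(void)
 *      {
 *              struct hci_dev *hdev = hci_alloc_dev();
 *              int err;
 *
 *              if (!hdev)
 *                      return -ENOMEM;
 *
 *              hdev->bus   = HCI_VIRTUAL;
 *              hdev->open  = my_open;
 *              hdev->close = my_close;
 *              hdev->send  = my_send;
 *
 *              err = hci_register_dev(hdev);
 *              if (err < 0) {
 *                      hci_free_dev(hdev);
 *                      return err;
 *              }
 *              return 0;
 *      }
 */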
3184
3185 /* Unregister HCI device */
3186 void hci_unregister_dev(struct hci_dev *hdev)
3187 {
3188         int i, id;
3189
3190         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3191
3192         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3193
3194         id = hdev->id;
3195
3196         write_lock(&hci_dev_list_lock);
3197         list_del(&hdev->list);
3198         write_unlock(&hci_dev_list_lock);
3199
3200         hci_dev_do_close(hdev);
3201
3202         for (i = 0; i < NUM_REASSEMBLY; i++)
3203                 kfree_skb(hdev->reassembly[i]);
3204
3205         cancel_work_sync(&hdev->power_on);
3206
3207         if (!test_bit(HCI_INIT, &hdev->flags) &&
3208             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3209             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3210                 hci_dev_lock(hdev);
3211                 mgmt_index_removed(hdev);
3212                 hci_dev_unlock(hdev);
3213         }
3214
3215         /* mgmt_index_removed should take care of emptying the
3216          * pending list */
3217         BUG_ON(!list_empty(&hdev->mgmt_pending));
3218
3219         hci_notify(hdev, HCI_DEV_UNREG);
3220
3221         if (hdev->rfkill) {
3222                 rfkill_unregister(hdev->rfkill);
3223                 rfkill_destroy(hdev->rfkill);
3224         }
3225
3226         device_del(&hdev->dev);
3227
3228         debugfs_remove_recursive(hdev->debugfs);
3229
3230         destroy_workqueue(hdev->workqueue);
3231         destroy_workqueue(hdev->req_workqueue);
3232
3233         hci_dev_lock(hdev);
3234         hci_bdaddr_list_clear(&hdev->blacklist);
3235         hci_bdaddr_list_clear(&hdev->whitelist);
3236         hci_uuids_clear(hdev);
3237         hci_link_keys_clear(hdev);
3238         hci_smp_ltks_clear(hdev);
3239         hci_smp_irks_clear(hdev);
3240         hci_remote_oob_data_clear(hdev);
3241         hci_bdaddr_list_clear(&hdev->le_white_list);
3242         hci_conn_params_clear_all(hdev);
3243         hci_discovery_filter_clear(hdev);
3244         hci_dev_unlock(hdev);
3245
3246         hci_dev_put(hdev);
3247
3248         ida_simple_remove(&hci_index_ida, id);
3249 }
3250 EXPORT_SYMBOL(hci_unregister_dev);
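
/* Illustrative sketch (hypothetical my_remove, the counterpart to the
 * probe example above): hci_free_dev() just drops the device reference
 * taken at allocation, so it must follow hci_unregister_dev(), never
 * replace it.
 *
 *      static void my_remove(struct hci_dev *hdev)
 *      {
 *              hci_unregister_dev(hdev);
 *              hci_free_dev(hdev);
 *      }
 */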
3251
3252 /* Suspend HCI device */
3253 int hci_suspend_dev(struct hci_dev *hdev)
3254 {
3255         hci_notify(hdev, HCI_DEV_SUSPEND);
3256         return 0;
3257 }
3258 EXPORT_SYMBOL(hci_suspend_dev);
3259
3260 /* Resume HCI device */
3261 int hci_resume_dev(struct hci_dev *hdev)
3262 {
3263         hci_notify(hdev, HCI_DEV_RESUME);
3264         return 0;
3265 }
3266 EXPORT_SYMBOL(hci_resume_dev);
3267
3268 /* Reset HCI device */
3269 int hci_reset_dev(struct hci_dev *hdev)
3270 {
3271         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3272         struct sk_buff *skb;
3273
3274         skb = bt_skb_alloc(3, GFP_ATOMIC);
3275         if (!skb)
3276                 return -ENOMEM;
3277
3278         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3279         memcpy(skb_put(skb, 3), hw_err, 3);
3280
3281         /* Send Hardware Error to upper stack */
3282         return hci_recv_frame(hdev, skb);
3283 }
3284 EXPORT_SYMBOL(hci_reset_dev);
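
/* Illustrative sketch: a transport driver that detects a wedged
 * controller, e.g. after a failed transfer, can inject this synthetic
 * Hardware Error event so the core can run its Hardware Error recovery
 * path. The condition below is hypothetical.
 *
 *      if (my_transport_wedged(hdev))
 *              hci_reset_dev(hdev);
 */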
3285
3286 /* Receive frame from HCI drivers */
3287 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3288 {
3289         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3290                       !test_bit(HCI_INIT, &hdev->flags))) {
3291                 kfree_skb(skb);
3292                 return -ENXIO;
3293         }
3294
3295         /* Incoming skb */
3296         bt_cb(skb)->incoming = 1;
3297
3298         /* Time stamp */
3299         __net_timestamp(skb);
3300
3301         skb_queue_tail(&hdev->rx_q, skb);
3302         queue_work(hdev->workqueue, &hdev->rx_work);
3303
3304         return 0;
3305 }
3306 EXPORT_SYMBOL(hci_recv_frame);
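
/* Illustrative sketch: a driver's RX completion handler wraps each
 * complete packet in an skb, tags its type and hands it to the core;
 * the skb is consumed on both the success and error paths. data and
 * len are hypothetical transport buffers.
 *
 *      struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *      if (skb) {
 *              memcpy(skb_put(skb, len), data, len);
 *              bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *              hci_recv_frame(hdev, skb);
 *      }
 */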
3307
3308 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3309                           int count, __u8 index)
3310 {
3311         int len = 0;
3312         int hlen = 0;
3313         int remain = count;
3314         struct sk_buff *skb;
3315         struct bt_skb_cb *scb;
3316
3317         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3318             index >= NUM_REASSEMBLY)
3319                 return -EILSEQ;
3320
3321         skb = hdev->reassembly[index];
3322
3323         if (!skb) {
3324                 switch (type) {
3325                 case HCI_ACLDATA_PKT:
3326                         len = HCI_MAX_FRAME_SIZE;
3327                         hlen = HCI_ACL_HDR_SIZE;
3328                         break;
3329                 case HCI_EVENT_PKT:
3330                         len = HCI_MAX_EVENT_SIZE;
3331                         hlen = HCI_EVENT_HDR_SIZE;
3332                         break;
3333                 case HCI_SCODATA_PKT:
3334                         len = HCI_MAX_SCO_SIZE;
3335                         hlen = HCI_SCO_HDR_SIZE;
3336                         break;
3337                 }
3338
3339                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3340                 if (!skb)
3341                         return -ENOMEM;
3342
3343                 scb = (void *) skb->cb;
3344                 scb->expect = hlen;
3345                 scb->pkt_type = type;
3346
3347                 hdev->reassembly[index] = skb;
3348         }
3349
3350         while (count) {
3351                 scb = (void *) skb->cb;
3352                 len = min_t(uint, scb->expect, count);
3353
3354                 memcpy(skb_put(skb, len), data, len);
3355
3356                 count -= len;
3357                 data += len;
3358                 scb->expect -= len;
3359                 remain = count;
3360
3361                 switch (type) {
3362                 case HCI_EVENT_PKT:
3363                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3364                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3365                                 scb->expect = h->plen;
3366
3367                                 if (skb_tailroom(skb) < scb->expect) {
3368                                         kfree_skb(skb);
3369                                         hdev->reassembly[index] = NULL;
3370                                         return -ENOMEM;
3371                                 }
3372                         }
3373                         break;
3374
3375                 case HCI_ACLDATA_PKT:
3376                         if (skb->len == HCI_ACL_HDR_SIZE) {
3377                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3378                                 scb->expect = __le16_to_cpu(h->dlen);
3379
3380                                 if (skb_tailroom(skb) < scb->expect) {
3381                                         kfree_skb(skb);
3382                                         hdev->reassembly[index] = NULL;
3383                                         return -ENOMEM;
3384                                 }
3385                         }
3386                         break;
3387
3388                 case HCI_SCODATA_PKT:
3389                         if (skb->len == HCI_SCO_HDR_SIZE) {
3390                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3391                                 scb->expect = h->dlen;
3392
3393                                 if (skb_tailroom(skb) < scb->expect) {
3394                                         kfree_skb(skb);
3395                                         hdev->reassembly[index] = NULL;
3396                                         return -ENOMEM;
3397                                 }
3398                         }
3399                         break;
3400                 }
3401
3402                 if (scb->expect == 0) {
3403                         /* Complete frame */
3404
3405                         bt_cb(skb)->pkt_type = type;
3406                         hci_recv_frame(hdev, skb);
3407
3408                         hdev->reassembly[index] = NULL;
3409                         return remain;
3410                 }
3411         }
3412
3413         return remain;
3414 }
3415
3416 #define STREAM_REASSEMBLY 0
3417
3418 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3419 {
3420         int type;
3421         int rem = 0;
3422
3423         while (count) {
3424                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3425
3426                 if (!skb) {
3427                         struct { char type; } *pkt;
3428
3429                         /* Start of the frame */
3430                         pkt = data;
3431                         type = pkt->type;
3432
3433                         data++;
3434                         count--;
3435                 } else
3436                         type = bt_cb(skb)->pkt_type;
3437
3438                 rem = hci_reassembly(hdev, type, data, count,
3439                                      STREAM_REASSEMBLY);
3440                 if (rem < 0)
3441                         return rem;
3442
3443                 data += (count - rem);
3444                 count = rem;
3445         }
3446
3447         return rem;
3448 }
3449 EXPORT_SYMBOL(hci_recv_stream_fragment);
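
/* Illustrative sketch: UART (H:4 style) drivers feed raw byte chunks
 * straight from the line discipline; the leading byte of each frame is
 * the packet type indicator consumed above. buf and len are
 * hypothetical.
 *
 *      hci_recv_stream_fragment(hdev, buf, len);
 */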
3450
3451 /* ---- Interface to upper protocols ---- */
3452
3453 int hci_register_cb(struct hci_cb *cb)
3454 {
3455         BT_DBG("%p name %s", cb, cb->name);
3456
3457         write_lock(&hci_cb_list_lock);
3458         list_add(&cb->list, &hci_cb_list);
3459         write_unlock(&hci_cb_list_lock);
3460
3461         return 0;
3462 }
3463 EXPORT_SYMBOL(hci_register_cb);
3464
3465 int hci_unregister_cb(struct hci_cb *cb)
3466 {
3467         BT_DBG("%p name %s", cb, cb->name);
3468
3469         write_lock(&hci_cb_list_lock);
3470         list_del(&cb->list);
3471         write_unlock(&hci_cb_list_lock);
3472
3473         return 0;
3474 }
3475 EXPORT_SYMBOL(hci_unregister_cb);
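
/* Illustrative sketch: an upper protocol registers one struct hci_cb at
 * module init to receive link-level callbacks, and removes it on exit.
 * my_proto is hypothetical.
 *
 *      static struct hci_cb my_proto_cb = {
 *              .name = "my_proto",
 *      };
 *
 *      hci_register_cb(&my_proto_cb);
 *      ...
 *      hci_unregister_cb(&my_proto_cb);
 */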
3476
3477 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3478 {
3479         int err;
3480
3481         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3482
3483         /* Time stamp */
3484         __net_timestamp(skb);
3485
3486         /* Send copy to monitor */
3487         hci_send_to_monitor(hdev, skb);
3488
3489         if (atomic_read(&hdev->promisc)) {
3490                 /* Send copy to the sockets */
3491                 hci_send_to_sock(hdev, skb);
3492         }
3493
3494         /* Get rid of skb owner, prior to sending to the driver. */
3495         skb_orphan(skb);
3496
3497         err = hdev->send(hdev, skb);
3498         if (err < 0) {
3499                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3500                 kfree_skb(skb);
3501         }
3502 }
3503
3504 bool hci_req_pending(struct hci_dev *hdev)
3505 {
3506         return (hdev->req_status == HCI_REQ_PEND);
3507 }
3508
3509 /* Send HCI command */
3510 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3511                  const void *param)
3512 {
3513         struct sk_buff *skb;
3514
3515         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3516
3517         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3518         if (!skb) {
3519                 BT_ERR("%s no memory for command", hdev->name);
3520                 return -ENOMEM;
3521         }
3522
3523         /* Stand-alone HCI commands must be flagged as
3524          * single-command requests.
3525          */
3526         bt_cb(skb)->req.start = true;
3527
3528         skb_queue_tail(&hdev->cmd_q, skb);
3529         queue_work(hdev->workqueue, &hdev->cmd_work);
3530
3531         return 0;
3532 }
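
/* Illustrative sketch: a stand-alone command is queued on cmd_q and
 * drained by hci_cmd_work(); for example, enabling LE host support with
 * a command structure defined in hci.h:
 *
 *      struct hci_cp_write_le_host_supported cp = { 0x01, 0x00 };
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
 */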
3533
3534 /* Get data from the previously sent command */
3535 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3536 {
3537         struct hci_command_hdr *hdr;
3538
3539         if (!hdev->sent_cmd)
3540                 return NULL;
3541
3542         hdr = (void *) hdev->sent_cmd->data;
3543
3544         if (hdr->opcode != cpu_to_le16(opcode))
3545                 return NULL;
3546
3547         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3548
3549         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3550 }
3551
3552 /* Send ACL data */
3553 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3554 {
3555         struct hci_acl_hdr *hdr;
3556         int len = skb->len;
3557
3558         skb_push(skb, HCI_ACL_HDR_SIZE);
3559         skb_reset_transport_header(skb);
3560         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3561         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3562         hdr->dlen   = cpu_to_le16(len);
3563 }
3564
3565 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3566                           struct sk_buff *skb, __u16 flags)
3567 {
3568         struct hci_conn *conn = chan->conn;
3569         struct hci_dev *hdev = conn->hdev;
3570         struct sk_buff *list;
3571
3572         skb->len = skb_headlen(skb);
3573         skb->data_len = 0;
3574
3575         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3576
3577         switch (hdev->dev_type) {
3578         case HCI_BREDR:
3579                 hci_add_acl_hdr(skb, conn->handle, flags);
3580                 break;
3581         case HCI_AMP:
3582                 hci_add_acl_hdr(skb, chan->handle, flags);
3583                 break;
3584         default:
3585                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3586                 return;
3587         }
3588
3589         list = skb_shinfo(skb)->frag_list;
3590         if (!list) {
3591                 /* Non-fragmented */
3592                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3593
3594                 skb_queue_tail(queue, skb);
3595         } else {
3596                 /* Fragmented */
3597                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3598
3599                 skb_shinfo(skb)->frag_list = NULL;
3600
3601                 /* Queue all fragments atomically. We need to use spin_lock_bh
3602                  * here because of 6LoWPAN links, as there this function is
3603                  * called from softirq and using normal spin lock could cause
3604                  * deadlocks.
3605                  */
3606                 spin_lock_bh(&queue->lock);
3607
3608                 __skb_queue_tail(queue, skb);
3609
3610                 flags &= ~ACL_START;
3611                 flags |= ACL_CONT;
3612                 do {
3613                         skb = list;
3614                         list = list->next;
3615                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3616                         hci_add_acl_hdr(skb, conn->handle, flags);
3617
3618                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3619
3620                         __skb_queue_tail(queue, skb);
3621                 } while (list);
3622
3623                 spin_unlock_bh(&queue->lock);
3624         }
3625 }
3626
3627 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3628 {
3629         struct hci_dev *hdev = chan->conn->hdev;
3630
3631         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3632
3633         hci_queue_acl(chan, &chan->data_q, skb, flags);
3634
3635         queue_work(hdev->workqueue, &hdev->tx_work);
3636 }
3637
3638 /* Send SCO data */
3639 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3640 {
3641         struct hci_dev *hdev = conn->hdev;
3642         struct hci_sco_hdr hdr;
3643
3644         BT_DBG("%s len %d", hdev->name, skb->len);
3645
3646         hdr.handle = cpu_to_le16(conn->handle);
3647         hdr.dlen   = skb->len;
3648
3649         skb_push(skb, HCI_SCO_HDR_SIZE);
3650         skb_reset_transport_header(skb);
3651         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3652
3653         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3654
3655         skb_queue_tail(&conn->data_q, skb);
3656         queue_work(hdev->workqueue, &hdev->tx_work);
3657 }
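
/* Illustrative sketch: callers such as sco.c pass a fully formed voice
 * payload; bt_skb_alloc() already reserves headroom for the 3-byte SCO
 * header pushed above. data and len are hypothetical.
 *
 *      struct sk_buff *skb = bt_skb_alloc(len, GFP_KERNEL);
 *
 *      if (skb) {
 *              memcpy(skb_put(skb, len), data, len);
 *              hci_send_sco(conn, skb);
 *      }
 */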
3658
3659 /* ---- HCI TX task (outgoing data) ---- */
3660
3661 /* HCI Connection scheduler */
3662 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3663                                      int *quote)
3664 {
3665         struct hci_conn_hash *h = &hdev->conn_hash;
3666         struct hci_conn *conn = NULL, *c;
3667         unsigned int num = 0, min = ~0;
3668
3669         /* We don't have to lock the device here. Connections are always
3670          * added and removed with the TX task disabled. */
3671
3672         rcu_read_lock();
3673
3674         list_for_each_entry_rcu(c, &h->list, list) {
3675                 if (c->type != type || skb_queue_empty(&c->data_q))
3676                         continue;
3677
3678                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3679                         continue;
3680
3681                 num++;
3682
3683                 if (c->sent < min) {
3684                         min  = c->sent;
3685                         conn = c;
3686                 }
3687
3688                 if (hci_conn_num(hdev, type) == num)
3689                         break;
3690         }
3691
3692         rcu_read_unlock();
3693
3694         if (conn) {
3695                 int cnt, q;
3696
3697                 switch (conn->type) {
3698                 case ACL_LINK:
3699                         cnt = hdev->acl_cnt;
3700                         break;
3701                 case SCO_LINK:
3702                 case ESCO_LINK:
3703                         cnt = hdev->sco_cnt;
3704                         break;
3705                 case LE_LINK:
3706                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3707                         break;
3708                 default:
3709                         cnt = 0;
3710                         BT_ERR("Unknown link type");
3711                 }
3712
3713                 q = cnt / num;
3714                 *quote = q ? q : 1;
3715         } else
3716                 *quote = 0;
3717
3718         BT_DBG("conn %p quote %d", conn, *quote);
3719         return conn;
3720 }
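
/* Worked example of the quota above: with hdev->acl_cnt == 5 free ACL
 * slots shared by num == 2 busy ACL connections, q = 5 / 2 = 2, so the
 * least recently served connection may send two packets this round; a
 * zero quotient is rounded up to a quote of 1 so a busy link always
 * makes progress.
 */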
3721
3722 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3723 {
3724         struct hci_conn_hash *h = &hdev->conn_hash;
3725         struct hci_conn *c;
3726
3727         BT_ERR("%s link tx timeout", hdev->name);
3728
3729         rcu_read_lock();
3730
3731         /* Kill stalled connections */
3732         list_for_each_entry_rcu(c, &h->list, list) {
3733                 if (c->type == type && c->sent) {
3734                         BT_ERR("%s killing stalled connection %pMR",
3735                                hdev->name, &c->dst);
3736                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3737                 }
3738         }
3739
3740         rcu_read_unlock();
3741 }
3742
3743 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3744                                       int *quote)
3745 {
3746         struct hci_conn_hash *h = &hdev->conn_hash;
3747         struct hci_chan *chan = NULL;
3748         unsigned int num = 0, min = ~0, cur_prio = 0;
3749         struct hci_conn *conn;
3750         int cnt, q, conn_num = 0;
3751
3752         BT_DBG("%s", hdev->name);
3753
3754         rcu_read_lock();
3755
3756         list_for_each_entry_rcu(conn, &h->list, list) {
3757                 struct hci_chan *tmp;
3758
3759                 if (conn->type != type)
3760                         continue;
3761
3762                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3763                         continue;
3764
3765                 conn_num++;
3766
3767                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3768                         struct sk_buff *skb;
3769
3770                         if (skb_queue_empty(&tmp->data_q))
3771                                 continue;
3772
3773                         skb = skb_peek(&tmp->data_q);
3774                         if (skb->priority < cur_prio)
3775                                 continue;
3776
3777                         if (skb->priority > cur_prio) {
3778                                 num = 0;
3779                                 min = ~0;
3780                                 cur_prio = skb->priority;
3781                         }
3782
3783                         num++;
3784
3785                         if (conn->sent < min) {
3786                                 min  = conn->sent;
3787                                 chan = tmp;
3788                         }
3789                 }
3790
3791                 if (hci_conn_num(hdev, type) == conn_num)
3792                         break;
3793         }
3794
3795         rcu_read_unlock();
3796
3797         if (!chan)
3798                 return NULL;
3799
3800         switch (chan->conn->type) {
3801         case ACL_LINK:
3802                 cnt = hdev->acl_cnt;
3803                 break;
3804         case AMP_LINK:
3805                 cnt = hdev->block_cnt;
3806                 break;
3807         case SCO_LINK:
3808         case ESCO_LINK:
3809                 cnt = hdev->sco_cnt;
3810                 break;
3811         case LE_LINK:
3812                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3813                 break;
3814         default:
3815                 cnt = 0;
3816                 BT_ERR("Unknown link type");
3817         }
3818
3819         q = cnt / num;
3820         *quote = q ? q : 1;
3821         BT_DBG("chan %p quote %d", chan, *quote);
3822         return chan;
3823 }
3824
3825 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3826 {
3827         struct hci_conn_hash *h = &hdev->conn_hash;
3828         struct hci_conn *conn;
3829         int num = 0;
3830
3831         BT_DBG("%s", hdev->name);
3832
3833         rcu_read_lock();
3834
3835         list_for_each_entry_rcu(conn, &h->list, list) {
3836                 struct hci_chan *chan;
3837
3838                 if (conn->type != type)
3839                         continue;
3840
3841                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3842                         continue;
3843
3844                 num++;
3845
3846                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3847                         struct sk_buff *skb;
3848
3849                         if (chan->sent) {
3850                                 chan->sent = 0;
3851                                 continue;
3852                         }
3853
3854                         if (skb_queue_empty(&chan->data_q))
3855                                 continue;
3856
3857                         skb = skb_peek(&chan->data_q);
3858                         if (skb->priority >= HCI_PRIO_MAX - 1)
3859                                 continue;
3860
3861                         skb->priority = HCI_PRIO_MAX - 1;
3862
3863                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3864                                skb->priority);
3865                 }
3866
3867                 if (hci_conn_num(hdev, type) == num)
3868                         break;
3869         }
3870
3871         rcu_read_unlock();
3872 }
3874
3875 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3876 {
3877         /* Calculate count of blocks used by this packet */
3878         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3879 }
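
/* Worked example: with a hypothetical hdev->block_len of 100 bytes, an
 * skb of 254 bytes (250 bytes of payload after the 4-byte ACL header)
 * occupies DIV_ROUND_UP(250, 100) = 3 controller buffer blocks.
 */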
3880
3881 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3882 {
3883         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
3884                 /* ACL tx timeout must be longer than maximum
3885                  * link supervision timeout (40.9 seconds) */
3886                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3887                                        HCI_ACL_TX_TIMEOUT))
3888                         hci_link_tx_to(hdev, ACL_LINK);
3889         }
3890 }
3891
3892 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3893 {
3894         unsigned int cnt = hdev->acl_cnt;
3895         struct hci_chan *chan;
3896         struct sk_buff *skb;
3897         int quote;
3898
3899         __check_timeout(hdev, cnt);
3900
3901         while (hdev->acl_cnt &&
3902                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3903                 u32 priority = (skb_peek(&chan->data_q))->priority;
3904                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3905                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3906                                skb->len, skb->priority);
3907
3908                         /* Stop if priority has changed */
3909                         if (skb->priority < priority)
3910                                 break;
3911
3912                         skb = skb_dequeue(&chan->data_q);
3913
3914                         hci_conn_enter_active_mode(chan->conn,
3915                                                    bt_cb(skb)->force_active);
3916
3917                         hci_send_frame(hdev, skb);
3918                         hdev->acl_last_tx = jiffies;
3919
3920                         hdev->acl_cnt--;
3921                         chan->sent++;
3922                         chan->conn->sent++;
3923                 }
3924         }
3925
3926         if (cnt != hdev->acl_cnt)
3927                 hci_prio_recalculate(hdev, ACL_LINK);
3928 }
3929
3930 static void hci_sched_acl_blk(struct hci_dev *hdev)
3931 {
3932         unsigned int cnt = hdev->block_cnt;
3933         struct hci_chan *chan;
3934         struct sk_buff *skb;
3935         int quote;
3936         u8 type;
3937
3938         __check_timeout(hdev, cnt);
3939
3940         BT_DBG("%s", hdev->name);
3941
3942         if (hdev->dev_type == HCI_AMP)
3943                 type = AMP_LINK;
3944         else
3945                 type = ACL_LINK;
3946
3947         while (hdev->block_cnt > 0 &&
3948                (chan = hci_chan_sent(hdev, type, &quote))) {
3949                 u32 priority = (skb_peek(&chan->data_q))->priority;
3950                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3951                         int blocks;
3952
3953                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3954                                skb->len, skb->priority);
3955
3956                         /* Stop if priority has changed */
3957                         if (skb->priority < priority)
3958                                 break;
3959
3960                         skb = skb_dequeue(&chan->data_q);
3961
3962                         blocks = __get_blocks(hdev, skb);
3963                         if (blocks > hdev->block_cnt)
3964                                 return;
3965
3966                         hci_conn_enter_active_mode(chan->conn,
3967                                                    bt_cb(skb)->force_active);
3968
3969                         hci_send_frame(hdev, skb);
3970                         hdev->acl_last_tx = jiffies;
3971
3972                         hdev->block_cnt -= blocks;
3973                         quote -= blocks;
3974
3975                         chan->sent += blocks;
3976                         chan->conn->sent += blocks;
3977                 }
3978         }
3979
3980         if (cnt != hdev->block_cnt)
3981                 hci_prio_recalculate(hdev, type);
3982 }
3983
3984 static void hci_sched_acl(struct hci_dev *hdev)
3985 {
3986         BT_DBG("%s", hdev->name);
3987
3988         /* No ACL link over BR/EDR controller */
3989         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3990                 return;
3991
3992         /* No AMP link over AMP controller */
3993         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3994                 return;
3995
3996         switch (hdev->flow_ctl_mode) {
3997         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3998                 hci_sched_acl_pkt(hdev);
3999                 break;
4000
4001         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4002                 hci_sched_acl_blk(hdev);
4003                 break;
4004         }
4005 }
4006
4007 /* Schedule SCO */
4008 static void hci_sched_sco(struct hci_dev *hdev)
4009 {
4010         struct hci_conn *conn;
4011         struct sk_buff *skb;
4012         int quote;
4013
4014         BT_DBG("%s", hdev->name);
4015
4016         if (!hci_conn_num(hdev, SCO_LINK))
4017                 return;
4018
4019         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4020                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4021                         BT_DBG("skb %p len %d", skb, skb->len);
4022                         hci_send_frame(hdev, skb);
4023
4024                         conn->sent++;
4025                         if (conn->sent == ~0)
4026                                 conn->sent = 0;
4027                 }
4028         }
4029 }
4030
4031 static void hci_sched_esco(struct hci_dev *hdev)
4032 {
4033         struct hci_conn *conn;
4034         struct sk_buff *skb;
4035         int quote;
4036
4037         BT_DBG("%s", hdev->name);
4038
4039         if (!hci_conn_num(hdev, ESCO_LINK))
4040                 return;
4041
4042         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4043                                                      &quote))) {
4044                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4045                         BT_DBG("skb %p len %d", skb, skb->len);
4046                         hci_send_frame(hdev, skb);
4047
4048                         conn->sent++;
4049                         if (conn->sent == ~0)
4050                                 conn->sent = 0;
4051                 }
4052         }
4053 }
4054
4055 static void hci_sched_le(struct hci_dev *hdev)
4056 {
4057         struct hci_chan *chan;
4058         struct sk_buff *skb;
4059         int quote, cnt, tmp;
4060
4061         BT_DBG("%s", hdev->name);
4062
4063         if (!hci_conn_num(hdev, LE_LINK))
4064                 return;
4065
4066         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4067                 /* LE tx timeout must be longer than maximum
4068                  * link supervision timeout (40.9 seconds) */
4069                 if (!hdev->le_cnt && hdev->le_pkts &&
4070                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4071                         hci_link_tx_to(hdev, LE_LINK);
4072         }
4073
4074         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4075         tmp = cnt;
4076         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4077                 u32 priority = (skb_peek(&chan->data_q))->priority;
4078                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4079                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4080                                skb->len, skb->priority);
4081
4082                         /* Stop if priority has changed */
4083                         if (skb->priority < priority)
4084                                 break;
4085
4086                         skb = skb_dequeue(&chan->data_q);
4087
4088                         hci_send_frame(hdev, skb);
4089                         hdev->le_last_tx = jiffies;
4090
4091                         cnt--;
4092                         chan->sent++;
4093                         chan->conn->sent++;
4094                 }
4095         }
4096
4097         if (hdev->le_pkts)
4098                 hdev->le_cnt = cnt;
4099         else
4100                 hdev->acl_cnt = cnt;
4101
4102         if (cnt != tmp)
4103                 hci_prio_recalculate(hdev, LE_LINK);
4104 }
4105
4106 static void hci_tx_work(struct work_struct *work)
4107 {
4108         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4109         struct sk_buff *skb;
4110
4111         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4112                hdev->sco_cnt, hdev->le_cnt);
4113
4114         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4115                 /* Schedule queues and send stuff to HCI driver */
4116                 hci_sched_acl(hdev);
4117                 hci_sched_sco(hdev);
4118                 hci_sched_esco(hdev);
4119                 hci_sched_le(hdev);
4120         }
4121
4122         /* Send next queued raw (unknown type) packet */
4123         while ((skb = skb_dequeue(&hdev->raw_q)))
4124                 hci_send_frame(hdev, skb);
4125 }
4126
4127 /* ----- HCI RX task (incoming data processing) ----- */
4128
4129 /* ACL data packet */
4130 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4131 {
4132         struct hci_acl_hdr *hdr = (void *) skb->data;
4133         struct hci_conn *conn;
4134         __u16 handle, flags;
4135
4136         skb_pull(skb, HCI_ACL_HDR_SIZE);
4137
4138         handle = __le16_to_cpu(hdr->handle);
4139         flags  = hci_flags(handle);
4140         handle = hci_handle(handle);
4141
4142         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4143                handle, flags);
4144
4145         hdev->stat.acl_rx++;
4146
4147         hci_dev_lock(hdev);
4148         conn = hci_conn_hash_lookup_handle(hdev, handle);
4149         hci_dev_unlock(hdev);
4150
4151         if (conn) {
4152                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4153
4154                 /* Send to upper protocol */
4155                 l2cap_recv_acldata(conn, skb, flags);
4156                 return;
4157         } else {
4158                 BT_ERR("%s ACL packet for unknown connection handle %d",
4159                        hdev->name, handle);
4160         }
4161
4162         kfree_skb(skb);
4163 }
4164
4165 /* SCO data packet */
4166 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4167 {
4168         struct hci_sco_hdr *hdr = (void *) skb->data;
4169         struct hci_conn *conn;
4170         __u16 handle;
4171
4172         skb_pull(skb, HCI_SCO_HDR_SIZE);
4173
4174         handle = __le16_to_cpu(hdr->handle);
4175
4176         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4177
4178         hdev->stat.sco_rx++;
4179
4180         hci_dev_lock(hdev);
4181         conn = hci_conn_hash_lookup_handle(hdev, handle);
4182         hci_dev_unlock(hdev);
4183
4184         if (conn) {
4185                 /* Send to upper protocol */
4186                 sco_recv_scodata(conn, skb);
4187                 return;
4188         } else {
4189                 BT_ERR("%s SCO packet for unknown connection handle %d",
4190                        hdev->name, handle);
4191         }
4192
4193         kfree_skb(skb);
4194 }
4195
4196 static bool hci_req_is_complete(struct hci_dev *hdev)
4197 {
4198         struct sk_buff *skb;
4199
4200         skb = skb_peek(&hdev->cmd_q);
4201         if (!skb)
4202                 return true;
4203
4204         return bt_cb(skb)->req.start;
4205 }
4206
4207 static void hci_resend_last(struct hci_dev *hdev)
4208 {
4209         struct hci_command_hdr *sent;
4210         struct sk_buff *skb;
4211         u16 opcode;
4212
4213         if (!hdev->sent_cmd)
4214                 return;
4215
4216         sent = (void *) hdev->sent_cmd->data;
4217         opcode = __le16_to_cpu(sent->opcode);
4218         if (opcode == HCI_OP_RESET)
4219                 return;
4220
4221         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4222         if (!skb)
4223                 return;
4224
4225         skb_queue_head(&hdev->cmd_q, skb);
4226         queue_work(hdev->workqueue, &hdev->cmd_work);
4227 }
4228
4229 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4230 {
4231         hci_req_complete_t req_complete = NULL;
4232         struct sk_buff *skb;
4233         unsigned long flags;
4234
4235         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4236
4237         /* If the completed command doesn't match the last one that was
4238          * sent, we need to do special handling of it.
4239          */
4240         if (!hci_sent_cmd_data(hdev, opcode)) {
4241                 /* Some CSR based controllers generate a spontaneous
4242                  * reset complete event during init and any pending
4243                  * command will never be completed. In such a case we
4244                  * need to resend whatever was the last sent
4245                  * command.
4246                  */
4247                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4248                         hci_resend_last(hdev);
4249
4250                 return;
4251         }
4252
4253         /* If the command succeeded and there's still more commands in
4254          * this request the request is not yet complete.
4255          */
4256         if (!status && !hci_req_is_complete(hdev))
4257                 return;
4258
4259         /* If this was the last command in a request the complete
4260          * callback would be found in hdev->sent_cmd instead of the
4261          * command queue (hdev->cmd_q).
4262          */
4263         if (hdev->sent_cmd) {
4264                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4265
4266                 if (req_complete) {
4267                         /* We must set the complete callback to NULL to
4268                          * avoid calling the callback more than once if
4269                          * this function gets called again.
4270                          */
4271                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4272
4273                         goto call_complete;
4274                 }
4275         }
4276
4277         /* Remove all pending commands belonging to this request */
4278         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4279         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4280                 if (bt_cb(skb)->req.start) {
4281                         __skb_queue_head(&hdev->cmd_q, skb);
4282                         break;
4283                 }
4284
4285                 req_complete = bt_cb(skb)->req.complete;
4286                 kfree_skb(skb);
4287         }
4288         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4289
4290 call_complete:
4291         if (req_complete)
4292                 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
4293 }
4294
4295 static void hci_rx_work(struct work_struct *work)
4296 {
4297         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4298         struct sk_buff *skb;
4299
4300         BT_DBG("%s", hdev->name);
4301
4302         while ((skb = skb_dequeue(&hdev->rx_q))) {
4303                 /* Send copy to monitor */
4304                 hci_send_to_monitor(hdev, skb);
4305
4306                 if (atomic_read(&hdev->promisc)) {
4307                         /* Send copy to the sockets */
4308                         hci_send_to_sock(hdev, skb);
4309                 }
4310
4311                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4312                         kfree_skb(skb);
4313                         continue;
4314                 }
4315
4316                 if (test_bit(HCI_INIT, &hdev->flags)) {
4317                         /* Don't process data packets in these states. */
4318                         switch (bt_cb(skb)->pkt_type) {
4319                         case HCI_ACLDATA_PKT:
4320                         case HCI_SCODATA_PKT:
4321                                 kfree_skb(skb);
4322                                 continue;
4323                         }
4324                 }
4325
4326                 /* Process frame */
4327                 switch (bt_cb(skb)->pkt_type) {
4328                 case HCI_EVENT_PKT:
4329                         BT_DBG("%s Event packet", hdev->name);
4330                         hci_event_packet(hdev, skb);
4331                         break;
4332
4333                 case HCI_ACLDATA_PKT:
4334                         BT_DBG("%s ACL data packet", hdev->name);
4335                         hci_acldata_packet(hdev, skb);
4336                         break;
4337
4338                 case HCI_SCODATA_PKT:
4339                         BT_DBG("%s SCO data packet", hdev->name);
4340                         hci_scodata_packet(hdev, skb);
4341                         break;
4342
4343                 default:
4344                         kfree_skb(skb);
4345                         break;
4346                 }
4347         }
4348 }
4349
4350 static void hci_cmd_work(struct work_struct *work)
4351 {
4352         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4353         struct sk_buff *skb;
4354
4355         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4356                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4357
4358         /* Send queued commands */
4359         if (atomic_read(&hdev->cmd_cnt)) {
4360                 skb = skb_dequeue(&hdev->cmd_q);
4361                 if (!skb)
4362                         return;
4363
4364                 kfree_skb(hdev->sent_cmd);
4365
4366                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4367                 if (hdev->sent_cmd) {
4368                         atomic_dec(&hdev->cmd_cnt);
4369                         hci_send_frame(hdev, skb);
4370                         if (test_bit(HCI_RESET, &hdev->flags))
4371                                 cancel_delayed_work(&hdev->cmd_timer);
4372                         else
4373                                 schedule_delayed_work(&hdev->cmd_timer,
4374                                                       HCI_CMD_TIMEOUT);
4375                 } else {
4376                         skb_queue_head(&hdev->cmd_q, skb);
4377                         queue_work(hdev->workqueue, &hdev->cmd_work);
4378                 }
4379         }
4380 }