Bluetooth: Add mgmt HCI channel registration API
firefly-linux-kernel-4.4.55.git: net/bluetooth/hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33
34 static LIST_HEAD(mgmt_chan_list);
35 static DEFINE_MUTEX(mgmt_chan_list_lock);
36
37 static atomic_t monitor_promisc = ATOMIC_INIT(0);
38
39 /* ----- HCI socket interface ----- */
40
41 /* Socket info */
42 #define hci_pi(sk) ((struct hci_pinfo *) sk)
43
44 struct hci_pinfo {
45         struct bt_sock    bt;
46         struct hci_dev    *hdev;
47         struct hci_filter filter;
48         __u32             cmsg_mask;
49         unsigned short    channel;
50 };
51
52 static inline int hci_test_bit(int nr, const void *addr)
53 {
54         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
55 }
56
57 /* Security filter */
58 #define HCI_SFLT_MAX_OGF  5
59
60 struct hci_sec_filter {
61         __u32 type_mask;
62         __u32 event_mask[2];
63         __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
64 };
65
66 static const struct hci_sec_filter hci_sec_filter = {
67         /* Packet types */
68         0x10,
69         /* Events */
70         { 0x1000d9fe, 0x0000b00c },
71         /* Commands */
72         {
73                 { 0x0 },
74                 /* OGF_LINK_CTL */
75                 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
76                 /* OGF_LINK_POLICY */
77                 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
78                 /* OGF_HOST_CTL */
79                 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
80                 /* OGF_INFO_PARAM */
81                 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
82                 /* OGF_STATUS_PARAM */
83                 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
84         }
85 };
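
/* Illustrative note, not part of the original source: the security
 * filter above is indexed by opcode group field (OGF) and opcode
 * command field (OCF).  For a RAW socket without CAP_NET_RAW, a
 * command is allowed only if the bit for (ocf & HCI_FLT_OCF_BITS) is
 * set in ocf_mask[ogf].  A minimal sketch of that check, mirroring
 * the logic in hci_sock_sendmsg() further down:
 *
 *	u16 opcode = get_unaligned_le16(skb->data);
 *	u16 ogf = hci_opcode_ogf(opcode);	// upper 6 bits of opcode
 *	u16 ocf = hci_opcode_ocf(opcode);	// lower 10 bits of opcode
 *
 *	if ((ogf > HCI_SFLT_MAX_OGF ||
 *	     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
 *			   &hci_sec_filter.ocf_mask[ogf])) &&
 *	    !capable(CAP_NET_RAW))
 *		return -EPERM;
 */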
86
87 static struct bt_sock_list hci_sk_list = {
88         .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
89 };
90
91 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
92 {
93         struct hci_filter *flt;
94         int flt_type, flt_event;
95
96         /* Apply filter */
97         flt = &hci_pi(sk)->filter;
98
99         if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
100                 flt_type = 0;
101         else
102                 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
103
104         if (!test_bit(flt_type, &flt->type_mask))
105                 return true;
106
107         /* Extra filter for event packets only */
108         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
109                 return false;
110
111         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
112
113         if (!hci_test_bit(flt_event, &flt->event_mask))
114                 return true;
115
116         /* Check filter only when opcode is set */
117         if (!flt->opcode)
118                 return false;
119
120         if (flt_event == HCI_EV_CMD_COMPLETE &&
121             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
122                 return true;
123
124         if (flt_event == HCI_EV_CMD_STATUS &&
125             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
126                 return true;
127
128         return false;
129 }
130
131 /* Send frame to RAW socket */
132 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
133 {
134         struct sock *sk;
135         struct sk_buff *skb_copy = NULL;
136
137         BT_DBG("hdev %p len %d", hdev, skb->len);
138
139         read_lock(&hci_sk_list.lock);
140
141         sk_for_each(sk, &hci_sk_list.head) {
142                 struct sk_buff *nskb;
143
144                 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
145                         continue;
146
147                 /* Don't send frame to the socket it came from */
148                 if (skb->sk == sk)
149                         continue;
150
151                 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
152                         if (is_filtered_packet(sk, skb))
153                                 continue;
154                 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
155                         if (!bt_cb(skb)->incoming)
156                                 continue;
157                         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
158                             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
159                             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
160                                 continue;
161                 } else {
162                         /* Don't send frame to other channel types */
163                         continue;
164                 }
165
166                 if (!skb_copy) {
167                         /* Create a private copy with headroom */
168                         skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
169                         if (!skb_copy)
170                                 continue;
171
172                         /* Put type byte before the data */
173                         memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
174                 }
175
176                 nskb = skb_clone(skb_copy, GFP_ATOMIC);
177                 if (!nskb)
178                         continue;
179
180                 if (sock_queue_rcv_skb(sk, nskb))
181                         kfree_skb(nskb);
182         }
183
184         read_unlock(&hci_sk_list.lock);
185
186         kfree_skb(skb_copy);
187 }
188
189 /* Send frame to sockets with specific channel */
190 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
191                          struct sock *skip_sk)
192 {
193         struct sock *sk;
194
195         BT_DBG("channel %u len %d", channel, skb->len);
196
197         read_lock(&hci_sk_list.lock);
198
199         sk_for_each(sk, &hci_sk_list.head) {
200                 struct sk_buff *nskb;
201
202                 /* Skip the original socket */
203                 if (sk == skip_sk)
204                         continue;
205
206                 if (sk->sk_state != BT_BOUND)
207                         continue;
208
209                 if (hci_pi(sk)->channel != channel)
210                         continue;
211
212                 nskb = skb_clone(skb, GFP_ATOMIC);
213                 if (!nskb)
214                         continue;
215
216                 if (sock_queue_rcv_skb(sk, nskb))
217                         kfree_skb(nskb);
218         }
219
220         read_unlock(&hci_sk_list.lock);
221 }
222
223 /* Send frame to monitor socket */
224 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226         struct sk_buff *skb_copy = NULL;
227         struct hci_mon_hdr *hdr;
228         __le16 opcode;
229
230         if (!atomic_read(&monitor_promisc))
231                 return;
232
233         BT_DBG("hdev %p len %d", hdev, skb->len);
234
235         switch (bt_cb(skb)->pkt_type) {
236         case HCI_COMMAND_PKT:
237                 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
238                 break;
239         case HCI_EVENT_PKT:
240                 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
241                 break;
242         case HCI_ACLDATA_PKT:
243                 if (bt_cb(skb)->incoming)
244                         opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
245                 else
246                         opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
247                 break;
248         case HCI_SCODATA_PKT:
249                 if (bt_cb(skb)->incoming)
250                         opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
251                 else
252                         opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
253                 break;
254         default:
255                 return;
256         }
257
258         /* Create a private copy with headroom */
259         skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
260         if (!skb_copy)
261                 return;
262
263         /* Put header before the data */
264         hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
265         hdr->opcode = opcode;
266         hdr->index = cpu_to_le16(hdev->id);
267         hdr->len = cpu_to_le16(skb->len);
268
269         hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
270         kfree_skb(skb_copy);
271 }
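
/* Illustrative note, not part of the original source: every frame
 * delivered on HCI_CHANNEL_MONITOR is prefixed with the monitor
 * header built above.  Its fields, as assigned in
 * hci_send_to_monitor() and declared in <net/bluetooth/hci_mon.h>,
 * are all little endian:
 *
 *	opcode	packet type, e.g. HCI_MON_COMMAND_PKT or HCI_MON_ACL_RX_PKT
 *	index	hdev->id of the controller the frame belongs to
 *	len	length of the payload that follows the header
 */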
272
273 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
274 {
275         struct hci_mon_hdr *hdr;
276         struct hci_mon_new_index *ni;
277         struct sk_buff *skb;
278         __le16 opcode;
279
280         switch (event) {
281         case HCI_DEV_REG:
282                 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
283                 if (!skb)
284                         return NULL;
285
286                 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
287                 ni->type = hdev->dev_type;
288                 ni->bus = hdev->bus;
289                 bacpy(&ni->bdaddr, &hdev->bdaddr);
290                 memcpy(ni->name, hdev->name, 8);
291
292                 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
293                 break;
294
295         case HCI_DEV_UNREG:
296                 skb = bt_skb_alloc(0, GFP_ATOMIC);
297                 if (!skb)
298                         return NULL;
299
300                 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
301                 break;
302
303         default:
304                 return NULL;
305         }
306
307         __net_timestamp(skb);
308
309         hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
310         hdr->opcode = opcode;
311         hdr->index = cpu_to_le16(hdev->id);
312         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
313
314         return skb;
315 }
316
317 static void send_monitor_replay(struct sock *sk)
318 {
319         struct hci_dev *hdev;
320
321         read_lock(&hci_dev_list_lock);
322
323         list_for_each_entry(hdev, &hci_dev_list, list) {
324                 struct sk_buff *skb;
325
326                 skb = create_monitor_event(hdev, HCI_DEV_REG);
327                 if (!skb)
328                         continue;
329
330                 if (sock_queue_rcv_skb(sk, skb))
331                         kfree_skb(skb);
332         }
333
334         read_unlock(&hci_dev_list_lock);
335 }
336
337 /* Generate internal stack event */
338 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
339 {
340         struct hci_event_hdr *hdr;
341         struct hci_ev_stack_internal *ev;
342         struct sk_buff *skb;
343
344         skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
345         if (!skb)
346                 return;
347
348         hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
349         hdr->evt  = HCI_EV_STACK_INTERNAL;
350         hdr->plen = sizeof(*ev) + dlen;
351
352         ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
353         ev->type = type;
354         memcpy(ev->data, data, dlen);
355
356         bt_cb(skb)->incoming = 1;
357         __net_timestamp(skb);
358
359         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
360         hci_send_to_sock(hdev, skb);
361         kfree_skb(skb);
362 }
363
364 void hci_sock_dev_event(struct hci_dev *hdev, int event)
365 {
366         struct hci_ev_si_device ev;
367
368         BT_DBG("hdev %s event %d", hdev->name, event);
369
370         /* Send event to monitor */
371         if (atomic_read(&monitor_promisc)) {
372                 struct sk_buff *skb;
373
374                 skb = create_monitor_event(hdev, event);
375                 if (skb) {
376                         hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
377                         kfree_skb(skb);
378                 }
379         }
380
381         /* Send event to sockets */
382         ev.event  = event;
383         ev.dev_id = hdev->id;
384         hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
385
386         if (event == HCI_DEV_UNREG) {
387                 struct sock *sk;
388
389                 /* Detach sockets from device */
390                 read_lock(&hci_sk_list.lock);
391                 sk_for_each(sk, &hci_sk_list.head) {
392                         bh_lock_sock_nested(sk);
393                         if (hci_pi(sk)->hdev == hdev) {
394                                 hci_pi(sk)->hdev = NULL;
395                                 sk->sk_err = EPIPE;
396                                 sk->sk_state = BT_OPEN;
397                                 sk->sk_state_change(sk);
398
399                                 hci_dev_put(hdev);
400                         }
401                         bh_unlock_sock(sk);
402                 }
403                 read_unlock(&hci_sk_list.lock);
404         }
405 }
406
407 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
408 {
409         struct hci_mgmt_chan *c;
410
411         list_for_each_entry(c, &mgmt_chan_list, list) {
412                 if (c->channel == channel)
413                         return c;
414         }
415
416         return NULL;
417 }
418
419 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
420 {
421         struct hci_mgmt_chan *c;
422
423         mutex_lock(&mgmt_chan_list_lock);
424         c = __hci_mgmt_chan_find(channel);
425         mutex_unlock(&mgmt_chan_list_lock);
426
427         return c;
428 }
429
430 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
431 {
432         if (c->channel < HCI_CHANNEL_CONTROL)
433                 return -EINVAL;
434
435         mutex_lock(&mgmt_chan_list_lock);
436         if (__hci_mgmt_chan_find(c->channel)) {
437                 mutex_unlock(&mgmt_chan_list_lock);
438                 return -EALREADY;
439         }
440
441         list_add_tail(&c->list, &mgmt_chan_list);
442
443         mutex_unlock(&mgmt_chan_list_lock);
444
445         return 0;
446 }
447 EXPORT_SYMBOL(hci_mgmt_chan_register);
448
449 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
450 {
451         mutex_lock(&mgmt_chan_list_lock);
452         list_del(&c->list);
453         mutex_unlock(&mgmt_chan_list_lock);
454 }
455 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
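
/* Usage sketch, illustrative only and not part of the original source:
 * a subsystem that wants to own an HCI channel number at or above
 * HCI_CHANNEL_CONTROL allocates a struct hci_mgmt_chan (defined in
 * hci_core.h; only the channel field is shown here, any handler
 * fields are omitted), fills in the channel number and registers it:
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel = HCI_CHANNEL_CONTROL,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 *	// -EINVAL for channels below HCI_CHANNEL_CONTROL,
 *	// -EALREADY if that channel number is already registered
 *	...
 *	hci_mgmt_chan_unregister(&example_chan);
 *
 * "example_chan" is a hypothetical name used only for illustration.
 */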
456
457 static int hci_sock_release(struct socket *sock)
458 {
459         struct sock *sk = sock->sk;
460         struct hci_dev *hdev;
461
462         BT_DBG("sock %p sk %p", sock, sk);
463
464         if (!sk)
465                 return 0;
466
467         hdev = hci_pi(sk)->hdev;
468
469         if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
470                 atomic_dec(&monitor_promisc);
471
472         bt_sock_unlink(&hci_sk_list, sk);
473
474         if (hdev) {
475                 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
476                         mgmt_index_added(hdev);
477                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
478                         hci_dev_close(hdev->id);
479                 }
480
481                 atomic_dec(&hdev->promisc);
482                 hci_dev_put(hdev);
483         }
484
485         sock_orphan(sk);
486
487         skb_queue_purge(&sk->sk_receive_queue);
488         skb_queue_purge(&sk->sk_write_queue);
489
490         sock_put(sk);
491         return 0;
492 }
493
494 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
495 {
496         bdaddr_t bdaddr;
497         int err;
498
499         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
500                 return -EFAULT;
501
502         hci_dev_lock(hdev);
503
504         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
505
506         hci_dev_unlock(hdev);
507
508         return err;
509 }
510
511 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
512 {
513         bdaddr_t bdaddr;
514         int err;
515
516         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
517                 return -EFAULT;
518
519         hci_dev_lock(hdev);
520
521         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
522
523         hci_dev_unlock(hdev);
524
525         return err;
526 }
527
528 /* Ioctls that require bound socket */
529 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
530                                 unsigned long arg)
531 {
532         struct hci_dev *hdev = hci_pi(sk)->hdev;
533
534         if (!hdev)
535                 return -EBADFD;
536
537         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
538                 return -EBUSY;
539
540         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
541                 return -EOPNOTSUPP;
542
543         if (hdev->dev_type != HCI_BREDR)
544                 return -EOPNOTSUPP;
545
546         switch (cmd) {
547         case HCISETRAW:
548                 if (!capable(CAP_NET_ADMIN))
549                         return -EPERM;
550                 return -EOPNOTSUPP;
551
552         case HCIGETCONNINFO:
553                 return hci_get_conn_info(hdev, (void __user *) arg);
554
555         case HCIGETAUTHINFO:
556                 return hci_get_auth_info(hdev, (void __user *) arg);
557
558         case HCIBLOCKADDR:
559                 if (!capable(CAP_NET_ADMIN))
560                         return -EPERM;
561                 return hci_sock_blacklist_add(hdev, (void __user *) arg);
562
563         case HCIUNBLOCKADDR:
564                 if (!capable(CAP_NET_ADMIN))
565                         return -EPERM;
566                 return hci_sock_blacklist_del(hdev, (void __user *) arg);
567         }
568
569         return -ENOIOCTLCMD;
570 }
571
572 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
573                           unsigned long arg)
574 {
575         void __user *argp = (void __user *) arg;
576         struct sock *sk = sock->sk;
577         int err;
578
579         BT_DBG("cmd %x arg %lx", cmd, arg);
580
581         lock_sock(sk);
582
583         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
584                 err = -EBADFD;
585                 goto done;
586         }
587
588         release_sock(sk);
589
590         switch (cmd) {
591         case HCIGETDEVLIST:
592                 return hci_get_dev_list(argp);
593
594         case HCIGETDEVINFO:
595                 return hci_get_dev_info(argp);
596
597         case HCIGETCONNLIST:
598                 return hci_get_conn_list(argp);
599
600         case HCIDEVUP:
601                 if (!capable(CAP_NET_ADMIN))
602                         return -EPERM;
603                 return hci_dev_open(arg);
604
605         case HCIDEVDOWN:
606                 if (!capable(CAP_NET_ADMIN))
607                         return -EPERM;
608                 return hci_dev_close(arg);
609
610         case HCIDEVRESET:
611                 if (!capable(CAP_NET_ADMIN))
612                         return -EPERM;
613                 return hci_dev_reset(arg);
614
615         case HCIDEVRESTAT:
616                 if (!capable(CAP_NET_ADMIN))
617                         return -EPERM;
618                 return hci_dev_reset_stat(arg);
619
620         case HCISETSCAN:
621         case HCISETAUTH:
622         case HCISETENCRYPT:
623         case HCISETPTYPE:
624         case HCISETLINKPOL:
625         case HCISETLINKMODE:
626         case HCISETACLMTU:
627         case HCISETSCOMTU:
628                 if (!capable(CAP_NET_ADMIN))
629                         return -EPERM;
630                 return hci_dev_cmd(cmd, argp);
631
632         case HCIINQUIRY:
633                 return hci_inquiry(argp);
634         }
635
636         lock_sock(sk);
637
638         err = hci_sock_bound_ioctl(sk, cmd, arg);
639
640 done:
641         release_sock(sk);
642         return err;
643 }
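
/* User-space view, illustrative only and not part of the original
 * source: the device control ioctls above take the device index
 * directly as the ioctl argument, which is why hci_dev_open() and
 * hci_dev_close() receive 'arg' unmodified.  Bringing up hci0 from a
 * CAP_NET_ADMIN process is roughly:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(fd, HCIDEVUP, 0);		// 0 is the index of hci0
 *
 * Error handling is omitted for brevity.
 */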
644
645 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
646                          int addr_len)
647 {
648         struct sockaddr_hci haddr;
649         struct sock *sk = sock->sk;
650         struct hci_dev *hdev = NULL;
651         int len, err = 0;
652
653         BT_DBG("sock %p sk %p", sock, sk);
654
655         if (!addr)
656                 return -EINVAL;
657
658         memset(&haddr, 0, sizeof(haddr));
659         len = min_t(unsigned int, sizeof(haddr), addr_len);
660         memcpy(&haddr, addr, len);
661
662         if (haddr.hci_family != AF_BLUETOOTH)
663                 return -EINVAL;
664
665         lock_sock(sk);
666
667         if (sk->sk_state == BT_BOUND) {
668                 err = -EALREADY;
669                 goto done;
670         }
671
672         switch (haddr.hci_channel) {
673         case HCI_CHANNEL_RAW:
674                 if (hci_pi(sk)->hdev) {
675                         err = -EALREADY;
676                         goto done;
677                 }
678
679                 if (haddr.hci_dev != HCI_DEV_NONE) {
680                         hdev = hci_dev_get(haddr.hci_dev);
681                         if (!hdev) {
682                                 err = -ENODEV;
683                                 goto done;
684                         }
685
686                         atomic_inc(&hdev->promisc);
687                 }
688
689                 hci_pi(sk)->hdev = hdev;
690                 break;
691
692         case HCI_CHANNEL_USER:
693                 if (hci_pi(sk)->hdev) {
694                         err = -EALREADY;
695                         goto done;
696                 }
697
698                 if (haddr.hci_dev == HCI_DEV_NONE) {
699                         err = -EINVAL;
700                         goto done;
701                 }
702
703                 if (!capable(CAP_NET_ADMIN)) {
704                         err = -EPERM;
705                         goto done;
706                 }
707
708                 hdev = hci_dev_get(haddr.hci_dev);
709                 if (!hdev) {
710                         err = -ENODEV;
711                         goto done;
712                 }
713
714                 if (test_bit(HCI_UP, &hdev->flags) ||
715                     test_bit(HCI_INIT, &hdev->flags) ||
716                     test_bit(HCI_SETUP, &hdev->dev_flags) ||
717                     test_bit(HCI_CONFIG, &hdev->dev_flags)) {
718                         err = -EBUSY;
719                         hci_dev_put(hdev);
720                         goto done;
721                 }
722
723                 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
724                         err = -EUSERS;
725                         hci_dev_put(hdev);
726                         goto done;
727                 }
728
729                 mgmt_index_removed(hdev);
730
731                 err = hci_dev_open(hdev->id);
732                 if (err) {
733                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
734                         mgmt_index_added(hdev);
735                         hci_dev_put(hdev);
736                         goto done;
737                 }
738
739                 atomic_inc(&hdev->promisc);
740
741                 hci_pi(sk)->hdev = hdev;
742                 break;
743
744         case HCI_CHANNEL_CONTROL:
745                 if (haddr.hci_dev != HCI_DEV_NONE) {
746                         err = -EINVAL;
747                         goto done;
748                 }
749
750                 if (!capable(CAP_NET_ADMIN)) {
751                         err = -EPERM;
752                         goto done;
753                 }
754
755                 break;
756
757         case HCI_CHANNEL_MONITOR:
758                 if (haddr.hci_dev != HCI_DEV_NONE) {
759                         err = -EINVAL;
760                         goto done;
761                 }
762
763                 if (!capable(CAP_NET_RAW)) {
764                         err = -EPERM;
765                         goto done;
766                 }
767
768                 send_monitor_replay(sk);
769
770                 atomic_inc(&monitor_promisc);
771                 break;
772
773         default:
774                 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
775                         err = -EINVAL;
776                         goto done;
777                 }
778
779                 if (haddr.hci_dev != HCI_DEV_NONE) {
780                         err = -EINVAL;
781                         goto done;
782                 }
783
784                 if (!capable(CAP_NET_ADMIN)) {
785                         err = -EPERM;
786                         goto done;
787                 }
788
789                 break;
790         }
791
792
793         hci_pi(sk)->channel = haddr.hci_channel;
794         sk->sk_state = BT_BOUND;
795
796 done:
797         release_sock(sk);
798         return err;
799 }
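
/* User-space view, illustrative only and not part of the original
 * source: a process binds an HCI socket by filling in struct
 * sockaddr_hci with a device index and channel, mirroring the checks
 * in hci_sock_bind() above.  Opening the monitor channel (which needs
 * CAP_NET_RAW and is not tied to a single device) looks roughly like:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Error handling is omitted for brevity.
 */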
800
801 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
802                             int *addr_len, int peer)
803 {
804         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
805         struct sock *sk = sock->sk;
806         struct hci_dev *hdev;
807         int err = 0;
808
809         BT_DBG("sock %p sk %p", sock, sk);
810
811         if (peer)
812                 return -EOPNOTSUPP;
813
814         lock_sock(sk);
815
816         hdev = hci_pi(sk)->hdev;
817         if (!hdev) {
818                 err = -EBADFD;
819                 goto done;
820         }
821
822         *addr_len = sizeof(*haddr);
823         haddr->hci_family = AF_BLUETOOTH;
824         haddr->hci_dev    = hdev->id;
825         haddr->hci_channel = hci_pi(sk)->channel;
826
827 done:
828         release_sock(sk);
829         return err;
830 }
831
832 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
833                           struct sk_buff *skb)
834 {
835         __u32 mask = hci_pi(sk)->cmsg_mask;
836
837         if (mask & HCI_CMSG_DIR) {
838                 int incoming = bt_cb(skb)->incoming;
839                 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
840                          &incoming);
841         }
842
843         if (mask & HCI_CMSG_TSTAMP) {
844 #ifdef CONFIG_COMPAT
845                 struct compat_timeval ctv;
846 #endif
847                 struct timeval tv;
848                 void *data;
849                 int len;
850
851                 skb_get_timestamp(skb, &tv);
852
853                 data = &tv;
854                 len = sizeof(tv);
855 #ifdef CONFIG_COMPAT
856                 if (!COMPAT_USE_64BIT_TIME &&
857                     (msg->msg_flags & MSG_CMSG_COMPAT)) {
858                         ctv.tv_sec = tv.tv_sec;
859                         ctv.tv_usec = tv.tv_usec;
860                         data = &ctv;
861                         len = sizeof(ctv);
862                 }
863 #endif
864
865                 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
866         }
867 }
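
/* User-space view, illustrative only and not part of the original
 * source: direction and timestamp information is delivered as
 * ancillary data once the matching socket options are enabled,
 * corresponding to the put_cmsg() calls above:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_HCI, HCI_DATA_DIR, &on, sizeof(on));
 *	setsockopt(fd, SOL_HCI, HCI_TIME_STAMP, &on, sizeof(on));
 *
 * recvmsg() then attaches cmsgs at level SOL_HCI: HCI_CMSG_DIR
 * carries an int that is 1 for incoming frames, and HCI_CMSG_TSTAMP
 * carries a struct timeval with the frame's timestamp.
 */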
868
869 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
870                             int flags)
871 {
872         int noblock = flags & MSG_DONTWAIT;
873         struct sock *sk = sock->sk;
874         struct sk_buff *skb;
875         int copied, err;
876
877         BT_DBG("sock %p, sk %p", sock, sk);
878
879         if (flags & (MSG_OOB))
880                 return -EOPNOTSUPP;
881
882         if (sk->sk_state == BT_CLOSED)
883                 return 0;
884
885         skb = skb_recv_datagram(sk, flags, noblock, &err);
886         if (!skb)
887                 return err;
888
889         copied = skb->len;
890         if (len < copied) {
891                 msg->msg_flags |= MSG_TRUNC;
892                 copied = len;
893         }
894
895         skb_reset_transport_header(skb);
896         err = skb_copy_datagram_msg(skb, 0, msg, copied);
897
898         switch (hci_pi(sk)->channel) {
899         case HCI_CHANNEL_RAW:
900                 hci_sock_cmsg(sk, msg, skb);
901                 break;
902         case HCI_CHANNEL_USER:
903         case HCI_CHANNEL_CONTROL:
904         case HCI_CHANNEL_MONITOR:
905                 sock_recv_timestamp(msg, sk, skb);
906                 break;
907         default:
908                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
909                         sock_recv_timestamp(msg, sk, skb);
910                 break;
911         }
912
913         skb_free_datagram(sk, skb);
914
915         return err ? : copied;
916 }
917
918 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
919                             size_t len)
920 {
921         struct sock *sk = sock->sk;
922         struct hci_mgmt_chan *chan;
923         struct hci_dev *hdev;
924         struct sk_buff *skb;
925         int err;
926
927         BT_DBG("sock %p sk %p", sock, sk);
928
929         if (msg->msg_flags & MSG_OOB)
930                 return -EOPNOTSUPP;
931
932         if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
933                 return -EINVAL;
934
935         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
936                 return -EINVAL;
937
938         lock_sock(sk);
939
940         switch (hci_pi(sk)->channel) {
941         case HCI_CHANNEL_RAW:
942         case HCI_CHANNEL_USER:
943                 break;
944         case HCI_CHANNEL_CONTROL:
945                 err = mgmt_control(sk, msg, len);
946                 goto done;
947         case HCI_CHANNEL_MONITOR:
948                 err = -EOPNOTSUPP;
949                 goto done;
950         default:
951                 mutex_lock(&mgmt_chan_list_lock);
952                 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
953                 if (chan)
954                         err = -ENOSYS; /* FIXME: call handler */
955                 else
956                         err = -EINVAL;
957
958                 mutex_unlock(&mgmt_chan_list_lock);
959                 goto done;
960         }
961
962         hdev = hci_pi(sk)->hdev;
963         if (!hdev) {
964                 err = -EBADFD;
965                 goto done;
966         }
967
968         if (!test_bit(HCI_UP, &hdev->flags)) {
969                 err = -ENETDOWN;
970                 goto done;
971         }
972
973         skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
974         if (!skb)
975                 goto done;
976
977         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
978                 err = -EFAULT;
979                 goto drop;
980         }
981
982         bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
983         skb_pull(skb, 1);
984
985         if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
986                 /* No permission check is needed for user channel
987                  * since that gets enforced when binding the socket.
988                  *
989                  * However check that the packet type is valid.
990                  */
991                 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
992                     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
993                     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
994                         err = -EINVAL;
995                         goto drop;
996                 }
997
998                 skb_queue_tail(&hdev->raw_q, skb);
999                 queue_work(hdev->workqueue, &hdev->tx_work);
1000         } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1001                 u16 opcode = get_unaligned_le16(skb->data);
1002                 u16 ogf = hci_opcode_ogf(opcode);
1003                 u16 ocf = hci_opcode_ocf(opcode);
1004
1005                 if (((ogf > HCI_SFLT_MAX_OGF) ||
1006                      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1007                                    &hci_sec_filter.ocf_mask[ogf])) &&
1008                     !capable(CAP_NET_RAW)) {
1009                         err = -EPERM;
1010                         goto drop;
1011                 }
1012
1013                 if (ogf == 0x3f) {
1014                         skb_queue_tail(&hdev->raw_q, skb);
1015                         queue_work(hdev->workqueue, &hdev->tx_work);
1016                 } else {
1017                         /* Stand-alone HCI commands must be flagged as
1018                          * single-command requests.
1019                          */
1020                         bt_cb(skb)->req_start = 1;
1021
1022                         skb_queue_tail(&hdev->cmd_q, skb);
1023                         queue_work(hdev->workqueue, &hdev->cmd_work);
1024                 }
1025         } else {
1026                 if (!capable(CAP_NET_RAW)) {
1027                         err = -EPERM;
1028                         goto drop;
1029                 }
1030
1031                 skb_queue_tail(&hdev->raw_q, skb);
1032                 queue_work(hdev->workqueue, &hdev->tx_work);
1033         }
1034
1035         err = len;
1036
1037 done:
1038         release_sock(sk);
1039         return err;
1040
1041 drop:
1042         kfree_skb(skb);
1043         goto done;
1044 }
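
/* User-space view, illustrative only and not part of the original
 * source: a frame written to a RAW or user channel socket starts with
 * the packet type byte, followed by the HCI packet itself;
 * hci_sock_sendmsg() above strips that first byte into
 * bt_cb(skb)->pkt_type.  Sending the HCI_Reset command (opcode
 * 0x0c03, no parameters) looks roughly like:
 *
 *	unsigned char buf[] = {
 *		HCI_COMMAND_PKT,	// packet type indicator
 *		0x03, 0x0c,		// opcode 0x0c03, little endian
 *		0x00			// parameter total length
 *	};
 *
 *	write(fd, buf, sizeof(buf));	// len must be at least 4
 *
 * On a RAW socket without CAP_NET_RAW the command must additionally be
 * allowed by hci_sec_filter above, so this particular command may be
 * rejected with -EPERM for unprivileged callers.
 */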
1045
1046 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1047                                char __user *optval, unsigned int len)
1048 {
1049         struct hci_ufilter uf = { .opcode = 0 };
1050         struct sock *sk = sock->sk;
1051         int err = 0, opt = 0;
1052
1053         BT_DBG("sk %p, opt %d", sk, optname);
1054
1055         lock_sock(sk);
1056
1057         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1058                 err = -EBADFD;
1059                 goto done;
1060         }
1061
1062         switch (optname) {
1063         case HCI_DATA_DIR:
1064                 if (get_user(opt, (int __user *)optval)) {
1065                         err = -EFAULT;
1066                         break;
1067                 }
1068
1069                 if (opt)
1070                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1071                 else
1072                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1073                 break;
1074
1075         case HCI_TIME_STAMP:
1076                 if (get_user(opt, (int __user *)optval)) {
1077                         err = -EFAULT;
1078                         break;
1079                 }
1080
1081                 if (opt)
1082                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1083                 else
1084                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1085                 break;
1086
1087         case HCI_FILTER:
1088                 {
1089                         struct hci_filter *f = &hci_pi(sk)->filter;
1090
1091                         uf.type_mask = f->type_mask;
1092                         uf.opcode    = f->opcode;
1093                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1094                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1095                 }
1096
1097                 len = min_t(unsigned int, len, sizeof(uf));
1098                 if (copy_from_user(&uf, optval, len)) {
1099                         err = -EFAULT;
1100                         break;
1101                 }
1102
1103                 if (!capable(CAP_NET_RAW)) {
1104                         uf.type_mask &= hci_sec_filter.type_mask;
1105                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1106                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1107                 }
1108
1109                 {
1110                         struct hci_filter *f = &hci_pi(sk)->filter;
1111
1112                         f->type_mask = uf.type_mask;
1113                         f->opcode    = uf.opcode;
1114                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1115                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1116                 }
1117                 break;
1118
1119         default:
1120                 err = -ENOPROTOOPT;
1121                 break;
1122         }
1123
1124 done:
1125         release_sock(sk);
1126         return err;
1127 }
1128
1129 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1130                                char __user *optval, int __user *optlen)
1131 {
1132         struct hci_ufilter uf;
1133         struct sock *sk = sock->sk;
1134         int len, opt, err = 0;
1135
1136         BT_DBG("sk %p, opt %d", sk, optname);
1137
1138         if (get_user(len, optlen))
1139                 return -EFAULT;
1140
1141         lock_sock(sk);
1142
1143         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1144                 err = -EBADFD;
1145                 goto done;
1146         }
1147
1148         switch (optname) {
1149         case HCI_DATA_DIR:
1150                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1151                         opt = 1;
1152                 else
1153                         opt = 0;
1154
1155                 if (put_user(opt, optval))
1156                         err = -EFAULT;
1157                 break;
1158
1159         case HCI_TIME_STAMP:
1160                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1161                         opt = 1;
1162                 else
1163                         opt = 0;
1164
1165                 if (put_user(opt, optval))
1166                         err = -EFAULT;
1167                 break;
1168
1169         case HCI_FILTER:
1170                 {
1171                         struct hci_filter *f = &hci_pi(sk)->filter;
1172
1173                         memset(&uf, 0, sizeof(uf));
1174                         uf.type_mask = f->type_mask;
1175                         uf.opcode    = f->opcode;
1176                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1177                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1178                 }
1179
1180                 len = min_t(unsigned int, len, sizeof(uf));
1181                 if (copy_to_user(optval, &uf, len))
1182                         err = -EFAULT;
1183                 break;
1184
1185         default:
1186                 err = -ENOPROTOOPT;
1187                 break;
1188         }
1189
1190 done:
1191         release_sock(sk);
1192         return err;
1193 }
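
/* User-space view, illustrative only and not part of the original
 * source: on a RAW channel socket the event filter is installed with
 * setsockopt() using the same struct hci_ufilter layout handled
 * above.  Accepting all events and no other packet types is roughly:
 *
 *	struct hci_ufilter flt = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { ~0U, ~0U },
 *		.opcode     = 0,
 *	};
 *
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For callers without CAP_NET_RAW the requested masks are silently
 * narrowed to hci_sec_filter, as done in hci_sock_setsockopt().
 */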
1194
1195 static const struct proto_ops hci_sock_ops = {
1196         .family         = PF_BLUETOOTH,
1197         .owner          = THIS_MODULE,
1198         .release        = hci_sock_release,
1199         .bind           = hci_sock_bind,
1200         .getname        = hci_sock_getname,
1201         .sendmsg        = hci_sock_sendmsg,
1202         .recvmsg        = hci_sock_recvmsg,
1203         .ioctl          = hci_sock_ioctl,
1204         .poll           = datagram_poll,
1205         .listen         = sock_no_listen,
1206         .shutdown       = sock_no_shutdown,
1207         .setsockopt     = hci_sock_setsockopt,
1208         .getsockopt     = hci_sock_getsockopt,
1209         .connect        = sock_no_connect,
1210         .socketpair     = sock_no_socketpair,
1211         .accept         = sock_no_accept,
1212         .mmap           = sock_no_mmap
1213 };
1214
1215 static struct proto hci_sk_proto = {
1216         .name           = "HCI",
1217         .owner          = THIS_MODULE,
1218         .obj_size       = sizeof(struct hci_pinfo)
1219 };
1220
1221 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1222                            int kern)
1223 {
1224         struct sock *sk;
1225
1226         BT_DBG("sock %p", sock);
1227
1228         if (sock->type != SOCK_RAW)
1229                 return -ESOCKTNOSUPPORT;
1230
1231         sock->ops = &hci_sock_ops;
1232
1233         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1234         if (!sk)
1235                 return -ENOMEM;
1236
1237         sock_init_data(sock, sk);
1238
1239         sock_reset_flag(sk, SOCK_ZAPPED);
1240
1241         sk->sk_protocol = protocol;
1242
1243         sock->state = SS_UNCONNECTED;
1244         sk->sk_state = BT_OPEN;
1245
1246         bt_sock_link(&hci_sk_list, sk);
1247         return 0;
1248 }
1249
1250 static const struct net_proto_family hci_sock_family_ops = {
1251         .family = PF_BLUETOOTH,
1252         .owner  = THIS_MODULE,
1253         .create = hci_sock_create,
1254 };
1255
1256 int __init hci_sock_init(void)
1257 {
1258         int err;
1259
1260         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1261
1262         err = proto_register(&hci_sk_proto, 0);
1263         if (err < 0)
1264                 return err;
1265
1266         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1267         if (err < 0) {
1268                 BT_ERR("HCI socket registration failed");
1269                 goto error;
1270         }
1271
1272         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1273         if (err < 0) {
1274                 BT_ERR("Failed to create HCI proc file");
1275                 bt_sock_unregister(BTPROTO_HCI);
1276                 goto error;
1277         }
1278
1279         BT_INFO("HCI socket layer initialized");
1280
1281         return 0;
1282
1283 error:
1284         proto_unregister(&hci_sk_proto);
1285         return err;
1286 }
1287
1288 void hci_sock_cleanup(void)
1289 {
1290         bt_procfs_cleanup(&init_net, "hci");
1291         bt_sock_unregister(BTPROTO_HCI);
1292         proto_unregister(&hci_sk_proto);
1293 }