Bluetooth: Add helper to get HCI channel of a socket
[firefly-linux-kernel-4.4.55.git] / net / bluetooth / hci_sock.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33
34 static LIST_HEAD(mgmt_chan_list);
35 static DEFINE_MUTEX(mgmt_chan_list_lock);
36
37 static atomic_t monitor_promisc = ATOMIC_INIT(0);
38
39 /* ----- HCI socket interface ----- */
40
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

/* Per-socket state of an HCI socket. Overlays the generic bt_sock so
 * that hci_pi() can obtain it with a plain cast of the struct sock.
 */
struct hci_pinfo {
	struct bt_sock    bt;        /* must stay first for the hci_pi() cast */
	struct hci_dev    *hdev;     /* bound controller, NULL when unbound */
	struct hci_filter filter;    /* per-socket filter (RAW channel) */
	__u32             cmsg_mask; /* HCI_CMSG_* ancillary data to deliver */
	unsigned short    channel;   /* HCI_CHANNEL_* the socket is bound to */
	unsigned long     flags;     /* HCI_SOCK_* flag bits */
};
52
53 void hci_sock_set_flag(struct sock *sk, int nr)
54 {
55         set_bit(nr, &hci_pi(sk)->flags);
56 }
57
58 void hci_sock_clear_flag(struct sock *sk, int nr)
59 {
60         clear_bit(nr, &hci_pi(sk)->flags);
61 }
62
63 int hci_sock_test_flag(struct sock *sk, int nr)
64 {
65         return test_bit(nr, &hci_pi(sk)->flags);
66 }
67
68 unsigned short hci_sock_get_channel(struct sock *sk)
69 {
70         return hci_pi(sk)->channel;
71 }
72
73 static inline int hci_test_bit(int nr, const void *addr)
74 {
75         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
76 }
77
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmap description of the security filter: one bit per packet type,
 * per HCI event and per OCF within each OGF group. Presumably applied
 * to unprivileged RAW sockets elsewhere in this file — confirm against
 * the sendmsg path.
 */
struct hci_sec_filter {
	__u32 type_mask;                          /* allowed packet types */
	__u32 event_mask[2];                      /* allowed events, 64-bit bitmap */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];  /* allowed OCFs per OGF */
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

/* List of all open HCI sockets, protected by its embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
111
112 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
113 {
114         struct hci_filter *flt;
115         int flt_type, flt_event;
116
117         /* Apply filter */
118         flt = &hci_pi(sk)->filter;
119
120         if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
121                 flt_type = 0;
122         else
123                 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
124
125         if (!test_bit(flt_type, &flt->type_mask))
126                 return true;
127
128         /* Extra filter for event packets only */
129         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
130                 return false;
131
132         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
133
134         if (!hci_test_bit(flt_event, &flt->event_mask))
135                 return true;
136
137         /* Check filter only when opcode is set */
138         if (!flt->opcode)
139                 return false;
140
141         if (flt_event == HCI_EV_CMD_COMPLETE &&
142             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
143                 return true;
144
145         if (flt_event == HCI_EV_CMD_STATUS &&
146             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
147                 return true;
148
149         return false;
150 }
151
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Only bound sockets attached to this controller qualify */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets get the per-socket filter applied */
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming event/ACL/SCO */
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		/* skb_copy (with the type byte prepended) is created lazily
		 * on the first matching socket and then shared via clones
		 * for every further receiver.
		 */
		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Queue takes ownership on success; free on failure */
		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	/* Drops the lazy copy's last reference (no-op when NULL) */
	kfree_skb(skb_copy);
}
209
210 /* Send frame to sockets with specific channel */
211 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
212                          int flag, struct sock *skip_sk)
213 {
214         struct sock *sk;
215
216         BT_DBG("channel %u len %d", channel, skb->len);
217
218         read_lock(&hci_sk_list.lock);
219
220         sk_for_each(sk, &hci_sk_list.head) {
221                 struct sk_buff *nskb;
222
223                 /* Ignore socket without the flag set */
224                 if (!hci_sock_test_flag(sk, flag))
225                         continue;
226
227                 /* Skip the original socket */
228                 if (sk == skip_sk)
229                         continue;
230
231                 if (sk->sk_state != BT_BOUND)
232                         continue;
233
234                 if (hci_pi(sk)->channel != channel)
235                         continue;
236
237                 nskb = skb_clone(skb, GFP_ATOMIC);
238                 if (!nskb)
239                         continue;
240
241                 if (sock_queue_rcv_skb(sk, nskb))
242                         kfree_skb(nskb);
243         }
244
245         read_unlock(&hci_sk_list.lock);
246 }
247
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path out when no monitor socket is open */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map the HCI packet type (and direction for data packets) to
	 * the corresponding monitor opcode; unknown types are dropped.
	 */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data; hdr->len is the payload length,
	 * i.e. the original skb length before the header was pushed.
	 */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	/* Monitor delivery is restricted to trusted sockets */
	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
298
/* Build a monitor-channel control event (New Index / Del Index) for
 * @hdev. Returns a freshly allocated, timestamped skb with the monitor
 * header prepended, or NULL on allocation failure / unknown event.
 * The caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		/* New Index carries type, bus, address and name */
		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		/* NOTE(review): copies exactly 8 bytes — assumes both
		 * ni->name and hdev->name are at least 8 bytes and that
		 * truncation without NUL termination is acceptable on
		 * the wire; confirm against hci_mon_new_index.
		 */
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		/* Del Index has no payload, only the header */
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	/* Prepend the monitor header; len excludes the header itself */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
342
343 static void send_monitor_replay(struct sock *sk)
344 {
345         struct hci_dev *hdev;
346
347         read_lock(&hci_dev_list_lock);
348
349         list_for_each_entry(hdev, &hci_dev_list, list) {
350                 struct sk_buff *skb;
351
352                 skb = create_monitor_event(hdev, HCI_DEV_REG);
353                 if (!skb)
354                         continue;
355
356                 if (sock_queue_rcv_skb(sk, skb))
357                         kfree_skb(skb);
358         }
359
360         read_unlock(&hci_dev_list_lock);
361 }
362
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	/* Event header + stack-internal envelope + caller payload */
	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so it is delivered like a controller event */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
389
/* Notify monitor and HCI sockets about a device event (register,
 * unregister, up, down, ...) and, on unregister, detach every socket
 * still bound to the dying controller.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* NOTE(review): this takes only the BH spinlock,
			 * while other writers/readers of hci_pi(sk)->hdev
			 * (ioctl/bind/release paths) serialize via
			 * lock_sock(). That may not exclude a concurrent
			 * ioctl using hdev, risking use-after-free of the
			 * hdev reference (cf. CVE-2021-3573) — confirm
			 * against the upstream locking fix.
			 */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				/* Drop the reference the socket held */
				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}
433
434 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
435 {
436         struct hci_mgmt_chan *c;
437
438         list_for_each_entry(c, &mgmt_chan_list, list) {
439                 if (c->channel == channel)
440                         return c;
441         }
442
443         return NULL;
444 }
445
446 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
447 {
448         struct hci_mgmt_chan *c;
449
450         mutex_lock(&mgmt_chan_list_lock);
451         c = __hci_mgmt_chan_find(channel);
452         mutex_unlock(&mgmt_chan_list_lock);
453
454         return c;
455 }
456
457 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
458 {
459         if (c->channel < HCI_CHANNEL_CONTROL)
460                 return -EINVAL;
461
462         mutex_lock(&mgmt_chan_list_lock);
463         if (__hci_mgmt_chan_find(c->channel)) {
464                 mutex_unlock(&mgmt_chan_list_lock);
465                 return -EALREADY;
466         }
467
468         list_add_tail(&c->list, &mgmt_chan_list);
469
470         mutex_unlock(&mgmt_chan_list_lock);
471
472         return 0;
473 }
474 EXPORT_SYMBOL(hci_mgmt_chan_register);
475
/* Unregister a previously registered management channel handler. */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
483
/* Release an HCI socket: undo monitor accounting, unlink it from the
 * global socket list, detach and release the bound controller (closing
 * it for user-channel sockets), then tear the socket down.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	/* Monitor sockets contributed to monitor_promisc at bind time */
	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		/* User channel held exclusive access: give the device
		 * back to the management interface and close it.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			hci_dev_close(hdev->id);
		}

		/* Balance the promisc/reference taken at bind time */
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* NOTE(review): queues are purged here rather than in a
	 * sk_destruct callback; assumes no concurrent sender can still
	 * queue to this socket after bt_sock_unlink — confirm.
	 */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}
520
521 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
522 {
523         bdaddr_t bdaddr;
524         int err;
525
526         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
527                 return -EFAULT;
528
529         hci_dev_lock(hdev);
530
531         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
532
533         hci_dev_unlock(hdev);
534
535         return err;
536 }
537
538 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
539 {
540         bdaddr_t bdaddr;
541         int err;
542
543         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
544                 return -EFAULT;
545
546         hci_dev_lock(hdev);
547
548         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
549
550         hci_dev_unlock(hdev);
551
552         return err;
553 }
554
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	/* Precondition checks, in order: must be bound to a device that
	 * is not claimed by a user channel, is configured, and is BR/EDR.
	 */
	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported, but keep the
		 * capability check so unprivileged callers get -EPERM
		 * rather than learning the command is gone.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}
598
/* Top-level ioctl handler for HCI sockets. Device-independent commands
 * are dispatched without the socket lock; anything unhandled falls
 * through to hci_sock_bound_ioctl() under lock_sock().
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Only RAW channel sockets may use these ioctls */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* NOTE(review): the channel check above is done under the socket
	 * lock, but the lock is dropped here. An unbound socket also has
	 * channel 0 (== HCI_CHANNEL_RAW), so a concurrent bind() to
	 * another channel could race with the commands below — confirm
	 * against upstream hardening of this path.
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs a bound socket, handled under the lock */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
671
/* Bind an HCI socket to a channel (and for RAW/USER, optionally to a
 * specific controller). Validates the address, performs per-channel
 * permission and state checks, and finally records channel and state.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Accept short sockaddrs: copy what the caller provided and
	 * leave the rest zeroed.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* A socket can only be bound once */
	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* RAW may bind to one device or (HCI_DEV_NONE) to all */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device ... */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* ... and CAP_NET_ADMIN, since it takes the controller
		 * away from the kernel stack entirely.
		 */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* The device must be fully idle: not up, not initializing,
		 * not in setup or config.
		 */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Claim exclusive user-channel access atomically */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the index from the management interface while the
		 * user channel owns the device.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			/* Roll back the exclusive claim on failure */
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		/* Monitor is global: no device may be specified */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Catch the new listener up on existing controllers */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		/* Remaining channel numbers must match a registered
		 * management channel and never carry a device index.
		 */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	/* Commit: record the channel and mark the socket bound */
	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
837
838 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
839                             int *addr_len, int peer)
840 {
841         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
842         struct sock *sk = sock->sk;
843         struct hci_dev *hdev;
844         int err = 0;
845
846         BT_DBG("sock %p sk %p", sock, sk);
847
848         if (peer)
849                 return -EOPNOTSUPP;
850
851         lock_sock(sk);
852
853         hdev = hci_pi(sk)->hdev;
854         if (!hdev) {
855                 err = -EBADFD;
856                 goto done;
857         }
858
859         *addr_len = sizeof(*haddr);
860         haddr->hci_family = AF_BLUETOOTH;
861         haddr->hci_dev    = hdev->id;
862         haddr->hci_channel= hci_pi(sk)->channel;
863
864 done:
865         release_sock(sk);
866         return err;
867 }
868
/* Attach the ancillary data the socket asked for (via cmsg_mask) to a
 * received message: packet direction and/or receive timestamp, with
 * compat_timeval conversion for 32-bit callers on 64-bit kernels.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		/* 1 = incoming from the controller, 0 = outgoing */
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit compat tasks expect a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
905
/* Receive one queued packet on an HCI socket. Truncates to the caller's
 * buffer (setting MSG_TRUNC) and attaches per-channel ancillary data.
 * Returns the number of bytes copied, 0 on closed socket, or a
 * negative error.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Copy at most the caller's buffer; flag any truncation */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	/* Ancillary data depends on the channel: RAW gets the legacy
	 * HCI cmsgs, everything else gets plain receive timestamps.
	 */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
953
/* Transmit one frame from userspace on an HCI socket.
 *
 * The first byte of the user payload is the HCI packet type indicator;
 * it is copied into the skb control block and stripped before the frame
 * is queued.  Delivery depends on the socket's channel:
 *
 *   - RAW / USER:    frame is queued directly to the controller
 *   - MONITOR:       read-only channel, sending is rejected
 *   - anything else: looked up as a registered management channel and
 *                    handed to its mgmt_control() handler
 *
 * Returns the number of bytes consumed (len) on success or a negative
 * errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum is the 1-byte packet type indicator plus the smallest
	 * possible HCI header; anything larger than a full frame is bogus.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		/* The monitor channel only delivers traces to userspace. */
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Unknown channel numbers may belong to a registered
		 * management channel; dispatch under the list lock.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = mgmt_control(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* Peel off the leading packet type indicator byte. */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Commands outside the security filter's whitelist require
		 * CAP_NET_RAW on the RAW channel.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			/* Vendor-specific commands (OGF 0x3f) bypass the
			 * command queue and go straight to the transport.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req_start = 1;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Raw ACL/SCO injection is privileged. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1078
/* Set a RAW-channel socket option (HCI_DATA_DIR, HCI_TIME_STAMP or
 * HCI_FILTER).  Only sockets bound to HCI_CHANNEL_RAW may use these
 * options; every other channel gets -EBADFD.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		/* Toggle per-packet direction info in recvmsg cmsg data. */
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		/* Toggle per-packet timestamps in recvmsg cmsg data. */
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-load uf with the socket's current filter so that a
		 * short copy_from_user() below only overwrites a prefix and
		 * the remaining fields keep their existing values.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may only narrow the filter down to
		 * the types/events permitted by the security filter.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		/* Commit the (possibly restricted) filter to the socket. */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1161
1162 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1163                                char __user *optval, int __user *optlen)
1164 {
1165         struct hci_ufilter uf;
1166         struct sock *sk = sock->sk;
1167         int len, opt, err = 0;
1168
1169         BT_DBG("sk %p, opt %d", sk, optname);
1170
1171         if (get_user(len, optlen))
1172                 return -EFAULT;
1173
1174         lock_sock(sk);
1175
1176         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1177                 err = -EBADFD;
1178                 goto done;
1179         }
1180
1181         switch (optname) {
1182         case HCI_DATA_DIR:
1183                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1184                         opt = 1;
1185                 else
1186                         opt = 0;
1187
1188                 if (put_user(opt, optval))
1189                         err = -EFAULT;
1190                 break;
1191
1192         case HCI_TIME_STAMP:
1193                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1194                         opt = 1;
1195                 else
1196                         opt = 0;
1197
1198                 if (put_user(opt, optval))
1199                         err = -EFAULT;
1200                 break;
1201
1202         case HCI_FILTER:
1203                 {
1204                         struct hci_filter *f = &hci_pi(sk)->filter;
1205
1206                         memset(&uf, 0, sizeof(uf));
1207                         uf.type_mask = f->type_mask;
1208                         uf.opcode    = f->opcode;
1209                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1210                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1211                 }
1212
1213                 len = min_t(unsigned int, len, sizeof(uf));
1214                 if (copy_to_user(optval, &uf, len))
1215                         err = -EFAULT;
1216                 break;
1217
1218         default:
1219                 err = -ENOPROTOOPT;
1220                 break;
1221         }
1222
1223 done:
1224         release_sock(sk);
1225         return err;
1226 }
1227
/* Socket operations for HCI sockets.  Connection-oriented entry points
 * (listen, connect, accept, ...) are stubbed out with the sock_no_*
 * helpers since HCI sockets are datagram-style raw sockets.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1247
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1253
1254 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1255                            int kern)
1256 {
1257         struct sock *sk;
1258
1259         BT_DBG("sock %p", sock);
1260
1261         if (sock->type != SOCK_RAW)
1262                 return -ESOCKTNOSUPPORT;
1263
1264         sock->ops = &hci_sock_ops;
1265
1266         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1267         if (!sk)
1268                 return -ENOMEM;
1269
1270         sock_init_data(sock, sk);
1271
1272         sock_reset_flag(sk, SOCK_ZAPPED);
1273
1274         sk->sk_protocol = protocol;
1275
1276         sock->state = SS_UNCONNECTED;
1277         sk->sk_state = BT_OPEN;
1278
1279         bt_sock_link(&hci_sk_list, sk);
1280         return 0;
1281 }
1282
/* Family descriptor registered for BTPROTO_HCI; routes socket(2) calls
 * to hci_sock_create().
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1288
1289 int __init hci_sock_init(void)
1290 {
1291         int err;
1292
1293         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1294
1295         err = proto_register(&hci_sk_proto, 0);
1296         if (err < 0)
1297                 return err;
1298
1299         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1300         if (err < 0) {
1301                 BT_ERR("HCI socket registration failed");
1302                 goto error;
1303         }
1304
1305         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1306         if (err < 0) {
1307                 BT_ERR("Failed to create HCI proc file");
1308                 bt_sock_unregister(BTPROTO_HCI);
1309                 goto error;
1310         }
1311
1312         BT_INFO("HCI socket layer initialized");
1313
1314         return 0;
1315
1316 error:
1317         proto_unregister(&hci_sk_proto);
1318         return err;
1319 }
1320
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init(): proc entry, socket family, then the protocol.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}