Bluetooth: Rename hci_send_to_control to hci_send_to_channel
net/bluetooth/hci_sock.c (firefly-linux-kernel-4.4.55.git)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
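
/*
 * Illustrative userspace sketch (not part of this file; constants and the
 * sockaddr_hci layout come from the Bluetooth userspace headers, and device
 * index 0 is only an example): opening and binding the raw HCI socket that
 * the code below implements.
 *
 *        int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *        struct sockaddr_hci addr = {
 *                .hci_family  = AF_BLUETOOTH,
 *                .hci_dev     = 0,               // hci0, example only
 *                .hci_channel = HCI_CHANNEL_RAW,
 *        };
 *
 *        if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
 *                perror("hci raw socket");
 */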
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
36 /* ----- HCI socket interface ----- */
37
38 /* Socket info */
39 #define hci_pi(sk) ((struct hci_pinfo *) sk)
40
41 struct hci_pinfo {
42         struct bt_sock    bt;
43         struct hci_dev    *hdev;
44         struct hci_filter filter;
45         __u32             cmsg_mask;
46         unsigned short    channel;
47 };
48
49 static inline int hci_test_bit(int nr, void *addr)
50 {
51         return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
52 }
53
54 /* Security filter */
55 #define HCI_SFLT_MAX_OGF  5
56
57 struct hci_sec_filter {
58         __u32 type_mask;
59         __u32 event_mask[2];
60         __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
61 };
62
63 static const struct hci_sec_filter hci_sec_filter = {
64         /* Packet types */
65         0x10,
66         /* Events */
67         { 0x1000d9fe, 0x0000b00c },
68         /* Commands */
69         {
70                 { 0x0 },
71                 /* OGF_LINK_CTL */
72                 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73                 /* OGF_LINK_POLICY */
74                 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
75                 /* OGF_HOST_CTL */
76                 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77                 /* OGF_INFO_PARAM */
78                 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
79                 /* OGF_STATUS_PARAM */
80                 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
81         }
82 };
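
/* Reading the table above: type_mask is a bitmap indexed by HCI packet
 * type, so 0x10 == (1 << HCI_EVENT_PKT) means a socket without CAP_NET_RAW
 * can only ever enable event packets in its receive filter.  event_mask
 * and ocf_mask are arrays of 32-bit words probed with hci_test_bit(); for
 * example, event 0x2f (Extended Inquiry Result) maps to word 1, bit 15,
 * which is set in 0x0000b00c above.
 */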
83
84 static struct bt_sock_list hci_sk_list = {
85         .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
86 };
87
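/* Decide whether the per-socket filter rejects this packet: returns true
 * when the frame must not be queued to the socket, false when it may be
 * delivered.
 */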
88 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
89 {
90         struct hci_filter *flt;
91         int flt_type, flt_event;
92
93         /* Apply filter */
94         flt = &hci_pi(sk)->filter;
95
96         if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
97                 flt_type = 0;
98         else
99                 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
100
101         if (!test_bit(flt_type, &flt->type_mask))
102                 return true;
103
104         /* Extra filter for event packets only */
105         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
106                 return false;
107
108         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
109
110         if (!hci_test_bit(flt_event, &flt->event_mask))
111                 return true;
112
113         /* Check filter only when opcode is set */
114         if (!flt->opcode)
115                 return false;
116
117         if (flt_event == HCI_EV_CMD_COMPLETE &&
118             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
119                 return true;
120
121         if (flt_event == HCI_EV_CMD_STATUS &&
122             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
123                 return true;
124
125         return false;
126 }
127
128 /* Send frame to RAW socket */
129 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
130 {
131         struct sock *sk;
132         struct sk_buff *skb_copy = NULL;
133
134         BT_DBG("hdev %p len %d", hdev, skb->len);
135
136         read_lock(&hci_sk_list.lock);
137
138         sk_for_each(sk, &hci_sk_list.head) {
139                 struct sk_buff *nskb;
140
141                 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
142                         continue;
143
144                 /* Don't send frame to the socket it came from */
145                 if (skb->sk == sk)
146                         continue;
147
148                 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
149                         if (is_filtered_packet(sk, skb))
150                                 continue;
151                 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
152                         if (!bt_cb(skb)->incoming)
153                                 continue;
154                         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
155                             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
156                             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
157                                 continue;
158                 } else {
159                         /* Don't send frame to other channel types */
160                         continue;
161                 }
162
163                 if (!skb_copy) {
164                         /* Create a private copy with headroom */
165                         skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
166                         if (!skb_copy)
167                                 continue;
168
169                         /* Put type byte before the data */
170                         memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
171                 }
172
173                 nskb = skb_clone(skb_copy, GFP_ATOMIC);
174                 if (!nskb)
175                         continue;
176
177                 if (sock_queue_rcv_skb(sk, nskb))
178                         kfree_skb(nskb);
179         }
180
181         read_unlock(&hci_sk_list.lock);
182
183         kfree_skb(skb_copy);
184 }
185
186 /* Send frame to sockets with specific channel */
187 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
188                          struct sock *skip_sk)
189 {
190         struct sock *sk;
191
192         BT_DBG("channel %u len %d", channel, skb->len);
193
194         read_lock(&hci_sk_list.lock);
195
196         sk_for_each(sk, &hci_sk_list.head) {
197                 struct sk_buff *nskb;
198
199                 /* Skip the original socket */
200                 if (sk == skip_sk)
201                         continue;
202
203                 if (sk->sk_state != BT_BOUND)
204                         continue;
205
206                 if (hci_pi(sk)->channel != channel)
207                         continue;
208
209                 nskb = skb_clone(skb, GFP_ATOMIC);
210                 if (!nskb)
211                         continue;
212
213                 if (sock_queue_rcv_skb(sk, nskb))
214                         kfree_skb(nskb);
215         }
216
217         read_unlock(&hci_sk_list.lock);
218 }
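
/* Note on the rename in the commit title: the old hci_send_to_control()
 * was hard-wired to HCI_CHANNEL_CONTROL sockets; making the channel a
 * parameter, as above, lets the same broadcast helper serve other fixed
 * channels as well.
 */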
219
220 static void queue_monitor_skb(struct sk_buff *skb)
221 {
222         struct sock *sk;
223
224         BT_DBG("len %d", skb->len);
225
226         read_lock(&hci_sk_list.lock);
227
228         sk_for_each(sk, &hci_sk_list.head) {
229                 struct sk_buff *nskb;
230
231                 if (sk->sk_state != BT_BOUND)
232                         continue;
233
234                 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
235                         continue;
236
237                 nskb = skb_clone(skb, GFP_ATOMIC);
238                 if (!nskb)
239                         continue;
240
241                 if (sock_queue_rcv_skb(sk, nskb))
242                         kfree_skb(nskb);
243         }
244
245         read_unlock(&hci_sk_list.lock);
246 }
247
248 /* Send frame to monitor socket */
249 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
250 {
251         struct sk_buff *skb_copy = NULL;
252         struct hci_mon_hdr *hdr;
253         __le16 opcode;
254
255         if (!atomic_read(&monitor_promisc))
256                 return;
257
258         BT_DBG("hdev %p len %d", hdev, skb->len);
259
260         switch (bt_cb(skb)->pkt_type) {
261         case HCI_COMMAND_PKT:
262                 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
263                 break;
264         case HCI_EVENT_PKT:
265                 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
266                 break;
267         case HCI_ACLDATA_PKT:
268                 if (bt_cb(skb)->incoming)
269                         opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
270                 else
271                         opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
272                 break;
273         case HCI_SCODATA_PKT:
274                 if (bt_cb(skb)->incoming)
275                         opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
276                 else
277                         opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
278                 break;
279         default:
280                 return;
281         }
282
283         /* Create a private copy with headroom */
284         skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
285         if (!skb_copy)
286                 return;
287
288         /* Put header before the data */
289         hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
290         hdr->opcode = opcode;
291         hdr->index = cpu_to_le16(hdev->id);
292         hdr->len = cpu_to_le16(skb->len);
293
294         queue_monitor_skb(skb_copy);
295         kfree_skb(skb_copy);
296 }
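
/* The frames queued above carry a struct hci_mon_hdr (little-endian
 * opcode, controller index and payload length) in front of the original
 * packet; a reader bound to HCI_CHANNEL_MONITOR, such as btmon, is
 * presumably what consumes this stream.
 */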
297
298 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
299 {
300         struct hci_mon_hdr *hdr;
301         struct hci_mon_new_index *ni;
302         struct sk_buff *skb;
303         __le16 opcode;
304
305         switch (event) {
306         case HCI_DEV_REG:
307                 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
308                 if (!skb)
309                         return NULL;
310
311                 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
312                 ni->type = hdev->dev_type;
313                 ni->bus = hdev->bus;
314                 bacpy(&ni->bdaddr, &hdev->bdaddr);
315                 memcpy(ni->name, hdev->name, 8);
316
317                 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
318                 break;
319
320         case HCI_DEV_UNREG:
321                 skb = bt_skb_alloc(0, GFP_ATOMIC);
322                 if (!skb)
323                         return NULL;
324
325                 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
326                 break;
327
328         default:
329                 return NULL;
330         }
331
332         __net_timestamp(skb);
333
334         hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
335         hdr->opcode = opcode;
336         hdr->index = cpu_to_le16(hdev->id);
337         hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
338
339         return skb;
340 }
341
342 static void send_monitor_replay(struct sock *sk)
343 {
344         struct hci_dev *hdev;
345
346         read_lock(&hci_dev_list_lock);
347
348         list_for_each_entry(hdev, &hci_dev_list, list) {
349                 struct sk_buff *skb;
350
351                 skb = create_monitor_event(hdev, HCI_DEV_REG);
352                 if (!skb)
353                         continue;
354
355                 if (sock_queue_rcv_skb(sk, skb))
356                         kfree_skb(skb);
357         }
358
359         read_unlock(&hci_dev_list_lock);
360 }
361
362 /* Generate internal stack event */
363 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
364 {
365         struct hci_event_hdr *hdr;
366         struct hci_ev_stack_internal *ev;
367         struct sk_buff *skb;
368
369         skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
370         if (!skb)
371                 return;
372
373         hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
374         hdr->evt  = HCI_EV_STACK_INTERNAL;
375         hdr->plen = sizeof(*ev) + dlen;
376
377         ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
378         ev->type = type;
379         memcpy(ev->data, data, dlen);
380
381         bt_cb(skb)->incoming = 1;
382         __net_timestamp(skb);
383
384         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
385         hci_send_to_sock(hdev, skb);
386         kfree_skb(skb);
387 }
388
389 void hci_sock_dev_event(struct hci_dev *hdev, int event)
390 {
391         struct hci_ev_si_device ev;
392
393         BT_DBG("hdev %s event %d", hdev->name, event);
394
395         /* Send event to monitor */
396         if (atomic_read(&monitor_promisc)) {
397                 struct sk_buff *skb;
398
399                 skb = create_monitor_event(hdev, event);
400                 if (skb) {
401                         queue_monitor_skb(skb);
402                         kfree_skb(skb);
403                 }
404         }
405
406         /* Send event to sockets */
407         ev.event  = event;
408         ev.dev_id = hdev->id;
409         hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
410
411         if (event == HCI_DEV_UNREG) {
412                 struct sock *sk;
413
414                 /* Detach sockets from device */
415                 read_lock(&hci_sk_list.lock);
416                 sk_for_each(sk, &hci_sk_list.head) {
417                         bh_lock_sock_nested(sk);
418                         if (hci_pi(sk)->hdev == hdev) {
419                                 hci_pi(sk)->hdev = NULL;
420                                 sk->sk_err = EPIPE;
421                                 sk->sk_state = BT_OPEN;
422                                 sk->sk_state_change(sk);
423
424                                 hci_dev_put(hdev);
425                         }
426                         bh_unlock_sock(sk);
427                 }
428                 read_unlock(&hci_sk_list.lock);
429         }
430 }
431
432 static int hci_sock_release(struct socket *sock)
433 {
434         struct sock *sk = sock->sk;
435         struct hci_dev *hdev;
436
437         BT_DBG("sock %p sk %p", sock, sk);
438
439         if (!sk)
440                 return 0;
441
442         hdev = hci_pi(sk)->hdev;
443
444         if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
445                 atomic_dec(&monitor_promisc);
446
447         bt_sock_unlink(&hci_sk_list, sk);
448
449         if (hdev) {
450                 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
451                         mgmt_index_added(hdev);
452                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
453                         hci_dev_close(hdev->id);
454                 }
455
456                 atomic_dec(&hdev->promisc);
457                 hci_dev_put(hdev);
458         }
459
460         sock_orphan(sk);
461
462         skb_queue_purge(&sk->sk_receive_queue);
463         skb_queue_purge(&sk->sk_write_queue);
464
465         sock_put(sk);
466         return 0;
467 }
468
469 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
470 {
471         bdaddr_t bdaddr;
472         int err;
473
474         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
475                 return -EFAULT;
476
477         hci_dev_lock(hdev);
478
479         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
480
481         hci_dev_unlock(hdev);
482
483         return err;
484 }
485
486 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
487 {
488         bdaddr_t bdaddr;
489         int err;
490
491         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
492                 return -EFAULT;
493
494         hci_dev_lock(hdev);
495
496         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
497
498         hci_dev_unlock(hdev);
499
500         return err;
501 }
502
503 /* Ioctls that require bound socket */
504 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
505                                 unsigned long arg)
506 {
507         struct hci_dev *hdev = hci_pi(sk)->hdev;
508
509         if (!hdev)
510                 return -EBADFD;
511
512         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
513                 return -EBUSY;
514
515         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
516                 return -EOPNOTSUPP;
517
518         if (hdev->dev_type != HCI_BREDR)
519                 return -EOPNOTSUPP;
520
521         switch (cmd) {
522         case HCISETRAW:
523                 if (!capable(CAP_NET_ADMIN))
524                         return -EPERM;
525                 return -EOPNOTSUPP;
526
527         case HCIGETCONNINFO:
528                 return hci_get_conn_info(hdev, (void __user *) arg);
529
530         case HCIGETAUTHINFO:
531                 return hci_get_auth_info(hdev, (void __user *) arg);
532
533         case HCIBLOCKADDR:
534                 if (!capable(CAP_NET_ADMIN))
535                         return -EPERM;
536                 return hci_sock_blacklist_add(hdev, (void __user *) arg);
537
538         case HCIUNBLOCKADDR:
539                 if (!capable(CAP_NET_ADMIN))
540                         return -EPERM;
541                 return hci_sock_blacklist_del(hdev, (void __user *) arg);
542         }
543
544         return -ENOIOCTLCMD;
545 }
546
547 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
548                           unsigned long arg)
549 {
550         void __user *argp = (void __user *) arg;
551         struct sock *sk = sock->sk;
552         int err;
553
554         BT_DBG("cmd %x arg %lx", cmd, arg);
555
556         lock_sock(sk);
557
558         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
559                 err = -EBADFD;
560                 goto done;
561         }
562
563         release_sock(sk);
564
565         switch (cmd) {
566         case HCIGETDEVLIST:
567                 return hci_get_dev_list(argp);
568
569         case HCIGETDEVINFO:
570                 return hci_get_dev_info(argp);
571
572         case HCIGETCONNLIST:
573                 return hci_get_conn_list(argp);
574
575         case HCIDEVUP:
576                 if (!capable(CAP_NET_ADMIN))
577                         return -EPERM;
578                 return hci_dev_open(arg);
579
580         case HCIDEVDOWN:
581                 if (!capable(CAP_NET_ADMIN))
582                         return -EPERM;
583                 return hci_dev_close(arg);
584
585         case HCIDEVRESET:
586                 if (!capable(CAP_NET_ADMIN))
587                         return -EPERM;
588                 return hci_dev_reset(arg);
589
590         case HCIDEVRESTAT:
591                 if (!capable(CAP_NET_ADMIN))
592                         return -EPERM;
593                 return hci_dev_reset_stat(arg);
594
595         case HCISETSCAN:
596         case HCISETAUTH:
597         case HCISETENCRYPT:
598         case HCISETPTYPE:
599         case HCISETLINKPOL:
600         case HCISETLINKMODE:
601         case HCISETACLMTU:
602         case HCISETSCOMTU:
603                 if (!capable(CAP_NET_ADMIN))
604                         return -EPERM;
605                 return hci_dev_cmd(cmd, argp);
606
607         case HCIINQUIRY:
608                 return hci_inquiry(argp);
609         }
610
611         lock_sock(sk);
612
613         err = hci_sock_bound_ioctl(sk, cmd, arg);
614
615 done:
616         release_sock(sk);
617         return err;
618 }
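
/*
 * Illustrative userspace sketch (assumptions: fd is a raw BTPROTO_HCI
 * socket as in the example near the top of this file, and the caller has
 * CAP_NET_ADMIN): bringing an adapter up via the HCIDEVUP ioctl dispatched
 * above, the way hciconfig does.
 *
 *        int dev_id = 0;                         // hci0, example only
 *
 *        if (ioctl(fd, HCIDEVUP, dev_id) < 0 && errno != EALREADY)
 *                perror("HCIDEVUP");
 */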
619
620 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
621                          int addr_len)
622 {
623         struct sockaddr_hci haddr;
624         struct sock *sk = sock->sk;
625         struct hci_dev *hdev = NULL;
626         int len, err = 0;
627
628         BT_DBG("sock %p sk %p", sock, sk);
629
630         if (!addr)
631                 return -EINVAL;
632
633         memset(&haddr, 0, sizeof(haddr));
634         len = min_t(unsigned int, sizeof(haddr), addr_len);
635         memcpy(&haddr, addr, len);
636
637         if (haddr.hci_family != AF_BLUETOOTH)
638                 return -EINVAL;
639
640         lock_sock(sk);
641
642         if (sk->sk_state == BT_BOUND) {
643                 err = -EALREADY;
644                 goto done;
645         }
646
647         switch (haddr.hci_channel) {
648         case HCI_CHANNEL_RAW:
649                 if (hci_pi(sk)->hdev) {
650                         err = -EALREADY;
651                         goto done;
652                 }
653
654                 if (haddr.hci_dev != HCI_DEV_NONE) {
655                         hdev = hci_dev_get(haddr.hci_dev);
656                         if (!hdev) {
657                                 err = -ENODEV;
658                                 goto done;
659                         }
660
661                         atomic_inc(&hdev->promisc);
662                 }
663
664                 hci_pi(sk)->hdev = hdev;
665                 break;
666
667         case HCI_CHANNEL_USER:
668                 if (hci_pi(sk)->hdev) {
669                         err = -EALREADY;
670                         goto done;
671                 }
672
673                 if (haddr.hci_dev == HCI_DEV_NONE) {
674                         err = -EINVAL;
675                         goto done;
676                 }
677
678                 if (!capable(CAP_NET_ADMIN)) {
679                         err = -EPERM;
680                         goto done;
681                 }
682
683                 hdev = hci_dev_get(haddr.hci_dev);
684                 if (!hdev) {
685                         err = -ENODEV;
686                         goto done;
687                 }
688
689                 if (test_bit(HCI_UP, &hdev->flags) ||
690                     test_bit(HCI_INIT, &hdev->flags) ||
691                     test_bit(HCI_SETUP, &hdev->dev_flags) ||
692                     test_bit(HCI_CONFIG, &hdev->dev_flags)) {
693                         err = -EBUSY;
694                         hci_dev_put(hdev);
695                         goto done;
696                 }
697
698                 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
699                         err = -EUSERS;
700                         hci_dev_put(hdev);
701                         goto done;
702                 }
703
704                 mgmt_index_removed(hdev);
705
706                 err = hci_dev_open(hdev->id);
707                 if (err) {
708                         clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
709                         mgmt_index_added(hdev);
710                         hci_dev_put(hdev);
711                         goto done;
712                 }
713
714                 atomic_inc(&hdev->promisc);
715
716                 hci_pi(sk)->hdev = hdev;
717                 break;
718
719         case HCI_CHANNEL_CONTROL:
720                 if (haddr.hci_dev != HCI_DEV_NONE) {
721                         err = -EINVAL;
722                         goto done;
723                 }
724
725                 if (!capable(CAP_NET_ADMIN)) {
726                         err = -EPERM;
727                         goto done;
728                 }
729
730                 break;
731
732         case HCI_CHANNEL_MONITOR:
733                 if (haddr.hci_dev != HCI_DEV_NONE) {
734                         err = -EINVAL;
735                         goto done;
736                 }
737
738                 if (!capable(CAP_NET_RAW)) {
739                         err = -EPERM;
740                         goto done;
741                 }
742
743                 send_monitor_replay(sk);
744
745                 atomic_inc(&monitor_promisc);
746                 break;
747
748         default:
749                 err = -EINVAL;
750                 goto done;
751         }
752
753
754         hci_pi(sk)->channel = haddr.hci_channel;
755         sk->sk_state = BT_BOUND;
756
757 done:
758         release_sock(sk);
759         return err;
760 }
761
762 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
763                             int *addr_len, int peer)
764 {
765         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
766         struct sock *sk = sock->sk;
767         struct hci_dev *hdev;
768         int err = 0;
769
770         BT_DBG("sock %p sk %p", sock, sk);
771
772         if (peer)
773                 return -EOPNOTSUPP;
774
775         lock_sock(sk);
776
777         hdev = hci_pi(sk)->hdev;
778         if (!hdev) {
779                 err = -EBADFD;
780                 goto done;
781         }
782
783         *addr_len = sizeof(*haddr);
784         haddr->hci_family = AF_BLUETOOTH;
785         haddr->hci_dev    = hdev->id;
786         haddr->hci_channel = hci_pi(sk)->channel;
787
788 done:
789         release_sock(sk);
790         return err;
791 }
792
793 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
794                           struct sk_buff *skb)
795 {
796         __u32 mask = hci_pi(sk)->cmsg_mask;
797
798         if (mask & HCI_CMSG_DIR) {
799                 int incoming = bt_cb(skb)->incoming;
800                 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
801                          &incoming);
802         }
803
804         if (mask & HCI_CMSG_TSTAMP) {
805 #ifdef CONFIG_COMPAT
806                 struct compat_timeval ctv;
807 #endif
808                 struct timeval tv;
809                 void *data;
810                 int len;
811
812                 skb_get_timestamp(skb, &tv);
813
814                 data = &tv;
815                 len = sizeof(tv);
816 #ifdef CONFIG_COMPAT
817                 if (!COMPAT_USE_64BIT_TIME &&
818                     (msg->msg_flags & MSG_CMSG_COMPAT)) {
819                         ctv.tv_sec = tv.tv_sec;
820                         ctv.tv_usec = tv.tv_usec;
821                         data = &ctv;
822                         len = sizeof(ctv);
823                 }
824 #endif
825
826                 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
827         }
828 }
829
830 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
831                             struct msghdr *msg, size_t len, int flags)
832 {
833         int noblock = flags & MSG_DONTWAIT;
834         struct sock *sk = sock->sk;
835         struct sk_buff *skb;
836         int copied, err;
837
838         BT_DBG("sock %p, sk %p", sock, sk);
839
840         if (flags & (MSG_OOB))
841                 return -EOPNOTSUPP;
842
843         if (sk->sk_state == BT_CLOSED)
844                 return 0;
845
846         skb = skb_recv_datagram(sk, flags, noblock, &err);
847         if (!skb)
848                 return err;
849
850         copied = skb->len;
851         if (len < copied) {
852                 msg->msg_flags |= MSG_TRUNC;
853                 copied = len;
854         }
855
856         skb_reset_transport_header(skb);
857         err = skb_copy_datagram_msg(skb, 0, msg, copied);
858
859         switch (hci_pi(sk)->channel) {
860         case HCI_CHANNEL_RAW:
861                 hci_sock_cmsg(sk, msg, skb);
862                 break;
863         case HCI_CHANNEL_USER:
864         case HCI_CHANNEL_CONTROL:
865         case HCI_CHANNEL_MONITOR:
866                 sock_recv_timestamp(msg, sk, skb);
867                 break;
868         }
869
870         skb_free_datagram(sk, skb);
871
872         return err ? : copied;
873 }
874
875 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
876                             struct msghdr *msg, size_t len)
877 {
878         struct sock *sk = sock->sk;
879         struct hci_dev *hdev;
880         struct sk_buff *skb;
881         int err;
882
883         BT_DBG("sock %p sk %p", sock, sk);
884
885         if (msg->msg_flags & MSG_OOB)
886                 return -EOPNOTSUPP;
887
888         if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
889                 return -EINVAL;
890
891         if (len < 4 || len > HCI_MAX_FRAME_SIZE)
892                 return -EINVAL;
893
894         lock_sock(sk);
895
896         switch (hci_pi(sk)->channel) {
897         case HCI_CHANNEL_RAW:
898         case HCI_CHANNEL_USER:
899                 break;
900         case HCI_CHANNEL_CONTROL:
901                 err = mgmt_control(sk, msg, len);
902                 goto done;
903         case HCI_CHANNEL_MONITOR:
904                 err = -EOPNOTSUPP;
905                 goto done;
906         default:
907                 err = -EINVAL;
908                 goto done;
909         }
910
911         hdev = hci_pi(sk)->hdev;
912         if (!hdev) {
913                 err = -EBADFD;
914                 goto done;
915         }
916
917         if (!test_bit(HCI_UP, &hdev->flags)) {
918                 err = -ENETDOWN;
919                 goto done;
920         }
921
922         skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
923         if (!skb)
924                 goto done;
925
926         if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
927                 err = -EFAULT;
928                 goto drop;
929         }
930
931         bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
932         skb_pull(skb, 1);
933
934         if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
935                 /* No permission check is needed for user channel
936                  * since that gets enforced when binding the socket.
937                  *
938                  * However check that the packet type is valid.
939                  */
940                 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
941                     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
942                     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
943                         err = -EINVAL;
944                         goto drop;
945                 }
946
947                 skb_queue_tail(&hdev->raw_q, skb);
948                 queue_work(hdev->workqueue, &hdev->tx_work);
949         } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
950                 u16 opcode = get_unaligned_le16(skb->data);
951                 u16 ogf = hci_opcode_ogf(opcode);
952                 u16 ocf = hci_opcode_ocf(opcode);
953
954                 if (((ogf > HCI_SFLT_MAX_OGF) ||
955                      !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
956                                    &hci_sec_filter.ocf_mask[ogf])) &&
957                     !capable(CAP_NET_RAW)) {
958                         err = -EPERM;
959                         goto drop;
960                 }
961
962                 if (ogf == 0x3f) {
963                         skb_queue_tail(&hdev->raw_q, skb);
964                         queue_work(hdev->workqueue, &hdev->tx_work);
965                 } else {
966                         /* Stand-alone HCI commands must be flagged as
967                          * single-command requests.
968                          */
969                         bt_cb(skb)->req.start = true;
970
971                         skb_queue_tail(&hdev->cmd_q, skb);
972                         queue_work(hdev->workqueue, &hdev->cmd_work);
973                 }
974         } else {
975                 if (!capable(CAP_NET_RAW)) {
976                         err = -EPERM;
977                         goto drop;
978                 }
979
980                 skb_queue_tail(&hdev->raw_q, skb);
981                 queue_work(hdev->workqueue, &hdev->tx_work);
982         }
983
984         err = len;
985
986 done:
987         release_sock(sk);
988         return err;
989
990 drop:
991         kfree_skb(skb);
992         goto done;
993 }
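
/*
 * Illustrative userspace sketch of the command path above (assumption:
 * the sender has CAP_NET_RAW, since HCI_Reset is not whitelisted in
 * hci_sec_filter): the first byte becomes bt_cb(skb)->pkt_type, followed
 * by the little-endian opcode (OGF 0x03, OCF 0x0003) and a zero parameter
 * length.
 *
 *        unsigned char cmd[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *        if (write(fd, cmd, sizeof(cmd)) < 0)
 *                perror("HCI_Reset");
 */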
994
995 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
996                                char __user *optval, unsigned int len)
997 {
998         struct hci_ufilter uf = { .opcode = 0 };
999         struct sock *sk = sock->sk;
1000         int err = 0, opt = 0;
1001
1002         BT_DBG("sk %p, opt %d", sk, optname);
1003
1004         lock_sock(sk);
1005
1006         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1007                 err = -EBADFD;
1008                 goto done;
1009         }
1010
1011         switch (optname) {
1012         case HCI_DATA_DIR:
1013                 if (get_user(opt, (int __user *)optval)) {
1014                         err = -EFAULT;
1015                         break;
1016                 }
1017
1018                 if (opt)
1019                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1020                 else
1021                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1022                 break;
1023
1024         case HCI_TIME_STAMP:
1025                 if (get_user(opt, (int __user *)optval)) {
1026                         err = -EFAULT;
1027                         break;
1028                 }
1029
1030                 if (opt)
1031                         hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1032                 else
1033                         hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1034                 break;
1035
1036         case HCI_FILTER:
1037                 {
1038                         struct hci_filter *f = &hci_pi(sk)->filter;
1039
1040                         uf.type_mask = f->type_mask;
1041                         uf.opcode    = f->opcode;
1042                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1043                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1044                 }
1045
1046                 len = min_t(unsigned int, len, sizeof(uf));
1047                 if (copy_from_user(&uf, optval, len)) {
1048                         err = -EFAULT;
1049                         break;
1050                 }
1051
1052                 if (!capable(CAP_NET_RAW)) {
1053                         uf.type_mask &= hci_sec_filter.type_mask;
1054                         uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1055                         uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1056                 }
1057
1058                 {
1059                         struct hci_filter *f = &hci_pi(sk)->filter;
1060
1061                         f->type_mask = uf.type_mask;
1062                         f->opcode    = uf.opcode;
1063                         *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1064                         *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1065                 }
1066                 break;
1067
1068         default:
1069                 err = -ENOPROTOOPT;
1070                 break;
1071         }
1072
1073 done:
1074         release_sock(sk);
1075         return err;
1076 }
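
/*
 * Illustrative userspace sketch (assumption: the structure passed from
 * userspace has the same layout as the struct hci_ufilter used above,
 * which the BlueZ struct hci_filter matches): restricting a raw socket to
 * HCI event packets before reading from it.  Without CAP_NET_RAW the
 * kernel still clamps the masks against hci_sec_filter, as implemented
 * above.
 *
 *        struct hci_ufilter flt = {
 *                .type_mask  = 1 << HCI_EVENT_PKT,
 *                .event_mask = { ~0U, ~0U },
 *        };
 *
 *        if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
 *                perror("HCI_FILTER");
 */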
1077
1078 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1079                                char __user *optval, int __user *optlen)
1080 {
1081         struct hci_ufilter uf;
1082         struct sock *sk = sock->sk;
1083         int len, opt, err = 0;
1084
1085         BT_DBG("sk %p, opt %d", sk, optname);
1086
1087         if (get_user(len, optlen))
1088                 return -EFAULT;
1089
1090         lock_sock(sk);
1091
1092         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1093                 err = -EBADFD;
1094                 goto done;
1095         }
1096
1097         switch (optname) {
1098         case HCI_DATA_DIR:
1099                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1100                         opt = 1;
1101                 else
1102                         opt = 0;
1103
1104                 if (put_user(opt, optval))
1105                         err = -EFAULT;
1106                 break;
1107
1108         case HCI_TIME_STAMP:
1109                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1110                         opt = 1;
1111                 else
1112                         opt = 0;
1113
1114                 if (put_user(opt, optval))
1115                         err = -EFAULT;
1116                 break;
1117
1118         case HCI_FILTER:
1119                 {
1120                         struct hci_filter *f = &hci_pi(sk)->filter;
1121
1122                         memset(&uf, 0, sizeof(uf));
1123                         uf.type_mask = f->type_mask;
1124                         uf.opcode    = f->opcode;
1125                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1126                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1127                 }
1128
1129                 len = min_t(unsigned int, len, sizeof(uf));
1130                 if (copy_to_user(optval, &uf, len))
1131                         err = -EFAULT;
1132                 break;
1133
1134         default:
1135                 err = -ENOPROTOOPT;
1136                 break;
1137         }
1138
1139 done:
1140         release_sock(sk);
1141         return err;
1142 }
1143
1144 static const struct proto_ops hci_sock_ops = {
1145         .family         = PF_BLUETOOTH,
1146         .owner          = THIS_MODULE,
1147         .release        = hci_sock_release,
1148         .bind           = hci_sock_bind,
1149         .getname        = hci_sock_getname,
1150         .sendmsg        = hci_sock_sendmsg,
1151         .recvmsg        = hci_sock_recvmsg,
1152         .ioctl          = hci_sock_ioctl,
1153         .poll           = datagram_poll,
1154         .listen         = sock_no_listen,
1155         .shutdown       = sock_no_shutdown,
1156         .setsockopt     = hci_sock_setsockopt,
1157         .getsockopt     = hci_sock_getsockopt,
1158         .connect        = sock_no_connect,
1159         .socketpair     = sock_no_socketpair,
1160         .accept         = sock_no_accept,
1161         .mmap           = sock_no_mmap
1162 };
1163
1164 static struct proto hci_sk_proto = {
1165         .name           = "HCI",
1166         .owner          = THIS_MODULE,
1167         .obj_size       = sizeof(struct hci_pinfo)
1168 };
1169
1170 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1171                            int kern)
1172 {
1173         struct sock *sk;
1174
1175         BT_DBG("sock %p", sock);
1176
1177         if (sock->type != SOCK_RAW)
1178                 return -ESOCKTNOSUPPORT;
1179
1180         sock->ops = &hci_sock_ops;
1181
1182         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1183         if (!sk)
1184                 return -ENOMEM;
1185
1186         sock_init_data(sock, sk);
1187
1188         sock_reset_flag(sk, SOCK_ZAPPED);
1189
1190         sk->sk_protocol = protocol;
1191
1192         sock->state = SS_UNCONNECTED;
1193         sk->sk_state = BT_OPEN;
1194
1195         bt_sock_link(&hci_sk_list, sk);
1196         return 0;
1197 }
1198
1199 static const struct net_proto_family hci_sock_family_ops = {
1200         .family = PF_BLUETOOTH,
1201         .owner  = THIS_MODULE,
1202         .create = hci_sock_create,
1203 };
1204
1205 int __init hci_sock_init(void)
1206 {
1207         int err;
1208
1209         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1210
1211         err = proto_register(&hci_sk_proto, 0);
1212         if (err < 0)
1213                 return err;
1214
1215         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1216         if (err < 0) {
1217                 BT_ERR("HCI socket registration failed");
1218                 goto error;
1219         }
1220
1221         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1222         if (err < 0) {
1223                 BT_ERR("Failed to create HCI proc file");
1224                 bt_sock_unregister(BTPROTO_HCI);
1225                 goto error;
1226         }
1227
1228         BT_INFO("HCI socket layer initialized");
1229
1230         return 0;
1231
1232 error:
1233         proto_unregister(&hci_sk_proto);
1234         return err;
1235 }
1236
1237 void hci_sock_cleanup(void)
1238 {
1239         bt_procfs_cleanup(&init_net, "hci");
1240         bt_sock_unregister(BTPROTO_HCI);
1241         proto_unregister(&hci_sk_proto);
1242 }