net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
        return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

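/* hci_test_bit() treats @addr as an array of 32-bit words: nr >> 5 selects
 * the word and nr & 31 the bit within it.  The table below is indexed the
 * same way and acts as a whitelist of the events and commands that raw
 * sockets without CAP_NET_RAW are allowed to see and send.
 */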
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};

static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

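/* Decide whether @skb should be withheld from @sk according to the socket's
 * HCI filter.  Returns true when the packet must be dropped: its packet type
 * is not in the type mask, its event code is not in the event mask, or an
 * opcode filter is set and the Command Complete/Status event carries a
 * different opcode.
 */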
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
        struct hci_filter *flt;
        int flt_type, flt_event;

        /* Apply filter */
        flt = &hci_pi(sk)->filter;

        if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
                flt_type = 0;
        else
                flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

        if (!test_bit(flt_type, &flt->type_mask))
                return true;

        /* Extra filter for event packets only */
        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
                return false;

        flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

        if (!hci_test_bit(flt_event, &flt->event_mask))
                return true;

        /* Check filter only when opcode is set */
        if (!flt->opcode)
                return false;

        if (flt_event == HCI_EV_CMD_COMPLETE &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
                return true;

        if (flt_event == HCI_EV_CMD_STATUS &&
            flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
                return true;

        return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Skip the original socket */
                if (sk == skip_sk)
                        continue;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

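/* Frames delivered on the monitor channel are prefixed with a
 * struct hci_mon_hdr carrying a little-endian opcode, controller index
 * and payload length, so a sniffer can demultiplex traffic from all
 * controllers on a single socket.
 */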
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;
        __le16 opcode;

        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
        }

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                if (!skb_copy) {
                        struct hci_mon_hdr *hdr;

                        /* Create a private copy with headroom */
                        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
                                                      GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put header before the data */
                        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
                        hdr->opcode = opcode;
                        hdr->index = cpu_to_le16(hdev->id);
                        hdr->len = cpu_to_le16(skb->len);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
        struct sock *sk;

        BT_DBG("len %d", skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                if (sk->sk_state != BT_BOUND)
                        continue;

                if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
                        continue;

                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        continue;

                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);
}

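/* Build a monitor record announcing controller registration (NEW_INDEX,
 * including device type, bus, address and name) or unregistration
 * (DEL_INDEX).  The caller owns the returned skb.
 */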
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}

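/* Replay a NEW_INDEX record for every controller that is already
 * registered, so that a freshly bound monitor socket learns about
 * existing devices before it starts receiving live traffic.
 */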
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);
        }

        read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
        struct hci_event_hdr *hdr;
        struct hci_ev_stack_internal *ev;
        struct sk_buff *skb;

        skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
        if (!skb)
                return;

        hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
        hdr->evt  = HCI_EV_STACK_INTERNAL;
        hdr->plen = sizeof(*ev) + dlen;

        ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
        ev->type = type;
        memcpy(ev->data, data, dlen);

        bt_cb(skb)->incoming = 1;
        __net_timestamp(skb);

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
        struct hci_ev_si_device ev;

        BT_DBG("hdev %s event %d", hdev->name, event);

        /* Send event to monitor */
        if (atomic_read(&monitor_promisc)) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, event);
                if (skb) {
                        send_monitor_event(skb);
                        kfree_skb(skb);
                }
        }

        /* Send event to sockets */
        ev.event  = event;
        ev.dev_id = hdev->id;
        hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

        if (event == HCI_DEV_UNREG) {
                struct sock *sk;

                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
                        bh_lock_sock_nested(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
                                sk->sk_state = BT_OPEN;
                                sk->sk_state_change(sk);

                                hci_dev_put(hdev);
                        }
                        bh_unlock_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
}

static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        hdev = hci_pi(sk)->hdev;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        mgmt_index_added(hdev);
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        hci_dev_close(hdev->id);
                }

                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
        bdaddr_t bdaddr;
        int err;

        if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
                return -EFAULT;

        hci_dev_lock(hdev);

        err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);

        hci_dev_unlock(hdev);

        return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
                return -EOPNOTSUPP;

        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                if (test_bit(HCI_UP, &hdev->flags) ||
                    test_bit(HCI_INIT, &hdev->flags) ||
                    test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
                        mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                err = -EINVAL;
                goto done;
        }

        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}

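/* For reference, binding from userspace mirrors the checks above.  A minimal
 * sketch (illustrative only, not part of this file; error handling omitted,
 * and dev_id 0 is just an example controller index):
 *
 *      int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *      struct sockaddr_hci a = {
 *              .hci_family  = AF_BLUETOOTH,
 *              .hci_dev     = 0,
 *              .hci_channel = HCI_CHANNEL_RAW,
 *      };
 *      bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * HCI_CHANNEL_USER and HCI_CHANNEL_MONITOR additionally require
 * CAP_NET_ADMIN and CAP_NET_RAW respectively, as enforced in
 * hci_sock_bind().
 */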
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
                            int *addr_len, int peer)
{
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        int err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (peer)
                return -EOPNOTSUPP;

        lock_sock(sk);

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
        haddr->hci_channel = hci_pi(sk)->channel;

done:
        release_sock(sk);
        return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
                          struct sk_buff *skb)
{
        __u32 mask = hci_pi(sk)->cmsg_mask;

        if (mask & HCI_CMSG_DIR) {
                int incoming = bt_cb(skb)->incoming;
                put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
                         &incoming);
        }

        if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
                struct compat_timeval ctv;
#endif
                struct timeval tv;
                void *data;
                int len;

                skb_get_timestamp(skb, &tv);

                data = &tv;
                len = sizeof(tv);
#ifdef CONFIG_COMPAT
                if (!COMPAT_USE_64BIT_TIME &&
                    (msg->msg_flags & MSG_CMSG_COMPAT)) {
                        ctv.tv_sec = tv.tv_sec;
                        ctv.tv_usec = tv.tv_usec;
                        data = &ctv;
                        len = sizeof(ctv);
                }
#endif

                put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
        }
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
        int noblock = flags & MSG_DONTWAIT;
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int copied, err;

        BT_DBG("sock %p, sk %p", sock, sk);

        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;

        if (sk->sk_state == BT_CLOSED)
                return 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
        case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
                break;
        }

        skb_free_datagram(sk, skb);

        return err ? : copied;
}

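/* Writes on RAW and USER channel sockets must start with the one-byte HCI
 * packet type, followed by the packet itself; hence the minimum length of
 * four bytes checked below.  Commands from RAW sockets are run through the
 * security filter and, except for vendor commands (OGF 0x3f), go through
 * the command queue so the core can track single-command requests.
 */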
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;
        struct sk_buff *skb;
        int err;

        BT_DBG("sock %p sk %p", sock, sk);

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
                return -EINVAL;

        if (len < 4 || len > HCI_MAX_FRAME_SIZE)
                return -EINVAL;

        lock_sock(sk);

        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
        case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
                goto done;
        case HCI_CHANNEL_MONITOR:
                err = -EOPNOTSUPP;
                goto done;
        default:
                err = -EINVAL;
                goto done;
        }

        hdev = hci_pi(sk)->hdev;
        if (!hdev) {
                err = -EBADFD;
                goto done;
        }

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto done;

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                err = -EFAULT;
                goto drop;
        }

        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);

        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                /* No permission check is needed for user channel
                 * since that gets enforced when binding the socket.
                 *
                 * However check that the packet type is valid.
                 */
                if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
                        err = -EINVAL;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);

                if (((ogf > HCI_SFLT_MAX_OGF) ||
                     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                                   &hci_sec_filter.ocf_mask[ogf])) &&
                    !capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                if (ogf == 0x3f) {
                        skb_queue_tail(&hdev->raw_q, skb);
                        queue_work(hdev->workqueue, &hdev->tx_work);
                } else {
                        /* Stand-alone HCI commands must be flagged as
                         * single-command requests.
                         */
                        bt_cb(skb)->req.start = true;

                        skb_queue_tail(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        } else {
                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto drop;
                }

                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }

        err = len;

done:
        release_sock(sk);
        return err;

drop:
        kfree_skb(skb);
        goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int len)
{
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
                break;

        case HCI_TIME_STAMP:
                if (get_user(opt, (int __user *)optval)) {
                        err = -EFAULT;
                        break;
                }

                if (opt)
                        hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
                else
                        hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_from_user(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }

                if (!capable(CAP_NET_RAW)) {
                        uf.type_mask &= hci_sec_filter.type_mask;
                        uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
                        uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
                }

                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        f->type_mask = uf.type_mask;
                        f->opcode    = uf.opcode;
                        *((u32 *) f->event_mask + 0) = uf.event_mask[0];
                        *((u32 *) f->event_mask + 1) = uf.event_mask[1];
                }
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        struct hci_ufilter uf;
        struct sock *sk = sock->sk;
        int len, opt, err = 0;

        BT_DBG("sk %p, opt %d", sk, optname);

        if (get_user(len, optlen))
                return -EFAULT;

        lock_sock(sk);

        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        switch (optname) {
        case HCI_DATA_DIR:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_TIME_STAMP:
                if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
                        opt = 1;
                else
                        opt = 0;

                if (put_user(opt, optval))
                        err = -EFAULT;
                break;

        case HCI_FILTER:
                {
                        struct hci_filter *f = &hci_pi(sk)->filter;

                        memset(&uf, 0, sizeof(uf));
                        uf.type_mask = f->type_mask;
                        uf.opcode    = f->opcode;
                        uf.event_mask[0] = *((u32 *) f->event_mask + 0);
                        uf.event_mask[1] = *((u32 *) f->event_mask + 1);
                }

                len = min_t(unsigned int, len, sizeof(uf));
                if (copy_to_user(optval, &uf, len))
                        err = -EFAULT;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

done:
        release_sock(sk);
        return err;
}

static const struct proto_ops hci_sock_ops = {
        .family         = PF_BLUETOOTH,
        .owner          = THIS_MODULE,
        .release        = hci_sock_release,
        .bind           = hci_sock_bind,
        .getname        = hci_sock_getname,
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
        .getsockopt     = hci_sock_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .mmap           = sock_no_mmap
};

static struct proto hci_sk_proto = {
        .name           = "HCI",
        .owner          = THIS_MODULE,
        .obj_size       = sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
                           int kern)
{
        struct sock *sk;

        BT_DBG("sock %p", sock);

        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        sock->ops = &hci_sock_ops;

        sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = protocol;

        sock->state = SS_UNCONNECTED;
        sk->sk_state = BT_OPEN;

        bt_sock_link(&hci_sk_list, sk);
        return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
        .family = PF_BLUETOOTH,
        .owner  = THIS_MODULE,
        .create = hci_sock_create,
};

int __init hci_sock_init(void)
{
        int err;

        err = proto_register(&hci_sk_proto, 0);
        if (err < 0)
                return err;

        err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
        if (err < 0) {
                BT_ERR("HCI socket registration failed");
                goto error;
        }

        err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
        if (err < 0) {
                BT_ERR("Failed to create HCI proc file");
                bt_sock_unregister(BTPROTO_HCI);
                goto error;
        }

        BT_INFO("HCI socket layer initialized");

        return 0;

error:
        proto_unregister(&hci_sk_proto);
        return err;
}

void hci_sock_cleanup(void)
{
        bt_procfs_cleanup(&init_net, "hci");
        bt_sock_unregister(BTPROTO_HCI);
        proto_unregister(&hci_sk_proto);
}