/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Registered management channels, protected by mgmt_chan_list_lock. */
34 static LIST_HEAD(mgmt_chan_list);
35 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Count of sockets bound to HCI_CHANNEL_MONITOR; monitor events are
 * generated only while this is non-zero (see hci_send_to_monitor).
 */
37 static atomic_t monitor_promisc = ATOMIC_INIT(0);
39 /* ----- HCI socket interface ----- */
/* Cast a struct sock to the HCI per-socket private data. */
42 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* NOTE(review): the struct hci_pinfo declaration header and some members
 * are missing from this extraction; only these two fields are visible.
 */
/* Per-socket RAW-channel packet/event filter (see is_filtered_packet). */
47 struct hci_filter filter;
/* HCI_CHANNEL_* the socket was bound to in hci_sock_bind. */
49 unsigned short channel;
52 static inline int hci_test_bit(int nr, const void *addr)
54 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest OGF covered by the security filter table below. */
58 #define HCI_SFLT_MAX_OGF 5
60 struct hci_sec_filter {
/* Per-OGF bitmap of OCFs that unprivileged sockets may send
 * (indexed [ogf][ocf/32]; see the CAP_NET_RAW check in sendmsg).
 * NOTE(review): other members of this struct are missing from this
 * extraction.
 */
63 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Default security filter applied to sockets without CAP_NET_RAW.
 * NOTE(review): several initializer rows and their OGF comments are
 * missing here; only the visible rows are annotated.
 */
66 static const struct hci_sec_filter hci_sec_filter = {
70 { 0x1000d9fe, 0x0000b00c },
75 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
77 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
79 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
81 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
82 /* OGF_STATUS_PARAM */
83 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all HCI sockets, walked under its rwlock by the
 * hci_send_to_* broadcast helpers.
 */
87 static struct bt_sock_list hci_sk_list = {
88 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb should be withheld from RAW socket @sk, based on
 * the socket's hci_filter: packet-type mask, event mask and, for
 * command-complete/command-status events, the filter opcode.
 * NOTE(review): the returns/braces between these lines are missing from
 * this extraction; comments describe only the visible checks.
 */
91 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
93 struct hci_filter *flt;
94 int flt_type, flt_event;
97 flt = &hci_pi(sk)->filter;
/* Vendor packets are handled specially (result line not visible here). */
99 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
102 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
/* Packet type must be enabled in the socket's type mask. */
104 if (!test_bit(flt_type, &flt->type_mask))
107 /* Extra filter for event packets only */
108 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First data byte of an event packet is the event code. */
111 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
113 if (!hci_test_bit(flt_event, &flt->event_mask))
116 /* Check filter only when opcode is set */
/* Opcode sits at offset 3 (cmd complete) / 4 (cmd status) in the event. */
120 if (flt_event == HCI_EV_CMD_COMPLETE &&
121 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
124 if (flt_event == HCI_EV_CMD_STATUS &&
125 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
131 /* Send frame to RAW socket */
/* Broadcast @skb from @hdev to every bound RAW/USER socket, applying
 * per-channel filtering; a private copy with a leading packet-type byte
 * is created lazily and then cloned per receiving socket.
 * NOTE(review): continue/goto lines between the visible checks are
 * missing from this extraction.
 */
132 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
135 struct sk_buff *skb_copy = NULL;
137 BT_DBG("hdev %p len %d", hdev, skb->len);
139 read_lock(&hci_sk_list.lock);
141 sk_for_each(sk, &hci_sk_list.head) {
142 struct sk_buff *nskb;
/* Only sockets bound to this device receive the frame. */
144 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
147 /* Don't send frame to the socket it came from */
/* RAW sockets get the per-socket filter applied. */
151 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
152 if (is_filtered_packet(sk, skb))
/* USER channel sees only incoming event/ACL/SCO packets. */
154 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
155 if (!bt_cb(skb)->incoming)
157 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
158 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
159 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
162 /* Don't send frame to other channel types */
167 /* Create a private copy with headroom */
168 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
172 /* Put type byte before the data */
173 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
176 nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* Queue to the socket; on failure the clone is presumably freed —
 * the kfree_skb line is not visible in this extraction.
 */
180 if (sock_queue_rcv_skb(sk, nskb))
184 read_unlock(&hci_sk_list.lock);
189 /* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket on @channel, skipping @skip_sk.
 * NOTE(review): continue lines and the skip_sk comparison are missing
 * from this extraction.
 */
190 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
191 struct sock *skip_sk)
195 BT_DBG("channel %u len %d", channel, skb->len);
197 read_lock(&hci_sk_list.lock);
199 sk_for_each(sk, &hci_sk_list.head) {
200 struct sk_buff *nskb;
202 /* Skip the original socket */
206 if (sk->sk_state != BT_BOUND)
209 if (hci_pi(sk)->channel != channel)
212 nskb = skb_clone(skb, GFP_ATOMIC);
216 if (sock_queue_rcv_skb(sk, nskb))
220 read_unlock(&hci_sk_list.lock);
223 /* Send frame to monitor socket */
/* Wrap @skb in a hci_mon_hdr (opcode derived from packet type and
 * direction) and broadcast it on HCI_CHANNEL_MONITOR. Does nothing when
 * no monitor socket is open (monitor_promisc == 0).
 * NOTE(review): break statements, the default case and the trailing
 * kfree_skb are missing from this extraction.
 */
224 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
226 struct sk_buff *skb_copy = NULL;
227 struct hci_mon_hdr *hdr;
/* Fast path: no monitor listeners, skip all work. */
230 if (!atomic_read(&monitor_promisc))
233 BT_DBG("hdev %p len %d", hdev, skb->len);
/* Map HCI packet type (+ direction for data) to a monitor opcode. */
235 switch (bt_cb(skb)->pkt_type) {
236 case HCI_COMMAND_PKT:
237 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
240 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
242 case HCI_ACLDATA_PKT:
243 if (bt_cb(skb)->incoming)
244 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
246 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
248 case HCI_SCODATA_PKT:
249 if (bt_cb(skb)->incoming)
250 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
252 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
258 /* Create a private copy with headroom */
259 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
263 /* Put header before the data */
264 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
265 hdr->opcode = opcode;
266 hdr->index = cpu_to_le16(hdev->id);
/* Payload length: original skb length, header excluded. */
267 hdr->len = cpu_to_le16(skb->len);
269 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
/* Build a monitor-channel event skb for a device lifecycle @event:
 * HCI_MON_NEW_INDEX carries type/bus/bdaddr/name, HCI_MON_DEL_INDEX is
 * header-only. Returns the skb (NULL checks are in the missing lines).
 * NOTE(review): the switch statement framing, case labels and default
 * branch are missing from this extraction.
 */
273 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
275 struct hci_mon_hdr *hdr;
276 struct hci_mon_new_index *ni;
/* New-index event: fixed-size payload describing the controller. */
282 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
286 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
287 ni->type = hdev->dev_type;
289 bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Device name truncated to 8 bytes as defined by the monitor ABI. */
290 memcpy(ni->name, hdev->name, 8);
292 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* Delete-index event: empty payload. */
296 skb = bt_skb_alloc(0, GFP_ATOMIC);
300 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
307 __net_timestamp(skb);
309 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
310 hdr->opcode = opcode;
311 hdr->index = cpu_to_le16(hdev->id);
312 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG monitor event for every registered controller to
 * a newly bound monitor socket @sk, so it learns all existing indexes.
 * NOTE(review): the NULL-check/continue and kfree_skb lines are missing
 * from this extraction.
 */
317 static void send_monitor_replay(struct sock *sk)
319 struct hci_dev *hdev;
321 read_lock(&hci_dev_list_lock);
323 list_for_each_entry(hdev, &hci_dev_list, list) {
326 skb = create_monitor_event(hdev, HCI_DEV_REG);
330 if (sock_queue_rcv_skb(sk, skb))
334 read_unlock(&hci_dev_list_lock);
337 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @type and @dlen
 * bytes of @data, mark it incoming, and deliver it via
 * hci_send_to_sock. @hdev may be NULL (see hci_sock_dev_event caller).
 */
338 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
340 struct hci_event_hdr *hdr;
341 struct hci_ev_stack_internal *ev;
344 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
/* Standard HCI event header wrapping the internal event payload. */
348 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
349 hdr->evt = HCI_EV_STACK_INTERNAL;
350 hdr->plen = sizeof(*ev) + dlen;
352 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
354 memcpy(ev->data, data, dlen);
/* Present the synthetic event as if received from the controller. */
356 bt_cb(skb)->incoming = 1;
357 __net_timestamp(skb);
359 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
360 hci_send_to_sock(hdev, skb);
/* Notify userspace of a device lifecycle @event: forward it to monitor
 * sockets, raise an internal stack event, and on HCI_DEV_UNREG detach
 * every socket still bound to @hdev.
 * NOTE(review): kfree_skb, hci_dev_put and bh_unlock_sock lines are
 * missing from this extraction.
 */
364 void hci_sock_dev_event(struct hci_dev *hdev, int event)
366 struct hci_ev_si_device ev;
368 BT_DBG("hdev %s event %d", hdev->name, event);
370 /* Send event to monitor */
371 if (atomic_read(&monitor_promisc)) {
374 skb = create_monitor_event(hdev, event);
376 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
381 /* Send event to sockets */
383 ev.dev_id = hdev->id;
/* hdev == NULL broadcasts to all sockets regardless of binding. */
384 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
386 if (event == HCI_DEV_UNREG) {
389 /* Detach sockets from device */
390 read_lock(&hci_sk_list.lock);
391 sk_for_each(sk, &hci_sk_list.head) {
392 bh_lock_sock_nested(sk);
/* Drop the binding and wake readers so they see BT_OPEN. */
393 if (hci_pi(sk)->hdev == hdev) {
394 hci_pi(sk)->hdev = NULL;
396 sk->sk_state = BT_OPEN;
397 sk->sk_state_change(sk);
403 read_unlock(&hci_sk_list.lock);
/* Look up a registered management channel by number; caller must hold
 * mgmt_chan_list_lock. Returns the entry, or NULL on no match (the
 * return lines are missing from this extraction).
 */
407 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
409 struct hci_mgmt_chan *c;
411 list_for_each_entry(c, &mgmt_chan_list, list) {
412 if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find: takes and releases
 * mgmt_chan_list_lock around the lookup.
 */
419 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
421 struct hci_mgmt_chan *c;
423 mutex_lock(&mgmt_chan_list_lock);
424 c = __hci_mgmt_chan_find(channel);
425 mutex_unlock(&mgmt_chan_list_lock);
/* Register management channel @c. Channels below HCI_CHANNEL_CONTROL
 * are rejected, as are duplicate channel numbers (the error-return
 * lines are missing from this extraction). Returns 0 on success.
 */
430 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
432 if (c->channel < HCI_CHANNEL_CONTROL)
435 mutex_lock(&mgmt_chan_list_lock);
/* Duplicate registration: unlock and bail out. */
436 if (__hci_mgmt_chan_find(c->channel)) {
437 mutex_unlock(&mgmt_chan_list_lock);
441 list_add_tail(&c->list, &mgmt_chan_list);
443 mutex_unlock(&mgmt_chan_list_lock);
447 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Unregister management channel @c under mgmt_chan_list_lock.
 * NOTE(review): the list_del line between lock/unlock is missing from
 * this extraction.
 */
449 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
451 mutex_lock(&mgmt_chan_list_lock);
453 mutex_unlock(&mgmt_chan_list_lock);
455 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: drop monitor promiscuity, unlink from
 * hci_sk_list, tear down a USER-channel binding (restore mgmt index,
 * clear HCI_USER_CHANNEL, close the device), and purge queues.
 * NOTE(review): NULL checks, hci_dev_put, sock orphan/put and the
 * return are missing from this extraction.
 */
457 static int hci_sock_release(struct socket *sock)
459 struct sock *sk = sock->sk;
460 struct hci_dev *hdev;
462 BT_DBG("sock %p sk %p", sock, sk);
467 hdev = hci_pi(sk)->hdev;
469 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
470 atomic_dec(&monitor_promisc);
472 bt_sock_unlink(&hci_sk_list, sk);
/* USER channel owned the device exclusively; hand it back to mgmt. */
475 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
476 mgmt_index_added(hdev);
477 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
478 hci_dev_close(hdev->id);
481 atomic_dec(&hdev->promisc);
487 skb_queue_purge(&sk->sk_receive_queue);
488 skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR handler: copy a bdaddr_t from userspace and add it to
 * @hdev's blacklist as BDADDR_BREDR, under the device lock. Returns the
 * hci_bdaddr_list_add result (return lines missing in this extraction).
 */
494 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
499 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
504 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
506 hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR handler: copy a bdaddr_t from userspace and remove it
 * from @hdev's blacklist (BDADDR_BREDR), under the device lock. Mirrors
 * hci_sock_blacklist_add.
 */
511 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
516 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
521 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
523 hci_dev_unlock(hdev);
528 /* Ioctls that require bound socket */
/* Dispatch device-specific ioctls on a socket already bound to a
 * device; rejects user-channel and unconfigured devices up front.
 * NOTE(review): the switch framing, case labels, error returns and
 * default branch are missing from this extraction.
 */
529 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
532 struct hci_dev *hdev = hci_pi(sk)->hdev;
/* A user-channel device is exclusively owned; ioctls are refused. */
537 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
540 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
543 if (hdev->dev_type != HCI_BREDR)
/* Privileged command (visible: some case requires CAP_NET_ADMIN). */
548 if (!capable(CAP_NET_ADMIN))
553 return hci_get_conn_info(hdev, (void __user *) arg);
556 return hci_get_auth_info(hdev, (void __user *) arg);
559 if (!capable(CAP_NET_ADMIN))
561 return hci_sock_blacklist_add(hdev, (void __user *) arg);
564 if (!capable(CAP_NET_ADMIN))
566 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler for HCI sockets: global queries, privileged
 * device up/down/reset commands, inquiry, and fallback to
 * hci_sock_bound_ioctl for device-bound commands.
 * NOTE(review): case labels, error returns, locking and the final
 * return are missing from this extraction.
 */
572 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
575 void __user *argp = (void __user *) arg;
576 struct sock *sk = sock->sk;
579 BT_DBG("cmd %x arg %lx", cmd, arg);
/* ioctls only make sense on the RAW channel. */
583 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
592 return hci_get_dev_list(argp);
595 return hci_get_dev_info(argp);
598 return hci_get_conn_list(argp);
/* Device state changes require CAP_NET_ADMIN. */
601 if (!capable(CAP_NET_ADMIN))
603 return hci_dev_open(arg);
606 if (!capable(CAP_NET_ADMIN))
608 return hci_dev_close(arg);
611 if (!capable(CAP_NET_ADMIN))
613 return hci_dev_reset(arg);
616 if (!capable(CAP_NET_ADMIN))
618 return hci_dev_reset_stat(arg);
628 if (!capable(CAP_NET_ADMIN))
630 return hci_dev_cmd(cmd, argp);
633 return hci_inquiry(argp);
/* Anything else needs a bound device. */
638 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a device/channel. RAW binds optionally to one
 * device; USER takes exclusive ownership of a down device (needs
 * CAP_NET_ADMIN); MONITOR (needs CAP_NET_RAW) replays existing device
 * indexes; other channel numbers must match a registered mgmt channel.
 * NOTE(review): lock/unlock, error assignments, break statements and
 * the final return are missing from this extraction.
 */
645 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
648 struct sockaddr_hci haddr;
649 struct sock *sk = sock->sk;
650 struct hci_dev *hdev = NULL;
653 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr) bytes; shorter addrs are zero-padded. */
658 memset(&haddr, 0, sizeof(haddr));
659 len = min_t(unsigned int, sizeof(haddr), addr_len);
660 memcpy(&haddr, addr, len);
662 if (haddr.hci_family != AF_BLUETOOTH)
/* Re-binding an already bound socket is rejected. */
667 if (sk->sk_state == BT_BOUND) {
672 switch (haddr.hci_channel) {
673 case HCI_CHANNEL_RAW:
674 if (hci_pi(sk)->hdev) {
679 if (haddr.hci_dev != HCI_DEV_NONE) {
680 hdev = hci_dev_get(haddr.hci_dev);
686 atomic_inc(&hdev->promisc);
689 hci_pi(sk)->hdev = hdev;
692 case HCI_CHANNEL_USER:
693 if (hci_pi(sk)->hdev) {
/* USER channel must name a concrete device. */
698 if (haddr.hci_dev == HCI_DEV_NONE) {
703 if (!capable(CAP_NET_ADMIN)) {
708 hdev = hci_dev_get(haddr.hci_dev);
/* Device must be fully down and not mid-setup to be claimed. */
714 if (test_bit(HCI_UP, &hdev->flags) ||
715 test_bit(HCI_INIT, &hdev->flags) ||
716 hci_dev_test_flag(hdev, HCI_SETUP) ||
717 hci_dev_test_flag(hdev, HCI_CONFIG)) {
/* test_and_set guarantees only one user-channel owner. */
723 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
729 mgmt_index_removed(hdev);
731 err = hci_dev_open(hdev->id);
/* Open failed: undo the exclusive claim. */
733 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
734 mgmt_index_added(hdev);
739 atomic_inc(&hdev->promisc);
741 hci_pi(sk)->hdev = hdev;
744 case HCI_CHANNEL_MONITOR:
/* Monitor channel is device-independent. */
745 if (haddr.hci_dev != HCI_DEV_NONE) {
750 if (!capable(CAP_NET_RAW)) {
755 send_monitor_replay(sk);
757 atomic_inc(&monitor_promisc);
/* Default: must be a registered management channel. */
761 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
766 if (haddr.hci_dev != HCI_DEV_NONE) {
771 if (!capable(CAP_NET_ADMIN)) {
780 hci_pi(sk)->channel = haddr.hci_channel;
781 sk->sk_state = BT_BOUND;
/* getsockname handler: report family, bound device id and channel.
 * NOTE(review): the hdev NULL check, locking and return lines are
 * missing from this extraction.
 */
788 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
789 int *addr_len, int peer)
791 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
792 struct sock *sk = sock->sk;
793 struct hci_dev *hdev;
796 BT_DBG("sock %p sk %p", sock, sk);
803 hdev = hci_pi(sk)->hdev;
809 *addr_len = sizeof(*haddr);
810 haddr->hci_family = AF_BLUETOOTH;
811 haddr->hci_dev = hdev->id;
812 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data (direction and/or timestamp) to a received
 * message, according to the socket's cmsg_mask.
 * NOTE(review): the tv/data/len declarations and compat assignments
 * between these lines are missing from this extraction.
 */
819 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
822 __u32 mask = hci_pi(sk)->cmsg_mask;
/* HCI_CMSG_DIR: report whether the packet was incoming. */
824 if (mask & HCI_CMSG_DIR) {
825 int incoming = bt_cb(skb)->incoming;
826 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
830 if (mask & HCI_CMSG_TSTAMP) {
832 struct compat_timeval ctv;
838 skb_get_timestamp(skb, &tv);
/* 32-bit compat callers get a compat_timeval instead of timeval. */
843 if (!COMPAT_USE_64BIT_TIME &&
844 (msg->msg_flags & MSG_CMSG_COMPAT)) {
845 ctv.tv_sec = tv.tv_sec;
846 ctv.tv_usec = tv.tv_usec;
852 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* recvmsg handler: dequeue one datagram, copy up to @len bytes to the
 * caller (setting MSG_TRUNC on shortfall), and attach per-channel
 * ancillary data. Returns bytes copied, or a negative errno.
 * NOTE(review): copied/len handling lines and some returns are missing
 * from this extraction.
 */
856 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
859 int noblock = flags & MSG_DONTWAIT;
860 struct sock *sk = sock->sk;
864 BT_DBG("sock %p, sk %p", sock, sk);
/* Out-of-band data is not supported on HCI sockets. */
866 if (flags & (MSG_OOB))
869 if (sk->sk_state == BT_CLOSED)
872 skb = skb_recv_datagram(sk, flags, noblock, &err);
878 msg->msg_flags |= MSG_TRUNC;
882 skb_reset_transport_header(skb);
883 err = skb_copy_datagram_msg(skb, 0, msg, copied);
/* RAW uses the cmsg mask; USER/MONITOR/mgmt get a plain timestamp. */
885 switch (hci_pi(sk)->channel) {
886 case HCI_CHANNEL_RAW:
887 hci_sock_cmsg(sk, msg, skb);
889 case HCI_CHANNEL_USER:
890 case HCI_CHANNEL_MONITOR:
891 sock_recv_timestamp(msg, sk, skb);
894 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
895 sock_recv_timestamp(msg, sk, skb);
899 skb_free_datagram(sk, skb);
901 return err ? : copied;
/* sendmsg handler. Mgmt channels dispatch to mgmt_control; MONITOR is
 * write-refused. For RAW/USER the first byte of the payload is the HCI
 * packet type: USER forwards valid cmd/ACL/SCO frames to raw_q;
 * RAW commands pass the security filter (or need CAP_NET_RAW) and are
 * queued either to raw_q (HCI_RAW visible path not shown) or cmd_q;
 * RAW data needs CAP_NET_RAW and goes to raw_q.
 * NOTE(review): lock/unlock, error returns, goto labels and the
 * final return are missing from this extraction.
 */
904 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
907 struct sock *sk = sock->sk;
908 struct hci_mgmt_chan *chan;
909 struct hci_dev *hdev;
913 BT_DBG("sock %p sk %p", sock, sk);
915 if (msg->msg_flags & MSG_OOB)
918 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum frame: type byte plus a 3-byte command header. */
921 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
926 switch (hci_pi(sk)->channel) {
927 case HCI_CHANNEL_RAW:
928 case HCI_CHANNEL_USER:
930 case HCI_CHANNEL_MONITOR:
/* Default: route to the bound management channel's handler. */
934 mutex_lock(&mgmt_chan_list_lock);
935 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
937 err = mgmt_control(chan, sk, msg, len);
941 mutex_unlock(&mgmt_chan_list_lock);
945 hdev = hci_pi(sk)->hdev;
951 if (!test_bit(HCI_UP, &hdev->flags)) {
956 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
960 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* Leading byte selects the HCI packet type; the pull that strips it
 * is in a missing line.
 */
965 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
968 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
969 /* No permission check is needed for user channel
970 * since that gets enforced when binding the socket.
972 * However check that the packet type is valid.
974 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
975 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
976 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
981 skb_queue_tail(&hdev->raw_q, skb);
982 queue_work(hdev->workqueue, &hdev->tx_work);
983 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
984 u16 opcode = get_unaligned_le16(skb->data);
985 u16 ogf = hci_opcode_ogf(opcode);
986 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders are limited to the hci_sec_filter table. */
988 if (((ogf > HCI_SFLT_MAX_OGF) ||
989 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
990 &hci_sec_filter.ocf_mask[ogf])) &&
991 !capable(CAP_NET_RAW)) {
997 skb_queue_tail(&hdev->raw_q, skb);
998 queue_work(hdev->workqueue, &hdev->tx_work);
1000 /* Stand-alone HCI commands must be flagged as
1001 * single-command requests.
1003 bt_cb(skb)->req_start = 1;
1005 skb_queue_tail(&hdev->cmd_q, skb);
1006 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command RAW traffic requires CAP_NET_RAW. */
1009 if (!capable(CAP_NET_RAW)) {
1014 skb_queue_tail(&hdev->raw_q, skb);
1015 queue_work(hdev->workqueue, &hdev->tx_work);
/* setsockopt handler (RAW channel only): toggles HCI_DATA_DIR /
 * HCI_TIME_STAMP cmsg bits and installs an HCI_FILTER, clamping the
 * filter to hci_sec_filter for callers without CAP_NET_RAW.
 * NOTE(review): case labels, error assignments, the release_sock and
 * return lines are missing from this extraction.
 */
1029 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1030 char __user *optval, unsigned int len)
1032 struct hci_ufilter uf = { .opcode = 0 };
1033 struct sock *sk = sock->sk;
1034 int err = 0, opt = 0;
1036 BT_DBG("sk %p, opt %d", sk, optname);
1040 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1047 if (get_user(opt, (int __user *)optval)) {
1053 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1055 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1058 case HCI_TIME_STAMP:
1059 if (get_user(opt, (int __user *)optval)) {
1065 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1067 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER: seed uf with the current filter so a short copy keeps
 * existing values.
 */
1072 struct hci_filter *f = &hci_pi(sk)->filter;
1074 uf.type_mask = f->type_mask;
1075 uf.opcode = f->opcode;
1076 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1077 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1080 len = min_t(unsigned int, len, sizeof(uf));
1081 if (copy_from_user(&uf, optval, len)) {
/* Unprivileged sockets cannot widen beyond the security filter. */
1086 if (!capable(CAP_NET_RAW)) {
1087 uf.type_mask &= hci_sec_filter.type_mask;
1088 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1089 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1093 struct hci_filter *f = &hci_pi(sk)->filter;
1095 f->type_mask = uf.type_mask;
1096 f->opcode = uf.opcode;
1097 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1098 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* getsockopt handler (RAW channel only): reports the HCI_DATA_DIR /
 * HCI_TIME_STAMP flags and the current HCI_FILTER contents.
 * NOTE(review): case labels, opt assignments, release_sock and return
 * lines are missing from this extraction.
 */
1112 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1113 char __user *optval, int __user *optlen)
1115 struct hci_ufilter uf;
1116 struct sock *sk = sock->sk;
1117 int len, opt, err = 0;
1119 BT_DBG("sk %p, opt %d", sk, optname);
1121 if (get_user(len, optlen))
1126 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1133 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1138 if (put_user(opt, optval))
1142 case HCI_TIME_STAMP:
1143 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1148 if (put_user(opt, optval))
/* HCI_FILTER: serialize the in-kernel filter into a hci_ufilter. */
1154 struct hci_filter *f = &hci_pi(sk)->filter;
1156 memset(&uf, 0, sizeof(uf));
1157 uf.type_mask = f->type_mask;
1158 uf.opcode = f->opcode;
1159 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1160 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1163 len = min_t(unsigned int, len, sizeof(uf));
1164 if (copy_to_user(optval, &uf, len))
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_HCI sockets; unsupported
 * operations are stubbed with the sock_no_* helpers.
 */
1178 static const struct proto_ops hci_sock_ops = {
1179 .family = PF_BLUETOOTH,
1180 .owner = THIS_MODULE,
1181 .release = hci_sock_release,
1182 .bind = hci_sock_bind,
1183 .getname = hci_sock_getname,
1184 .sendmsg = hci_sock_sendmsg,
1185 .recvmsg = hci_sock_recvmsg,
1186 .ioctl = hci_sock_ioctl,
1187 .poll = datagram_poll,
1188 .listen = sock_no_listen,
1189 .shutdown = sock_no_shutdown,
1190 .setsockopt = hci_sock_setsockopt,
1191 .getsockopt = hci_sock_getsockopt,
1192 .connect = sock_no_connect,
1193 .socketpair = sock_no_socketpair,
1194 .accept = sock_no_accept,
1195 .mmap = sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc reserve room for the
 * hci_pinfo private data. NOTE(review): the .name member line is
 * missing from this extraction.
 */
1198 static struct proto hci_sk_proto = {
1200 .owner = THIS_MODULE,
1201 .obj_size = sizeof(struct hci_pinfo)
/* socket(2) backend: only SOCK_RAW is supported; allocates the sock,
 * initializes it in BT_OPEN state and links it into hci_sk_list.
 * NOTE(review): the sk_alloc NULL check and final return are missing
 * from this extraction.
 */
1204 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1209 BT_DBG("sock %p", sock);
1211 if (sock->type != SOCK_RAW)
1212 return -ESOCKTNOSUPPORT;
1214 sock->ops = &hci_sock_ops;
1216 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1220 sock_init_data(sock, sk);
1222 sock_reset_flag(sk, SOCK_ZAPPED);
1224 sk->sk_protocol = protocol;
1226 sock->state = SS_UNCONNECTED;
1227 sk->sk_state = BT_OPEN;
1229 bt_sock_link(&hci_sk_list, sk);
/* Family registration hooking hci_sock_create into PF_BLUETOOTH
 * (registered for BTPROTO_HCI in hci_sock_init).
 */
1233 static const struct net_proto_family hci_sock_family_ops = {
1234 .family = PF_BLUETOOTH,
1235 .owner = THIS_MODULE,
1236 .create = hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and
 * the /proc entry, unwinding on failure (bt_sock_unregister +
 * proto_unregister via the error path).
 * NOTE(review): goto/error labels and returns are missing from this
 * extraction.
 */
1239 int __init hci_sock_init(void)
/* sockaddr_hci must fit in a generic sockaddr for the bind copy. */
1243 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1245 err = proto_register(&hci_sk_proto, 0);
1249 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1251 BT_ERR("HCI socket registration failed");
1255 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1257 BT_ERR("Failed to create HCI proc file");
1258 bt_sock_unregister(BTPROTO_HCI);
1262 BT_INFO("HCI socket layer initialized");
1267 proto_unregister(&hci_sk_proto);
/* Module exit: tear down in reverse order of hci_sock_init —
 * procfs entry, socket family, then the proto.
 */
1271 void hci_sock_cleanup(void)
1273 bt_procfs_cleanup(&init_net, "hci");
1274 bt_sock_unregister(BTPROTO_HCI);
1275 proto_unregister(&hci_sk_proto);