2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Registered management (mgmt) channels, protected by mgmt_chan_list_lock. */
34 static LIST_HEAD(mgmt_chan_list);
35 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Number of sockets currently bound to the monitor channel; monitor
 * traffic is only generated while this is non-zero.
 */
37 static atomic_t monitor_promisc = ATOMIC_INIT(0);
39 /* ----- HCI socket interface ----- */
/* Per-socket protocol data lives directly behind struct sock. */
42 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* Fields of struct hci_pinfo (declaration elided in this view):
 * filter  - RAW-channel packet filter, see is_filtered_packet()
 * channel - HCI_CHANNEL_* this socket was bound to
 */
47 struct hci_filter filter;
49 unsigned short channel;
/* Test bit @nr in a bitmap of __u32 words at @addr; non-zero when set.
 * Used for filter event/OCF masks, which are arrays of __u32.
 */
52 static inline int hci_test_bit(int nr, const void *addr)
54 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Security filter: the set of HCI packet types, events and command
 * opcodes that a RAW socket without CAP_NET_RAW is allowed to use.
 * Consulted in hci_sock_sendmsg() and hci_sock_setsockopt().
 */
58 #define HCI_SFLT_MAX_OGF 5
60 struct hci_sec_filter {
/* One 128-bit OCF bitmap per OGF (indexed 0..HCI_SFLT_MAX_OGF). */
63 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
66 static const struct hci_sec_filter hci_sec_filter = {
/* Permitted events (bitmap literal; meaning defined by HCI event codes). */
70 { 0x1000d9fe, 0x0000b00c },
/* Permitted OCFs per OGF; rows below correspond to successive OGFs. */
75 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
77 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
79 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
81 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
82 /* OGF_STATUS_PARAM */
83 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets; guarded by its own rwlock. */
87 static struct bt_sock_list hci_sk_list = {
88 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb must be withheld from RAW socket @sk according to
 * the socket's filter. Returns true when the packet is filtered out.
 * (Branch bodies are elided in this view.)
 */
91 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
93 struct hci_filter *flt;
94 int flt_type, flt_event;
97 flt = &hci_pi(sk)->filter;
/* Vendor packets are handled specially before the type-mask check. */
99 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
102 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
/* Packet type must be enabled in the socket's type mask. */
104 if (!test_bit(flt_type, &flt->type_mask))
107 /* Extra filter for event packets only */
108 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First data byte of an event packet is the event code. */
111 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
113 if (!hci_test_bit(flt_event, &flt->event_mask))
116 /* Check filter only when opcode is set */
/* Cmd Complete carries the opcode at offset 3, Cmd Status at offset 4. */
120 if (flt_event == HCI_EV_CMD_COMPLETE &&
121     flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
124 if (flt_event == HCI_EV_CMD_STATUS &&
125     flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
131 /* Send frame to RAW socket */
/* Deliver @skb to every bound socket attached to @hdev on the RAW or
 * USER channel, applying per-channel filtering. A private copy with the
 * packet-type byte pushed in front is created lazily and then cloned
 * per receiving socket. (Some statements are elided in this view.)
 */
132 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
135 struct sk_buff *skb_copy = NULL;
137 BT_DBG("hdev %p len %d", hdev, skb->len);
139 read_lock(&hci_sk_list.lock);
141 sk_for_each(sk, &hci_sk_list.head) {
142 struct sk_buff *nskb;
/* Only sockets bound to this exact device receive the frame. */
144 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
147 /* Don't send frame to the socket it came from */
151 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
152 if (is_filtered_packet(sk, skb))
154 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* USER channel only sees incoming event/ACL/SCO traffic. */
155 if (!bt_cb(skb)->incoming)
157 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
158     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
159     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
162 /* Don't send frame to other channel types */
167 /* Create a private copy with headroom */
168 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
172 /* Put type byte before the data */
173 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
176 nskb = skb_clone(skb_copy, GFP_ATOMIC);
/* On queueing failure the clone is presumably freed - elided here. */
180 if (sock_queue_rcv_skb(sk, nskb))
184 read_unlock(&hci_sk_list.lock);
189 /* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket on @channel, skipping @skip_sk
 * (the originator). Runs under the socket-list read lock.
 */
190 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
191 struct sock *skip_sk)
195 BT_DBG("channel %u len %d", channel, skb->len);
197 read_lock(&hci_sk_list.lock);
199 sk_for_each(sk, &hci_sk_list.head) {
200 struct sk_buff *nskb;
202 /* Skip the original socket */
206 if (sk->sk_state != BT_BOUND)
209 if (hci_pi(sk)->channel != channel)
212 nskb = skb_clone(skb, GFP_ATOMIC);
216 if (sock_queue_rcv_skb(sk, nskb))
220 read_unlock(&hci_sk_list.lock);
223 /* Send frame to monitor socket */
/* Wrap @skb in an hci_mon_hdr (opcode chosen from packet type and
 * direction) and broadcast it on the monitor channel. Does nothing
 * unless at least one monitor socket is open.
 */
224 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
226 struct sk_buff *skb_copy = NULL;
227 struct hci_mon_hdr *hdr;
/* Fast path out when nobody is monitoring. */
230 if (!atomic_read(&monitor_promisc))
233 BT_DBG("hdev %p len %d", hdev, skb->len);
235 switch (bt_cb(skb)->pkt_type) {
236 case HCI_COMMAND_PKT:
237 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
240 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
242 case HCI_ACLDATA_PKT:
/* Direction (RX vs TX) is encoded in the monitor opcode. */
243 if (bt_cb(skb)->incoming)
244 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
246 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
248 case HCI_SCODATA_PKT:
249 if (bt_cb(skb)->incoming)
250 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
252 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
258 /* Create a private copy with headroom */
259 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
263 /* Put header before the data */
264 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
265 hdr->opcode = opcode;
266 hdr->index = cpu_to_le16(hdev->id);
/* Payload length, i.e. the original frame length before the header. */
267 hdr->len = cpu_to_le16(skb->len);
269 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, NULL);
/* Build a monitor-channel control event for @hdev: a NEW_INDEX record
 * (with device type, address and name) on registration, or an empty
 * DEL_INDEX record otherwise. Returns the timestamped skb; error paths
 * are elided in this view.
 */
273 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
275 struct hci_mon_hdr *hdr;
276 struct hci_mon_new_index *ni;
282 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
286 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
287 ni->type = hdev->dev_type;
289 bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Name field is fixed at 8 bytes in the monitor record. */
290 memcpy(ni->name, hdev->name, 8);
292 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
/* DEL_INDEX carries no payload. */
296 skb = bt_skb_alloc(0, GFP_ATOMIC);
300 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
307 __net_timestamp(skb);
309 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
310 hdr->opcode = opcode;
311 hdr->index = cpu_to_le16(hdev->id);
312 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a NEW_INDEX event for every registered controller to a newly
 * bound monitor socket, so it learns about pre-existing devices.
 */
317 static void send_monitor_replay(struct sock *sk)
319 struct hci_dev *hdev;
321 read_lock(&hci_dev_list_lock);
323 list_for_each_entry(hdev, &hci_dev_list, list) {
326 skb = create_monitor_event(hdev, HCI_DEV_REG);
/* Queueing failure handling is elided in this view. */
330 if (sock_queue_rcv_skb(sk, skb))
334 read_unlock(&hci_dev_list_lock);
337 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data, mark it as incoming, and deliver it through hci_send_to_sock().
 */
338 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
340 struct hci_event_hdr *hdr;
341 struct hci_ev_stack_internal *ev;
344 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
348 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
349 hdr->evt = HCI_EV_STACK_INTERNAL;
350 hdr->plen = sizeof(*ev) + dlen;
352 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
354 memcpy(ev->data, data, dlen);
/* Mark as incoming so RAW/USER direction checks treat it as RX. */
356 bt_cb(skb)->incoming = 1;
357 __net_timestamp(skb);
359 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
360 hci_send_to_sock(hdev, skb);
/* Propagate a device lifecycle event (@event) for @hdev: notify monitor
 * sockets, emit an internal stack event, and on HCI_DEV_UNREG detach
 * every socket still bound to the disappearing device.
 */
364 void hci_sock_dev_event(struct hci_dev *hdev, int event)
366 struct hci_ev_si_device ev;
368 BT_DBG("hdev %s event %d", hdev->name, event);
370 /* Send event to monitor */
371 if (atomic_read(&monitor_promisc)) {
374 skb = create_monitor_event(hdev, event);
376 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, NULL);
381 /* Send event to sockets */
383 ev.dev_id = hdev->id;
/* hdev NULL here: broadcast the stack event to all sockets. */
384 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
386 if (event == HCI_DEV_UNREG) {
389 /* Detach sockets from device */
390 read_lock(&hci_sk_list.lock);
391 sk_for_each(sk, &hci_sk_list.head) {
/* _nested: we already hold the list lock in this context. */
392 bh_lock_sock_nested(sk);
393 if (hci_pi(sk)->hdev == hdev) {
394 hci_pi(sk)->hdev = NULL;
/* Wake sleepers so they observe the state change. */
396 sk->sk_state = BT_OPEN;
397 sk->sk_state_change(sk);
403 read_unlock(&hci_sk_list.lock);
/* Look up a registered mgmt channel by number. Caller must hold
 * mgmt_chan_list_lock (lockless list walk otherwise races register/
 * unregister).
 */
407 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
409 struct hci_mgmt_chan *c;
411 list_for_each_entry(c, &mgmt_chan_list, list) {
412 if (c->channel == channel)
/* Locked wrapper around __hci_mgmt_chan_find(). */
419 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
421 struct hci_mgmt_chan *c;
423 mutex_lock(&mgmt_chan_list_lock);
424 c = __hci_mgmt_chan_find(channel);
425 mutex_unlock(&mgmt_chan_list_lock);
/* Register mgmt channel @c. Channels below HCI_CHANNEL_CONTROL and
 * duplicate channel numbers are rejected (error returns elided in this
 * view).
 */
430 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
432 if (c->channel < HCI_CHANNEL_CONTROL)
435 mutex_lock(&mgmt_chan_list_lock);
436 if (__hci_mgmt_chan_find(c->channel)) {
437 mutex_unlock(&mgmt_chan_list_lock);
441 list_add_tail(&c->list, &mgmt_chan_list);
443 mutex_unlock(&mgmt_chan_list_lock);
447 EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Remove mgmt channel @c from the registry (list unlink is elided in
 * this view; performed under the list mutex).
 */
449 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
451 mutex_lock(&mgmt_chan_list_lock);
453 mutex_unlock(&mgmt_chan_list_lock);
455 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: drop monitor refcount for monitor sockets,
 * unlink from the global list, give the controller back to the kernel
 * if this was a USER channel socket, and purge queued skbs.
 */
457 static int hci_sock_release(struct socket *sock)
459 struct sock *sk = sock->sk;
460 struct hci_dev *hdev;
462 BT_DBG("sock %p sk %p", sock, sk);
467 hdev = hci_pi(sk)->hdev;
469 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
470 atomic_dec(&monitor_promisc);
472 bt_sock_unlink(&hci_sk_list, sk);
475 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* Re-announce the index to mgmt now that exclusive access ends. */
476 mgmt_index_added(hdev);
477 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
478 hci_dev_close(hdev->id);
481 atomic_dec(&hdev->promisc);
487 skb_queue_purge(&sk->sk_receive_queue);
488 skb_queue_purge(&sk->sk_write_queue);
/* Copy a bdaddr_t from userspace and add it to @hdev's blacklist as a
 * BR/EDR address. Runs under the device lock (hci_dev_lock elided in
 * this view).
 */
494 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
499 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
504 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
506 hci_dev_unlock(hdev);
/* Counterpart of hci_sock_blacklist_add(): remove a user-supplied
 * BR/EDR address from @hdev's blacklist.
 */
511 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
516 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
521 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
523 hci_dev_unlock(hdev);
528 /* Ioctls that require bound socket */
/* Dispatch device-specific ioctls; rejects devices held by a USER
 * channel, unconfigured devices and (for some cmds) non-BR/EDR
 * transports. Privileged commands check CAP_NET_ADMIN first.
 * (Case labels and error returns are elided in this view.)
 */
529 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
532 struct hci_dev *hdev = hci_pi(sk)->hdev;
537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
540 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
543 if (hdev->dev_type != HCI_BREDR)
548 if (!capable(CAP_NET_ADMIN))
553 return hci_get_conn_info(hdev, (void __user *) arg);
556 return hci_get_auth_info(hdev, (void __user *) arg);
559 if (!capable(CAP_NET_ADMIN))
561 return hci_sock_blacklist_add(hdev, (void __user *) arg);
564 if (!capable(CAP_NET_ADMIN))
566 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler for HCI sockets. Only RAW-channel sockets may
 * issue ioctls; global queries are unprivileged while device open/
 * close/reset require CAP_NET_ADMIN. Unrecognized commands fall through
 * to hci_sock_bound_ioctl(). (Case labels elided in this view.)
 */
572 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
575 void __user *argp = (void __user *) arg;
576 struct sock *sk = sock->sk;
579 BT_DBG("cmd %x arg %lx", cmd, arg);
583 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
592 return hci_get_dev_list(argp);
595 return hci_get_dev_info(argp);
598 return hci_get_conn_list(argp);
601 if (!capable(CAP_NET_ADMIN))
603 return hci_dev_open(arg);
606 if (!capable(CAP_NET_ADMIN))
608 return hci_dev_close(arg);
611 if (!capable(CAP_NET_ADMIN))
613 return hci_dev_reset(arg);
616 if (!capable(CAP_NET_ADMIN))
618 return hci_dev_reset_stat(arg);
628 if (!capable(CAP_NET_ADMIN))
630 return hci_dev_cmd(cmd, argp);
633 return hci_inquiry(argp);
638 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and optionally a device).
 * RAW: optional device, bumps hdev->promisc.
 * USER: exclusive control of a down device; CAP_NET_ADMIN, removes the
 *       index from mgmt and powers the device up.
 * CONTROL/MONITOR/registered mgmt channels: no device; capability
 *       checks per channel.
 * (Error returns and some statements are elided in this view.)
 */
645 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
648 struct sockaddr_hci haddr;
649 struct sock *sk = sock->sk;
650 struct hci_dev *hdev = NULL;
653 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); shorter addresses are zero-padded. */
658 memset(&haddr, 0, sizeof(haddr));
659 len = min_t(unsigned int, sizeof(haddr), addr_len);
660 memcpy(&haddr, addr, len);
662 if (haddr.hci_family != AF_BLUETOOTH)
/* Rebinding an already-bound socket is rejected. */
667 if (sk->sk_state == BT_BOUND) {
672 switch (haddr.hci_channel) {
673 case HCI_CHANNEL_RAW:
674 if (hci_pi(sk)->hdev) {
679 if (haddr.hci_dev != HCI_DEV_NONE) {
680 hdev = hci_dev_get(haddr.hci_dev);
686 atomic_inc(&hdev->promisc);
689 hci_pi(sk)->hdev = hdev;
692 case HCI_CHANNEL_USER:
693 if (hci_pi(sk)->hdev) {
/* USER channel requires a concrete device index. */
698 if (haddr.hci_dev == HCI_DEV_NONE) {
703 if (!capable(CAP_NET_ADMIN)) {
708 hdev = hci_dev_get(haddr.hci_dev);
/* Device must be fully down and not mid-setup/config. */
714 if (test_bit(HCI_UP, &hdev->flags) ||
715     test_bit(HCI_INIT, &hdev->flags) ||
716     test_bit(HCI_SETUP, &hdev->dev_flags) ||
717     test_bit(HCI_CONFIG, &hdev->dev_flags)) {
/* Atomically claim exclusive user-channel access. */
723 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
729 mgmt_index_removed(hdev);
731 err = hci_dev_open(hdev->id);
/* Roll back the claim if bringing the device up failed. */
733 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
734 mgmt_index_added(hdev);
739 atomic_inc(&hdev->promisc);
741 hci_pi(sk)->hdev = hdev;
744 case HCI_CHANNEL_CONTROL:
745 if (haddr.hci_dev != HCI_DEV_NONE) {
750 if (!capable(CAP_NET_ADMIN)) {
757 case HCI_CHANNEL_MONITOR:
758 if (haddr.hci_dev != HCI_DEV_NONE) {
763 if (!capable(CAP_NET_RAW)) {
/* Replay existing controllers before counting this monitor. */
768 send_monitor_replay(sk);
770 atomic_inc(&monitor_promisc);
/* default: dynamically registered mgmt channels. */
774 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
779 if (haddr.hci_dev != HCI_DEV_NONE) {
784 if (!capable(CAP_NET_ADMIN)) {
793 hci_pi(sk)->channel = haddr.hci_channel;
794 sk->sk_state = BT_BOUND;
/* Fill @addr with the socket's bound device id and channel. Requires a
 * bound device (checks elided in this view); @peer is unused for HCI.
 */
801 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
802 int *addr_len, int peer)
804 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
805 struct sock *sk = sock->sk;
806 struct hci_dev *hdev;
809 BT_DBG("sock %p sk %p", sock, sk);
816 hdev = hci_pi(sk)->hdev;
822 *addr_len = sizeof(*haddr);
823 haddr->hci_family = AF_BLUETOOTH;
824 haddr->hci_dev = hdev->id;
825 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data to a received message per the socket's
 * cmsg_mask: packet direction (HCI_CMSG_DIR) and/or receive timestamp
 * (HCI_CMSG_TSTAMP), with compat_timeval conversion for 32-bit compat
 * callers. (Some statements are elided in this view.)
 */
832 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
835 __u32 mask = hci_pi(sk)->cmsg_mask;
837 if (mask & HCI_CMSG_DIR) {
838 int incoming = bt_cb(skb)->incoming;
839 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
843 if (mask & HCI_CMSG_TSTAMP) {
845 struct compat_timeval ctv;
851 skb_get_timestamp(skb, &tv);
/* 32-bit compat userspace expects a compat_timeval layout. */
856 if (!COMPAT_USE_64BIT_TIME &&
857     (msg->msg_flags & MSG_CMSG_COMPAT)) {
858 ctv.tv_sec = tv.tv_sec;
859 ctv.tv_usec = tv.tv_usec;
865 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one queued frame. MSG_OOB is unsupported; short reads set
 * MSG_TRUNC. RAW sockets get cmsg metadata, other channels only a
 * timestamp. Returns bytes copied or a negative errno.
 */
869 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
872 int noblock = flags & MSG_DONTWAIT;
873 struct sock *sk = sock->sk;
877 BT_DBG("sock %p, sk %p", sock, sk);
879 if (flags & (MSG_OOB))
882 if (sk->sk_state == BT_CLOSED)
885 skb = skb_recv_datagram(sk, flags, noblock, &err);
891 msg->msg_flags |= MSG_TRUNC;
895 skb_reset_transport_header(skb);
896 err = skb_copy_datagram_msg(skb, 0, msg, copied);
898 switch (hci_pi(sk)->channel) {
899 case HCI_CHANNEL_RAW:
900 hci_sock_cmsg(sk, msg, skb);
902 case HCI_CHANNEL_USER:
903 case HCI_CHANNEL_CONTROL:
904 case HCI_CHANNEL_MONITOR:
905 sock_recv_timestamp(msg, sk, skb);
/* Dynamically registered mgmt channels also get timestamps. */
908 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
909 sock_recv_timestamp(msg, sk, skb);
913 skb_free_datagram(sk, skb);
/* On success return the copied byte count, else the error. */
915 return err ? : copied;
/* Send one frame from userspace. Dispatch by channel: CONTROL goes to
 * mgmt_control(), MONITOR is write-protected, registered mgmt channels
 * are not yet wired up (FIXME below). RAW/USER build an skb whose first
 * byte selects the packet type; USER bypasses the security filter while
 * RAW commands are checked against hci_sec_filter unless CAP_NET_RAW.
 * (Error returns and gotos are elided in this view.)
 */
918 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
921 struct sock *sk = sock->sk;
922 struct hci_mgmt_chan *chan;
923 struct hci_dev *hdev;
927 BT_DBG("sock %p sk %p", sock, sk);
929 if (msg->msg_flags & MSG_OOB)
932 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum 4 bytes: type byte plus the smallest packet header. */
935 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
940 switch (hci_pi(sk)->channel) {
941 case HCI_CHANNEL_RAW:
942 case HCI_CHANNEL_USER:
944 case HCI_CHANNEL_CONTROL:
945 err = mgmt_control(sk, msg, len);
947 case HCI_CHANNEL_MONITOR:
951 mutex_lock(&mgmt_chan_list_lock);
952 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
954 err = -ENOSYS; /* FIXME: call handler */
958 mutex_unlock(&mgmt_chan_list_lock);
962 hdev = hci_pi(sk)->hdev;
968 if (!test_bit(HCI_UP, &hdev->flags)) {
973 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
977 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* Leading byte of the payload is the HCI packet type. */
982 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
985 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
986 /* No permission check is needed for user channel
987 * since that gets enforced when binding the socket.
989 * However check that the packet type is valid.
991 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
992     bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
993     bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
998 skb_queue_tail(&hdev->raw_q, skb);
999 queue_work(hdev->workqueue, &hdev->tx_work);
1000 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1001 u16 opcode = get_unaligned_le16(skb->data);
1002 u16 ogf = hci_opcode_ogf(opcode);
1003 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged RAW senders are limited to the security filter. */
1005 if (((ogf > HCI_SFLT_MAX_OGF) ||
1006     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1007     &hci_sec_filter.ocf_mask[ogf])) &&
1008     !capable(CAP_NET_RAW)) {
/* Vendor OGF goes out raw, bypassing the command queue. */
1014 skb_queue_tail(&hdev->raw_q, skb);
1015 queue_work(hdev->workqueue, &hdev->tx_work);
1017 /* Stand-alone HCI commands must be flagged as
1018 * single-command requests.
1020 bt_cb(skb)->req_start = 1;
1022 skb_queue_tail(&hdev->cmd_q, skb);
1023 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command RAW traffic (ACL/SCO) needs CAP_NET_RAW. */
1026 if (!capable(CAP_NET_RAW)) {
1031 skb_queue_tail(&hdev->raw_q, skb);
1032 queue_work(hdev->workqueue, &hdev->tx_work);
/* Set HCI socket options (RAW channel only): toggle direction/timestamp
 * cmsg flags or install a packet filter. Unprivileged callers get their
 * requested filter masked down to the security filter. (Case labels and
 * error paths are elided in this view.)
 */
1046 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1047 char __user *optval, unsigned int len)
1049 struct hci_ufilter uf = { .opcode = 0 };
1050 struct sock *sk = sock->sk;
1051 int err = 0, opt = 0;
1053 BT_DBG("sk %p, opt %d", sk, optname);
1057 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1064 if (get_user(opt, (int __user *)optval)) {
1070 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1072 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1075 case HCI_TIME_STAMP:
1076 if (get_user(opt, (int __user *)optval)) {
1082 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1084 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER: seed uf with the current filter, then overlay user data. */
1089 struct hci_filter *f = &hci_pi(sk)->filter;
1091 uf.type_mask = f->type_mask;
1092 uf.opcode = f->opcode;
1093 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1094 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1097 len = min_t(unsigned int, len, sizeof(uf));
1098 if (copy_from_user(&uf, optval, len)) {
/* Without CAP_NET_RAW, clamp the filter to the security filter. */
1103 if (!capable(CAP_NET_RAW)) {
1104 uf.type_mask &= hci_sec_filter.type_mask;
1105 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1106 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1110 struct hci_filter *f = &hci_pi(sk)->filter;
1112 f->type_mask = uf.type_mask;
1113 f->opcode = uf.opcode;
1114 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1115 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* Read back HCI socket options (RAW channel only): the cmsg DIR/TSTAMP
 * flags as ints, or the current filter as a struct hci_ufilter,
 * truncated to the caller-supplied length. (Case labels and error paths
 * are elided in this view.)
 */
1129 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1130 char __user *optval, int __user *optlen)
1132 struct hci_ufilter uf;
1133 struct sock *sk = sock->sk;
1134 int len, opt, err = 0;
1136 BT_DBG("sk %p, opt %d", sk, optname);
1138 if (get_user(len, optlen))
1143 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1150 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1155 if (put_user(opt, optval))
1159 case HCI_TIME_STAMP:
1160 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1165 if (put_user(opt, optval))
1171 struct hci_filter *f = &hci_pi(sk)->filter;
1173 memset(&uf, 0, sizeof(uf));
1174 uf.type_mask = f->type_mask;
1175 uf.opcode = f->opcode;
1176 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1177 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1180 len = min_t(unsigned int, len, sizeof(uf));
1181 if (copy_to_user(optval, &uf, len))
/* proto_ops for HCI sockets: datagram-style, connectionless; listen/
 * connect/accept/shutdown/mmap are stubbed with sock_no_*.
 */
1195 static const struct proto_ops hci_sock_ops = {
1196 .family = PF_BLUETOOTH,
1197 .owner = THIS_MODULE,
1198 .release = hci_sock_release,
1199 .bind = hci_sock_bind,
1200 .getname = hci_sock_getname,
1201 .sendmsg = hci_sock_sendmsg,
1202 .recvmsg = hci_sock_recvmsg,
1203 .ioctl = hci_sock_ioctl,
1204 .poll = datagram_poll,
1205 .listen = sock_no_listen,
1206 .shutdown = sock_no_shutdown,
1207 .setsockopt = hci_sock_setsockopt,
1208 .getsockopt = hci_sock_getsockopt,
1209 .connect = sock_no_connect,
1210 .socketpair = sock_no_socketpair,
1211 .accept = sock_no_accept,
1212 .mmap = sock_no_mmap
/* sk allocation descriptor; obj_size reserves room for hci_pinfo. */
1215 static struct proto hci_sk_proto = {
1217 .owner = THIS_MODULE,
1218 .obj_size = sizeof(struct hci_pinfo)
/* Create a new HCI socket (SOCK_RAW only): allocate the sk, initialize
 * it to BT_OPEN and link it into hci_sk_list.
 */
1221 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1226 BT_DBG("sock %p", sock);
1228 if (sock->type != SOCK_RAW)
1229 return -ESOCKTNOSUPPORT;
1231 sock->ops = &hci_sock_ops;
1233 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1237 sock_init_data(sock, sk);
1239 sock_reset_flag(sk, SOCK_ZAPPED);
1241 sk->sk_protocol = protocol;
1243 sock->state = SS_UNCONNECTED;
1244 sk->sk_state = BT_OPEN;
1246 bt_sock_link(&hci_sk_list, sk);
/* PF_BLUETOOTH family hook; routes socket() calls to hci_sock_create. */
1250 static const struct net_proto_family hci_sock_family_ops = {
1251 .family = PF_BLUETOOTH,
1252 .owner = THIS_MODULE,
1253 .create = hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and
 * the procfs entry, unwinding on failure (cleanup labels elided in this
 * view).
 */
1256 int __init hci_sock_init(void)
/* sockaddr_hci must fit in the generic sockaddr buffer. */
1260 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1262 err = proto_register(&hci_sk_proto, 0);
1266 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1268 BT_ERR("HCI socket registration failed");
1272 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1274 BT_ERR("Failed to create HCI proc file");
1275 bt_sock_unregister(BTPROTO_HCI);
1279 BT_INFO("HCI socket layer initialized");
1284 proto_unregister(&hci_sk_proto);
/* Module exit: undo hci_sock_init() in reverse order. */
1288 void hci_sock_cleanup(void)
1290 bt_procfs_cleanup(&init_net, "hci");
1291 bt_sock_unregister(BTPROTO_HCI);
1292 proto_unregister(&hci_sk_proto);