/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
34 static atomic_t monitor_promisc = ATOMIC_INIT(0);
36 /* ----- HCI socket interface ----- */
38 static inline int hci_test_bit(int nr, void *addr)
40 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
44 #define HCI_SFLT_MAX_OGF 5
46 struct hci_sec_filter {
49 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
52 static struct hci_sec_filter hci_sec_filter = {
56 { 0x1000d9fe, 0x0000b00c },
61 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
63 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
65 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
67 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
68 /* OGF_STATUS_PARAM */
69 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
73 static struct bt_sock_list hci_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
77 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
79 struct hci_filter *flt;
80 int flt_type, flt_event;
83 flt = &hci_pi(sk)->filter;
85 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
88 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
90 if (!test_bit(flt_type, &flt->type_mask))
93 /* Extra filter for event packets only */
94 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
97 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
99 if (!hci_test_bit(flt_event, &flt->event_mask))
102 /* Check filter only when opcode is set */
106 if (flt_event == HCI_EV_CMD_COMPLETE &&
107 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
110 if (flt_event == HCI_EV_CMD_STATUS &&
111 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
117 /* Send frame to RAW socket */
118 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
121 struct sk_buff *skb_copy = NULL;
123 BT_DBG("hdev %p len %d", hdev, skb->len);
125 read_lock(&hci_sk_list.lock);
127 sk_for_each(sk, &hci_sk_list.head) {
128 struct sk_buff *nskb;
130 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
133 /* Don't send frame to the socket it came from */
137 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
138 if (is_filtered_packet(sk, skb))
140 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
141 if (!bt_cb(skb)->incoming)
143 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
144 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
145 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
148 /* Don't send frame to other channel types */
153 /* Create a private copy with headroom */
154 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
158 /* Put type byte before the data */
159 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
162 nskb = skb_clone(skb_copy, GFP_ATOMIC);
166 if (sock_queue_rcv_skb(sk, nskb))
170 read_unlock(&hci_sk_list.lock);
175 /* Send frame to control socket */
176 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
180 BT_DBG("len %d", skb->len);
182 read_lock(&hci_sk_list.lock);
184 sk_for_each(sk, &hci_sk_list.head) {
185 struct sk_buff *nskb;
187 /* Skip the original socket */
191 if (sk->sk_state != BT_BOUND)
194 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
197 nskb = skb_clone(skb, GFP_ATOMIC);
201 if (sock_queue_rcv_skb(sk, nskb))
205 read_unlock(&hci_sk_list.lock);
208 /* Send frame to monitor socket */
209 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
212 struct sk_buff *skb_copy = NULL;
215 if (!atomic_read(&monitor_promisc))
218 BT_DBG("hdev %p len %d", hdev, skb->len);
220 switch (bt_cb(skb)->pkt_type) {
221 case HCI_COMMAND_PKT:
222 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
225 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
227 case HCI_ACLDATA_PKT:
228 if (bt_cb(skb)->incoming)
229 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
231 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
233 case HCI_SCODATA_PKT:
234 if (bt_cb(skb)->incoming)
235 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
237 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
243 read_lock(&hci_sk_list.lock);
245 sk_for_each(sk, &hci_sk_list.head) {
246 struct sk_buff *nskb;
248 if (sk->sk_state != BT_BOUND)
251 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
255 struct hci_mon_hdr *hdr;
257 /* Create a private copy with headroom */
258 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
263 /* Put header before the data */
264 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
265 hdr->opcode = opcode;
266 hdr->index = cpu_to_le16(hdev->id);
267 hdr->len = cpu_to_le16(skb->len);
270 nskb = skb_clone(skb_copy, GFP_ATOMIC);
274 if (sock_queue_rcv_skb(sk, nskb))
278 read_unlock(&hci_sk_list.lock);
283 static void send_monitor_event(struct sk_buff *skb)
287 BT_DBG("len %d", skb->len);
289 read_lock(&hci_sk_list.lock);
291 sk_for_each(sk, &hci_sk_list.head) {
292 struct sk_buff *nskb;
294 if (sk->sk_state != BT_BOUND)
297 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
300 nskb = skb_clone(skb, GFP_ATOMIC);
304 if (sock_queue_rcv_skb(sk, nskb))
308 read_unlock(&hci_sk_list.lock);
311 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
313 struct hci_mon_hdr *hdr;
314 struct hci_mon_new_index *ni;
320 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
324 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
325 ni->type = hdev->dev_type;
327 bacpy(&ni->bdaddr, &hdev->bdaddr);
328 memcpy(ni->name, hdev->name, 8);
330 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
334 skb = bt_skb_alloc(0, GFP_ATOMIC);
338 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
345 __net_timestamp(skb);
347 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
348 hdr->opcode = opcode;
349 hdr->index = cpu_to_le16(hdev->id);
350 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
355 static void send_monitor_replay(struct sock *sk)
357 struct hci_dev *hdev;
359 read_lock(&hci_dev_list_lock);
361 list_for_each_entry(hdev, &hci_dev_list, list) {
364 skb = create_monitor_event(hdev, HCI_DEV_REG);
368 if (sock_queue_rcv_skb(sk, skb))
372 read_unlock(&hci_dev_list_lock);
375 /* Generate internal stack event */
376 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
378 struct hci_event_hdr *hdr;
379 struct hci_ev_stack_internal *ev;
382 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
386 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
387 hdr->evt = HCI_EV_STACK_INTERNAL;
388 hdr->plen = sizeof(*ev) + dlen;
390 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
392 memcpy(ev->data, data, dlen);
394 bt_cb(skb)->incoming = 1;
395 __net_timestamp(skb);
397 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
398 hci_send_to_sock(hdev, skb);
402 void hci_sock_dev_event(struct hci_dev *hdev, int event)
404 struct hci_ev_si_device ev;
406 BT_DBG("hdev %s event %d", hdev->name, event);
408 /* Send event to monitor */
409 if (atomic_read(&monitor_promisc)) {
412 skb = create_monitor_event(hdev, event);
414 send_monitor_event(skb);
419 /* Send event to sockets */
421 ev.dev_id = hdev->id;
422 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
424 if (event == HCI_DEV_UNREG) {
427 /* Detach sockets from device */
428 read_lock(&hci_sk_list.lock);
429 sk_for_each(sk, &hci_sk_list.head) {
430 bh_lock_sock_nested(sk);
431 if (hci_pi(sk)->hdev == hdev) {
432 hci_pi(sk)->hdev = NULL;
434 sk->sk_state = BT_OPEN;
435 sk->sk_state_change(sk);
441 read_unlock(&hci_sk_list.lock);
445 static int hci_sock_release(struct socket *sock)
447 struct sock *sk = sock->sk;
448 struct hci_dev *hdev;
450 BT_DBG("sock %p sk %p", sock, sk);
455 hdev = hci_pi(sk)->hdev;
457 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
458 atomic_dec(&monitor_promisc);
460 bt_sock_unlink(&hci_sk_list, sk);
463 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
464 mgmt_index_added(hdev);
465 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
466 hci_dev_close(hdev->id);
469 atomic_dec(&hdev->promisc);
475 skb_queue_purge(&sk->sk_receive_queue);
476 skb_queue_purge(&sk->sk_write_queue);
482 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
487 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
492 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
494 hci_dev_unlock(hdev);
499 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
504 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
509 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
511 hci_dev_unlock(hdev);
516 /* Ioctls that require bound socket */
517 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
520 struct hci_dev *hdev = hci_pi(sk)->hdev;
525 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
528 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
531 if (hdev->dev_type != HCI_BREDR)
536 if (!capable(CAP_NET_ADMIN))
541 return hci_get_conn_info(hdev, (void __user *) arg);
544 return hci_get_auth_info(hdev, (void __user *) arg);
547 if (!capable(CAP_NET_ADMIN))
549 return hci_sock_blacklist_add(hdev, (void __user *) arg);
552 if (!capable(CAP_NET_ADMIN))
554 return hci_sock_blacklist_del(hdev, (void __user *) arg);
560 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
563 void __user *argp = (void __user *) arg;
564 struct sock *sk = sock->sk;
567 BT_DBG("cmd %x arg %lx", cmd, arg);
571 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
580 return hci_get_dev_list(argp);
583 return hci_get_dev_info(argp);
586 return hci_get_conn_list(argp);
589 if (!capable(CAP_NET_ADMIN))
591 return hci_dev_open(arg);
594 if (!capable(CAP_NET_ADMIN))
596 return hci_dev_close(arg);
599 if (!capable(CAP_NET_ADMIN))
601 return hci_dev_reset(arg);
604 if (!capable(CAP_NET_ADMIN))
606 return hci_dev_reset_stat(arg);
616 if (!capable(CAP_NET_ADMIN))
618 return hci_dev_cmd(cmd, argp);
621 return hci_inquiry(argp);
626 err = hci_sock_bound_ioctl(sk, cmd, arg);
633 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
636 struct sockaddr_hci haddr;
637 struct sock *sk = sock->sk;
638 struct hci_dev *hdev = NULL;
641 BT_DBG("sock %p sk %p", sock, sk);
646 memset(&haddr, 0, sizeof(haddr));
647 len = min_t(unsigned int, sizeof(haddr), addr_len);
648 memcpy(&haddr, addr, len);
650 if (haddr.hci_family != AF_BLUETOOTH)
655 if (sk->sk_state == BT_BOUND) {
660 switch (haddr.hci_channel) {
661 case HCI_CHANNEL_RAW:
662 if (hci_pi(sk)->hdev) {
667 if (haddr.hci_dev != HCI_DEV_NONE) {
668 hdev = hci_dev_get(haddr.hci_dev);
674 atomic_inc(&hdev->promisc);
677 hci_pi(sk)->hdev = hdev;
680 case HCI_CHANNEL_USER:
681 if (hci_pi(sk)->hdev) {
686 if (haddr.hci_dev == HCI_DEV_NONE) {
691 if (!capable(CAP_NET_ADMIN)) {
696 hdev = hci_dev_get(haddr.hci_dev);
702 if (test_bit(HCI_UP, &hdev->flags) ||
703 test_bit(HCI_INIT, &hdev->flags) ||
704 test_bit(HCI_SETUP, &hdev->dev_flags) ||
705 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
711 if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
717 mgmt_index_removed(hdev);
719 err = hci_dev_open(hdev->id);
721 clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
722 mgmt_index_added(hdev);
727 atomic_inc(&hdev->promisc);
729 hci_pi(sk)->hdev = hdev;
732 case HCI_CHANNEL_CONTROL:
733 if (haddr.hci_dev != HCI_DEV_NONE) {
738 if (!capable(CAP_NET_ADMIN)) {
745 case HCI_CHANNEL_MONITOR:
746 if (haddr.hci_dev != HCI_DEV_NONE) {
751 if (!capable(CAP_NET_RAW)) {
756 send_monitor_replay(sk);
758 atomic_inc(&monitor_promisc);
767 hci_pi(sk)->channel = haddr.hci_channel;
768 sk->sk_state = BT_BOUND;
775 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
776 int *addr_len, int peer)
778 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
779 struct sock *sk = sock->sk;
780 struct hci_dev *hdev;
783 BT_DBG("sock %p sk %p", sock, sk);
790 hdev = hci_pi(sk)->hdev;
796 *addr_len = sizeof(*haddr);
797 haddr->hci_family = AF_BLUETOOTH;
798 haddr->hci_dev = hdev->id;
799 haddr->hci_channel= hci_pi(sk)->channel;
806 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
809 __u32 mask = hci_pi(sk)->cmsg_mask;
811 if (mask & HCI_CMSG_DIR) {
812 int incoming = bt_cb(skb)->incoming;
813 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
817 if (mask & HCI_CMSG_TSTAMP) {
819 struct compat_timeval ctv;
825 skb_get_timestamp(skb, &tv);
830 if (!COMPAT_USE_64BIT_TIME &&
831 (msg->msg_flags & MSG_CMSG_COMPAT)) {
832 ctv.tv_sec = tv.tv_sec;
833 ctv.tv_usec = tv.tv_usec;
839 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
843 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
844 struct msghdr *msg, size_t len, int flags)
846 int noblock = flags & MSG_DONTWAIT;
847 struct sock *sk = sock->sk;
851 BT_DBG("sock %p, sk %p", sock, sk);
853 if (flags & (MSG_OOB))
856 if (sk->sk_state == BT_CLOSED)
859 skb = skb_recv_datagram(sk, flags, noblock, &err);
865 msg->msg_flags |= MSG_TRUNC;
869 skb_reset_transport_header(skb);
870 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
872 switch (hci_pi(sk)->channel) {
873 case HCI_CHANNEL_RAW:
874 hci_sock_cmsg(sk, msg, skb);
876 case HCI_CHANNEL_USER:
877 case HCI_CHANNEL_CONTROL:
878 case HCI_CHANNEL_MONITOR:
879 sock_recv_timestamp(msg, sk, skb);
883 skb_free_datagram(sk, skb);
885 return err ? : copied;
888 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
889 struct msghdr *msg, size_t len)
891 struct sock *sk = sock->sk;
892 struct hci_dev *hdev;
896 BT_DBG("sock %p sk %p", sock, sk);
898 if (msg->msg_flags & MSG_OOB)
901 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
904 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
909 switch (hci_pi(sk)->channel) {
910 case HCI_CHANNEL_RAW:
911 case HCI_CHANNEL_USER:
913 case HCI_CHANNEL_CONTROL:
914 err = mgmt_control(sk, msg, len);
916 case HCI_CHANNEL_MONITOR:
924 hdev = hci_pi(sk)->hdev;
930 if (!test_bit(HCI_UP, &hdev->flags)) {
935 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
939 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
944 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
947 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
948 /* No permission check is needed for user channel
949 * since that gets enforced when binding the socket.
951 * However check that the packet type is valid.
953 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
954 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
955 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
960 skb_queue_tail(&hdev->raw_q, skb);
961 queue_work(hdev->workqueue, &hdev->tx_work);
962 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
963 u16 opcode = get_unaligned_le16(skb->data);
964 u16 ogf = hci_opcode_ogf(opcode);
965 u16 ocf = hci_opcode_ocf(opcode);
967 if (((ogf > HCI_SFLT_MAX_OGF) ||
968 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
969 &hci_sec_filter.ocf_mask[ogf])) &&
970 !capable(CAP_NET_RAW)) {
976 skb_queue_tail(&hdev->raw_q, skb);
977 queue_work(hdev->workqueue, &hdev->tx_work);
979 /* Stand-alone HCI commands must be flaged as
980 * single-command requests.
982 bt_cb(skb)->req.start = true;
984 skb_queue_tail(&hdev->cmd_q, skb);
985 queue_work(hdev->workqueue, &hdev->cmd_work);
988 if (!capable(CAP_NET_RAW)) {
993 skb_queue_tail(&hdev->raw_q, skb);
994 queue_work(hdev->workqueue, &hdev->tx_work);
1008 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1009 char __user *optval, unsigned int len)
1011 struct hci_ufilter uf = { .opcode = 0 };
1012 struct sock *sk = sock->sk;
1013 int err = 0, opt = 0;
1015 BT_DBG("sk %p, opt %d", sk, optname);
1019 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1026 if (get_user(opt, (int __user *)optval)) {
1032 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1034 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1037 case HCI_TIME_STAMP:
1038 if (get_user(opt, (int __user *)optval)) {
1044 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1046 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1051 struct hci_filter *f = &hci_pi(sk)->filter;
1053 uf.type_mask = f->type_mask;
1054 uf.opcode = f->opcode;
1055 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1056 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1059 len = min_t(unsigned int, len, sizeof(uf));
1060 if (copy_from_user(&uf, optval, len)) {
1065 if (!capable(CAP_NET_RAW)) {
1066 uf.type_mask &= hci_sec_filter.type_mask;
1067 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1068 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1072 struct hci_filter *f = &hci_pi(sk)->filter;
1074 f->type_mask = uf.type_mask;
1075 f->opcode = uf.opcode;
1076 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1077 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1091 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1092 char __user *optval, int __user *optlen)
1094 struct hci_ufilter uf;
1095 struct sock *sk = sock->sk;
1096 int len, opt, err = 0;
1098 BT_DBG("sk %p, opt %d", sk, optname);
1100 if (get_user(len, optlen))
1105 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1112 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1117 if (put_user(opt, optval))
1121 case HCI_TIME_STAMP:
1122 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1127 if (put_user(opt, optval))
1133 struct hci_filter *f = &hci_pi(sk)->filter;
1135 memset(&uf, 0, sizeof(uf));
1136 uf.type_mask = f->type_mask;
1137 uf.opcode = f->opcode;
1138 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1139 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1142 len = min_t(unsigned int, len, sizeof(uf));
1143 if (copy_to_user(optval, &uf, len))
1157 static const struct proto_ops hci_sock_ops = {
1158 .family = PF_BLUETOOTH,
1159 .owner = THIS_MODULE,
1160 .release = hci_sock_release,
1161 .bind = hci_sock_bind,
1162 .getname = hci_sock_getname,
1163 .sendmsg = hci_sock_sendmsg,
1164 .recvmsg = hci_sock_recvmsg,
1165 .ioctl = hci_sock_ioctl,
1166 .poll = datagram_poll,
1167 .listen = sock_no_listen,
1168 .shutdown = sock_no_shutdown,
1169 .setsockopt = hci_sock_setsockopt,
1170 .getsockopt = hci_sock_getsockopt,
1171 .connect = sock_no_connect,
1172 .socketpair = sock_no_socketpair,
1173 .accept = sock_no_accept,
1174 .mmap = sock_no_mmap
1177 static struct proto hci_sk_proto = {
1179 .owner = THIS_MODULE,
1180 .obj_size = sizeof(struct hci_pinfo)
1183 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1188 BT_DBG("sock %p", sock);
1190 if (sock->type != SOCK_RAW)
1191 return -ESOCKTNOSUPPORT;
1193 sock->ops = &hci_sock_ops;
1195 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1199 sock_init_data(sock, sk);
1201 sock_reset_flag(sk, SOCK_ZAPPED);
1203 sk->sk_protocol = protocol;
1205 sock->state = SS_UNCONNECTED;
1206 sk->sk_state = BT_OPEN;
1208 bt_sock_link(&hci_sk_list, sk);
1212 static const struct net_proto_family hci_sock_family_ops = {
1213 .family = PF_BLUETOOTH,
1214 .owner = THIS_MODULE,
1215 .create = hci_sock_create,
1218 int __init hci_sock_init(void)
1222 err = proto_register(&hci_sk_proto, 0);
1226 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1228 BT_ERR("HCI socket registration failed");
1232 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1234 BT_ERR("Failed to create HCI proc file");
1235 bt_sock_unregister(BTPROTO_HCI);
1239 BT_INFO("HCI socket layer initialized");
1244 proto_unregister(&hci_sk_proto);
1248 void hci_sock_cleanup(void)
1250 bt_procfs_cleanup(&init_net, "hci");
1251 bt_sock_unregister(BTPROTO_HCI);
1252 proto_unregister(&hci_sk_proto);