BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated

Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Count of bound HCI_CHANNEL_MONITOR sockets; read on the hot path in
 * hci_send_to_monitor() so frame copies are only built when someone is
 * actually listening.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Per-socket private data is laid out directly behind struct sock */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

	/* NOTE(review): the struct hci_pinfo declaration is truncated in this
	 * extract — only these two members are visible; confirm the full
	 * member list (hdev, cmsg_mask, ...) against the complete file.
	 */
	struct hci_filter filter;
	unsigned short channel;
49 static inline int hci_test_bit(int nr, void *addr)
51 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest opcode group (OGF) covered by the security filter below;
 * unprivileged raw sockets may only send commands whose bit is set.
 */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	/* Per-OGF bitmap of permitted OCFs, 4 x 32 bits each */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];

/* Default security filter applied to sockets without CAP_NET_RAW.
 * NOTE(review): the type_mask/event_mask members and parts of this
 * initializer (braces, OGF labels) were lost in this extract; only the
 * OCF rows below are visible.
 */
static const struct hci_sec_filter hci_sec_filter = {
	{ 0x1000d9fe, 0x0000b00c },
	{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
	{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
	{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
	{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
	/* OGF_STATUS_PARAM */
	{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
84 static struct bt_sock_list hci_sk_list = {
85 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb must be withheld from raw socket @sk according to
 * the socket's hci_filter: packet-type mask, event mask, and (for
 * cmd-complete / cmd-status events) the opcode filter.
 *
 * NOTE(review): branch bodies (the return statements) and braces were
 * lost in this extract; compare against the complete file.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
	struct hci_filter *flt;
	int flt_type, flt_event;

	flt = &hci_pi(sk)->filter;

	/* Vendor packets are handled specially, before the type mask */
	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)

	/* First payload byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))

	/* Check filter only when opcode is set */

	/* Opcode sits at offset 3 (cmd-complete) / 4 (cmd-status) */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
/* Send frame to RAW socket */
/* Deliver @skb from @hdev to every bound raw/user-channel socket on
 * hci_sk_list, prepending the packet-type byte.  A single private copy
 * with 1 byte headroom is built lazily, then cloned per receiver.
 *
 * NOTE(review): several lines (sk declaration, skip-origin check,
 * continue statements, NULL checks, closing braces) were lost in this
 * extract.
 */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)

		/* Don't send frame to the socket it came from */

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* Raw sockets see only what their filter allows */
			if (is_filtered_packet(sk, skb))
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* User channel only sees incoming evt/ACL/SCO */
			if (!bt_cb(skb)->incoming)
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
		/* Don't send frame to other channel types */

		/* Create a private copy with headroom */
		skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);

		/* Put type byte before the data */
		memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);

		nskb = skb_clone(skb_copy, GFP_ATOMIC);

		/* On queue failure the clone is presumably freed — confirm */
		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket whose channel matches @channel,
 * skipping @skip_sk (the originator).
 *
 * NOTE(review): continue statements, the skip_sk comparison, NULL check
 * and closing braces were lost in this extract.
 */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 struct sock *skip_sk)

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != channel)

		nskb = skb_clone(skb, GFP_ATOMIC);

		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Clone @skb to every bound HCI_CHANNEL_MONITOR socket.  Caller has
 * already prepended the hci_mon_hdr.
 *
 * NOTE(review): sk declaration, continue statements, NULL check and
 * closing braces were lost in this extract.
 */
static void queue_monitor_skb(struct sk_buff *skb)

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)

		nskb = skb_clone(skb, GFP_ATOMIC);

		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Send frame to monitor socket */
/* Wrap @skb in an hci_mon_hdr (opcode derived from packet type and
 * direction, index = hdev->id) and hand it to queue_monitor_skb().
 *
 * NOTE(review): the opcode declaration, break statements, else branches,
 * default case and NULL/error checks were lost in this extract.
 */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;

	/* Cheap early-out when no monitor socket is open */
	if (!atomic_read(&monitor_promisc))

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type + direction onto a monitor opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	/* Payload length = original frame length, header excluded */
	hdr->len = cpu_to_le16(skb->len);

	queue_monitor_skb(skb_copy);
/* Build a timestamped monitor-channel event skb for @hdev.  The two
 * visible branches correspond to a new-index event (carries type,
 * bdaddr and name) and a del-index event (empty payload).
 *
 * NOTE(review): the switch on @event, opcode/skb declarations, NULL
 * checks, break/default and the final return were lost in this extract.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;

	skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);

	ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
	ni->type = hdev->dev_type;
	bacpy(&ni->bdaddr, &hdev->bdaddr);
	/* Fixed 8-byte name copy — hdev->name must be at least that long */
	memcpy(ni->name, hdev->name, 8);

	opcode = cpu_to_le16(HCI_MON_NEW_INDEX);

	skb = bt_skb_alloc(0, GFP_ATOMIC);

	opcode = cpu_to_le16(HCI_MON_DEL_INDEX);

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG monitor event for every registered controller to
 * a freshly bound monitor socket @sk, so it learns the current indexes.
 *
 * NOTE(review): skb declaration, NULL check, kfree_skb on queue failure
 * and closing braces were lost in this extract.
 */
static void send_monitor_replay(struct sock *sk)
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {

		skb = create_monitor_event(hdev, HCI_DEV_REG);

		if (sock_queue_rcv_skb(sk, skb))

	read_unlock(&hci_dev_list_lock);
/* Generate internal stack event */
/* Fabricate an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data, mark it incoming, and feed it through hci_send_to_sock() as if
 * it came from @hdev (may be NULL for stack-wide events, as used by
 * hci_sock_dev_event()).
 *
 * NOTE(review): skb declaration, NULL check, ev->type assignment and
 * closing brace were lost in this extract.
 */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
/* Broadcast a device lifecycle event (@event, e.g. HCI_DEV_UNREG) to
 * monitor sockets and, via a stack-internal event, to raw sockets; on
 * unregister, detach every socket still bound to @hdev.
 *
 * NOTE(review): skb/sk declarations, NULL checks, ev.event assignment,
 * hci_dev_put, bh_unlock_sock and closing braces were lost in this
 * extract.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {

		skb = create_monitor_event(hdev, event);
			queue_monitor_skb(skb);

	/* Send event to sockets */
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			/* nested class: safe vs. other socket-lock holders */
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

		read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop monitor promiscuity, unlink from the
 * global list, tear down user-channel state (re-announce the index to
 * mgmt and close the device), and purge the queues.
 *
 * NOTE(review): NULL checks, the hdev branch structure (the visible
 * atomic_dec(&hdev->promisc) likely sits in another branch than the
 * user-channel block), hci_dev_put, sock_orphan/sock_put and the return
 * were lost in this extract.
 */
static int hci_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* Give the index back to mgmt and shut the device down */
		mgmt_index_added(hdev);
		clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
		hci_dev_close(hdev->id);

	atomic_dec(&hdev->promisc);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
/* HCIBLOCKADDR: copy a bdaddr from userspace and add it to @hdev's
 * blacklist as a BR/EDR address.
 *
 * NOTE(review): bdaddr/err declarations, -EFAULT return, hci_dev_lock
 * and the final return were lost in this extract.
 */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);
/* HCIUNBLOCKADDR: copy a bdaddr from userspace and remove it from
 * @hdev's blacklist (BR/EDR).  Mirrors hci_sock_blacklist_add().
 *
 * NOTE(review): bdaddr/err declarations, -EFAULT return, hci_dev_lock
 * and the final return were lost in this extract.
 */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);
/* Ioctls that require bound socket */
/* Per-device ioctls: guarded by device state (no user-channel, must be
 * configured, BR/EDR only), then dispatched by @cmd; privileged
 * operations additionally require CAP_NET_ADMIN.
 *
 * NOTE(review): the arg parameter line, the switch statement and its
 * case labels (HCIGETCONNINFO etc.), error returns and closing braces
 * were lost in this extract — the visible returns are the case bodies.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	/* Device is exclusively owned by a user channel — reject */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))

	if (hdev->dev_type != HCI_BREDR)

		if (!capable(CAP_NET_ADMIN))

		return hci_get_conn_info(hdev, (void __user *) arg);

		return hci_get_auth_info(hdev, (void __user *) arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler: global queries (dev list/info, conn list,
 * inquiry) and CAP_NET_ADMIN-gated device control (open/close/reset),
 * falling through to hci_sock_bound_ioctl() for per-device commands.
 *
 * NOTE(review): the arg parameter line, the switch and its case labels
 * (HCIGETDEVLIST, HCIDEVUP, ...), -EBADFD/-EPERM returns, lock_sock/
 * release_sock and the final return were lost in this extract.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Only the raw channel supports these ioctls */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

		return hci_get_dev_list(argp);

		return hci_get_dev_info(argp);

		return hci_get_conn_list(argp);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_open(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_close(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_reset(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_reset_stat(arg);

		if (!capable(CAP_NET_ADMIN))
		return hci_dev_cmd(cmd, argp);

		return hci_inquiry(argp);

	err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (raw / user / control / monitor) and,
 * for raw and user channels, to a specific controller.  User channel
 * takes exclusive ownership of a down controller and powers it up;
 * monitor channel replays current indexes and bumps monitor_promisc.
 *
 * NOTE(review): the second signature line (int addr_len), lock_sock,
 * len declaration, error paths (-EINVAL/-EALREADY/-EPERM/-EBUSY/
 * -EOPNOTSUPP), break statements, default case, hci_dev_put calls,
 * release_sock and braces were lost in this extract.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;

	BT_DBG("sock %p sk %p", sock, sk);

	/* Accept short sockaddrs: zero-fill, then copy what was given */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)

	/* Re-binding an already bound socket is not allowed */
	if (sk->sk_state == BT_BOUND) {

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);

			atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {

		/* User channel requires a concrete device... */
		if (haddr.hci_dev == HCI_DEV_NONE) {

		/* ...and CAP_NET_ADMIN */
		if (!capable(CAP_NET_ADMIN)) {

		hdev = hci_dev_get(haddr.hci_dev);

		/* Device must be fully idle before exclusive takeover */
		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {

		/* Hide the index from mgmt while user-owned */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
			/* Open failed: undo the takeover */
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_ADMIN)) {

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_RAW)) {

		/* Tell the new listener about existing controllers */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;
/* Fill @addr with the socket's AF_BLUETOOTH address: bound device id
 * and channel.  @peer is unused here — HCI sockets have no peer.
 *
 * NOTE(review): lock_sock, the NULL-hdev error path, release_sock and
 * the return were lost in this extract.
 */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	hdev = hci_pi(sk)->hdev;

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel= hci_pi(sk)->channel;
/* Attach requested ancillary data (SOL_HCI cmsgs) to a received
 * message: packet direction and/or receive timestamp, honoring compat
 * (32-bit timeval) callers.
 *
 * NOTE(review): the skb parameter line, tv/data/len declarations, the
 * cmsg data pointer setup and closing braces were lost in this extract.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),

	if (mask & HCI_CMSG_TSTAMP) {
		struct compat_timeval ctv;

		skb_get_timestamp(skb, &tv);

		/* 32-bit compat tasks get a compat_timeval instead */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one datagram: dequeue an skb, truncate to @len (setting
 * MSG_TRUNC if shorter), copy it out, then add per-channel metadata
 * (cmsgs for raw, timestamps for the other channels).
 *
 * NOTE(review): copied/err/skb declarations, -EOPNOTSUPP return, the
 * NULL-skb path, the copied>len truncation assignment, break statements
 * and closing braces were lost in this extract.
 */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Out-of-band data is not supported */
	if (flags & (MSG_OOB))

	if (sk->sk_state == BT_CLOSED)

	skb = skb_recv_datagram(sk, flags, noblock, &err);

		msg->msg_flags |= MSG_TRUNC;

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);

	skb_free_datagram(sk, skb);

	/* err==0 -> report bytes copied */
	return err ? : copied;
/* Send one frame.  Control channel goes to mgmt, monitor is read-only;
 * raw/user frames are copied into an skb whose first byte is the packet
 * type.  User channel restricts types; raw commands from unprivileged
 * senders must pass the hci_sec_filter, and commands are routed through
 * cmd_q (as single-command requests) while data goes through raw_q.
 *
 * NOTE(review): err/skb declarations, lock_sock/release_sock, the error
 * labels (drop/done), break/default statements, the HCI_RAW branch
 * structure around the final raw_q enqueue, and closing braces were
 * lost in this extract.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))

	/* Minimum frame: type byte + 3-byte command header */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
	case HCI_CHANNEL_MONITOR:

	hdev = hci_pi(sk)->hdev;

	if (!test_bit(HCI_UP, &hdev->flags)) {

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {

	/* First byte of the payload is the HCI packet type */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only use whitelisted opcodes */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);

		/* Stand-alone HCI commands must be flagged as
		 * single-command requests.
		 */
		bt_cb(skb)->req.start = true;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);

		if (!capable(CAP_NET_RAW)) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI setsockopt for raw sockets: toggle direction/timestamp cmsg
 * flags, or install a new hci_filter — seeded from the current filter,
 * overwritten by up to sizeof(uf) user bytes, and clamped to the
 * security filter for senders without CAP_NET_RAW.
 *
 * NOTE(review): lock_sock/release_sock, the switch and its case labels
 * (HCI_DATA_DIR, HCI_FILTER), -EFAULT/-EINVAL assignments, break
 * statements, the compound-statement braces around the filter
 * copies and the final return were lost in this extract.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	/* Options only exist on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

	if (get_user(opt, (int __user *)optval)) {

		hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {

		hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;

		/* Seed uf from the current filter so a short copy keeps
		 * the untouched fields */
		struct hci_filter *f = &hci_pi(sk)->filter;

		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);

	len = min_t(unsigned int, len, sizeof(uf));
	if (copy_from_user(&uf, optval, len)) {

	/* Unprivileged sockets cannot widen past the security filter */
	if (!capable(CAP_NET_RAW)) {
		uf.type_mask &= hci_sec_filter.type_mask;
		uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
		uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);

		struct hci_filter *f = &hci_pi(sk)->filter;

		f->type_mask = uf.type_mask;
		f->opcode = uf.opcode;
		*((u32 *) f->event_mask + 0) = uf.event_mask[0];
		*((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* SOL_HCI getsockopt for raw sockets: report the direction/timestamp
 * cmsg flags as 0/1 ints, or copy out the current filter as an
 * hci_ufilter (truncated to the caller's optlen).
 *
 * NOTE(review): lock_sock/release_sock, the switch and its case labels
 * (HCI_DATA_DIR, HCI_FILTER), opt = 1/0 assignments, -EFAULT paths,
 * break statements, compound braces and the final return were lost in
 * this extract.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))

	/* Options only exist on the raw channel */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)

		if (put_user(opt, optval))

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)

		if (put_user(opt, optval))

		struct hci_filter *f = &hci_pi(sk)->filter;

		memset(&uf, 0, sizeof(uf));
		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);

	len = min_t(unsigned int, len, sizeof(uf));
	if (copy_to_user(optval, &uf, len))
1144 static const struct proto_ops hci_sock_ops = {
1145 .family = PF_BLUETOOTH,
1146 .owner = THIS_MODULE,
1147 .release = hci_sock_release,
1148 .bind = hci_sock_bind,
1149 .getname = hci_sock_getname,
1150 .sendmsg = hci_sock_sendmsg,
1151 .recvmsg = hci_sock_recvmsg,
1152 .ioctl = hci_sock_ioctl,
1153 .poll = datagram_poll,
1154 .listen = sock_no_listen,
1155 .shutdown = sock_no_shutdown,
1156 .setsockopt = hci_sock_setsockopt,
1157 .getsockopt = hci_sock_getsockopt,
1158 .connect = sock_no_connect,
1159 .socketpair = sock_no_socketpair,
1160 .accept = sock_no_accept,
1161 .mmap = sock_no_mmap
/* Protocol descriptor: sizes each struct sock allocation to hold the
 * hci_pinfo private data behind it (see hci_pi()).
 * NOTE(review): the .name initializer line was lost in this extract.
 */
static struct proto hci_sk_proto = {
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
/* socket(2) backend for BTPROTO_HCI: only SOCK_RAW is supported.
 * Allocates the sock, initializes it as an open (unbound) HCI socket
 * and links it into hci_sk_list.
 *
 * NOTE(review): the kern parameter line, sk declaration, -ENOMEM path
 * and the final return were lost in this extract.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);

	sock_init_data(sock, sk);

	/* Mark usable; bt sockets start life zapped */
	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
1199 static const struct net_proto_family hci_sock_family_ops = {
1200 .family = PF_BLUETOOTH,
1201 .owner = THIS_MODULE,
1202 .create = hci_sock_create,
/* Module init: register the protocol, the BTPROTO_HCI socket family and
 * the /proc entry, unwinding on failure.  The BUILD_BUG_ON pins the
 * sockaddr_hci ABI to fit inside struct sockaddr.
 *
 * NOTE(review): err declaration, the error-check branches around each
 * registration, goto labels/error unwind structure and returns were
 * lost in this extract.
 */
int __init hci_sock_init(void)

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
		BT_ERR("HCI socket registration failed");

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
		BT_ERR("Failed to create HCI proc file");
		/* Unwind the already-registered socket family */
		bt_sock_unregister(BTPROTO_HCI);

	BT_INFO("HCI socket layer initialized");

	proto_unregister(&hci_sk_proto);
1237 void hci_sock_cleanup(void)
1239 bt_procfs_cleanup(&init_net, "hci");
1240 bt_sock_unregister(BTPROTO_HCI);
1241 proto_unregister(&hci_sk_proto);