2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
/* Registered management (mgmt) channels and the mutex guarding the list. */
34 static LIST_HEAD(mgmt_chan_list);
35 static DEFINE_MUTEX(mgmt_chan_list_lock);
/* Count of open monitor sockets; non-zero enables sending frames to the
 * monitor channel (see hci_send_to_monitor below).
 */
37 static atomic_t monitor_promisc = ATOMIC_INIT(0);
39 /* ----- HCI socket interface ----- */
/* Access the HCI-specific per-socket data that overlays struct sock. */
42 #define hci_pi(sk) ((struct hci_pinfo *) sk)
/* NOTE(review): the opening lines of struct hci_pinfo are missing from this
 * extract; the fields below belong to that per-socket structure.
 */
47 struct hci_filter filter;	/* RAW-channel packet/event filter */
49 unsigned short channel;	/* HCI_CHANNEL_* this socket is bound to */
53 void hci_sock_set_flag(struct sock *sk, int nr)
55 set_bit(nr, &hci_pi(sk)->flags);
58 void hci_sock_clear_flag(struct sock *sk, int nr)
60 clear_bit(nr, &hci_pi(sk)->flags);
63 int hci_sock_test_flag(struct sock *sk, int nr)
65 return test_bit(nr, &hci_pi(sk)->flags);
68 unsigned short hci_sock_get_channel(struct sock *sk)
70 return hci_pi(sk)->channel;
73 static inline int hci_test_bit(int nr, const void *addr)
75 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Highest opcode group (OGF) covered by the security filter table. */
79 #define HCI_SFLT_MAX_OGF 5
81 struct hci_sec_filter {
/* Per-OGF bitmap of opcode-command (OCF) values that unprivileged
 * sockets are allowed to send.
 */
84 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
/* Security filter: packet types, events and commands allowed to sockets
 * lacking CAP_NET_RAW (consulted in hci_sock_sendmsg/setsockopt).
 * NOTE(review): several initializer rows and their OGF labels are missing
 * from this extract — verify against the upstream table.
 */
87 static const struct hci_sec_filter hci_sec_filter = {
91 { 0x1000d9fe, 0x0000b00c },
96 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
98 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
100 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
102 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
103 /* OGF_STATUS_PARAM */
104 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of open HCI sockets, protected by its embedded rwlock. */
108 static struct bt_sock_list hci_sk_list = {
109 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Decide whether @skb should be withheld from RAW socket @sk according to
 * the socket's hci_filter: packet-type mask, event mask, and (for command
 * complete/status events) the opcode filter.
 * NOTE(review): braces and return statements are missing from this extract,
 * so the exact control flow cannot be fully confirmed here.
 */
112 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
114 struct hci_filter *flt;
115 int flt_type, flt_event;
118 flt = &hci_pi(sk)->filter;
/* Vendor packets bypass the normal type-mask lookup */
120 if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
123 flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
125 if (!test_bit(flt_type, &flt->type_mask))
128 /* Extra filter for event packets only */
129 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
/* First byte of an HCI event packet is the event code */
132 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
134 if (!hci_test_bit(flt_event, &flt->event_mask))
137 /* Check filter only when opcode is set */
/* Cmd Complete carries the opcode at offset 3, Cmd Status at offset 4 */
141 if (flt_event == HCI_EV_CMD_COMPLETE &&
142 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
145 if (flt_event == HCI_EV_CMD_STATUS &&
146 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
152 /* Send frame to RAW socket */
/* Deliver @skb from @hdev to every bound socket interested in it.  A private
 * copy with one byte of headroom is made lazily (first matching socket) so
 * the packet-type byte can be pushed in front of the payload; each receiver
 * then gets its own clone of that copy.
 * NOTE(review): braces, continue statements and the copy-reuse logic lines
 * are missing from this extract.
 */
153 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
156 struct sk_buff *skb_copy = NULL;
158 BT_DBG("hdev %p len %d", hdev, skb->len);
160 read_lock(&hci_sk_list.lock);
162 sk_for_each(sk, &hci_sk_list.head) {
163 struct sk_buff *nskb;
165 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
168 /* Don't send frame to the socket it came from */
172 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
173 if (is_filtered_packet(sk, skb))
/* USER channel only sees incoming event/ACL/SCO traffic */
175 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
176 if (!bt_cb(skb)->incoming)
178 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
179 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
180 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
183 /* Don't send frame to other channel types */
188 /* Create a private copy with headroom */
189 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
193 /* Put type byte before the data */
194 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
197 nskb = skb_clone(skb_copy, GFP_ATOMIC);
201 if (sock_queue_rcv_skb(sk, nskb))
205 read_unlock(&hci_sk_list.lock);
210 /* Send frame to sockets with specific channel */
/* Clone @skb to every bound socket on @channel that has @flag set,
 * optionally skipping @skip_sk (the originator).
 * NOTE(review): braces/continue lines are missing from this extract.
 */
211 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
212 int flag, struct sock *skip_sk)
216 BT_DBG("channel %u len %d", channel, skb->len);
218 read_lock(&hci_sk_list.lock);
220 sk_for_each(sk, &hci_sk_list.head) {
221 struct sk_buff *nskb;
223 /* Ignore socket without the flag set */
224 if (!hci_sock_test_flag(sk, flag))
227 /* Skip the original socket */
231 if (sk->sk_state != BT_BOUND)
234 if (hci_pi(sk)->channel != channel)
237 nskb = skb_clone(skb, GFP_ATOMIC);
/* Queue failure: drop this receiver's clone, keep iterating */
241 if (sock_queue_rcv_skb(sk, nskb))
245 read_unlock(&hci_sk_list.lock);
248 /* Send frame to monitor socket */
/* Wrap @skb in a hci_mon_hdr (opcode chosen from packet type and
 * direction) and broadcast it on the monitor channel.  Bails out early
 * when no monitor socket is open (monitor_promisc == 0).
 * NOTE(review): break statements, the default case and cleanup lines are
 * missing from this extract.
 */
249 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
251 struct sk_buff *skb_copy = NULL;
252 struct hci_mon_hdr *hdr;
255 if (!atomic_read(&monitor_promisc))
258 BT_DBG("hdev %p len %d", hdev, skb->len);
260 switch (bt_cb(skb)->pkt_type) {
261 case HCI_COMMAND_PKT:
262 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
265 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
267 case HCI_ACLDATA_PKT:
268 if (bt_cb(skb)->incoming)
269 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
271 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
273 case HCI_SCODATA_PKT:
274 if (bt_cb(skb)->incoming)
275 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
277 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
283 /* Create a private copy with headroom */
284 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
288 /* Put header before the data */
289 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
290 hdr->opcode = opcode;
291 hdr->index = cpu_to_le16(hdev->id);
292 hdr->len = cpu_to_le16(skb->len);
294 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
295 HCI_SOCK_TRUSTED, NULL);
/* Build a monitor-channel event skb describing a device event (@event),
 * e.g. a new-index record carrying type, bdaddr and name, or a bare
 * del-index record.  Returns the skb (timestamped, header pushed).
 * NOTE(review): the switch scaffolding, NULL checks and return statement
 * are missing from this extract; presumably @event selects the cases shown.
 */
299 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
301 struct hci_mon_hdr *hdr;
302 struct hci_mon_new_index *ni;
308 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
312 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
313 ni->type = hdev->dev_type;
315 bacpy(&ni->bdaddr, &hdev->bdaddr);
/* Name is truncated to 8 bytes for the wire format */
316 memcpy(ni->name, hdev->name, 8);
318 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
322 skb = bt_skb_alloc(0, GFP_ATOMIC);
326 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
333 __net_timestamp(skb);
335 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
336 hdr->opcode = opcode;
337 hdr->index = cpu_to_le16(hdev->id);
338 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG monitor event for every registered controller to a
 * freshly bound monitor socket, so it learns about existing devices.
 * NOTE(review): skb NULL check and queue-failure handling lines are
 * missing from this extract.
 */
343 static void send_monitor_replay(struct sock *sk)
345 struct hci_dev *hdev;
347 read_lock(&hci_dev_list_lock);
349 list_for_each_entry(hdev, &hci_dev_list, list) {
352 skb = create_monitor_event(hdev, HCI_DEV_REG);
356 if (sock_queue_rcv_skb(sk, skb))
360 read_unlock(&hci_dev_list_lock);
363 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of @data
 * with sub-type @type, mark it incoming, and fan it out via
 * hci_send_to_sock (with @hdev possibly NULL for global events).
 * NOTE(review): allocation NULL check, ev->type assignment and kfree_skb
 * lines are missing from this extract.
 */
364 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
366 struct hci_event_hdr *hdr;
367 struct hci_ev_stack_internal *ev;
370 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
374 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
375 hdr->evt = HCI_EV_STACK_INTERNAL;
376 hdr->plen = sizeof(*ev) + dlen;
378 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
380 memcpy(ev->data, data, dlen);
382 bt_cb(skb)->incoming = 1;
383 __net_timestamp(skb);
385 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
386 hci_send_to_sock(hdev, skb);
/* Notify userspace of a device lifecycle @event: forward it to the monitor
 * channel, emit an HCI_EV_SI_DEVICE stack event, and on unregistration
 * detach every socket still bound to @hdev.
 * NOTE(review): skb handling, hci_dev_put and per-socket unlock lines are
 * missing from this extract.
 */
390 void hci_sock_dev_event(struct hci_dev *hdev, int event)
392 struct hci_ev_si_device ev;
394 BT_DBG("hdev %s event %d", hdev->name, event);
396 /* Send event to monitor */
397 if (atomic_read(&monitor_promisc)) {
400 skb = create_monitor_event(hdev, event);
402 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
403 HCI_SOCK_TRUSTED, NULL);
408 /* Send event to sockets */
410 ev.dev_id = hdev->id;
411 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
413 if (event == HCI_DEV_UNREG) {
416 /* Detach sockets from device */
417 read_lock(&hci_sk_list.lock);
418 sk_for_each(sk, &hci_sk_list.head) {
419 bh_lock_sock_nested(sk);
420 if (hci_pi(sk)->hdev == hdev) {
421 hci_pi(sk)->hdev = NULL;
/* Wake readers/writers so they notice the device is gone */
423 sk->sk_state = BT_OPEN;
424 sk->sk_state_change(sk);
430 read_unlock(&hci_sk_list.lock);
/* Linear search of mgmt_chan_list for @channel.  Caller must hold
 * mgmt_chan_list_lock.
 * NOTE(review): the `return c;` / `return NULL;` lines are missing from
 * this extract.
 */
434 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
436 struct hci_mgmt_chan *c;
438 list_for_each_entry(c, &mgmt_chan_list, list) {
439 if (c->channel == channel)
446 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
448 struct hci_mgmt_chan *c;
450 mutex_lock(&mgmt_chan_list_lock);
451 c = __hci_mgmt_chan_find(channel);
452 mutex_unlock(&mgmt_chan_list_lock);
457 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
459 if (c->channel < HCI_CHANNEL_CONTROL)
462 mutex_lock(&mgmt_chan_list_lock);
463 if (__hci_mgmt_chan_find(c->channel)) {
464 mutex_unlock(&mgmt_chan_list_lock);
468 list_add_tail(&c->list, &mgmt_chan_list);
470 mutex_unlock(&mgmt_chan_list_lock);
474 EXPORT_SYMBOL(hci_mgmt_chan_register);
476 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
478 mutex_lock(&mgmt_chan_list_lock);
480 mutex_unlock(&mgmt_chan_list_lock);
482 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Tear down an HCI socket: drop monitor accounting, unlink from the global
 * socket list, release the USER channel exclusivity (re-announcing the
 * index to mgmt) or drop the raw promiscuous reference, then purge queues.
 * NOTE(review): NULL checks, sock_orphan/sock_put and the return statement
 * are missing from this extract.
 */
484 static int hci_sock_release(struct socket *sock)
486 struct sock *sk = sock->sk;
487 struct hci_dev *hdev;
489 BT_DBG("sock %p sk %p", sock, sk);
494 hdev = hci_pi(sk)->hdev;
496 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
497 atomic_dec(&monitor_promisc);
499 bt_sock_unlink(&hci_sk_list, sk);
502 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
/* Give the controller back to the mgmt interface */
503 mgmt_index_added(hdev);
504 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
505 hci_dev_close(hdev->id);
508 atomic_dec(&hdev->promisc);
514 skb_queue_purge(&sk->sk_receive_queue);
515 skb_queue_purge(&sk->sk_write_queue);
521 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
526 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
531 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
533 hci_dev_unlock(hdev);
538 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
543 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
548 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
550 hci_dev_unlock(hdev);
555 /* Ioctls that require bound socket */
/* Dispatch ioctls that need an attached hci_dev: connection/auth info
 * queries and blacklist manipulation (the latter two gated on
 * CAP_NET_ADMIN).  Rejects devices in user-channel or unconfigured state.
 * NOTE(review): the hdev NULL check, case labels, error returns and
 * default case are missing from this extract.
 */
556 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
559 struct hci_dev *hdev = hci_pi(sk)->hdev;
564 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
567 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
570 if (hdev->dev_type != HCI_BREDR)
575 if (!capable(CAP_NET_ADMIN))
580 return hci_get_conn_info(hdev, (void __user *) arg);
583 return hci_get_auth_info(hdev, (void __user *) arg);
586 if (!capable(CAP_NET_ADMIN))
588 return hci_sock_blacklist_add(hdev, (void __user *) arg);
591 if (!capable(CAP_NET_ADMIN))
593 return hci_sock_blacklist_del(hdev, (void __user *) arg);
/* Top-level ioctl handler for HCI sockets.  Device-management commands
 * (open/close/reset) require CAP_NET_ADMIN; anything unrecognized falls
 * through to hci_sock_bound_ioctl under the socket lock.  Only RAW-channel
 * sockets may issue ioctls.
 * NOTE(review): case labels, lock_sock/release_sock and error returns are
 * missing from this extract.
 */
599 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
602 void __user *argp = (void __user *) arg;
603 struct sock *sk = sock->sk;
606 BT_DBG("cmd %x arg %lx", cmd, arg);
610 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
619 return hci_get_dev_list(argp);
622 return hci_get_dev_info(argp);
625 return hci_get_conn_list(argp);
628 if (!capable(CAP_NET_ADMIN))
630 return hci_dev_open(arg);
633 if (!capable(CAP_NET_ADMIN))
635 return hci_dev_close(arg);
638 if (!capable(CAP_NET_ADMIN))
640 return hci_dev_reset(arg);
643 if (!capable(CAP_NET_ADMIN))
645 return hci_dev_reset_stat(arg);
655 if (!capable(CAP_NET_ADMIN))
657 return hci_dev_cmd(cmd, argp);
660 return hci_inquiry(argp);
/* Fallback: bound-socket ioctls */
665 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (and, for RAW/USER, to a device).
 * RAW: optional device, bumps hdev->promisc.  USER: exclusive device
 * access for CAP_NET_ADMIN — device must be down and not in setup/config;
 * the index is hidden from mgmt while claimed.  MONITOR: CAP_NET_RAW,
 * marked trusted, replays existing device indexes.  Other channels must be
 * registered mgmt channels; CAP_NET_ADMIN grants trusted status there.
 * NOTE(review): many lines (error returns, lock_sock/release_sock, break
 * statements, hci_dev_put error paths) are missing from this extract.
 */
672 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
675 struct sockaddr_hci haddr;
676 struct sock *sk = sock->sk;
677 struct hci_dev *hdev = NULL;
680 BT_DBG("sock %p sk %p", sock, sk);
/* Copy at most sizeof(haddr); shorter addresses are zero-padded */
685 memset(&haddr, 0, sizeof(haddr));
686 len = min_t(unsigned int, sizeof(haddr), addr_len);
687 memcpy(&haddr, addr, len);
689 if (haddr.hci_family != AF_BLUETOOTH)
694 if (sk->sk_state == BT_BOUND) {
699 switch (haddr.hci_channel) {
700 case HCI_CHANNEL_RAW:
701 if (hci_pi(sk)->hdev) {
706 if (haddr.hci_dev != HCI_DEV_NONE) {
707 hdev = hci_dev_get(haddr.hci_dev);
713 atomic_inc(&hdev->promisc);
716 hci_pi(sk)->hdev = hdev;
719 case HCI_CHANNEL_USER:
720 if (hci_pi(sk)->hdev) {
725 if (haddr.hci_dev == HCI_DEV_NONE) {
730 if (!capable(CAP_NET_ADMIN)) {
735 hdev = hci_dev_get(haddr.hci_dev);
/* Device must be fully idle before exclusive takeover */
741 if (test_bit(HCI_UP, &hdev->flags) ||
742 test_bit(HCI_INIT, &hdev->flags) ||
743 hci_dev_test_flag(hdev, HCI_SETUP) ||
744 hci_dev_test_flag(hdev, HCI_CONFIG)) {
750 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
756 mgmt_index_removed(hdev);
758 err = hci_dev_open(hdev->id);
/* Open failed: undo the exclusive claim */
760 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
761 mgmt_index_added(hdev);
766 atomic_inc(&hdev->promisc);
768 hci_pi(sk)->hdev = hdev;
771 case HCI_CHANNEL_MONITOR:
772 if (haddr.hci_dev != HCI_DEV_NONE) {
777 if (!capable(CAP_NET_RAW)) {
782 /* The monitor interface is restricted to CAP_NET_RAW
783 * capabilities and with that implicitly trusted.
785 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
787 send_monitor_replay(sk);
789 atomic_inc(&monitor_promisc);
793 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
798 if (haddr.hci_dev != HCI_DEV_NONE) {
803 /* Users with CAP_NET_ADMIN capabilities are allowed
804 * access to all management commands and events. For
805 * untrusted users the interface is restricted and
806 * also only untrusted events are sent.
808 if (capable(CAP_NET_ADMIN))
809 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
811 /* At the moment the index and unconfigured index events
812 * are enabled unconditionally. Setting them on each
813 * socket when binding keeps this functionality. They
814 * however might be cleared later and then sending of these
815 * events will be disabled, but that is then intentional.
817 * This also enables generic events that are safe to be
818 * received by untrusted users. Example for such events
819 * are changes to settings, class of device, name etc.
821 if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
822 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
823 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
824 hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
830 hci_pi(sk)->channel = haddr.hci_channel;
831 sk->sk_state = BT_BOUND;
/* Report the bound address of the socket: family, device id and channel.
 * NOTE(review): the hdev NULL check, lock_sock/release_sock and return
 * lines are missing from this extract; the missing-space typo on the
 * hci_channel assignment is preserved verbatim.
 */
838 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
839 int *addr_len, int peer)
841 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
842 struct sock *sk = sock->sk;
843 struct hci_dev *hdev;
846 BT_DBG("sock %p sk %p", sock, sk);
853 hdev = hci_pi(sk)->hdev;
859 *addr_len = sizeof(*haddr);
860 haddr->hci_family = AF_BLUETOOTH;
861 haddr->hci_dev = hdev->id;
862 haddr->hci_channel= hci_pi(sk)->channel;
/* Attach ancillary data (direction and/or timestamp) to a received message
 * according to the socket's cmsg_mask.  Compat tasks get a 32-bit timeval.
 * NOTE(review): the data/len setup lines between the compat conversion and
 * put_cmsg are missing from this extract.
 */
869 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
872 __u32 mask = hci_pi(sk)->cmsg_mask;
874 if (mask & HCI_CMSG_DIR) {
875 int incoming = bt_cb(skb)->incoming;
876 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
880 if (mask & HCI_CMSG_TSTAMP) {
882 struct compat_timeval ctv;
888 skb_get_timestamp(skb, &tv);
/* 32-bit compat userspace expects a compat_timeval layout */
893 if (!COMPAT_USE_64BIT_TIME &&
894 (msg->msg_flags & MSG_CMSG_COMPAT)) {
895 ctv.tv_sec = tv.tv_sec;
896 ctv.tv_usec = tv.tv_usec;
902 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one datagram from the socket queue.  RAW sockets get HCI cmsg
 * metadata; USER/MONITOR and registered mgmt channels get plain socket
 * timestamps.  Truncated reads set MSG_TRUNC.
 * NOTE(review): copied-length computation, error checks and break
 * statements are missing from this extract.
 */
906 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
909 int noblock = flags & MSG_DONTWAIT;
910 struct sock *sk = sock->sk;
914 BT_DBG("sock %p, sk %p", sock, sk);
916 if (flags & (MSG_OOB))
919 if (sk->sk_state == BT_CLOSED)
922 skb = skb_recv_datagram(sk, flags, noblock, &err);
928 msg->msg_flags |= MSG_TRUNC;
932 skb_reset_transport_header(skb);
933 err = skb_copy_datagram_msg(skb, 0, msg, copied);
935 switch (hci_pi(sk)->channel) {
936 case HCI_CHANNEL_RAW:
937 hci_sock_cmsg(sk, msg, skb);
939 case HCI_CHANNEL_USER:
940 case HCI_CHANNEL_MONITOR:
941 sock_recv_timestamp(msg, sk, skb);
944 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
945 sock_recv_timestamp(msg, sk, skb);
949 skb_free_datagram(sk, skb);
/* Copy error wins over the byte count */
951 return err ? : copied;
/* Transmit a frame from userspace.  Mgmt channels are dispatched to
 * mgmt_control under the channel-list mutex.  RAW/USER frames are copied
 * into an skb; the first byte selects the packet type.  USER frames are
 * queued raw after a type sanity check; RAW commands are checked against
 * the security filter (unless CAP_NET_RAW) and routed to the command
 * queue, everything else to the raw queue (data needs CAP_NET_RAW).
 * NOTE(review): lock_sock/release_sock, error-label cleanup and several
 * returns are missing from this extract.
 */
954 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
957 struct sock *sk = sock->sk;
958 struct hci_mgmt_chan *chan;
959 struct hci_dev *hdev;
963 BT_DBG("sock %p sk %p", sock, sk);
965 if (msg->msg_flags & MSG_OOB)
968 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum 4 bytes: type byte + smallest header */
971 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
976 switch (hci_pi(sk)->channel) {
977 case HCI_CHANNEL_RAW:
978 case HCI_CHANNEL_USER:
/* Monitor channel is read-only */
980 case HCI_CHANNEL_MONITOR:
984 mutex_lock(&mgmt_chan_list_lock);
985 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
987 err = mgmt_control(chan, sk, msg, len);
991 mutex_unlock(&mgmt_chan_list_lock);
995 hdev = hci_pi(sk)->hdev;
1001 if (!test_bit(HCI_UP, &hdev->flags)) {
1006 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1010 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
/* First payload byte is the HCI packet type indicator */
1015 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
1018 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1019 /* No permission check is needed for user channel
1020 * since that gets enforced when binding the socket.
1022 * However check that the packet type is valid.
1024 if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
1025 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
1026 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
1031 skb_queue_tail(&hdev->raw_q, skb);
1032 queue_work(hdev->workqueue, &hdev->tx_work);
1033 } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
1034 u16 opcode = get_unaligned_le16(skb->data);
1035 u16 ogf = hci_opcode_ogf(opcode);
1036 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders are limited to the security-filter table */
1038 if (((ogf > HCI_SFLT_MAX_OGF) ||
1039 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1040 &hci_sec_filter.ocf_mask[ogf])) &&
1041 !capable(CAP_NET_RAW)) {
1047 skb_queue_tail(&hdev->raw_q, skb);
1048 queue_work(hdev->workqueue, &hdev->tx_work);
1050 /* Stand-alone HCI commands must be flagged as
1051 * single-command requests.
1053 bt_cb(skb)->req_start = 1;
1055 skb_queue_tail(&hdev->cmd_q, skb);
1056 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command RAW traffic requires CAP_NET_RAW */
1059 if (!capable(CAP_NET_RAW)) {
1064 skb_queue_tail(&hdev->raw_q, skb);
1065 queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI setsockopt for RAW sockets: toggle direction/timestamp cmsg
 * bits, or install a new hci_filter.  Unprivileged callers have their
 * filter masked down to the security filter.
 * NOTE(review): case labels, lock_sock/release_sock, default case and
 * return lines are missing from this extract.
 */
1079 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1080 char __user *optval, unsigned int len)
1082 struct hci_ufilter uf = { .opcode = 0 };
1083 struct sock *sk = sock->sk;
1084 int err = 0, opt = 0;
1086 BT_DBG("sk %p, opt %d", sk, optname);
1090 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1097 if (get_user(opt, (int __user *)optval)) {
1103 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1105 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1108 case HCI_TIME_STAMP:
1109 if (get_user(opt, (int __user *)optval)) {
1115 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1117 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1122 struct hci_filter *f = &hci_pi(sk)->filter;
/* Seed the user filter with the current settings */
1124 uf.type_mask = f->type_mask;
1125 uf.opcode = f->opcode;
1126 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1127 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1130 len = min_t(unsigned int, len, sizeof(uf));
1131 if (copy_from_user(&uf, optval, len)) {
/* Clamp unprivileged filters to the security filter */
1136 if (!capable(CAP_NET_RAW)) {
1137 uf.type_mask &= hci_sec_filter.type_mask;
1138 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1139 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1143 struct hci_filter *f = &hci_pi(sk)->filter;
1145 f->type_mask = uf.type_mask;
1146 f->opcode = uf.opcode;
1147 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1148 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* SOL_HCI getsockopt for RAW sockets: report the direction/timestamp cmsg
 * flags or copy out the current hci_filter (truncated to caller's len).
 * NOTE(review): case labels, opt assignments, lock handling and returns
 * are missing from this extract.
 */
1162 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1163 char __user *optval, int __user *optlen)
1165 struct hci_ufilter uf;
1166 struct sock *sk = sock->sk;
1167 int len, opt, err = 0;
1169 BT_DBG("sk %p, opt %d", sk, optname);
1171 if (get_user(len, optlen))
1176 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1183 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1188 if (put_user(opt, optval))
1192 case HCI_TIME_STAMP:
1193 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1198 if (put_user(opt, optval))
1204 struct hci_filter *f = &hci_pi(sk)->filter;
1206 memset(&uf, 0, sizeof(uf));
1207 uf.type_mask = f->type_mask;
1208 uf.opcode = f->opcode;
1209 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1210 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1213 len = min_t(unsigned int, len, sizeof(uf));
1214 if (copy_to_user(optval, &uf, len))
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets.  Unsupported
 * operations map to the sock_no_* stubs.
 * NOTE(review): the closing `};` appears to be missing from this extract.
 */
1228 static const struct proto_ops hci_sock_ops = {
1229 .family = PF_BLUETOOTH,
1230 .owner = THIS_MODULE,
1231 .release = hci_sock_release,
1232 .bind = hci_sock_bind,
1233 .getname = hci_sock_getname,
1234 .sendmsg = hci_sock_sendmsg,
1235 .recvmsg = hci_sock_recvmsg,
1236 .ioctl = hci_sock_ioctl,
1237 .poll = datagram_poll,
1238 .listen = sock_no_listen,
1239 .shutdown = sock_no_shutdown,
1240 .setsockopt = hci_sock_setsockopt,
1241 .getsockopt = hci_sock_getsockopt,
1242 .connect = sock_no_connect,
1243 .socketpair = sock_no_socketpair,
1244 .accept = sock_no_accept,
1245 .mmap = sock_no_mmap
1248 static struct proto hci_sk_proto = {
1250 .owner = THIS_MODULE,
1251 .obj_size = sizeof(struct hci_pinfo)
/* Create a new HCI socket: SOCK_RAW only; allocates the sock, initializes
 * datagram state and links it into hci_sk_list in BT_OPEN state.
 * NOTE(review): the sk NULL check and return statements are missing from
 * this extract.
 */
1254 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1259 BT_DBG("sock %p", sock);
1261 if (sock->type != SOCK_RAW)
1262 return -ESOCKTNOSUPPORT;
1264 sock->ops = &hci_sock_ops;
1266 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1270 sock_init_data(sock, sk);
1272 sock_reset_flag(sk, SOCK_ZAPPED);
1274 sk->sk_protocol = protocol;
1276 sock->state = SS_UNCONNECTED;
1277 sk->sk_state = BT_OPEN;
1279 bt_sock_link(&hci_sk_list, sk);
1283 static const struct net_proto_family hci_sock_family_ops = {
1284 .family = PF_BLUETOOTH,
1285 .owner = THIS_MODULE,
1286 .create = hci_sock_create,
/* Module init: register the proto, the BTPROTO_HCI socket family and the
 * procfs entry, unwinding on failure.
 * NOTE(review): the error-label scaffolding and `return err;` lines are
 * missing from this extract; proto_unregister at line 1317 belongs to the
 * error path.
 */
1289 int __init hci_sock_init(void)
1293 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1295 err = proto_register(&hci_sk_proto, 0);
1299 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1301 BT_ERR("HCI socket registration failed");
1305 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1307 BT_ERR("Failed to create HCI proc file");
1308 bt_sock_unregister(BTPROTO_HCI);
1312 BT_INFO("HCI socket layer initialized");
1317 proto_unregister(&hci_sk_proto);
1321 void hci_sock_cleanup(void)
1323 bt_procfs_cleanup(&init_net, "hci");
1324 bt_sock_unregister(BTPROTO_HCI);
1325 proto_unregister(&hci_sk_proto);