2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static struct workqueue_struct *_busy_wq;
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
97 /* Find channel with given SCID.
98 * Returns locked socket */
99 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
103 s = __l2cap_get_chan_by_scid(l, cid);
106 read_unlock(&l->lock);
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
120 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
124 s = __l2cap_get_chan_by_ident(l, ident);
127 read_unlock(&l->lock);
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
143 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
148 l2cap_pi(l->head)->prev_c = sk;
150 l2cap_pi(sk)->next_c = l->head;
151 l2cap_pi(sk)->prev_c = NULL;
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
159 write_lock_bh(&l->lock);
164 l2cap_pi(next)->prev_c = prev;
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
174 struct l2cap_chan_list *l = &conn->chan_list;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
179 conn->disc_reason = 0x13;
181 l2cap_pi(sk)->conn = conn;
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 /* Alloc CID for connection-oriented socket */
185 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
186 } else if (sk->sk_type == SOCK_DGRAM) {
187 /* Connectionless socket */
188 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
189 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
190 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
192 /* Raw socket can send/recv signalling messages only */
193 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
194 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
195 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
198 __l2cap_chan_link(l, sk);
201 bt_accept_enqueue(parent, sk);
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct sock *sk, int err)
208 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
209 struct sock *parent = bt_sk(sk)->parent;
211 l2cap_sock_clear_timer(sk);
213 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
216 /* Unlink from channel list */
217 l2cap_chan_unlink(&conn->chan_list, sk);
218 l2cap_pi(sk)->conn = NULL;
219 hci_conn_put(conn->hcon);
222 sk->sk_state = BT_CLOSED;
223 sock_set_flag(sk, SOCK_ZAPPED);
229 bt_accept_unlink(sk);
230 parent->sk_data_ready(parent, 0);
232 sk->sk_state_change(sk);
234 skb_queue_purge(TX_QUEUE(sk));
236 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
237 struct srej_list *l, *tmp;
239 del_timer(&l2cap_pi(sk)->retrans_timer);
240 del_timer(&l2cap_pi(sk)->monitor_timer);
241 del_timer(&l2cap_pi(sk)->ack_timer);
243 skb_queue_purge(SREJ_QUEUE(sk));
244 skb_queue_purge(BUSY_QUEUE(sk));
246 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
253 static inline u8 l2cap_get_auth_type(struct sock *sk)
255 if (sk->sk_type == SOCK_RAW) {
256 switch (l2cap_pi(sk)->sec_level) {
257 case BT_SECURITY_HIGH:
258 return HCI_AT_DEDICATED_BONDING_MITM;
259 case BT_SECURITY_MEDIUM:
260 return HCI_AT_DEDICATED_BONDING;
262 return HCI_AT_NO_BONDING;
264 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
265 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
266 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
268 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
269 return HCI_AT_NO_BONDING_MITM;
271 return HCI_AT_NO_BONDING;
273 switch (l2cap_pi(sk)->sec_level) {
274 case BT_SECURITY_HIGH:
275 return HCI_AT_GENERAL_BONDING_MITM;
276 case BT_SECURITY_MEDIUM:
277 return HCI_AT_GENERAL_BONDING;
279 return HCI_AT_NO_BONDING;
284 /* Service level security */
285 static inline int l2cap_check_security(struct sock *sk)
287 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
290 auth_type = l2cap_get_auth_type(sk);
292 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
296 u8 l2cap_get_ident(struct l2cap_conn *conn)
300 /* Get next available identificator.
301 * 1 - 128 are used by kernel.
302 * 129 - 199 are reserved.
303 * 200 - 254 are used by utilities like l2ping, etc.
306 spin_lock_bh(&conn->lock);
308 if (++conn->tx_ident > 128)
313 spin_unlock_bh(&conn->lock);
318 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
320 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
323 BT_DBG("code 0x%2.2x", code);
328 if (lmp_no_flush_capable(conn->hcon->hdev))
329 flags = ACL_START_NO_FLUSH;
333 hci_send_acl(conn->hcon, skb, flags);
336 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct l2cap_hdr *lh;
340 struct l2cap_conn *conn = pi->conn;
341 struct sock *sk = (struct sock *)pi;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
345 if (sk->sk_state != BT_CONNECTED)
348 if (pi->fcs == L2CAP_FCS_CRC16)
351 BT_DBG("pi %p, control 0x%2.2x", pi, control);
353 count = min_t(unsigned int, conn->mtu, hlen);
354 control |= L2CAP_CTRL_FRAME_TYPE;
356 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
357 control |= L2CAP_CTRL_FINAL;
358 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
361 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
362 control |= L2CAP_CTRL_POLL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
366 skb = bt_skb_alloc(count, GFP_ATOMIC);
370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
371 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
372 lh->cid = cpu_to_le16(pi->dcid);
373 put_unaligned_le16(control, skb_put(skb, 2));
375 if (pi->fcs == L2CAP_FCS_CRC16) {
376 u16 fcs = crc16(0, (u8 *)lh, count - 2);
377 put_unaligned_le16(fcs, skb_put(skb, 2));
380 if (lmp_no_flush_capable(conn->hcon->hdev))
381 flags = ACL_START_NO_FLUSH;
385 hci_send_acl(pi->conn->hcon, skb, flags);
388 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
390 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
391 control |= L2CAP_SUPER_RCV_NOT_READY;
392 pi->conn_state |= L2CAP_CONN_RNR_SENT;
394 control |= L2CAP_SUPER_RCV_READY;
396 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
398 l2cap_send_sframe(pi, control);
401 static inline int __l2cap_no_conn_pending(struct sock *sk)
403 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
406 static void l2cap_do_start(struct sock *sk)
408 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
410 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
411 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
414 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
415 struct l2cap_conn_req req;
416 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
417 req.psm = l2cap_pi(sk)->psm;
419 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
420 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
422 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
423 L2CAP_CONN_REQ, sizeof(req), &req);
426 struct l2cap_info_req req;
427 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
429 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
430 conn->info_ident = l2cap_get_ident(conn);
432 mod_timer(&conn->info_timer, jiffies +
433 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
435 l2cap_send_cmd(conn, conn->info_ident,
436 L2CAP_INFO_REQ, sizeof(req), &req);
440 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
442 u32 local_feat_mask = l2cap_feat_mask;
444 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
447 case L2CAP_MODE_ERTM:
448 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
449 case L2CAP_MODE_STREAMING:
450 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
456 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
458 struct l2cap_disconn_req req;
463 skb_queue_purge(TX_QUEUE(sk));
465 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
466 del_timer(&l2cap_pi(sk)->retrans_timer);
467 del_timer(&l2cap_pi(sk)->monitor_timer);
468 del_timer(&l2cap_pi(sk)->ack_timer);
471 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
472 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
473 l2cap_send_cmd(conn, l2cap_get_ident(conn),
474 L2CAP_DISCONN_REQ, sizeof(req), &req);
476 sk->sk_state = BT_DISCONN;
480 /* ---- L2CAP connections ---- */
481 static void l2cap_conn_start(struct l2cap_conn *conn)
483 struct l2cap_chan_list *l = &conn->chan_list;
484 struct sock_del_list del, *tmp1, *tmp2;
487 BT_DBG("conn %p", conn);
489 INIT_LIST_HEAD(&del.list);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 struct l2cap_conn_req req;
505 if (!l2cap_check_security(sk) ||
506 !__l2cap_no_conn_pending(sk)) {
511 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
513 && l2cap_pi(sk)->conf_state &
514 L2CAP_CONF_STATE2_DEVICE) {
515 tmp1 = kzalloc(sizeof(struct sock_del_list),
518 list_add_tail(&tmp1->list, &del.list);
523 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
524 req.psm = l2cap_pi(sk)->psm;
526 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
527 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
529 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
530 L2CAP_CONN_REQ, sizeof(req), &req);
532 } else if (sk->sk_state == BT_CONNECT2) {
533 struct l2cap_conn_rsp rsp;
535 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
536 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
538 if (l2cap_check_security(sk)) {
539 if (bt_sk(sk)->defer_setup) {
540 struct sock *parent = bt_sk(sk)->parent;
541 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
542 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
543 parent->sk_data_ready(parent, 0);
546 sk->sk_state = BT_CONFIG;
547 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
548 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
551 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
552 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
555 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
556 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
558 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
559 rsp.result != L2CAP_CR_SUCCESS) {
564 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
565 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
566 l2cap_build_conf_req(sk, buf), buf);
567 l2cap_pi(sk)->num_conf_req++;
573 read_unlock(&l->lock);
575 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
576 bh_lock_sock(tmp1->sk);
577 __l2cap_sock_close(tmp1->sk, ECONNRESET);
578 bh_unlock_sock(tmp1->sk);
579 list_del(&tmp1->list);
584 static void l2cap_conn_ready(struct l2cap_conn *conn)
586 struct l2cap_chan_list *l = &conn->chan_list;
589 BT_DBG("conn %p", conn);
593 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
596 if (conn->hcon->type == LE_LINK) {
597 l2cap_sock_clear_timer(sk);
598 sk->sk_state = BT_CONNECTED;
599 sk->sk_state_change(sk);
602 if (sk->sk_type != SOCK_SEQPACKET &&
603 sk->sk_type != SOCK_STREAM) {
604 l2cap_sock_clear_timer(sk);
605 sk->sk_state = BT_CONNECTED;
606 sk->sk_state_change(sk);
607 } else if (sk->sk_state == BT_CONNECT)
613 read_unlock(&l->lock);
616 /* Notify sockets that we cannot guaranty reliability anymore */
617 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
619 struct l2cap_chan_list *l = &conn->chan_list;
622 BT_DBG("conn %p", conn);
626 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
627 if (l2cap_pi(sk)->force_reliable)
631 read_unlock(&l->lock);
634 static void l2cap_info_timeout(unsigned long arg)
636 struct l2cap_conn *conn = (void *) arg;
638 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
639 conn->info_ident = 0;
641 l2cap_conn_start(conn);
644 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
646 struct l2cap_conn *conn = hcon->l2cap_data;
651 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
655 hcon->l2cap_data = conn;
658 BT_DBG("hcon %p conn %p", hcon, conn);
660 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
661 conn->mtu = hcon->hdev->le_mtu;
663 conn->mtu = hcon->hdev->acl_mtu;
665 conn->src = &hcon->hdev->bdaddr;
666 conn->dst = &hcon->dst;
670 spin_lock_init(&conn->lock);
671 rwlock_init(&conn->chan_list.lock);
673 setup_timer(&conn->info_timer, l2cap_info_timeout,
674 (unsigned long) conn);
676 conn->disc_reason = 0x13;
681 static void l2cap_conn_del(struct hci_conn *hcon, int err)
683 struct l2cap_conn *conn = hcon->l2cap_data;
689 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
691 kfree_skb(conn->rx_skb);
694 while ((sk = conn->chan_list.head)) {
696 l2cap_chan_del(sk, err);
701 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
702 del_timer_sync(&conn->info_timer);
704 hcon->l2cap_data = NULL;
708 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
710 struct l2cap_chan_list *l = &conn->chan_list;
711 write_lock_bh(&l->lock);
712 __l2cap_chan_add(conn, sk, parent);
713 write_unlock_bh(&l->lock);
716 /* ---- Socket interface ---- */
718 /* Find socket with psm and source bdaddr.
719 * Returns closest match.
721 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
723 struct sock *sk = NULL, *sk1 = NULL;
724 struct hlist_node *node;
726 read_lock(&l2cap_sk_list.lock);
728 sk_for_each(sk, node, &l2cap_sk_list.head) {
729 if (state && sk->sk_state != state)
732 if (l2cap_pi(sk)->psm == psm) {
734 if (!bacmp(&bt_sk(sk)->src, src))
738 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
743 read_unlock(&l2cap_sk_list.lock);
745 return node ? sk : sk1;
748 int l2cap_do_connect(struct sock *sk)
750 bdaddr_t *src = &bt_sk(sk)->src;
751 bdaddr_t *dst = &bt_sk(sk)->dst;
752 struct l2cap_conn *conn;
753 struct hci_conn *hcon;
754 struct hci_dev *hdev;
758 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
761 hdev = hci_get_route(dst, src);
763 return -EHOSTUNREACH;
765 hci_dev_lock_bh(hdev);
769 auth_type = l2cap_get_auth_type(sk);
771 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
772 hcon = hci_connect(hdev, LE_LINK, dst,
773 l2cap_pi(sk)->sec_level, auth_type);
775 hcon = hci_connect(hdev, ACL_LINK, dst,
776 l2cap_pi(sk)->sec_level, auth_type);
781 conn = l2cap_conn_add(hcon, 0);
789 /* Update source addr of the socket */
790 bacpy(src, conn->src);
792 l2cap_chan_add(conn, sk, NULL);
794 sk->sk_state = BT_CONNECT;
795 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
797 if (hcon->state == BT_CONNECTED) {
798 if (sk->sk_type != SOCK_SEQPACKET &&
799 sk->sk_type != SOCK_STREAM) {
800 l2cap_sock_clear_timer(sk);
801 if (l2cap_check_security(sk))
802 sk->sk_state = BT_CONNECTED;
808 hci_dev_unlock_bh(hdev);
813 int __l2cap_wait_ack(struct sock *sk)
815 DECLARE_WAITQUEUE(wait, current);
819 add_wait_queue(sk_sleep(sk), &wait);
820 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
821 set_current_state(TASK_INTERRUPTIBLE);
826 if (signal_pending(current)) {
827 err = sock_intr_errno(timeo);
832 timeo = schedule_timeout(timeo);
835 err = sock_error(sk);
839 set_current_state(TASK_RUNNING);
840 remove_wait_queue(sk_sleep(sk), &wait);
844 static void l2cap_monitor_timeout(unsigned long arg)
846 struct sock *sk = (void *) arg;
851 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
852 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
857 l2cap_pi(sk)->retry_count++;
858 __mod_monitor_timer();
860 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
864 static void l2cap_retrans_timeout(unsigned long arg)
866 struct sock *sk = (void *) arg;
871 l2cap_pi(sk)->retry_count = 1;
872 __mod_monitor_timer();
874 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
876 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
880 static void l2cap_drop_acked_frames(struct sock *sk)
884 while ((skb = skb_peek(TX_QUEUE(sk))) &&
885 l2cap_pi(sk)->unacked_frames) {
886 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
889 skb = skb_dequeue(TX_QUEUE(sk));
892 l2cap_pi(sk)->unacked_frames--;
895 if (!l2cap_pi(sk)->unacked_frames)
896 del_timer(&l2cap_pi(sk)->retrans_timer);
899 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
901 struct l2cap_pinfo *pi = l2cap_pi(sk);
902 struct hci_conn *hcon = pi->conn->hcon;
905 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
907 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
908 flags = ACL_START_NO_FLUSH;
912 hci_send_acl(hcon, skb, flags);
915 void l2cap_streaming_send(struct sock *sk)
918 struct l2cap_pinfo *pi = l2cap_pi(sk);
921 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
922 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
923 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
924 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
926 if (pi->fcs == L2CAP_FCS_CRC16) {
927 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
928 put_unaligned_le16(fcs, skb->data + skb->len - 2);
931 l2cap_do_send(sk, skb);
933 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
937 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
939 struct l2cap_pinfo *pi = l2cap_pi(sk);
940 struct sk_buff *skb, *tx_skb;
943 skb = skb_peek(TX_QUEUE(sk));
948 if (bt_cb(skb)->tx_seq == tx_seq)
951 if (skb_queue_is_last(TX_QUEUE(sk), skb))
954 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
956 if (pi->remote_max_tx &&
957 bt_cb(skb)->retries == pi->remote_max_tx) {
958 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
962 tx_skb = skb_clone(skb, GFP_ATOMIC);
963 bt_cb(skb)->retries++;
964 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
966 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
967 control |= L2CAP_CTRL_FINAL;
968 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
971 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
972 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
974 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
976 if (pi->fcs == L2CAP_FCS_CRC16) {
977 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
978 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
981 l2cap_do_send(sk, tx_skb);
984 int l2cap_ertm_send(struct sock *sk)
986 struct sk_buff *skb, *tx_skb;
987 struct l2cap_pinfo *pi = l2cap_pi(sk);
991 if (sk->sk_state != BT_CONNECTED)
994 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
996 if (pi->remote_max_tx &&
997 bt_cb(skb)->retries == pi->remote_max_tx) {
998 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1002 tx_skb = skb_clone(skb, GFP_ATOMIC);
1004 bt_cb(skb)->retries++;
1006 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1007 control &= L2CAP_CTRL_SAR;
1009 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1010 control |= L2CAP_CTRL_FINAL;
1011 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1013 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1014 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1015 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1018 if (pi->fcs == L2CAP_FCS_CRC16) {
1019 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1020 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1023 l2cap_do_send(sk, tx_skb);
1025 __mod_retrans_timer();
1027 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1028 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1030 pi->unacked_frames++;
1033 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1034 sk->sk_send_head = NULL;
1036 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1044 static int l2cap_retransmit_frames(struct sock *sk)
1046 struct l2cap_pinfo *pi = l2cap_pi(sk);
1049 if (!skb_queue_empty(TX_QUEUE(sk)))
1050 sk->sk_send_head = TX_QUEUE(sk)->next;
1052 pi->next_tx_seq = pi->expected_ack_seq;
1053 ret = l2cap_ertm_send(sk);
1057 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1059 struct sock *sk = (struct sock *)pi;
1062 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1064 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1065 control |= L2CAP_SUPER_RCV_NOT_READY;
1066 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1067 l2cap_send_sframe(pi, control);
1071 if (l2cap_ertm_send(sk) > 0)
1074 control |= L2CAP_SUPER_RCV_READY;
1075 l2cap_send_sframe(pi, control);
1078 static void l2cap_send_srejtail(struct sock *sk)
1080 struct srej_list *tail;
1083 control = L2CAP_SUPER_SELECT_REJECT;
1084 control |= L2CAP_CTRL_FINAL;
1086 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1087 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1089 l2cap_send_sframe(l2cap_pi(sk), control);
1092 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1094 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1095 struct sk_buff **frag;
1098 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1104 /* Continuation fragments (no L2CAP header) */
1105 frag = &skb_shinfo(skb)->frag_list;
1107 count = min_t(unsigned int, conn->mtu, len);
1109 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1112 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1118 frag = &(*frag)->next;
1124 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1126 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1127 struct sk_buff *skb;
1128 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1129 struct l2cap_hdr *lh;
1131 BT_DBG("sk %p len %d", sk, (int)len);
1133 count = min_t(unsigned int, (conn->mtu - hlen), len);
1134 skb = bt_skb_send_alloc(sk, count + hlen,
1135 msg->msg_flags & MSG_DONTWAIT, &err);
1137 return ERR_PTR(err);
1139 /* Create L2CAP header */
1140 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1141 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1142 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1143 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1145 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1146 if (unlikely(err < 0)) {
1148 return ERR_PTR(err);
1153 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1155 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1156 struct sk_buff *skb;
1157 int err, count, hlen = L2CAP_HDR_SIZE;
1158 struct l2cap_hdr *lh;
1160 BT_DBG("sk %p len %d", sk, (int)len);
1162 count = min_t(unsigned int, (conn->mtu - hlen), len);
1163 skb = bt_skb_send_alloc(sk, count + hlen,
1164 msg->msg_flags & MSG_DONTWAIT, &err);
1166 return ERR_PTR(err);
1168 /* Create L2CAP header */
1169 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1170 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1171 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1173 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1174 if (unlikely(err < 0)) {
1176 return ERR_PTR(err);
1181 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1183 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1184 struct sk_buff *skb;
1185 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1186 struct l2cap_hdr *lh;
1188 BT_DBG("sk %p len %d", sk, (int)len);
1191 return ERR_PTR(-ENOTCONN);
1196 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1199 count = min_t(unsigned int, (conn->mtu - hlen), len);
1200 skb = bt_skb_send_alloc(sk, count + hlen,
1201 msg->msg_flags & MSG_DONTWAIT, &err);
1203 return ERR_PTR(err);
1205 /* Create L2CAP header */
1206 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1207 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1208 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1209 put_unaligned_le16(control, skb_put(skb, 2));
1211 put_unaligned_le16(sdulen, skb_put(skb, 2));
1213 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1214 if (unlikely(err < 0)) {
1216 return ERR_PTR(err);
1219 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1220 put_unaligned_le16(0, skb_put(skb, 2));
1222 bt_cb(skb)->retries = 0;
1226 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1228 struct l2cap_pinfo *pi = l2cap_pi(sk);
1229 struct sk_buff *skb;
1230 struct sk_buff_head sar_queue;
1234 skb_queue_head_init(&sar_queue);
1235 control = L2CAP_SDU_START;
1236 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1238 return PTR_ERR(skb);
1240 __skb_queue_tail(&sar_queue, skb);
1241 len -= pi->remote_mps;
1242 size += pi->remote_mps;
1247 if (len > pi->remote_mps) {
1248 control = L2CAP_SDU_CONTINUE;
1249 buflen = pi->remote_mps;
1251 control = L2CAP_SDU_END;
1255 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1257 skb_queue_purge(&sar_queue);
1258 return PTR_ERR(skb);
1261 __skb_queue_tail(&sar_queue, skb);
1265 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1266 if (sk->sk_send_head == NULL)
1267 sk->sk_send_head = sar_queue.next;
1272 static void l2cap_chan_ready(struct sock *sk)
1274 struct sock *parent = bt_sk(sk)->parent;
1276 BT_DBG("sk %p, parent %p", sk, parent);
1278 l2cap_pi(sk)->conf_state = 0;
1279 l2cap_sock_clear_timer(sk);
1282 /* Outgoing channel.
1283 * Wake up socket sleeping on connect.
1285 sk->sk_state = BT_CONNECTED;
1286 sk->sk_state_change(sk);
1288 /* Incoming channel.
1289 * Wake up socket sleeping on accept.
1291 parent->sk_data_ready(parent, 0);
1295 /* Copy frame to all raw sockets on that connection */
1296 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1298 struct l2cap_chan_list *l = &conn->chan_list;
1299 struct sk_buff *nskb;
1302 BT_DBG("conn %p", conn);
1304 read_lock(&l->lock);
1305 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1306 if (sk->sk_type != SOCK_RAW)
1309 /* Don't send frame to the socket it came from */
1312 nskb = skb_clone(skb, GFP_ATOMIC);
1316 if (sock_queue_rcv_skb(sk, nskb))
1319 read_unlock(&l->lock);
1322 /* ---- L2CAP signalling commands ---- */
1323 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1324 u8 code, u8 ident, u16 dlen, void *data)
1326 struct sk_buff *skb, **frag;
1327 struct l2cap_cmd_hdr *cmd;
1328 struct l2cap_hdr *lh;
1331 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1332 conn, code, ident, dlen);
1334 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1335 count = min_t(unsigned int, conn->mtu, len);
1337 skb = bt_skb_alloc(count, GFP_ATOMIC);
1341 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1342 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1343 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1345 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1348 cmd->len = cpu_to_le16(dlen);
1351 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1352 memcpy(skb_put(skb, count), data, count);
1358 /* Continuation fragments (no L2CAP header) */
1359 frag = &skb_shinfo(skb)->frag_list;
1361 count = min_t(unsigned int, conn->mtu, len);
1363 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1367 memcpy(skb_put(*frag, count), data, count);
1372 frag = &(*frag)->next;
1382 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1384 struct l2cap_conf_opt *opt = *ptr;
1387 len = L2CAP_CONF_OPT_SIZE + opt->len;
1395 *val = *((u8 *) opt->val);
1399 *val = get_unaligned_le16(opt->val);
1403 *val = get_unaligned_le32(opt->val);
1407 *val = (unsigned long) opt->val;
1411 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1415 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1417 struct l2cap_conf_opt *opt = *ptr;
1419 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1426 *((u8 *) opt->val) = val;
1430 put_unaligned_le16(val, opt->val);
1434 put_unaligned_le32(val, opt->val);
1438 memcpy(opt->val, (void *) val, len);
1442 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer: time to acknowledge received frames explicitly. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1454 static inline void l2cap_ertm_init(struct sock *sk)
1456 l2cap_pi(sk)->expected_ack_seq = 0;
1457 l2cap_pi(sk)->unacked_frames = 0;
1458 l2cap_pi(sk)->buffer_seq = 0;
1459 l2cap_pi(sk)->num_acked = 0;
1460 l2cap_pi(sk)->frames_sent = 0;
1462 setup_timer(&l2cap_pi(sk)->retrans_timer,
1463 l2cap_retrans_timeout, (unsigned long) sk);
1464 setup_timer(&l2cap_pi(sk)->monitor_timer,
1465 l2cap_monitor_timeout, (unsigned long) sk);
1466 setup_timer(&l2cap_pi(sk)->ack_timer,
1467 l2cap_ack_timeout, (unsigned long) sk);
1469 __skb_queue_head_init(SREJ_QUEUE(sk));
1470 __skb_queue_head_init(BUSY_QUEUE(sk));
1472 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
1474 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1477 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1480 case L2CAP_MODE_STREAMING:
1481 case L2CAP_MODE_ERTM:
1482 if (l2cap_mode_supported(mode, remote_feat_mask))
1486 return L2CAP_MODE_BASIC;
/* Build an outgoing L2CAP Configuration Request in 'data'.
 *
 * Adds the MTU option when it differs from the default, then a
 * mode-specific RFC option (basic/ERTM/streaming) and, for the
 * reliable/streaming modes, an FCS option when FCS can be disabled.
 * Returns the total length of the request (ptr - data at the elided
 * tail).  On the first request the mode may be re-selected against the
 * remote feature mask.
 */
1490 int l2cap_build_conf_req(struct sock *sk, void *data)
1492 struct l2cap_pinfo *pi = l2cap_pi(sk);
1493 struct l2cap_conf_req *req = data;
1494 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1495 void *ptr = req->data;
1497 BT_DBG("sk %p", sk);
/* Re-negotiate the mode only on the very first config exchange. */
1499 if (pi->num_conf_req || pi->num_conf_rsp)
1503 case L2CAP_MODE_STREAMING:
1504 case L2CAP_MODE_ERTM:
/* A mode pinned by the user (STATE2_DEVICE) is not renegotiated. */
1505 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1510 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only advertise a non-default incoming MTU. */
1515 if (pi->imtu != L2CAP_DEFAULT_MTU)
1516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1519 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor streaming, the RFC option
 * is unnecessary for basic mode. */
1520 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1521 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1524 rfc.mode = L2CAP_MODE_BASIC;
1526 rfc.max_transmit = 0;
1527 rfc.retrans_timeout = 0;
1528 rfc.monitor_timeout = 0;
1529 rfc.max_pdu_size = 0;
1531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1532 (unsigned long) &rfc);
1535 case L2CAP_MODE_ERTM:
1536 rfc.mode = L2CAP_MODE_ERTM;
1537 rfc.txwin_size = pi->tx_win;
1538 rfc.max_transmit = pi->max_tx;
1539 rfc.retrans_timeout = 0;
1540 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full PDU (plus ERTM overhead, 10 bytes)
 * fits within the ACL connection MTU. */
1541 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1542 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1543 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1546 (unsigned long) &rfc);
1548 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS when we do not need it or the peer
 * already asked for no FCS. */
1551 if (pi->fcs == L2CAP_FCS_NONE ||
1552 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1553 pi->fcs = L2CAP_FCS_NONE;
1554 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1558 case L2CAP_MODE_STREAMING:
1559 rfc.mode = L2CAP_MODE_STREAMING;
1561 rfc.max_transmit = 0;
1562 rfc.retrans_timeout = 0;
1563 rfc.monitor_timeout = 0;
1564 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1565 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1566 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1568 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1569 (unsigned long) &rfc);
1571 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1574 if (pi->fcs == L2CAP_FCS_NONE ||
1575 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1576 pi->fcs = L2CAP_FCS_NONE;
1577 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1582 /* FIXME: Need actual value of the flush timeout */
1583 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1584 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1586 req->dcid = cpu_to_le16(pi->dcid);
1587 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (pi->conf_req) and
 * build our Configuration Response into 'data'.
 *
 * Walks the option list (MTU, flush timeout, QoS, RFC, FCS), records
 * unknown non-hint options as L2CAP_CONF_UNKNOWN, resolves the channel
 * mode against the peer's requested RFC mode, and on success fills in
 * the negotiated output options.  Returns the response length (elided
 * tail) or -ECONNREFUSED when the modes cannot be reconciled.
 */
1592 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1594 struct l2cap_pinfo *pi = l2cap_pi(sk);
1595 struct l2cap_conf_rsp *rsp = data;
1596 void *ptr = rsp->data;
1597 void *req = pi->conf_req;
1598 int len = pi->conf_len;
1599 int type, hint, olen;
1601 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1602 u16 mtu = L2CAP_DEFAULT_MTU;
1603 u16 result = L2CAP_CONF_SUCCESS;
1605 BT_DBG("sk %p", sk);
/* First pass: decode each option from the request buffer. */
1607 while (len >= L2CAP_CONF_OPT_SIZE) {
1608 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The top bit marks a hint: unknown hints are silently skipped,
 * unknown non-hints are reported back to the peer. */
1610 hint = type & L2CAP_CONF_HINT;
1611 type &= L2CAP_CONF_MASK;
1614 case L2CAP_CONF_MTU:
1618 case L2CAP_CONF_FLUSH_TO:
1622 case L2CAP_CONF_QOS:
1625 case L2CAP_CONF_RFC:
1626 if (olen == sizeof(rfc))
1627 memcpy(&rfc, (void *) val, olen);
1630 case L2CAP_CONF_FCS:
1631 if (val == L2CAP_FCS_NONE)
1632 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type in an UNKNOWN response. */
1640 result = L2CAP_CONF_UNKNOWN;
1641 *((u8 *) ptr++) = type;
/* Mode resolution happens only on the first exchange. */
1646 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1650 case L2CAP_MODE_STREAMING:
1651 case L2CAP_MODE_ERTM:
1652 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1653 pi->mode = l2cap_select_mode(rfc.mode,
1654 pi->conn->feat_mask);
1658 if (pi->mode != rfc.mode)
1659 return -ECONNREFUSED;
/* Mode mismatch after re-selection: answer UNACCEPT with our mode,
 * and refuse outright if this is already the second response. */
1665 if (pi->mode != rfc.mode) {
1666 result = L2CAP_CONF_UNACCEPT;
1667 rfc.mode = pi->mode;
1669 if (pi->num_conf_rsp == 1)
1670 return -ECONNREFUSED;
1672 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1673 sizeof(rfc), (unsigned long) &rfc);
1677 if (result == L2CAP_CONF_SUCCESS) {
1678 /* Configure output options and let the other side know
1679 * which ones we don't like. */
1681 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1682 result = L2CAP_CONF_UNACCEPT;
1685 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1687 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1690 case L2CAP_MODE_BASIC:
1691 pi->fcs = L2CAP_FCS_NONE;
1692 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1695 case L2CAP_MODE_ERTM:
1696 pi->remote_tx_win = rfc.txwin_size;
1697 pi->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what fits in the ACL MTU (10-byte overhead). */
1699 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1700 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1702 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() here looks like it should be
 * cpu_to_le16() for a value being placed on the wire; the two are
 * byte-identical operations, so behavior is unaffected, but the
 * sparse annotation direction is wrong — verify upstream. */
1704 rfc.retrans_timeout =
1705 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1706 rfc.monitor_timeout =
1707 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1709 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1712 sizeof(rfc), (unsigned long) &rfc);
1716 case L2CAP_MODE_STREAMING:
1717 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1718 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1720 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1722 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1724 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1725 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode requested: reject with our own (zeroed) RFC. */
1730 result = L2CAP_CONF_UNACCEPT;
1732 memset(&rfc, 0, sizeof(rfc));
1733 rfc.mode = pi->mode;
1736 if (result == L2CAP_CONF_SUCCESS)
1737 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1739 rsp->scid = cpu_to_le16(pi->dcid);
1740 rsp->result = cpu_to_le16(result);
1741 rsp->flags = cpu_to_le16(0x0000);
/* Parse a peer Configuration Response and build a follow-up
 * Configuration Request into 'data'.
 *
 * Re-proposes adjusted options (MTU clamped to the minimum, flush
 * timeout, RFC) for a new request; *result may be downgraded to
 * UNACCEPT when the peer's MTU is below L2CAP_DEFAULT_MIN_MTU.  On a
 * successful result the negotiated ERTM/streaming timeouts and MPS are
 * committed to the channel.  Returns the new request length (elided
 * tail) or -ECONNREFUSED on an irreconcilable mode.
 */
1746 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1748 struct l2cap_pinfo *pi = l2cap_pi(sk);
1749 struct l2cap_conf_req *req = data;
1750 void *ptr = req->data;
1753 struct l2cap_conf_rfc rfc;
1755 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1757 while (len >= L2CAP_CONF_OPT_SIZE) {
1758 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1761 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below spec minimum: mark unacceptable and
 * counter-propose the minimum. */
1762 if (val < L2CAP_DEFAULT_MIN_MTU) {
1763 *result = L2CAP_CONF_UNACCEPT;
1764 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1767 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1770 case L2CAP_CONF_FLUSH_TO:
1772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1776 case L2CAP_CONF_RFC:
1777 if (olen == sizeof(rfc))
1778 memcpy(&rfc, (void *)val, olen);
/* A user-pinned mode (STATE2_DEVICE) cannot be changed by the peer. */
1780 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1781 rfc.mode != pi->mode)
1782 return -ECONNREFUSED;
1786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1787 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode cannot be upgraded by the peer's RFC. */
1792 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1793 return -ECONNREFUSED;
1795 pi->mode = rfc.mode;
1797 if (*result == L2CAP_CONF_SUCCESS) {
1799 case L2CAP_MODE_ERTM:
1800 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1801 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1802 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1804 case L2CAP_MODE_STREAMING:
1805 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1809 req->dcid = cpu_to_le16(pi->dcid);
1810 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configuration Response header (no options) with the
 * given result and continuation flags, addressed to the peer's source
 * CID.  Returns the response length (elided tail). */
1815 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1817 struct l2cap_conf_rsp *rsp = data;
1818 void *ptr = rsp->data;
1820 BT_DBG("sk %p", sk);
1822 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1823 rsp->result = cpu_to_le16(result);
1824 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configuration Response and
 * commit the negotiated ERTM/streaming parameters (retransmission and
 * monitor timeouts, MPS) to the channel.  No-op for basic mode. */
1829 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1831 struct l2cap_pinfo *pi = l2cap_pi(sk);
1834 struct l2cap_conf_rfc rfc;
1836 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM and streaming channels carry meaningful RFC parameters. */
1838 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1841 while (len >= L2CAP_CONF_OPT_SIZE) {
1842 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1845 case L2CAP_CONF_RFC:
1846 if (olen == sizeof(rfc))
1847 memcpy(&rfc, (void *)val, olen);
1854 case L2CAP_MODE_ERTM:
1855 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1856 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1857 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1859 case L2CAP_MODE_STREAMING:
1860 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.
 *
 * Only reason 0x0000 ("command not understood") is acted upon: if it
 * matches an outstanding Information Request ident, treat the feature
 * exchange as done, cancel the info timer, and kick pending channel
 * setup via l2cap_conn_start().
 */
1864 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1866 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1868 if (rej->reason != 0x0000)
1871 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1872 cmd->ident == conn->info_ident) {
1873 del_timer(&conn->info_timer);
1875 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1876 conn->info_ident = 0;
1878 l2cap_conn_start(conn);
/* Handle an incoming Connection Request.
 *
 * Looks up a listening socket for the requested PSM, enforces link
 * security (except for SDP, PSM 0x0001), checks backlog and duplicate
 * DCID, allocates and initializes a child socket, and replies with a
 * Connection Response whose result/status reflect security and
 * defer-setup state.  May also kick off the Information (feature mask)
 * exchange and, on immediate success, send the first Configuration
 * Request.
 */
1884 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1886 struct l2cap_chan_list *list = &conn->chan_list;
1887 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1888 struct l2cap_conn_rsp rsp;
1889 struct sock *parent, *sk = NULL;
1890 int result, status = L2CAP_CS_NO_INFO;
1892 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1893 __le16 psm = req->psm;
1895 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1897 /* Check if we have socket listening on psm */
1898 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1900 result = L2CAP_CR_BAD_PSM;
1904 bh_lock_sock(parent);
1906 /* Check if the ACL is secure enough (if not SDP) */
1907 if (psm != cpu_to_le16(0x0001) &&
1908 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure — remembered as disconnect reason. */
1909 conn->disc_reason = 0x05;
1910 result = L2CAP_CR_SEC_BLOCK;
1914 result = L2CAP_CR_NO_MEM;
1916 /* Check for backlog size */
1917 if (sk_acceptq_is_full(parent)) {
1918 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1922 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1926 write_lock_bh(&list->lock);
1928 /* Check if we already have channel with that dcid */
1929 if (__l2cap_get_chan_by_dcid(list, scid)) {
1930 write_unlock_bh(&list->lock);
1931 sock_set_flag(sk, SOCK_ZAPPED);
1932 l2cap_sock_kill(sk);
1936 hci_conn_hold(conn->hcon);
1938 l2cap_sock_init(sk, parent);
1939 bacpy(&bt_sk(sk)->src, conn->src);
1940 bacpy(&bt_sk(sk)->dst, conn->dst);
1941 l2cap_pi(sk)->psm = psm;
1942 l2cap_pi(sk)->dcid = scid;
1944 __l2cap_chan_add(conn, sk, parent);
1945 dcid = l2cap_pi(sk)->scid;
1947 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1949 l2cap_pi(sk)->ident = cmd->ident;
/* Response depends on whether the feature exchange completed, link
 * security, and userspace defer-setup. */
1951 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1952 if (l2cap_check_security(sk)) {
1953 if (bt_sk(sk)->defer_setup) {
1954 sk->sk_state = BT_CONNECT2;
1955 result = L2CAP_CR_PEND;
1956 status = L2CAP_CS_AUTHOR_PEND;
1957 parent->sk_data_ready(parent, 0);
1959 sk->sk_state = BT_CONFIG;
1960 result = L2CAP_CR_SUCCESS;
1961 status = L2CAP_CS_NO_INFO;
1964 sk->sk_state = BT_CONNECT2;
1965 result = L2CAP_CR_PEND;
1966 status = L2CAP_CS_AUTHEN_PEND;
1969 sk->sk_state = BT_CONNECT2;
1970 result = L2CAP_CR_PEND;
1971 status = L2CAP_CS_NO_INFO;
1974 write_unlock_bh(&list->lock);
1977 bh_unlock_sock(parent);
1980 rsp.scid = cpu_to_le16(scid);
1981 rsp.dcid = cpu_to_le16(dcid);
1982 rsp.result = cpu_to_le16(result);
1983 rsp.status = cpu_to_le16(status);
1984 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask exchange now. */
1986 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1987 struct l2cap_info_req info;
1988 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1991 conn->info_ident = l2cap_get_ident(conn);
1993 mod_timer(&conn->info_timer, jiffies +
1994 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1996 l2cap_send_cmd(conn, conn->info_ident,
1997 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: proceed straight to configuration. */
2000 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2001 result == L2CAP_CR_SUCCESS) {
2003 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2004 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2005 l2cap_build_conf_req(sk, buf), buf);
2006 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Connection Response.
 *
 * Resolves the channel by SCID (or, for a 0 SCID, by command ident),
 * then: on SUCCESS moves to BT_CONFIG and sends our Configuration
 * Request; on PEND records the pending state; otherwise tears the
 * channel down (deferred via a short timer if the socket is currently
 * owned by user context).
 */
2012 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2014 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2015 u16 scid, dcid, result, status;
2019 scid = __le16_to_cpu(rsp->scid);
2020 dcid = __le16_to_cpu(rsp->dcid);
2021 result = __le16_to_cpu(rsp->result);
2022 status = __le16_to_cpu(rsp->status);
2024 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2027 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fall back to ident lookup when the response carries no SCID. */
2031 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2037 case L2CAP_CR_SUCCESS:
2038 sk->sk_state = BT_CONFIG;
2039 l2cap_pi(sk)->ident = 0;
2040 l2cap_pi(sk)->dcid = dcid;
2041 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Avoid sending a duplicate Configuration Request. */
2043 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2046 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2048 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2049 l2cap_build_conf_req(sk, req), req);
2050 l2cap_pi(sk)->num_conf_req++;
2054 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2058 /* don't delete l2cap channel if sk is owned by user */
2059 if (sock_owned_by_user(sk)) {
2060 sk->sk_state = BT_DISCONN;
2061 l2cap_sock_clear_timer(sk);
/* Retry teardown shortly (HZ/5) once the user releases the sock. */
2062 l2cap_sock_set_timer(sk, HZ / 5);
2066 l2cap_chan_del(sk, ECONNREFUSED);
/* Choose the channel's FCS setting once configuration completes:
 * basic mode never uses an FCS; ERTM/streaming use CRC16 unless both
 * sides agreed to disable it (L2CAP_CONF_NO_FCS_RECV). */
2074 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2076 /* FCS is enabled only in ERTM or streaming mode, if one or both
2079 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2080 pi->fcs = L2CAP_FCS_NONE;
2081 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2082 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request.
 *
 * Rejects requests for channels not in BT_CONFIG, accumulates
 * multi-fragment requests (continuation flag 0x0001) into the
 * per-channel conf_req buffer, then parses the complete request and
 * sends our response.  When both directions are configured the channel
 * transitions to BT_CONNECTED (with ERTM state initialized if needed);
 * otherwise our own Configuration Request is (re)sent.
 */
2085 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2087 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2093 dcid = __le16_to_cpu(req->dcid);
2094 flags = __le16_to_cpu(req->flags);
2096 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2098 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2102 if (sk->sk_state != BT_CONFIG) {
2103 struct l2cap_cmd_rej rej;
/* 0x0002: invalid CID in request. */
2105 rej.reason = cpu_to_le16(0x0002);
2106 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2111 /* Reject if config buffer is too small. */
2112 len = cmd_len - sizeof(*req);
2113 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2114 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2115 l2cap_build_conf_rsp(sk, rsp,
2116 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
2121 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2122 l2cap_pi(sk)->conf_len += len;
2124 if (flags & 0x0001) {
2125 /* Incomplete config. Send empty response. */
2126 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2127 l2cap_build_conf_rsp(sk, rsp,
2128 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2132 /* Complete config. */
2133 len = l2cap_parse_conf_req(sk, rsp);
/* Negative parse result means the modes could not be reconciled. */
2135 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2139 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2140 l2cap_pi(sk)->num_conf_rsp++;
2142 /* Reset config buffer. */
2143 l2cap_pi(sk)->conf_len = 0;
2145 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is now usable. */
2148 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2149 set_default_fcs(l2cap_pi(sk));
2151 sk->sk_state = BT_CONNECTED;
2153 l2cap_pi(sk)->next_tx_seq = 0;
2154 l2cap_pi(sk)->expected_tx_seq = 0;
2155 __skb_queue_head_init(TX_QUEUE(sk));
2156 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2157 l2cap_ertm_init(sk);
2159 l2cap_chan_ready(sk);
2163 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2165 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2166 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2167 l2cap_build_conf_req(sk, buf), buf);
2168 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response.
 *
 * On SUCCESS, commits the negotiated RFC parameters; on UNACCEPT,
 * re-parses the peer's counter-options into a new Configuration Request
 * (bounded by L2CAP_CONF_MAX_CONF_RSP rounds); any other result tears
 * the channel down.  When both directions are configured the channel
 * transitions to BT_CONNECTED.
 */
2176 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2178 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2179 u16 scid, flags, result;
2181 int len = cmd->len - sizeof(*rsp);
2183 scid = __le16_to_cpu(rsp->scid);
2184 flags = __le16_to_cpu(rsp->flags);
2185 result = __le16_to_cpu(rsp->result);
2187 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2188 scid, flags, result);
2190 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2195 case L2CAP_CONF_SUCCESS:
2196 l2cap_conf_rfc_get(sk, rsp->data, len);
2199 case L2CAP_CONF_UNACCEPT:
/* Renegotiate only while under the retry limit. */
2200 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Counter-options too large for our request buffer: give up. */
2203 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2204 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2208 /* throw out any old stored conf requests */
2209 result = L2CAP_CONF_SUCCESS;
2210 len = l2cap_parse_conf_rsp(sk, rsp->data,
2213 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2217 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2218 L2CAP_CONF_REQ, len, req);
2219 l2cap_pi(sk)->num_conf_req++;
2220 if (result != L2CAP_CONF_SUCCESS)
/* Any other result (e.g. REJECT): reset and disconnect. */
2226 sk->sk_err = ECONNRESET;
2227 l2cap_sock_set_timer(sk, HZ * 5);
2228 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2235 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2237 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2238 set_default_fcs(l2cap_pi(sk));
2240 sk->sk_state = BT_CONNECTED;
2241 l2cap_pi(sk)->next_tx_seq = 0;
2242 l2cap_pi(sk)->expected_tx_seq = 0;
2243 __skb_queue_head_init(TX_QUEUE(sk));
2244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2245 l2cap_ertm_init(sk);
2247 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and delete the channel
 * (deferred via a short timer if user context owns the socket). */
2255 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2257 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2258 struct l2cap_disconn_rsp rsp;
2262 scid = __le16_to_cpu(req->scid);
2263 dcid = __le16_to_cpu(req->dcid);
2265 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2267 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2271 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2272 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2273 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2275 sk->sk_shutdown = SHUTDOWN_MASK;
2277 /* don't delete l2cap channel if sk is owned by user */
2278 if (sock_owned_by_user(sk)) {
2279 sk->sk_state = BT_DISCONN;
2280 l2cap_sock_clear_timer(sk);
2281 l2cap_sock_set_timer(sk, HZ / 5);
2286 l2cap_chan_del(sk, ECONNRESET);
2289 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response to our own request:
 * delete the channel with no error (deferred via a short timer if user
 * context owns the socket). */
2293 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2295 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2299 scid = __le16_to_cpu(rsp->scid);
2300 dcid = __le16_to_cpu(rsp->dcid);
2302 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2304 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2308 /* don't delete l2cap channel if sk is owned by user */
2309 if (sock_owned_by_user(sk)) {
2310 sk->sk_state = BT_DISCONN;
2311 l2cap_sock_clear_timer(sk);
2312 l2cap_sock_set_timer(sk, HZ / 5);
2317 l2cap_chan_del(sk, 0);
2320 l2cap_sock_kill(sk);
/* Handle an incoming Information Request.
 *
 * Answers the feature-mask query with our feature mask (plus
 * ERTM/streaming bits) and the fixed-channels query with the
 * l2cap_fixed_chan table; any other type gets IR_NOTSUPP.
 */
2324 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2326 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2329 type = __le16_to_cpu(req->type);
2331 BT_DBG("type 0x%4.4x", type);
2333 if (type == L2CAP_IT_FEAT_MASK) {
2335 u32 feat_mask = l2cap_feat_mask;
2336 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2337 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2338 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2340 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2342 put_unaligned_le32(feat_mask, rsp->data);
2343 l2cap_send_cmd(conn, cmd->ident,
2344 L2CAP_INFO_RSP, sizeof(buf), buf);
2345 } else if (type == L2CAP_IT_FIXED_CHAN) {
2347 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2348 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2349 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap (8 bytes) follows the 4-byte rsp header. */
2350 memcpy(buf + 4, l2cap_fixed_chan, 8);
2351 l2cap_send_cmd(conn, cmd->ident,
2352 L2CAP_INFO_RSP, sizeof(buf), buf);
2354 struct l2cap_info_rsp rsp;
2355 rsp.type = cpu_to_le16(type);
2356 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2357 l2cap_send_cmd(conn, cmd->ident,
2358 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.
 *
 * Stores the remote feature mask; if the peer advertises fixed-channel
 * support, chains a fixed-channels query, otherwise (and after the
 * fixed-channels answer) marks the exchange done and kicks pending
 * channel setup via l2cap_conn_start().
 */
2364 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2366 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2369 type = __le16_to_cpu(rsp->type);
2370 result = __le16_to_cpu(rsp->result);
2372 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2374 del_timer(&conn->info_timer);
/* A failed query still completes the exchange. */
2376 if (result != L2CAP_IR_SUCCESS) {
2377 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2378 conn->info_ident = 0;
2380 l2cap_conn_start(conn);
2385 if (type == L2CAP_IT_FEAT_MASK) {
2386 conn->feat_mask = get_unaligned_le32(rsp->data);
2388 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2389 struct l2cap_info_req req;
2390 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2392 conn->info_ident = l2cap_get_ident(conn);
2394 l2cap_send_cmd(conn, conn->info_ident,
2395 L2CAP_INFO_REQ, sizeof(req), &req);
2397 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2398 conn->info_ident = 0;
2400 l2cap_conn_start(conn);
2402 } else if (type == L2CAP_IT_FIXED_CHAN) {
2403 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2404 conn->info_ident = 0;
2406 l2cap_conn_start(conn);
/* Dispatch all signaling commands contained in one skb on the L2CAP
 * signaling channel.
 *
 * Each command header is copied out, validated (length within the
 * remaining payload, non-zero ident) and routed to the matching
 * handler; a handler error or an unknown opcode is answered with a
 * Command Reject.  The raw frame is also delivered to raw sockets
 * first via l2cap_raw_recv().
 */
2412 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2414 u8 *data = skb->data;
2416 struct l2cap_cmd_hdr cmd;
2419 l2cap_raw_recv(conn, skb);
2421 while (len >= L2CAP_CMD_HDR_SIZE) {
2423 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2424 data += L2CAP_CMD_HDR_SIZE;
2425 len -= L2CAP_CMD_HDR_SIZE;
2427 cmd_len = le16_to_cpu(cmd.len);
2429 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
2431 if (cmd_len > len || !cmd.ident) {
2432 BT_DBG("corrupted command");
2437 case L2CAP_COMMAND_REJ:
2438 l2cap_command_rej(conn, &cmd, data);
2441 case L2CAP_CONN_REQ:
2442 err = l2cap_connect_req(conn, &cmd, data);
2445 case L2CAP_CONN_RSP:
2446 err = l2cap_connect_rsp(conn, &cmd, data);
2449 case L2CAP_CONF_REQ:
2450 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2453 case L2CAP_CONF_RSP:
2454 err = l2cap_config_rsp(conn, &cmd, data);
2457 case L2CAP_DISCONN_REQ:
2458 err = l2cap_disconnect_req(conn, &cmd, data);
2461 case L2CAP_DISCONN_RSP:
2462 err = l2cap_disconnect_rsp(conn, &cmd, data);
2465 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back to the sender. */
2466 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2469 case L2CAP_ECHO_RSP:
2472 case L2CAP_INFO_REQ:
2473 err = l2cap_information_req(conn, &cmd, data);
2476 case L2CAP_INFO_RSP:
2477 err = l2cap_information_rsp(conn, &cmd, data);
2481 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* On handler failure, reject the offending command. */
2487 struct l2cap_cmd_rej rej;
2488 BT_DBG("error %d", err);
2490 /* FIXME: Map err to a valid reason */
2491 rej.reason = cpu_to_le16(0);
2492 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 *
 * Trims the 2 FCS bytes off the skb, reads them from just past the new
 * end, and recomputes the CRC over the full frame including the L2CAP
 * header (hdr_size = header + 2-byte control field) that sits before
 * skb->data.  Returns nonzero on mismatch (elided tail); a no-op when
 * FCS is disabled.
 */
2502 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2504 u16 our_fcs, rcv_fcs;
2505 int hdr_size = L2CAP_HDR_SIZE + 2;
2507 if (pi->fcs == L2CAP_FCS_CRC16) {
2508 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
2509 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2510 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2512 if (our_fcs != rcv_fcs)
/* Acknowledge the peer after exiting WAIT_F: send RNR if we are
 * locally busy, otherwise (re)transmit pending I-frames; if nothing was
 * sent at all, fall back to an RR S-frame so the peer still gets an
 * acknowledgment for buffer_seq. */
2518 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2520 struct l2cap_pinfo *pi = l2cap_pi(sk);
2523 pi->frames_sent = 0;
2525 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2527 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2528 control |= L2CAP_SUPER_RCV_NOT_READY;
2529 l2cap_send_sframe(pi, control);
2530 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2533 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2534 l2cap_retransmit_frames(sk);
2536 l2cap_ertm_send(sk);
/* No I-frames went out and we are not busy: ack with a plain RR. */
2538 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2539 pi->frames_sent == 0) {
2540 control |= L2CAP_SUPER_RCV_READY;
2541 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, kept ordered
 * by tx_seq distance from buffer_seq (modulo-64 sequence space).
 *
 * Tags the skb with its tx_seq and SAR bits, then walks the queue to
 * find the insertion point; a frame whose tx_seq already exists is a
 * duplicate (elided return path).  An empty queue takes the frame
 * directly at the tail.
 */
2545 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2547 struct sk_buff *next_skb;
2548 struct l2cap_pinfo *pi = l2cap_pi(sk);
2549 int tx_seq_offset, next_tx_seq_offset;
2551 bt_cb(skb)->tx_seq = tx_seq;
2552 bt_cb(skb)->sar = sar;
2554 next_skb = skb_peek(SREJ_QUEUE(sk));
2556 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalize the modulo-64 distance into [0, 63]. */
2560 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2561 if (tx_seq_offset < 0)
2562 tx_seq_offset += 64;
2565 if (bt_cb(next_skb)->tx_seq == tx_seq)
2568 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2569 pi->buffer_seq) % 64;
2570 if (next_tx_seq_offset < 0)
2571 next_tx_seq_offset += 64;
/* First queued frame that sorts after us: insert before it. */
2573 if (next_tx_seq_offset > tx_seq_offset) {
2574 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2578 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2581 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2583 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble one ERTM I-frame into the current SDU according to its
 * SAR (segmentation and reassembly) bits.
 *
 * UNSEGMENTED frames are queued to the socket directly; START frames
 * allocate pi->sdu sized from the 2-byte SDU length prefix; CONTINUE
 * and END frames append to it, with length checks against sdu_len and
 * imtu.  On END the completed SDU is cloned and queued; a full receive
 * queue sets SAR_RETRY so the same END frame can be retried from local
 * busy handling without re-appending data.  Protocol violations
 * disconnect the channel (elided error labels).
 */
2588 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2590 struct l2cap_pinfo *pi = l2cap_pi(sk);
2591 struct sk_buff *_skb;
2594 switch (control & L2CAP_CTRL_SAR) {
2595 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of reassembly is invalid. */
2596 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2599 err = sock_queue_rcv_skb(sk, skb);
2605 case L2CAP_SDU_START:
2606 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2609 pi->sdu_len = get_unaligned_le16(skb->data);
2611 if (pi->sdu_len > pi->imtu)
2614 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2618 /* pull sdu_len bytes only after alloc, because of Local Busy
2619 * condition we have to be sure that this will be executed
2620 * only once, i.e., when alloc does not fail */
2623 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2625 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2626 pi->partial_sdu_len = skb->len;
2629 case L2CAP_SDU_CONTINUE:
2630 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2636 pi->partial_sdu_len += skb->len;
2637 if (pi->partial_sdu_len > pi->sdu_len)
2640 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* (END case:) an END frame requires reassembly in progress. */
2645 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the copy when this is a retry of a previously-failed END. */
2651 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2652 pi->partial_sdu_len += skb->len;
2654 if (pi->partial_sdu_len > pi->imtu)
2657 if (pi->partial_sdu_len != pi->sdu_len)
2660 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2663 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2665 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2669 err = sock_queue_rcv_skb(sk, _skb);
2672 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2676 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2677 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* (Elided error label:) protocol violation tears the channel down. */
2691 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Try to drain the local-busy queue into the socket.
 *
 * Dequeues buffered frames and feeds them through ERTM reassembly; on
 * the first failure the frame is requeued at the head and the caller
 * must retry later (elided error return).  Once the queue is empty,
 * exits the local-busy condition: if we had sent RNR, poll the peer
 * with an RR+P S-frame and enter WAIT_F under the monitor timer.
 */
2696 static int l2cap_try_push_rx_skb(struct sock *sk)
2698 struct l2cap_pinfo *pi = l2cap_pi(sk);
2699 struct sk_buff *skb;
2703 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2704 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2705 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still busy: put the frame back and bail out. */
2707 skb_queue_head(BUSY_QUEUE(sk), skb);
2711 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2714 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier; poll it to resume. */
2717 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2718 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2719 l2cap_send_sframe(pi, control);
2720 l2cap_pi(sk)->retry_count = 1;
2722 del_timer(&pi->retrans_timer);
2723 __mod_monitor_timer();
2725 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2728 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2729 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2731 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition.
 *
 * Sleeps on the socket wait queue, periodically retrying
 * l2cap_try_push_rx_skb() until the busy queue drains; gives up and
 * disconnects after L2CAP_LOCAL_BUSY_TRIES attempts, on a pending
 * signal, or on a socket error.  Runs in process context, so it may
 * schedule between retries.
 */
2736 static void l2cap_busy_work(struct work_struct *work)
2738 DECLARE_WAITQUEUE(wait, current);
2739 struct l2cap_pinfo *pi =
2740 container_of(work, struct l2cap_pinfo, busy_work);
/* NOTE(review): relies on struct l2cap_pinfo embedding the sock at
 * offset 0 — confirm against the struct layout in l2cap.h. */
2741 struct sock *sk = (struct sock *)pi;
2742 int n_tries = 0, timeo = HZ/5, err;
2743 struct sk_buff *skb;
2747 add_wait_queue(sk_sleep(sk), &wait);
2748 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
2749 set_current_state(TASK_INTERRUPTIBLE);
2751 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2753 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
2760 if (signal_pending(current)) {
2761 err = sock_intr_errno(timeo);
2766 timeo = schedule_timeout(timeo);
2769 err = sock_error(sk);
2773 if (l2cap_try_push_rx_skb(sk) == 0)
2777 set_current_state(TASK_RUNNING);
2778 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver one in-sequence I-frame to the socket, handling entry into
 * the local-busy condition.
 *
 * While already busy, the frame just joins the busy queue and a drain
 * is attempted.  Otherwise it goes through reassembly; if the receive
 * queue is full (-ENOMEM, elided branch), enter local busy: queue the
 * frame, send RNR, cancel the ack timer, and hand recovery to the
 * busy workqueue.
 */
2783 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
2785 struct l2cap_pinfo *pi = l2cap_pi(sk);
2788 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2789 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2790 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2791 return l2cap_try_push_rx_skb(sk);
2796 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
2798 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2802 /* Busy Condition */
2803 BT_DBG("sk %p, Enter local busy", sk);
2805 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2806 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2807 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending until we recover. */
2809 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2810 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
2811 l2cap_send_sframe(pi, sctrl);
2813 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2815 del_timer(&pi->ack_timer);
2817 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble a streaming-mode I-frame into the current SDU by its SAR
 * bits.
 *
 * Unlike the ERTM variant, streaming mode has no retransmission: on a
 * SAR protocol violation the partial SDU is simply dropped (elided
 * branches) rather than the channel disconnected, and there is no
 * retry state.  END frames whose accumulated length matches sdu_len
 * are cloned and queued to the socket.
 */
2822 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2824 struct l2cap_pinfo *pi = l2cap_pi(sk);
2825 struct sk_buff *_skb;
2829 * TODO: We have to notify the userland if some data is lost with the
2833 switch (control & L2CAP_CTRL_SAR) {
2834 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any half-built SDU when an unsegmented frame arrives. */
2835 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2840 err = sock_queue_rcv_skb(sk, skb);
2846 case L2CAP_SDU_START:
2847 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2852 pi->sdu_len = get_unaligned_le16(skb->data);
2855 if (pi->sdu_len > pi->imtu) {
2860 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2866 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2868 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2869 pi->partial_sdu_len = skb->len;
2873 case L2CAP_SDU_CONTINUE:
2874 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2877 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2879 pi->partial_sdu_len += skb->len;
2880 if (pi->partial_sdu_len > pi->sdu_len)
/* (END case:) finish the SDU and queue it if sizes agree. */
2888 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2891 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2893 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2894 pi->partial_sdu_len += skb->len;
2896 if (pi->partial_sdu_len > pi->imtu)
2899 if (pi->partial_sdu_len == pi->sdu_len) {
2900 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2901 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame was recovered, deliver any consecutive frames
 * now available at the head of the SREJ queue, advancing both
 * buffer_seq_srej and the expected tx_seq (modulo 64) until the next
 * gap. */
2916 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
2918 struct sk_buff *skb;
2921 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first frame that is not the next expected one. */
2922 if (bt_cb(skb)->tx_seq != tx_seq)
2925 skb = skb_dequeue(SREJ_QUEUE(sk));
2926 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2927 l2cap_ertm_reassembly_sdu(sk, skb, control);
2928 l2cap_pi(sk)->buffer_seq_srej =
2929 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
2930 tx_seq = (tx_seq + 1) % 64;
/* Walk the pending SREJ list up to (and including) tx_seq: the matched
 * entry is removed (frame recovered), while entries before it get
 * their SREJ S-frame re-sent and are cycled to the tail of the list. */
2934 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
2936 struct l2cap_pinfo *pi = l2cap_pi(sk);
2937 struct srej_list *l, *tmp;
2940 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
2941 if (l->tx_seq == tx_seq) {
2946 control = L2CAP_SUPER_SELECT_REJECT;
2947 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2948 l2cap_send_sframe(pi, control);
/* Keep the entry pending by re-queuing it at the tail. */
2950 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send a Selective Reject (SREJ) S-frame for every sequence number
 * between expected_tx_seq and the received tx_seq, recording each
 * missing frame in the SREJ list, then step expected_tx_seq past
 * tx_seq itself. */
2954 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
2956 struct l2cap_pinfo *pi = l2cap_pi(sk);
2957 struct srej_list *new;
2960 while (tx_seq != pi->expected_tx_seq) {
2961 control = L2CAP_SUPER_SELECT_REJECT;
2962 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2963 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check on the next line — under memory pressure this would
 * oops; upstream later reworked this path. Verify and fix. */
2965 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
2966 new->tx_seq = pi->expected_tx_seq;
2967 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
2968 list_add_tail(&new->list, SREJ_LIST(sk));
2970 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Process one received ERTM I-frame.
 *
 * Handles the F-bit (exit WAIT_F, cancel monitor timer), acks the
 * peer's reqseq, validates tx_seq against the receive window, and then
 * either: delivers the in-sequence frame (entering/continuing local
 * busy as needed), stores an out-of-sequence frame and drives SREJ
 * recovery (entering SREJ_SENT on the first gap), or drops duplicates.
 * Piggybacked F-bit retransmission and periodic acking (every
 * tx_win/6 + 1 frames) are handled at the tail.
 */
2973 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
2975 struct l2cap_pinfo *pi = l2cap_pi(sk);
2976 u8 tx_seq = __get_txseq(rx_control);
2977 u8 req_seq = __get_reqseq(rx_control);
2978 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
2979 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the window to bound ack traffic. */
2980 int num_to_ack = (pi->tx_win/6) + 1;
2983 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit set while waiting for it: leave WAIT_F state. */
2986 if (L2CAP_CTRL_FINAL & rx_control &&
2987 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
2988 del_timer(&pi->monitor_timer);
2989 if (pi->unacked_frames > 0)
2990 __mod_retrans_timer();
2991 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
2994 pi->expected_ack_seq = req_seq;
2995 l2cap_drop_acked_frames(sk);
2997 if (tx_seq == pi->expected_tx_seq)
3000 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3001 if (tx_seq_offset < 0)
3002 tx_seq_offset += 64;
3004 /* invalid tx_seq */
3005 if (tx_seq_offset >= pi->tx_win) {
3006 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against one
 * flag; elsewhere this flag is tested with '&' — confirm whether a
 * bitwise test was intended here. */
3010 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3013 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3014 struct srej_list *first;
3016 first = list_first_entry(SREJ_LIST(sk),
3017 struct srej_list, list);
/* The frame we were missing first has arrived: close the gap. */
3018 if (tx_seq == first->tx_seq) {
3019 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3020 l2cap_check_srej_gap(sk, tx_seq);
3022 list_del(&first->list);
3025 if (list_empty(SREJ_LIST(sk))) {
3026 pi->buffer_seq = pi->buffer_seq_srej;
3027 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3029 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3032 struct srej_list *l;
3034 /* duplicated tx_seq */
3035 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3038 list_for_each_entry(l, SREJ_LIST(sk), list) {
3039 if (l->tx_seq == tx_seq) {
3040 l2cap_resend_srejframe(sk, tx_seq);
3044 l2cap_send_srejframe(sk, tx_seq);
3047 expected_tx_seq_offset =
3048 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3049 if (expected_tx_seq_offset < 0)
3050 expected_tx_seq_offset += 64;
3052 /* duplicated tx_seq */
3053 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3056 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3058 BT_DBG("sk %p, Enter SREJ", sk);
3060 INIT_LIST_HEAD(SREJ_LIST(sk));
3061 pi->buffer_seq_srej = pi->buffer_seq;
3063 __skb_queue_head_init(SREJ_QUEUE(sk));
3064 __skb_queue_head_init(BUSY_QUEUE(sk));
3065 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3067 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3069 l2cap_send_srejframe(sk, tx_seq);
3071 del_timer(&pi->ack_timer);
/* (Expected-seq path:) deliver the in-sequence frame. */
3076 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3078 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3079 bt_cb(skb)->tx_seq = tx_seq;
3080 bt_cb(skb)->sar = sar;
3081 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3085 err = l2cap_push_rx_skb(sk, skb, rx_control);
3089 if (rx_control & L2CAP_CTRL_FINAL) {
3090 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3091 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3093 l2cap_retransmit_frames(sk);
3098 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3099 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver Ready (RR) S-frame: take the acknowledgement from
 * req_seq, then react according to the poll (P) or final (F) bit.
 */
3109 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3111 struct l2cap_pinfo *pi = l2cap_pi(sk);
3113 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3116 pi->expected_ack_seq = __get_reqseq(rx_control);
3117 l2cap_drop_acked_frames(sk);
/* P-bit: the peer polls us, the answer must carry the F-bit. */
3119 if (rx_control & L2CAP_CTRL_POLL) {
3120 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3121 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3122 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3123 (pi->unacked_frames > 0))
3124 __mod_retrans_timer();
3126 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* While in SREJ recovery, answer the poll with a tail SREJ. */
3127 l2cap_send_srejtail(sk);
3129 l2cap_send_i_or_rr_or_rnr(sk);
/* F-bit: response to our earlier poll. */
3132 } else if (rx_control & L2CAP_CTRL_FINAL) {
3133 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3135 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3136 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3138 l2cap_retransmit_frames(sk);
/* Plain RR: restart the retransmission timer if frames are pending. */
3141 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3142 (pi->unacked_frames > 0))
3143 __mod_retrans_timer();
3145 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3146 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3149 l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) S-frame: the peer rejects everything from
 * req_seq onwards — drop the newly acknowledged frames and retransmit
 * the remainder.
 */
3153 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3155 struct l2cap_pinfo *pi = l2cap_pi(sk);
3156 u8 tx_seq = __get_reqseq(rx_control);
3158 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3160 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3162 pi->expected_ack_seq = tx_seq;
3163 l2cap_drop_acked_frames(sk);
/* With F-bit: a pending REJ action is cleared instead of resending. */
3165 if (rx_control & L2CAP_CTRL_FINAL) {
3166 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3167 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3169 l2cap_retransmit_frames(sk);
3171 l2cap_retransmit_frames(sk);
/* Record that a REJ arrived while we are waiting for an F-bit. */
3173 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3174 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame with sequence number req_seq, honouring the P and F bits.
 */
3177 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3179 struct l2cap_pinfo *pi = l2cap_pi(sk);
3180 u8 tx_seq = __get_reqseq(rx_control);
3182 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3184 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: ack frames below tx_seq, resend it with the F-bit. */
3186 if (rx_control & L2CAP_CTRL_POLL) {
3187 pi->expected_ack_seq = tx_seq;
3188 l2cap_drop_acked_frames(sk);
3190 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3191 l2cap_retransmit_one_frame(sk, tx_seq);
3193 l2cap_ertm_send(sk);
3195 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3196 pi->srej_save_reqseq = tx_seq;
3197 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: skip the resend if it answers our saved SREJ. */
3199 } else if (rx_control & L2CAP_CTRL_FINAL) {
3200 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3201 pi->srej_save_reqseq == tx_seq)
3202 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3204 l2cap_retransmit_one_frame(sk, tx_seq);
/* Plain SREJ: retransmit the requested frame. */
3206 l2cap_retransmit_one_frame(sk, tx_seq);
3207 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3208 pi->srej_save_reqseq = tx_seq;
3209 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer as busy,
 * take the piggybacked acknowledgement, and answer a poll.
 */
3214 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3216 struct l2cap_pinfo *pi = l2cap_pi(sk);
3217 u8 tx_seq = __get_reqseq(rx_control);
3219 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
3221 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3222 pi->expected_ack_seq = tx_seq;
3223 l2cap_drop_acked_frames(sk);
3225 if (rx_control & L2CAP_CTRL_POLL)
3226 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Not in SREJ recovery: stop retransmissions and answer the poll. */
3228 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3229 del_timer(&pi->retrans_timer);
3230 if (rx_control & L2CAP_CTRL_POLL)
3231 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
/* In SREJ recovery: keep asking for the missing frames. */
3235 if (rx_control & L2CAP_CTRL_POLL)
3236 l2cap_send_srejtail(sk);
3238 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Demultiplex an incoming supervisory (S) frame to the RR/REJ/SREJ/RNR
 * handler, after first processing an F-bit for the WAIT_F state.
 */
3241 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3243 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit while in WAIT_F: the poll was answered; stop the monitor. */
3245 if (L2CAP_CTRL_FINAL & rx_control &&
3246 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3247 del_timer(&l2cap_pi(sk)->monitor_timer);
3248 if (l2cap_pi(sk)->unacked_frames > 0)
3249 __mod_retrans_timer();
3250 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Dispatch on the supervisory function bits. */
3253 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3254 case L2CAP_SUPER_RCV_READY:
3255 l2cap_data_channel_rrframe(sk, rx_control);
3258 case L2CAP_SUPER_REJECT:
3259 l2cap_data_channel_rejframe(sk, rx_control);
3262 case L2CAP_SUPER_SELECT_REJECT:
3263 l2cap_data_channel_srejframe(sk, rx_control);
3266 case L2CAP_SUPER_RCV_NOT_READY:
3267 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Entry point for a frame received on an ERTM channel: verify the FCS,
 * sanity-check the payload length and the piggybacked req_seq, then
 * dispatch to the I-frame or S-frame handler.
 */
3275 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3277 struct l2cap_pinfo *pi = l2cap_pi(sk);
3280 int len, next_tx_seq_offset, req_seq_offset;
3282 control = get_unaligned_le16(skb->data);
3287 * We can just drop the corrupted I-frame here.
3288 * Receiver will miss it and start proper recovery
3289 * procedures and ask retransmission.
3291 if (l2cap_check_fcs(pi, skb))
3294 if (__is_sar_start(control) && __is_iframe(control))
3297 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload larger than the negotiated MPS: fatal, reset the channel. */
3300 if (len > pi->mps) {
3301 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3305 req_seq = __get_reqseq(control);
/* Offsets computed modulo 64 relative to the last acknowledged frame. */
3306 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3307 if (req_seq_offset < 0)
3308 req_seq_offset += 64;
3310 next_tx_seq_offset =
3311 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3312 if (next_tx_seq_offset < 0)
3313 next_tx_seq_offset += 64;
3315 /* check for invalid req-seq */
/* req_seq must not acknowledge frames we never sent. */
3316 if (req_seq_offset > next_tx_seq_offset) {
3317 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3321 if (__is_iframe(control)) {
3323 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3327 l2cap_data_channel_iframe(sk, control, skb);
3331 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3335 l2cap_data_channel_sframe(sk, control, skb);
/*
 * Deliver an incoming data frame to the channel identified by cid,
 * handling it according to the channel mode (basic, ERTM, streaming).
 */
3345 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3348 struct l2cap_pinfo *pi;
3353 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3355 BT_DBG("unknown cid 0x%4.4x", cid);
3361 BT_DBG("sk %p, len %d", sk, skb->len);
3363 if (sk->sk_state != BT_CONNECTED)
3367 case L2CAP_MODE_BASIC:
3368 /* If socket recv buffers overflows we drop data here
3369 * which is *bad* because L2CAP has to be reliable.
3370 * But we don't have any other choice. L2CAP doesn't
3371 * provide flow control mechanism. */
/* Frames larger than the negotiated incoming MTU are dropped. */
3373 if (pi->imtu < skb->len)
3376 if (!sock_queue_rcv_skb(sk, skb))
3380 case L2CAP_MODE_ERTM:
/* Process directly unless user context holds the socket lock. */
3381 if (!sock_owned_by_user(sk)) {
3382 l2cap_ertm_data_rcv(sk, skb);
3384 if (sk_add_backlog(sk, skb))
3390 case L2CAP_MODE_STREAMING:
3391 control = get_unaligned_le16(skb->data);
3395 if (l2cap_check_fcs(pi, skb))
3398 if (__is_sar_start(control))
3401 if (pi->fcs == L2CAP_FCS_CRC16)
3404 if (len > pi->mps || len < 0 || __is_sframe(control))
3407 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: just resync expected_tx_seq. */
3409 if (pi->expected_tx_seq == tx_seq)
3410 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3412 pi->expected_tx_seq = (tx_seq + 1) % 64;
3414 l2cap_streaming_reassembly_sdu(sk, skb, control);
3419 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless-channel frame to the socket bound to the
 * given PSM on this adapter, enforcing the incoming MTU.
 */
3433 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3437 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3443 BT_DBG("sk %p, len %d", sk, skb->len);
3445 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
/* Oversized frames are dropped. */
3448 if (l2cap_pi(sk)->imtu < skb->len)
3451 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Parse the Basic L2CAP header of a complete frame and dispatch by
 * channel id: signalling, connectionless, or a data channel.
 */
3463 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3465 struct l2cap_hdr *lh = (void *) skb->data;
3469 skb_pull(skb, L2CAP_HDR_SIZE);
3470 cid = __le16_to_cpu(lh->cid);
3471 len = __le16_to_cpu(lh->len);
/* The header length must match what actually arrived. */
3473 if (len != skb->len) {
3478 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3481 case L2CAP_CID_SIGNALING:
3482 l2cap_sig_channel(conn, skb);
3485 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two bytes. */
3486 psm = get_unaligned_le16(skb->data);
3488 l2cap_conless_channel(conn, psm, skb);
3492 l2cap_data_channel(conn, cid, skb);
3497 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening L2CAP sockets bound to this adapter's address or
 * to any address, and return the combined link-mode flags.
 */
3499 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3501 int exact = 0, lm1 = 0, lm2 = 0;
3502 register struct sock *sk;
3503 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
3505 if (type != ACL_LINK)
3508 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3510 /* Find listening sockets and check their link_mode */
3511 read_lock(&l2cap_sk_list.lock);
3512 sk_for_each(sk, node, &l2cap_sk_list.head) {
3513 if (sk->sk_state != BT_LISTEN)
/* Socket bound exactly to this adapter's address. */
3516 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3517 lm1 |= HCI_LM_ACCEPT;
3518 if (l2cap_pi(sk)->role_switch)
3519 lm1 |= HCI_LM_MASTER;
/* Wildcard-bound socket: used only when no exact match exists. */
3521 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3522 lm2 |= HCI_LM_ACCEPT;
3523 if (l2cap_pi(sk)->role_switch)
3524 lm2 |= HCI_LM_MASTER;
3527 read_unlock(&l2cap_sk_list.lock);
3529 return exact ? lm1 : lm2;
/*
 * HCI callback: the ACL/LE link came up or failed; create the L2CAP
 * connection state on success, tear it down on failure.
 */
3532 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3534 struct l2cap_conn *conn;
3536 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3538 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3542 conn = l2cap_conn_add(hcon, status);
3544 l2cap_conn_ready(conn);
/* Link setup failed: drop L2CAP state with the mapped errno. */
3546 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: return the disconnect reason recorded for this ACL
 * link's L2CAP connection.
 */
3551 static int l2cap_disconn_ind(struct hci_conn *hcon)
3553 struct l2cap_conn *conn = hcon->l2cap_data;
3555 BT_DBG("hcon %p", hcon);
3557 if (hcon->type != ACL_LINK || !conn)
3560 return conn->disc_reason;
/* HCI callback: the link went down; tear down all L2CAP state on it. */
3563 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3565 BT_DBG("hcon %p reason %d", hcon, reason);
3567 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3570 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connection-oriented socket: give
 * MEDIUM-security channels a grace timer when encryption drops, close
 * HIGH-security channels outright.
 */
3575 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3577 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3580 if (encrypt == 0x00) {
3581 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Encryption lost: allow 5 seconds for it to be re-established. */
3582 l2cap_sock_clear_timer(sk);
3583 l2cap_sock_set_timer(sk, HZ * 5);
3584 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3585 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)enabled: cancel the grace timer. */
3587 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3588 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: a security (authentication/encryption) procedure
 * finished on this link.  Walk every channel on the connection and
 * advance its state machine: established channels get an encryption
 * check, connecting channels send or answer the L2CAP Connect Request.
 */
3592 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3594 struct l2cap_chan_list *l;
3595 struct l2cap_conn *conn = hcon->l2cap_data;
3601 l = &conn->chan_list;
3603 BT_DBG("conn %p", conn);
3605 read_lock(&l->lock);
/* Iterate the per-connection channel list. */
3607 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3610 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3615 if (!status && (sk->sk_state == BT_CONNECTED ||
3616 sk->sk_state == BT_CONFIG)) {
3617 l2cap_check_encryption(sk, encrypt);
/* Outgoing channel was waiting for security: send Connect Request. */
3622 if (sk->sk_state == BT_CONNECT) {
3624 struct l2cap_conn_req req;
3625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3626 req.psm = l2cap_pi(sk)->psm;
3628 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3629 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3631 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3632 L2CAP_CONN_REQ, sizeof(req), &req);
3634 l2cap_sock_clear_timer(sk);
3635 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel held for security: answer the Connect Request. */
3637 } else if (sk->sk_state == BT_CONNECT2) {
3638 struct l2cap_conn_rsp rsp;
3642 sk->sk_state = BT_CONFIG;
3643 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse the connection and schedule teardown. */
3645 sk->sk_state = BT_DISCONN;
3646 l2cap_sock_set_timer(sk, HZ / 10);
3647 result = L2CAP_CR_SEC_BLOCK;
3650 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3651 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3652 rsp.result = cpu_to_le16(result);
3653 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3654 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3655 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3661 read_unlock(&l->lock);
/*
 * HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (ACL_CONT clear) carries the Basic header, from
 * which the total frame length is taken; continuation fragments are
 * appended to conn->rx_skb until conn->rx_len reaches zero.
 */
3666 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3668 struct l2cap_conn *conn = hcon->l2cap_data;
3671 conn = l2cap_conn_add(hcon, 0);
3676 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* --- Start fragment path --- */
3678 if (!(flags & ACL_CONT)) {
3679 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress: drop old state. */
3685 BT_ERR("Unexpected start frame (len %d)", skb->len);
3686 kfree_skb(conn->rx_skb);
3687 conn->rx_skb = NULL;
3689 l2cap_conn_unreliable(conn, ECOMM);
3692 /* Start fragment always begin with Basic L2CAP header */
3693 if (skb->len < L2CAP_HDR_SIZE) {
3694 BT_ERR("Frame is too short (len %d)", skb->len);
3695 l2cap_conn_unreliable(conn, ECOMM);
3699 hdr = (struct l2cap_hdr *) skb->data;
3700 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3701 cid = __le16_to_cpu(hdr->cid);
3703 if (len == skb->len) {
3704 /* Complete frame received */
3705 l2cap_recv_frame(conn, skb);
3709 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3711 if (skb->len > len) {
3712 BT_ERR("Frame is too long (len %d, expected len %d)",
3714 l2cap_conn_unreliable(conn, ECOMM);
/* Reject frames that would exceed the target channel's receive MTU. */
3718 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3720 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
3721 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3722 len, l2cap_pi(sk)->imtu);
3724 l2cap_conn_unreliable(conn, ECOMM);
3731 /* Allocate skb for the complete frame (with header) */
3732 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3736 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3738 conn->rx_len = len - skb->len;
/* --- Continuation fragment path --- */
3740 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3742 if (!conn->rx_len) {
3743 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3744 l2cap_conn_unreliable(conn, ECOMM);
3748 if (skb->len > conn->rx_len) {
3749 BT_ERR("Fragment is too long (len %d, expected %d)",
3750 skb->len, conn->rx_len);
3751 kfree_skb(conn->rx_skb);
3752 conn->rx_skb = NULL;
3754 l2cap_conn_unreliable(conn, ECOMM);
3758 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3760 conn->rx_len -= skb->len;
3762 if (!conn->rx_len) {
3763 /* Complete frame received */
3764 l2cap_recv_frame(conn, conn->rx_skb);
3765 conn->rx_skb = NULL;
/* seq_file show: dump one line of state per L2CAP socket. */
3774 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3777 struct hlist_node *node;
3779 read_lock_bh(&l2cap_sk_list.lock);
3781 sk_for_each(sk, node, &l2cap_sk_list.head) {
3782 struct l2cap_pinfo *pi = l2cap_pi(sk);
3784 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3785 batostr(&bt_sk(sk)->src),
3786 batostr(&bt_sk(sk)->dst),
3787 sk->sk_state, __le16_to_cpu(pi->psm),
3789 pi->imtu, pi->omtu, pi->sec_level,
3793 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the single-record seq_file show helper. */
3798 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3800 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry. */
3803 static const struct file_operations l2cap_debugfs_fops = {
3804 .open = l2cap_debugfs_open,
3806 .llseek = seq_lseek,
3807 .release = single_release,
/* Handle of the debugfs file; removed again in l2cap_exit(). */
3810 static struct dentry *l2cap_debugfs;
/* Registration record hooking the L2CAP callbacks into the HCI core. */
3812 static struct hci_proto l2cap_hci_proto = {
3814 .id = HCI_PROTO_L2CAP,
3815 .connect_ind = l2cap_connect_ind,
3816 .connect_cfm = l2cap_connect_cfm,
3817 .disconn_ind = l2cap_disconn_ind,
3818 .disconn_cfm = l2cap_disconn_cfm,
3819 .security_cfm = l2cap_security_cfm,
3820 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the socket layer, create the "l2cap" work
 * queue, hook into HCI, and expose the debugfs file.  Errors unwind
 * in reverse order.
 */
3823 int __init l2cap_init(void)
3827 err = l2cap_init_sockets();
3831 _busy_wq = create_singlethread_workqueue("l2cap");
3837 err = hci_register_proto(&l2cap_hci_proto);
3839 BT_ERR("L2CAP protocol registration failed");
3840 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: only log it. */
3845 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
3846 bt_debugfs, NULL, &l2cap_debugfs_fops);
3848 BT_ERR("Failed to create L2CAP debug file");
3851 BT_INFO("L2CAP socket layer initialized");
/* Error unwind path. */
3856 destroy_workqueue(_busy_wq);
3857 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() in reverse order. */
3861 void l2cap_exit(void)
3863 debugfs_remove(l2cap_debugfs);
/* Drain pending work before destroying the queue. */
3865 flush_workqueue(_busy_wq);
3866 destroy_workqueue(_busy_wq);
3868 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3869 BT_ERR("L2CAP protocol unregistration failed");
3871 l2cap_cleanup_sockets();
/* Writable module parameter to disable enhanced retransmission mode. */
3874 module_param(disable_ertm, bool, 0644);
3875 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");