2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Feature mask advertised in L2CAP information responses; fixed-channel
 * support is always claimed. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap octets; 0x02 presumably marks the signalling
 * channel bit — confirm against the Core Spec fixed-channels table. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Workqueue backing l2cap_busy_work() (local-busy recovery). */
63 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its rwlock. */
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
69 static void l2cap_busy_work(struct work_struct *work);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list looking for a channel whose
 * destination CID matches @cid.  Caller must hold l->lock. */
77 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
81 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID.  Caller must hold l->lock. */
87 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
91 if (l2cap_pi(s)->scid == cid)
97 /* Find channel with given SCID.
98 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes l->lock for
 * reading around the lookup (and, per the comment above, locks the
 * returned socket — the locking call itself is in an elided line). */
99 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
103 s = __l2cap_get_chan_by_scid(l, cid);
106 read_unlock(&l->lock);
/* Find the channel whose pending signalling-command identifier matches
 * @ident.  Caller must hold l->lock. */
110 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
114 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
120 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
124 s = __l2cap_get_chan_by_ident(l, ident);
127 read_unlock(&l->lock);
/* Allocate a free dynamic source CID by linear scan of the dynamic
 * range; returns the first CID not already in use on this connection
 * (failure value is in an elided line — presumably 0). */
131 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
133 u16 cid = L2CAP_CID_DYN_START;
135 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel
 * list.  Caller must hold l->lock for writing. */
143 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
148 l2cap_pi(l->head)->prev_c = sk;
150 l2cap_pi(sk)->next_c = l->head;
151 l2cap_pi(sk)->prev_c = NULL;
/* Unlink @sk from the connection's channel list, fixing up its
 * neighbours' prev/next pointers under the list write lock. */
155 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
159 write_lock_bh(&l->lock);
164 l2cap_pi(next)->prev_c = prev;
166 l2cap_pi(prev)->next_c = next;
167 write_unlock_bh(&l->lock);
/* Attach socket @sk to connection @conn and assign its CIDs according
 * to socket type: connection-oriented sockets get a dynamic (or LE
 * data) CID, SOCK_DGRAM the connectionless CID, and raw sockets the
 * signalling CID.  Caller must hold the channel-list write lock. */
172 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
174 struct l2cap_chan_list *l = &conn->chan_list;
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
179 conn->disc_reason = 0x13;
181 l2cap_pi(sk)->conn = conn;
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel in both directions. */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
190 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
194 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
200 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
206 __l2cap_chan_link(l, sk);
/* Sockets created from a listening parent go on its accept queue. */
209 bt_accept_enqueue(parent, sk);
213 * Must be called on the locked socket. */
/* Tear down channel @sk: detach it from its connection, mark it closed
 * (SOCK_ZAPPED), notify the owner/parent, and for ERTM channels purge
 * queues, stop timers and free the pending SREJ list.
 * @err is propagated to the socket (path elided from this view). */
214 void l2cap_chan_del(struct sock *sk, int err)
216 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
217 struct sock *parent = bt_sk(sk)->parent;
219 l2cap_sock_clear_timer(sk);
221 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
224 /* Unlink from channel list */
225 l2cap_chan_unlink(&conn->chan_list, sk);
226 l2cap_pi(sk)->conn = NULL;
/* Drop the HCI connection reference taken when the channel was added. */
227 hci_conn_put(conn->hcon);
230 sk->sk_state = BT_CLOSED;
231 sock_set_flag(sk, SOCK_ZAPPED);
/* Accepted-but-not-yet-picked-up child: remove from parent's queue
 * and wake the listener. */
237 bt_accept_unlink(sk);
238 parent->sk_data_ready(parent, 0);
240 sk->sk_state_change(sk);
242 skb_queue_purge(TX_QUEUE(sk));
244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
245 struct srej_list *l, *tmp;
/* Stop all ERTM timers before freeing their queues. */
247 del_timer(&l2cap_pi(sk)->retrans_timer);
248 del_timer(&l2cap_pi(sk)->monitor_timer);
249 del_timer(&l2cap_pi(sk)->ack_timer);
251 skb_queue_purge(SREJ_QUEUE(sk));
252 skb_queue_purge(BUSY_QUEUE(sk));
254 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Map the socket's security level to an HCI authentication requirement:
 * raw sockets request dedicated bonding, SDP (PSM 0x0001) avoids
 * bonding entirely, and everything else uses general bonding. */
261 static inline u8 l2cap_get_auth_type(struct sock *sk)
263 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) {
265 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM:
268 return HCI_AT_DEDICATED_BONDING;
270 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP; it must never trigger pairing, so demote LOW
 * to the special SDP security level. */
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM;
279 return HCI_AT_NO_BONDING;
281 switch (l2cap_pi(sk)->sec_level) {
282 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM:
285 return HCI_AT_GENERAL_BONDING;
287 return HCI_AT_NO_BONDING;
292 /* Service level security */
/* Ask the HCI layer to enforce this channel's security level on the
 * underlying link; returns nonzero when security is already satisfied
 * (semantics of hci_conn_security() — confirm in hci_core). */
293 static inline int l2cap_check_security(struct sock *sk)
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
298 auth_type = l2cap_get_auth_type(sk);
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range (see comment below). */
304 u8 l2cap_get_ident(struct l2cap_conn *conn)
308 /* Get next available identificator.
309 * 1 - 128 are used by kernel.
310 * 129 - 199 are reserved.
311 * 200 - 254 are used by utilities like l2ping, etc.
/* tx_ident is shared per-connection state; serialize with conn->lock. */
314 spin_lock_bh(&conn->lock);
316 if (++conn->tx_ident > 128)
321 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command frame and hand it to the HCI ACL
 * transmit path.  Uses non-flushable ACL packets when the controller
 * supports them. */
326 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
328 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
331 BT_DBG("code 0x%2.2x", code);
336 if (lmp_no_flush_capable(conn->hcon->hdev))
337 flags = ACL_START_NO_FLUSH;
341 hci_send_acl(conn->hcon, skb, flags);
/* Build and transmit an ERTM S-frame carrying @control.  Consumes the
 * pending F-bit and P-bit state, appends an FCS when CRC16 is
 * configured, and sends on the channel's destination CID. */
344 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
347 struct l2cap_hdr *lh;
348 struct l2cap_conn *conn = pi->conn;
349 struct sock *sk = (struct sock *)pi;
/* hlen = L2CAP header + 2-byte control field (plus FCS, added below). */
350 int count, hlen = L2CAP_HDR_SIZE + 2;
353 if (sk->sk_state != BT_CONNECTED)
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the request. */
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Same for a pending Poll bit. */
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything except the FCS field itself. */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 if (lmp_no_flush_capable(conn->hcon->hdev))
389 flags = ACL_START_NO_FLUSH;
393 hci_send_acl(pi->conn->hcon, skb, flags);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally
 * busy, acknowledging up to buffer_seq. */
396 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY;
/* Remember we told the peer we are busy, so we later send RR to clear it. */
400 pi->conn_state |= L2CAP_CONN_RNR_SENT;
402 control |= L2CAP_SUPER_RCV_READY;
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
406 l2cap_send_sframe(pi, control);
/* True when no Connect Request is currently outstanding for @sk. */
409 static inline int __l2cap_no_conn_pending(struct sock *sk)
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the peer's feature mask is known,
 * send a Connect Request (once security passes); otherwise first issue
 * an Information Request for the feature mask and arm its timeout. */
414 static void l2cap_do_start(struct sock *sk)
416 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
418 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
419 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
422 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
428 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
430 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
431 L2CAP_CONN_REQ, sizeof(req), &req);
434 struct l2cap_info_req req;
435 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
437 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
438 conn->info_ident = l2cap_get_ident(conn);
/* Guard against a peer that never answers the info request. */
440 mod_timer(&conn->info_timer, jiffies +
441 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
443 l2cap_send_cmd(conn, conn->info_ident,
444 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask. */
448 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
450 u32 local_feat_mask = l2cap_feat_mask;
/* The local mask addition appears conditional in an elided line
 * (presumably on the enable_ertm module parameter — verify). */
452 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
455 case L2CAP_MODE_ERTM:
456 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
457 case L2CAP_MODE_STREAMING:
458 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate channel disconnection: drop queued TX data, stop any ERTM
 * timers, send a Disconnect Request and move the socket to BT_DISCONN.
 * @err is stored for the caller (assignment is in an elided line). */
464 void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
466 struct l2cap_disconn_req req;
471 skb_queue_purge(TX_QUEUE(sk));
473 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
474 del_timer(&l2cap_pi(sk)->retrans_timer);
475 del_timer(&l2cap_pi(sk)->monitor_timer);
476 del_timer(&l2cap_pi(sk)->ack_timer);
479 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
481 l2cap_send_cmd(conn, l2cap_get_ident(conn),
482 L2CAP_DISCONN_REQ, sizeof(req), &req);
484 sk->sk_state = BT_DISCONN;
488 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on @conn forward once the
 * link is usable: send Connect Requests for BT_CONNECT channels,
 * answer pending incoming requests for BT_CONNECT2 channels, and
 * defer-close channels whose mode the peer cannot support.  Sockets
 * to be closed are collected on a local list and closed after the
 * channel-list read lock is dropped (can't close under read_lock). */
489 static void l2cap_conn_start(struct l2cap_conn *conn)
491 struct l2cap_chan_list *l = &conn->chan_list;
492 struct sock_del_list del, *tmp1, *tmp2;
495 BT_DBG("conn %p", conn);
497 INIT_LIST_HEAD(&del.list);
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in this state machine. */
504 if (sk->sk_type != SOCK_SEQPACKET &&
505 sk->sk_type != SOCK_STREAM) {
510 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req;
513 if (!l2cap_check_security(sk) ||
514 !__l2cap_no_conn_pending(sk)) {
/* Required mode unsupported by peer and mode is mandated by the
 * device: schedule the socket for closure with ECONNRESET. */
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
521 && l2cap_pi(sk)->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list),
526 list_add_tail(&tmp1->list, &del.list);
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
532 req.psm = l2cap_pi(sk)->psm;
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_REQ, sizeof(req), &req);
540 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp;
/* Note: response scid/dcid are from the PEER's point of view. */
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
546 if (l2cap_check_security(sk)) {
547 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent;
/* Defer-setup: report "authorization pending" and let userspace accept. */
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
550 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
551 parent->sk_data_ready(parent, 0);
554 sk->sk_state = BT_CONFIG;
555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
559 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only start configuration once, and only after a success response. */
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) {
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf);
575 l2cap_pi(sk)->num_conf_req++;
581 read_unlock(&l->lock);
/* Now, outside the read lock, close the sockets collected above. */
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
592 /* Find socket with cid and source bdaddr.
593 * Returns closest match, locked.
/* Search the global socket list for a socket in @state bound to @cid:
 * an exact source-address match wins; a BDADDR_ANY binding is kept as
 * a fallback (sk1).  Return value handling is in elided lines. */
595 static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
597 struct sock *s, *sk = NULL, *sk1 = NULL;
598 struct hlist_node *node;
600 read_lock(&l2cap_sk_list.lock);
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state)
606 if (l2cap_pi(sk)->scid == cid) {
/* Exact source address match — best possible. */
608 if (!bacmp(&bt_sk(sk)->src, src))
/* Wildcard bind — remember as closest match. */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
619 read_unlock(&l2cap_sk_list.lock);
/* Handle an incoming LE connection: find a listener on the LE data
 * CID, clone a child socket off it, attach the child to @conn and mark
 * it connected, then wake the listener. */
624 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
626 struct l2cap_chan_list *list = &conn->chan_list;
627 struct sock *parent, *uninitialized_var(sk);
631 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
637 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog);
643 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
647 write_lock_bh(&list->lock);
/* Reference dropped again in l2cap_chan_del(). */
649 hci_conn_hold(conn->hcon);
651 l2cap_sock_init(sk, parent);
652 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst);
655 __l2cap_chan_add(conn, sk, parent);
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
659 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0);
662 write_unlock_bh(&list->lock);
665 bh_unlock_sock(parent);
/* Called when the underlying HCI link comes up: accept incoming LE
 * connections, mark LE and non-connection-oriented channels connected,
 * and start connection establishment for the rest. */
668 static void l2cap_conn_ready(struct l2cap_conn *conn)
670 struct l2cap_chan_list *l = &conn->chan_list;
673 BT_DBG("conn %p", conn);
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn);
680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
683 if (conn->hcon->type == LE_LINK) {
/* LE fixed channels need no configuration exchange. */
684 l2cap_sock_clear_timer(sk);
685 sk->sk_state = BT_CONNECTED;
686 sk->sk_state_change(sk);
689 if (sk->sk_type != SOCK_SEQPACKET &&
690 sk->sk_type != SOCK_STREAM) {
691 l2cap_sock_clear_timer(sk);
692 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk);
694 } else if (sk->sk_state == BT_CONNECT)
700 read_unlock(&l->lock);
703 /* Notify sockets that we cannot guaranty reliability anymore */
704 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
706 struct l2cap_chan_list *l = &conn->chan_list;
709 BT_DBG("conn %p", conn);
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels that demanded reliability get the error report. */
714 if (l2cap_pi(sk)->force_reliable)
718 read_unlock(&l->lock);
/* Information-request timer: the peer never answered, so give up on
 * the feature-mask exchange and proceed with connection start anyway. */
721 static void l2cap_info_timeout(unsigned long arg)
723 struct l2cap_conn *conn = (void *) arg;
725 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
726 conn->info_ident = 0;
728 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object for @hcon: allocate it,
 * pick the MTU from the controller (LE vs ACL), and initialize locks,
 * the channel list and (for BR/EDR) the info-request timer. */
731 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
733 struct l2cap_conn *conn = hcon->l2cap_data;
738 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
742 hcon->l2cap_data = conn;
745 BT_DBG("hcon %p conn %p", hcon, conn);
747 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
748 conn->mtu = hcon->hdev->le_mtu;
750 conn->mtu = hcon->hdev->acl_mtu;
752 conn->src = &hcon->hdev->bdaddr;
753 conn->dst = &hcon->dst;
757 spin_lock_init(&conn->lock);
758 rwlock_init(&conn->chan_list.lock);
/* LE links do not exchange information requests, so no timer there. */
760 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout,
762 (unsigned long) conn);
/* Default disconnect reason: 0x13 (remote user terminated). */
764 conn->disc_reason = 0x13;
/* Destroy the L2CAP connection on @hcon: free any partial RX skb,
 * delete every remaining channel with @err, stop the info timer and
 * detach from the HCI connection. */
769 static void l2cap_conn_del(struct hci_conn *hcon, int err)
771 struct l2cap_conn *conn = hcon->l2cap_data;
777 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
779 kfree_skb(conn->rx_skb);
/* Each iteration removes the head, so this drains the list. */
782 while ((sk = conn->chan_list.head)) {
784 l2cap_chan_del(sk, err);
789 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
790 del_timer_sync(&conn->info_timer);
792 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
796 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
798 struct l2cap_chan_list *l = &conn->chan_list;
799 write_lock_bh(&l->lock);
800 __l2cap_chan_add(conn, sk, parent);
801 write_unlock_bh(&l->lock);
804 /* ---- Socket interface ---- */
806 /* Find socket with psm and source bdaddr.
807 * Returns closest match.
/* Find a socket in @state bound to @psm: exact source-address match
 * preferred, BDADDR_ANY binding (sk1) as fallback.  `node` being
 * non-NULL means the loop exited early on an exact match. */
809 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
811 struct sock *sk = NULL, *sk1 = NULL;
812 struct hlist_node *node;
814 read_lock(&l2cap_sk_list.lock);
816 sk_for_each(sk, node, &l2cap_sk_list.head) {
817 if (state && sk->sk_state != state)
820 if (l2cap_pi(sk)->psm == psm) {
822 if (!bacmp(&bt_sk(sk)->src, src))
826 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
831 read_unlock(&l2cap_sk_list.lock);
833 return node ? sk : sk1;
/* Establish an outgoing L2CAP channel: route to a local adapter, bring
 * up (or reuse) the HCI link — LE or ACL depending on the destination
 * CID — attach the socket, and if the link is already connected either
 * finish immediately (raw/dgram) or start channel setup.
 * Returns 0 on success or a negative errno. */
836 int l2cap_do_connect(struct sock *sk)
838 bdaddr_t *src = &bt_sk(sk)->src;
839 bdaddr_t *dst = &bt_sk(sk)->dst;
840 struct l2cap_conn *conn;
841 struct hci_conn *hcon;
842 struct hci_dev *hdev;
846 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* No adapter can reach the destination. */
849 hdev = hci_get_route(dst, src);
851 return -EHOSTUNREACH;
853 hci_dev_lock_bh(hdev);
857 auth_type = l2cap_get_auth_type(sk);
/* The LE data CID selects an LE link instead of classic ACL. */
859 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
860 hcon = hci_connect(hdev, LE_LINK, dst,
861 l2cap_pi(sk)->sec_level, auth_type);
863 hcon = hci_connect(hdev, ACL_LINK, dst,
864 l2cap_pi(sk)->sec_level, auth_type);
869 conn = l2cap_conn_add(hcon, 0);
877 /* Update source addr of the socket */
878 bacpy(src, conn->src);
880 l2cap_chan_add(conn, sk, NULL);
882 sk->sk_state = BT_CONNECT;
883 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
885 if (hcon->state == BT_CONNECTED) {
/* Raw/connectionless sockets are done as soon as the link is up. */
886 if (sk->sk_type != SOCK_SEQPACKET &&
887 sk->sk_type != SOCK_STREAM) {
888 l2cap_sock_clear_timer(sk);
889 if (l2cap_check_security(sk))
890 sk->sk_state = BT_CONNECTED;
896 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until all outstanding ERTM I-frames have been
 * acknowledged, the connection drops, a signal arrives, or the socket
 * reports an error.  Returns 0 or a negative errno. */
901 int __l2cap_wait_ack(struct sock *sk)
903 DECLARE_WAITQUEUE(wait, current);
907 add_wait_queue(sk_sleep(sk), &wait);
908 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
909 set_current_state(TASK_INTERRUPTIBLE);
914 if (signal_pending(current)) {
915 err = sock_intr_errno(timeo);
920 timeo = schedule_timeout(timeo);
923 err = sock_error(sk);
927 set_current_state(TASK_RUNNING);
928 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer has not replied to our poll.  Give up
 * and disconnect after remote_max_tx retries; otherwise re-arm and
 * poll again with an RR/RNR carrying the P bit. */
932 static void l2cap_monitor_timeout(unsigned long arg)
934 struct sock *sk = (void *) arg;
939 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
940 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
945 l2cap_pi(sk)->retry_count++;
946 __mod_monitor_timer();
948 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the poll procedure — enter the
 * WAIT_F state, arm the monitor timer and send a poll S-frame. */
952 static void l2cap_retrans_timeout(unsigned long arg)
954 struct sock *sk = (void *) arg;
959 l2cap_pi(sk)->retry_count = 1;
960 __mod_monitor_timer();
962 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
964 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free transmitted I-frames from the head of the TX queue up to (not
 * including) expected_ack_seq; stop the retransmission timer once
 * nothing is left unacknowledged. */
968 static void l2cap_drop_acked_frames(struct sock *sk)
972 while ((skb = skb_peek(TX_QUEUE(sk))) &&
973 l2cap_pi(sk)->unacked_frames) {
/* Reached the first frame not yet acknowledged — stop. */
974 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
977 skb = skb_dequeue(TX_QUEUE(sk));
980 l2cap_pi(sk)->unacked_frames--;
983 if (!l2cap_pi(sk)->unacked_frames)
984 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built L2CAP frame to the ACL transmit path, requesting
 * a non-flushable packet unless the channel is marked flushable. */
987 void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
989 struct l2cap_pinfo *pi = l2cap_pi(sk);
990 struct hci_conn *hcon = pi->conn->hcon;
993 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
995 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
996 flags = ACL_START_NO_FLUSH;
1000 hci_send_acl(hcon, skb, flags);
/* Streaming mode TX: drain the whole TX queue, stamping each frame's
 * control field with the next TX sequence number (modulo-64) and
 * filling in the FCS placeholder when CRC16 is in use. */
1003 void l2cap_streaming_send(struct sock *sk)
1005 struct sk_buff *skb;
1006 struct l2cap_pinfo *pi = l2cap_pi(sk);
1009 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1010 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1011 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1012 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS occupies the last two bytes reserved by the PDU builder. */
1014 if (pi->fcs == L2CAP_FCS_CRC16) {
1015 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1016 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1019 l2cap_do_send(sk, skb);
/* TxSeq is a 6-bit counter in ERTM/streaming — wrap at 64. */
1021 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single I-frame with sequence number @tx_seq (SREJ
 * recovery): locate it in the TX queue, clone it, refresh its control
 * field (ReqSeq, F bit) and FCS, and resend.  Disconnects when the
 * frame has already hit the peer's MaxTransmit limit. */
1025 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1027 struct l2cap_pinfo *pi = l2cap_pi(sk);
1028 struct sk_buff *skb, *tx_skb;
1031 skb = skb_peek(TX_QUEUE(sk));
1036 if (bt_cb(skb)->tx_seq == tx_seq)
1039 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1042 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1044 if (pi->remote_max_tx &&
1045 bt_cb(skb)->retries == pi->remote_max_tx) {
1046 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued for possible later resends;
 * note the clone shares the data buffer with the original. */
1050 tx_skb = skb_clone(skb, GFP_ATOMIC);
1051 bt_cb(skb)->retries++;
1052 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1055 control |= L2CAP_CTRL_FINAL;
1056 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1059 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1060 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1062 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1064 if (pi->fcs == L2CAP_FCS_CRC16) {
1065 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1066 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1069 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: send queued I-frames starting at sk_send_head while
 * the transmit window has room.  Each frame is cloned, stamped with
 * ReqSeq/TxSeq (and a pending F bit), FCS'd, and counted as unacked;
 * the retransmission timer is (re)armed per frame.  Returns the number
 * of frames sent (return statement elided from this view). */
1072 int l2cap_ertm_send(struct sock *sk)
1074 struct sk_buff *skb, *tx_skb;
1075 struct l2cap_pinfo *pi = l2cap_pi(sk);
1079 if (sk->sk_state != BT_CONNECTED)
1082 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Frame already retried MaxTransmit times: abort the channel. */
1084 if (pi->remote_max_tx &&
1085 bt_cb(skb)->retries == pi->remote_max_tx) {
1086 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1090 tx_skb = skb_clone(skb, GFP_ATOMIC);
1092 bt_cb(skb)->retries++;
/* Keep only the SAR bits; sequence/ack fields are rebuilt below. */
1094 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1095 control &= L2CAP_CTRL_SAR;
1097 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1098 control |= L2CAP_CTRL_FINAL;
1099 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1101 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1102 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1103 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written via skb->data while length
 * comes from tx_skb — works only because clones share the buffer;
 * tx_skb->data would be the clearer choice. */
1106 if (pi->fcs == L2CAP_FCS_CRC16) {
1107 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1108 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 l2cap_do_send(sk, tx_skb);
1113 __mod_retrans_timer();
1115 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1116 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1118 pi->unacked_frames++;
/* Advance sk_send_head; NULL once the queue tail has been sent. */
1121 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1122 sk->sk_send_head = NULL;
1124 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind transmission to the oldest unacknowledged frame and resend
 * everything from there via l2cap_ertm_send(). */
1134 static int l2cap_retransmit_frames(struct sock *sk)
1136 struct l2cap_pinfo *pi = l2cap_pi(sk);
1137 if (!skb_queue_empty(TX_QUEUE(sk)))
1138 sk->sk_send_head = TX_QUEUE(sk)->next;
1140 pi->next_tx_seq = pi->expected_ack_seq;
1141 ret = l2cap_ertm_send(sk);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggy-back the ack on outgoing I-frames (l2cap_ertm_send),
 * and fall back to an explicit RR if nothing was sent. */
1147 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1149 struct sock *sk = (struct sock *)pi;
1150 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1152 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1153 control |= L2CAP_SUPER_RCV_NOT_READY;
1154 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1155 l2cap_send_sframe(pi, control);
/* I-frames were sent — they already carry the ReqSeq ack. */
1159 if (l2cap_ertm_send(sk) > 0)
1162 control |= L2CAP_SUPER_RCV_READY;
1163 l2cap_send_sframe(pi, control);
/* Send a final (F=1) SREJ S-frame for the last entry on the SREJ list
 * — used when answering a poll while selective-reject recovery is in
 * progress. */
1168 static void l2cap_send_srejtail(struct sock *sk)
1170 struct srej_list *tail;
1171 control = L2CAP_SUPER_SELECT_REJECT;
1172 control |= L2CAP_CTRL_FINAL;
1174 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1175 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1177 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy @len bytes of user data from @msg into @skb: @count bytes go
 * into the main buffer, the remainder into a chain of fragment skbs
 * sized by the connection MTU. */
1180 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1182 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1183 struct sk_buff **frag;
1186 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1192 /* Continuation fragments (no L2CAP header) */
1193 frag = &skb_shinfo(skb)->frag_list;
1195 count = min_t(unsigned int, conn->mtu, len);
1197 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1200 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1206 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload.  Returns the skb or an ERR_PTR. */
1212 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1214 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1215 struct sk_buff *skb;
/* +2 for the PSM field that follows the basic L2CAP header. */
1216 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1217 struct l2cap_hdr *lh;
1219 BT_DBG("sk %p len %d", sk, (int)len);
1221 count = min_t(unsigned int, (conn->mtu - hlen), len);
1222 skb = bt_skb_send_alloc(sk, count + hlen,
1223 msg->msg_flags & MSG_DONTWAIT, &err);
1225 return ERR_PTR(err);
1227 /* Create L2CAP header */
1228 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1229 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1230 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1231 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1233 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1234 if (unlikely(err < 0)) {
1236 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or an ERR_PTR. */
1241 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1244 struct sk_buff *skb;
1245 int err, count, hlen = L2CAP_HDR_SIZE;
1246 struct l2cap_hdr *lh;
1248 BT_DBG("sk %p len %d", sk, (int)len);
1250 count = min_t(unsigned int, (conn->mtu - hlen), len);
1251 skb = bt_skb_send_alloc(sk, count + hlen,
1252 msg->msg_flags & MSG_DONTWAIT, &err);
1254 return ERR_PTR(err);
1256 /* Create L2CAP header */
1257 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1258 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1261 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1262 if (unlikely(err < 0)) {
1264 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (first segment of a segmented
 * SDU, when @sdulen != 0), payload, and a 2-byte FCS placeholder that
 * the TX path fills in later.  Returns the skb or an ERR_PTR. */
1269 struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1271 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1272 struct sk_buff *skb;
1273 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1274 struct l2cap_hdr *lh;
1276 BT_DBG("sk %p len %d", sk, (int)len);
1279 return ERR_PTR(-ENOTCONN);
1284 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1287 count = min_t(unsigned int, (conn->mtu - hlen), len);
1288 skb = bt_skb_send_alloc(sk, count + hlen,
1289 msg->msg_flags & MSG_DONTWAIT, &err);
1291 return ERR_PTR(err);
1293 /* Create L2CAP header */
1294 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1295 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1296 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1297 put_unaligned_le16(control, skb_put(skb, 2));
1299 put_unaligned_le16(sdulen, skb_put(skb, 2));
1301 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1302 if (unlikely(err < 0)) {
1304 return ERR_PTR(err);
/* Reserve space for FCS; actual value is written at transmit time. */
1307 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1308 put_unaligned_le16(0, skb_put(skb, 2));
1310 bt_cb(skb)->retries = 0;
/* Segment an SDU of @len bytes into remote_mps-sized I-frames
 * (SAR: START carrying the SDU length, then CONTINUE, then END),
 * building them on a temporary queue that is spliced onto the TX
 * queue only if every segment allocates successfully. */
1314 int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1316 struct l2cap_pinfo *pi = l2cap_pi(sk);
1317 struct sk_buff *skb;
1318 struct sk_buff_head sar_queue;
1322 skb_queue_head_init(&sar_queue);
1323 control = L2CAP_SDU_START;
/* Only the START segment carries the total SDU length (@len). */
1324 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1326 return PTR_ERR(skb);
1328 __skb_queue_tail(&sar_queue, skb);
1329 len -= pi->remote_mps;
1330 size += pi->remote_mps;
1335 if (len > pi->remote_mps) {
1336 control = L2CAP_SDU_CONTINUE;
1337 buflen = pi->remote_mps;
1339 control = L2CAP_SDU_END;
1343 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failure: discard everything built so far. */
1345 skb_queue_purge(&sar_queue);
1346 return PTR_ERR(skb);
1349 __skb_queue_tail(&sar_queue, skb);
1353 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1354 if (sk->sk_send_head == NULL)
1355 sk->sk_send_head = sar_queue.next;
/* Configuration finished: clear config state and the setup timer,
 * then wake whoever is waiting — the connecting task for outgoing
 * channels, or the listening parent for incoming ones. */
1360 static void l2cap_chan_ready(struct sock *sk)
1362 struct sock *parent = bt_sk(sk)->parent;
1364 BT_DBG("sk %p, parent %p", sk, parent);
1366 l2cap_pi(sk)->conf_state = 0;
1367 l2cap_sock_clear_timer(sk);
1370 /* Outgoing channel.
1371 * Wake up socket sleeping on connect.
1373 sk->sk_state = BT_CONNECTED;
1374 sk->sk_state_change(sk);
1376 /* Incoming channel.
1377 * Wake up socket sleeping on accept.
1379 parent->sk_data_ready(parent, 0);
1383 /* Copy frame to all raw sockets on that connection */
1384 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1386 struct l2cap_chan_list *l = &conn->chan_list;
1387 struct sk_buff *nskb;
1390 BT_DBG("conn %p", conn);
1392 read_lock(&l->lock);
1393 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1394 if (sk->sk_type != SOCK_RAW)
1397 /* Don't send frame to the socket it came from */
/* Each raw socket gets its own clone of the frame. */
1400 nskb = skb_clone(skb, GFP_ATOMIC);
/* Receive-queue full or allocation issue — drop the clone. */
1404 if (sock_queue_rcv_skb(sk, nskb))
1407 read_unlock(&l->lock);
1410 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU (L2CAP header + command header + @dlen bytes
 * of @data) on the signalling CID — the LE signalling CID for LE
 * links.  Data exceeding the connection MTU is chained into fragment
 * skbs.  Returns the skb or NULL on allocation failure. */
1411 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1412 u8 code, u8 ident, u16 dlen, void *data)
1414 struct sk_buff *skb, **frag;
1415 struct l2cap_cmd_hdr *cmd;
1416 struct l2cap_hdr *lh;
1419 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1420 conn, code, ident, dlen);
1422 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1423 count = min_t(unsigned int, conn->mtu, len);
1425 skb = bt_skb_alloc(count, GFP_ATOMIC);
1429 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1430 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1432 if (conn->hcon->type == LE_LINK)
1433 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1435 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1437 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1440 cmd->len = cpu_to_le16(dlen);
/* First chunk of the command payload goes into the head skb. */
1443 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1444 memcpy(skb_put(skb, count), data, count);
1450 /* Continuation fragments (no L2CAP header) */
1451 frag = &skb_shinfo(skb)->frag_list;
1453 count = min_t(unsigned int, conn->mtu, len);
1455 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1459 memcpy(skb_put(*frag, count), data, count);
1464 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr: extract its type and
 * length, and its value widened to unsigned long (1/2/4-byte values
 * by size; anything else is returned as a pointer to the raw bytes). */
1474 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1476 struct l2cap_conf_opt *opt = *ptr;
1479 len = L2CAP_CONF_OPT_SIZE + opt->len;
1487 *val = *((u8 *) opt->val);
1491 *val = get_unaligned_le16(opt->val);
1495 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value. */
1499 *val = (unsigned long) opt->val;
1503 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *@ptr and advance
 * the pointer past it.  @val is stored by size: 1/2/4-byte scalars
 * inline, anything longer copied from the pointer in @val. */
1507 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1509 struct l2cap_conf_opt *opt = *ptr;
1511 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1518 *((u8 *) opt->val) = val;
1522 put_unaligned_le16(val, opt->val);
1526 put_unaligned_le32(val, opt->val);
/* Longer options: @val carries a pointer to the source bytes. */
1530 memcpy(opt->val, (void *) val, len);
1534 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for the
 * socket stashed in the timer's data argument. */
1537 static void l2cap_ack_timeout(unsigned long arg)
1539 struct sock *sk = (void *) arg;
1542 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state once the channel reaches the
 * connected/configured stage: reset sequence counters, arm the three
 * ERTM timers, init the SREJ/busy queues and the local-busy worker,
 * and route backlog processing through the ERTM receive path. */
1546 static inline void l2cap_ertm_init(struct sock *sk)
1548 l2cap_pi(sk)->expected_ack_seq = 0;
1549 l2cap_pi(sk)->unacked_frames = 0;
1550 l2cap_pi(sk)->buffer_seq = 0;
1551 l2cap_pi(sk)->num_acked = 0;
1552 l2cap_pi(sk)->frames_sent = 0;
1554 setup_timer(&l2cap_pi(sk)->retrans_timer,
1555 l2cap_retrans_timeout, (unsigned long) sk);
1556 setup_timer(&l2cap_pi(sk)->monitor_timer,
1557 l2cap_monitor_timeout, (unsigned long) sk);
1558 setup_timer(&l2cap_pi(sk)->ack_timer,
1559 l2cap_ack_timeout, (unsigned long) sk);
1561 __skb_queue_head_init(SREJ_QUEUE(sk));
1562 __skb_queue_head_init(BUSY_QUEUE(sk));
1564 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Frames queued on the socket backlog go through the ERTM receiver. */
1566 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/Streaming only if the remote
 * advertised support for it, otherwise fall back to Basic mode. */
1569 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1572 case L2CAP_MODE_STREAMING:
1573 case L2CAP_MODE_ERTM:
1574 if (l2cap_mode_supported(mode, remote_feat_mask))
1578 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request for this channel into 'data'
 * (MTU, RFC mode option, optional FCS option) and presumably return
 * the total request length — the return statement is elided in this
 * view; confirm against the full source. */
1582 int l2cap_build_conf_req(struct sock *sk, void *data)
1584 struct l2cap_pinfo *pi = l2cap_pi(sk);
1585 struct l2cap_conf_req *req = data;
1586 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
1587 void *ptr = req->data;
1589 BT_DBG("sk %p", sk);
/* Only negotiate the mode on the very first config exchange. */
1591 if (pi->num_conf_req || pi->num_conf_rsp)
1595 case L2CAP_MODE_STREAMING:
1596 case L2CAP_MODE_ERTM:
1597 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
1602 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Only send an MTU option when it differs from the spec default. */
1607 if (pi->imtu != L2CAP_DEFAULT_MTU)
1608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1611 case L2CAP_MODE_BASIC:
/* If the peer supports neither ERTM nor Streaming, the RFC option
 * can be omitted entirely for Basic mode. */
1612 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1613 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
1616 rfc.mode = L2CAP_MODE_BASIC;
1618 rfc.max_transmit = 0;
1619 rfc.retrans_timeout = 0;
1620 rfc.monitor_timeout = 0;
1621 rfc.max_pdu_size = 0;
1623 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1624 (unsigned long) &rfc);
1627 case L2CAP_MODE_ERTM:
1628 rfc.mode = L2CAP_MODE_ERTM;
1629 rfc.txwin_size = pi->tx_win;
1630 rfc.max_transmit = pi->max_tx;
1631 rfc.retrans_timeout = 0;
1632 rfc.monitor_timeout = 0;
/* Cap the MPS so an ERTM PDU (payload + 10 bytes of headers/FCS)
 * still fits in the HCI connection MTU. */
1633 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1634 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1635 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1638 (unsigned long) &rfc);
1640 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request no-FCS when we prefer it or the peer already asked for it. */
1643 if (pi->fcs == L2CAP_FCS_NONE ||
1644 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1645 pi->fcs = L2CAP_FCS_NONE;
1646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1650 case L2CAP_MODE_STREAMING:
1651 rfc.mode = L2CAP_MODE_STREAMING;
1653 rfc.max_transmit = 0;
1654 rfc.retrans_timeout = 0;
1655 rfc.monitor_timeout = 0;
1656 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1657 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
1658 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1661 (unsigned long) &rfc);
1663 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
1666 if (pi->fcs == L2CAP_FCS_NONE ||
1667 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1668 pi->fcs = L2CAP_FCS_NONE;
1669 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
1674 /* FIXME: Need actual value of the flush timeout */
1675 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1676 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1678 req->dcid = cpu_to_le16(pi->dcid);
1679 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req) and
 * build our Configure Response into 'data'.  Unknown non-hint options
 * produce CONF_UNKNOWN; mode/MTU mismatches produce CONF_UNACCEPT or
 * -ECONNREFUSED.  The return of the response length is elided here. */
1684 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1686 struct l2cap_pinfo *pi = l2cap_pi(sk);
1687 struct l2cap_conf_rsp *rsp = data;
1688 void *ptr = rsp->data;
1689 void *req = pi->conf_req;
1690 int len = pi->conf_len;
1691 int type, hint, olen;
1693 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1694 u16 mtu = L2CAP_DEFAULT_MTU;
1695 u16 result = L2CAP_CONF_SUCCESS;
1697 BT_DBG("sk %p", sk);
/* Walk every option in the request. */
1699 while (len >= L2CAP_CONF_OPT_SIZE) {
1700 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (high bit set) may be ignored without rejecting. */
1702 hint = type & L2CAP_CONF_HINT;
1703 type &= L2CAP_CONF_MASK;
1706 case L2CAP_CONF_MTU:
1710 case L2CAP_CONF_FLUSH_TO:
1714 case L2CAP_CONF_QOS:
1717 case L2CAP_CONF_RFC:
1718 if (olen == sizeof(rfc))
1719 memcpy(&rfc, (void *) val, olen);
1722 case L2CAP_CONF_FCS:
1723 if (val == L2CAP_FCS_NONE)
1724 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
1732 result = L2CAP_CONF_UNKNOWN;
1733 *((u8 *) ptr++) = type;
/* Mode selection only happens on the first request/response round. */
1738 if (pi->num_conf_rsp || pi->num_conf_req > 1)
1742 case L2CAP_MODE_STREAMING:
1743 case L2CAP_MODE_ERTM:
1744 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1745 pi->mode = l2cap_select_mode(rfc.mode,
1746 pi->conn->feat_mask);
1750 if (pi->mode != rfc.mode)
1751 return -ECONNREFUSED;
1757 if (pi->mode != rfc.mode) {
1758 result = L2CAP_CONF_UNACCEPT;
1759 rfc.mode = pi->mode;
/* Second unacceptable round means negotiation has failed. */
1761 if (pi->num_conf_rsp == 1)
1762 return -ECONNREFUSED;
1764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1765 sizeof(rfc), (unsigned long) &rfc);
1769 if (result == L2CAP_CONF_SUCCESS) {
1770 /* Configure output options and let the other side know
1771 * which ones we don't like. */
1773 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1774 result = L2CAP_CONF_UNACCEPT;
1777 pi->conf_state |= L2CAP_CONF_MTU_DONE;
1779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1782 case L2CAP_MODE_BASIC:
1783 pi->fcs = L2CAP_FCS_NONE;
1784 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1787 case L2CAP_MODE_ERTM:
1788 pi->remote_tx_win = rfc.txwin_size;
1789 pi->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits in our HCI MTU (10 bytes of
 * ERTM overhead). */
1791 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1792 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1794 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu on a host-order constant looks like it
 * should be cpu_to_le16 — the upstream kernel later fixed exactly
 * this; verify before relying on these wire values. */
1796 rfc.retrans_timeout =
1797 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1798 rfc.monitor_timeout =
1799 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1801 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1804 sizeof(rfc), (unsigned long) &rfc);
1808 case L2CAP_MODE_STREAMING:
1809 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1810 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1812 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1814 pi->conf_state |= L2CAP_CONF_MODE_DONE;
1816 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1817 sizeof(rfc), (unsigned long) &rfc);
1822 result = L2CAP_CONF_UNACCEPT;
1824 memset(&rfc, 0, sizeof(rfc));
1825 rfc.mode = pi->mode;
1828 if (result == L2CAP_CONF_SUCCESS)
1829 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1831 rsp->scid = cpu_to_le16(pi->dcid);
1832 rsp->result = cpu_to_le16(result);
1833 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into 'data', adopting the parameters the peer proposed.
 * Returns -ECONNREFUSED on an unacceptable mode; the length return is
 * elided in this view. */
1838 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
1840 struct l2cap_pinfo *pi = l2cap_pi(sk);
1841 struct l2cap_conf_req *req = data;
1842 void *ptr = req->data;
1845 struct l2cap_conf_rfc rfc;
1847 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1849 while (len >= L2CAP_CONF_OPT_SIZE) {
1850 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1853 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but fall
 * back to the minimum ourselves. */
1854 if (val < L2CAP_DEFAULT_MIN_MTU) {
1855 *result = L2CAP_CONF_UNACCEPT;
1856 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1862 case L2CAP_CONF_FLUSH_TO:
1864 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1868 case L2CAP_CONF_RFC:
1869 if (olen == sizeof(rfc))
1870 memcpy(&rfc, (void *)val, olen);
/* A STATE2 (mode-locked) device may not switch modes. */
1872 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1873 rfc.mode != pi->mode)
1874 return -ECONNREFUSED;
1878 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1879 sizeof(rfc), (unsigned long) &rfc);
1884 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1885 return -ECONNREFUSED;
1887 pi->mode = rfc.mode;
/* On success, adopt the negotiated ERTM/Streaming parameters. */
1889 if (*result == L2CAP_CONF_SUCCESS) {
1891 case L2CAP_MODE_ERTM:
1892 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1893 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1894 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1896 case L2CAP_MODE_STREAMING:
1897 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1901 req->dcid = cpu_to_le16(pi->dcid);
1902 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) in
 * 'data'; the length return is elided in this view. */
1907 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1909 struct l2cap_conf_rsp *rsp = data;
1910 void *ptr = rsp->data;
1912 BT_DBG("sk %p", sk);
1914 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1915 rsp->result = cpu_to_le16(result);
1916 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configure Response and
 * adopt its timeouts/MPS.  No-op for Basic-mode channels. */
1921 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
1923 struct l2cap_pinfo *pi = l2cap_pi(sk);
1926 struct l2cap_conf_rfc rfc;
1928 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
1930 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1933 while (len >= L2CAP_CONF_OPT_SIZE) {
1934 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1937 case L2CAP_CONF_RFC:
1938 if (olen == sizeof(rfc))
1939 memcpy(&rfc, (void *)val, olen);
1946 case L2CAP_MODE_ERTM:
1947 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1948 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1949 pi->mps = le16_to_cpu(rfc.max_pdu_size);
1951 case L2CAP_MODE_STREAMING:
1952 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it answers our outstanding info
 * request, stop the info timer, mark feature discovery done and kick
 * off pending channel setup. */
1956 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1958 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only reason 0x0000 (command not understood) is handled here. */
1960 if (rej->reason != 0x0000)
1963 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1964 cmd->ident == conn->info_ident) {
1965 del_timer(&conn->info_timer);
1967 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1968 conn->info_ident = 0;
1970 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening socket for
 * the PSM, run security checks, allocate and register a child socket,
 * send a Connection Response, and trigger feature-mask discovery or
 * the first Configure Request as appropriate.
 * NOTE(review): error-path labels and some declarations are elided in
 * this view. */
1976 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1978 struct l2cap_chan_list *list = &conn->chan_list;
1979 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1980 struct l2cap_conn_rsp rsp;
1981 struct sock *parent, *sk = NULL;
1982 int result, status = L2CAP_CS_NO_INFO;
1984 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1985 __le16 psm = req->psm;
1987 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1989 /* Check if we have socket listening on psm */
1990 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1992 result = L2CAP_CR_BAD_PSM;
1996 bh_lock_sock(parent);
1998 /* Check if the ACL is secure enough (if not SDP) */
1999 if (psm != cpu_to_le16(0x0001) &&
2000 !hci_conn_check_link_mode(conn->hcon)) {
2001 conn->disc_reason = 0x05;
2002 result = L2CAP_CR_SEC_BLOCK;
2006 result = L2CAP_CR_NO_MEM;
2008 /* Check for backlog size */
2009 if (sk_acceptq_is_full(parent)) {
2010 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2014 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2018 write_lock_bh(&list->lock);
2020 /* Check if we already have channel with that dcid */
2021 if (__l2cap_get_chan_by_dcid(list, scid)) {
2022 write_unlock_bh(&list->lock);
2023 sock_set_flag(sk, SOCK_ZAPPED);
2024 l2cap_sock_kill(sk);
2028 hci_conn_hold(conn->hcon);
2030 l2cap_sock_init(sk, parent);
2031 bacpy(&bt_sk(sk)->src, conn->src);
2032 bacpy(&bt_sk(sk)->dst, conn->dst);
2033 l2cap_pi(sk)->psm = psm;
2034 l2cap_pi(sk)->dcid = scid;
2036 __l2cap_chan_add(conn, sk, parent);
2037 dcid = l2cap_pi(sk)->scid;
2039 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2041 l2cap_pi(sk)->ident = cmd->ident;
/* Feature discovery already done: decide between immediate config,
 * deferred accept (authorization pending) or security pending. */
2043 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2044 if (l2cap_check_security(sk)) {
2045 if (bt_sk(sk)->defer_setup) {
2046 sk->sk_state = BT_CONNECT2;
2047 result = L2CAP_CR_PEND;
2048 status = L2CAP_CS_AUTHOR_PEND;
2049 parent->sk_data_ready(parent, 0);
2051 sk->sk_state = BT_CONFIG;
2052 result = L2CAP_CR_SUCCESS;
2053 status = L2CAP_CS_NO_INFO;
2056 sk->sk_state = BT_CONNECT2;
2057 result = L2CAP_CR_PEND;
2058 status = L2CAP_CS_AUTHEN_PEND;
2061 sk->sk_state = BT_CONNECT2;
2062 result = L2CAP_CR_PEND;
2063 status = L2CAP_CS_NO_INFO;
2066 write_unlock_bh(&list->lock);
2069 bh_unlock_sock(parent);
2072 rsp.scid = cpu_to_le16(scid);
2073 rsp.dcid = cpu_to_le16(dcid);
2074 rsp.result = cpu_to_le16(result);
2075 rsp.status = cpu_to_le16(status);
2076 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: start feature-mask discovery now. */
2078 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2079 struct l2cap_info_req info;
2080 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2082 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2083 conn->info_ident = l2cap_get_ident(conn);
2085 mod_timer(&conn->info_timer, jiffies +
2086 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2088 l2cap_send_cmd(conn, conn->info_ident,
2089 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: kick off configuration right away. */
2092 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2093 result == L2CAP_CR_SUCCESS) {
2095 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2096 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2097 l2cap_build_conf_req(sk, buf), buf);
2098 l2cap_pi(sk)->num_conf_req++;
/* Handle a Connection Response to our earlier request: on success
 * move to BT_CONFIG and send the first Configure Request; on pending,
 * mark CONNECT_PEND; otherwise tear the channel down (deferring if a
 * user currently owns the socket lock). */
2104 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2106 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2107 u16 scid, dcid, result, status;
2111 scid = __le16_to_cpu(rsp->scid);
2112 dcid = __le16_to_cpu(rsp->dcid);
2113 result = __le16_to_cpu(rsp->result);
2114 status = __le16_to_cpu(rsp->status);
2116 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Look up by scid when present, otherwise by the command ident. */
2119 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2123 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2129 case L2CAP_CR_SUCCESS:
2130 sk->sk_state = BT_CONFIG;
2131 l2cap_pi(sk)->ident = 0;
2132 l2cap_pi(sk)->dcid = dcid;
2133 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2135 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
2138 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2140 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2141 l2cap_build_conf_req(sk, req), req);
2142 l2cap_pi(sk)->num_conf_req++;
2146 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2150 /* don't delete l2cap channel if sk is owned by user */
2151 if (sock_owned_by_user(sk)) {
2152 sk->sk_state = BT_DISCONN;
2153 l2cap_sock_clear_timer(sk);
/* Short timer (HZ/5) lets the user release the lock before the
 * channel is actually deleted. */
2154 l2cap_sock_set_timer(sk, HZ / 5);
2158 l2cap_chan_del(sk, ECONNREFUSED);
/* Apply the FCS default after configuration: Basic mode never uses
 * FCS; ERTM/Streaming use CRC16 unless the peer asked for none. */
2166 static inline void set_default_fcs(struct l2cap_pinfo *pi)
2168 /* FCS is enabled only in ERTM or streaming mode, if one or both
2171 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2172 pi->fcs = L2CAP_FCS_NONE;
2173 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
2174 pi->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: accumulate (possibly fragmented) option
 * data in pi->conf_req, and once complete, parse it, send the
 * response, and bring the channel up when both config directions are
 * done.  Rejects if the channel is not in BT_CONFIG or the options
 * overflow the buffer. */
2177 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2179 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2185 dcid = __le16_to_cpu(req->dcid);
2186 flags = __le16_to_cpu(req->flags);
2188 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2190 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config only valid in BT_CONFIG; otherwise reject (invalid CID). */
2194 if (sk->sk_state != BT_CONFIG) {
2195 struct l2cap_cmd_rej rej;
2197 rej.reason = cpu_to_le16(0x0002);
2198 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2203 /* Reject if config buffer is too small. */
2204 len = cmd_len - sizeof(*req);
2205 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2206 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2207 l2cap_build_conf_rsp(sk, rsp,
2208 L2CAP_CONF_REJECT, flags), rsp);
2213 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2214 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more option fragments are coming. */
2216 if (flags & 0x0001) {
2217 /* Incomplete config. Send empty response. */
2218 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2219 l2cap_build_conf_rsp(sk, rsp,
2220 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2224 /* Complete config. */
2225 len = l2cap_parse_conf_req(sk, rsp);
2227 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2231 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2232 l2cap_pi(sk)->num_conf_rsp++;
2234 /* Reset config buffer. */
2235 l2cap_pi(sk)->conf_len = 0;
2237 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is now connected. */
2240 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2241 set_default_fcs(l2cap_pi(sk));
2243 sk->sk_state = BT_CONNECTED;
2245 l2cap_pi(sk)->next_tx_seq = 0;
2246 l2cap_pi(sk)->expected_tx_seq = 0;
2247 __skb_queue_head_init(TX_QUEUE(sk));
2248 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2249 l2cap_ertm_init(sk);
2251 l2cap_chan_ready(sk);
/* We haven't sent our own Configure Request yet: do it now. */
2255 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2257 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2258 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2259 l2cap_build_conf_req(sk, buf), buf);
2260 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configure Response: on success adopt the RFC parameters;
 * on UNACCEPT, re-negotiate (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * otherwise disconnect.  When input+output config are both done the
 * channel becomes connected. */
2268 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2270 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2271 u16 scid, flags, result;
2273 int len = cmd->len - sizeof(*rsp);
2275 scid = __le16_to_cpu(rsp->scid);
2276 flags = __le16_to_cpu(rsp->flags);
2277 result = __le16_to_cpu(rsp->result);
2279 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2280 scid, flags, result);
2282 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2287 case L2CAP_CONF_SUCCESS:
2288 l2cap_conf_rfc_get(sk, rsp->data, len);
2291 case L2CAP_CONF_UNACCEPT:
2292 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against the re-built request overflowing our buffer. */
2295 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2296 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2300 /* throw out any old stored conf requests */
2301 result = L2CAP_CONF_SUCCESS;
2302 len = l2cap_parse_conf_rsp(sk, rsp->data,
2305 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2309 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2310 L2CAP_CONF_REQ, len, req);
2311 l2cap_pi(sk)->num_conf_req++;
2312 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on configuration and disconnect. */
2318 sk->sk_err = ECONNRESET;
2319 l2cap_sock_set_timer(sk, HZ * 5);
2320 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2327 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2329 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2330 set_default_fcs(l2cap_pi(sk));
2332 sk->sk_state = BT_CONNECTED;
2333 l2cap_pi(sk)->next_tx_seq = 0;
2334 l2cap_pi(sk)->expected_tx_seq = 0;
2335 __skb_queue_head_init(TX_QUEUE(sk));
2336 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2337 l2cap_ertm_init(sk);
2339 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, then delete the channel (deferred via a short timer if a
 * user currently holds the socket lock). */
2347 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2349 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2350 struct l2cap_disconn_rsp rsp;
2354 scid = __le16_to_cpu(req->scid);
2355 dcid = __le16_to_cpu(req->dcid);
2357 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2359 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2363 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2364 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2365 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2367 sk->sk_shutdown = SHUTDOWN_MASK;
2369 /* don't delete l2cap channel if sk is owned by user */
2370 if (sock_owned_by_user(sk)) {
2371 sk->sk_state = BT_DISCONN;
2372 l2cap_sock_clear_timer(sk);
2373 l2cap_sock_set_timer(sk, HZ / 5);
2378 l2cap_chan_del(sk, ECONNRESET);
2381 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our request: delete the channel
 * with no error (deferred via a short timer if user-locked). */
2385 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2387 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2391 scid = __le16_to_cpu(rsp->scid);
2392 dcid = __le16_to_cpu(rsp->dcid);
2394 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2396 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2400 /* don't delete l2cap channel if sk is owned by user */
2401 if (sock_owned_by_user(sk)) {
2402 sk->sk_state = BT_DISCONN;
2403 l2cap_sock_clear_timer(sk);
2404 l2cap_sock_set_timer(sk, HZ / 5);
2409 l2cap_chan_del(sk, 0);
2412 l2cap_sock_kill(sk);
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries, and NOTSUPP anything else. */
2416 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2418 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2421 type = __le16_to_cpu(req->type);
2423 BT_DBG("type 0x%4.4x", type);
2425 if (type == L2CAP_IT_FEAT_MASK) {
2427 u32 feat_mask = l2cap_feat_mask;
2428 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2429 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2430 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM and Streaming on top of the static feature mask.
 * NOTE(review): the expression continues on an elided line. */
2432 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2434 put_unaligned_le32(feat_mask, rsp->data);
2435 l2cap_send_cmd(conn, cmd->ident,
2436 L2CAP_INFO_RSP, sizeof(buf), buf);
2437 } else if (type == L2CAP_IT_FIXED_CHAN) {
2439 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2440 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2441 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
2442 memcpy(buf + 4, l2cap_fixed_chan, 8);
2443 l2cap_send_cmd(conn, cmd->ident,
2444 L2CAP_INFO_RSP, sizeof(buf), buf);
2446 struct l2cap_info_rsp rsp;
2447 rsp.type = cpu_to_le16(type);
2448 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2449 l2cap_send_cmd(conn, cmd->ident,
2450 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: record the peer's feature mask,
 * chain a fixed-channel query if supported, and once discovery is
 * finished, start any channels waiting on it. */
2456 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2458 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2461 type = __le16_to_cpu(rsp->type);
2462 result = __le16_to_cpu(rsp->result);
2464 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2466 del_timer(&conn->info_timer);
/* Peer couldn't answer: treat discovery as done and proceed. */
2468 if (result != L2CAP_IR_SUCCESS) {
2469 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2470 conn->info_ident = 0;
2472 l2cap_conn_start(conn);
2477 if (type == L2CAP_IT_FEAT_MASK) {
2478 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Fixed channels supported: follow up with a second info request. */
2480 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2481 struct l2cap_info_req req;
2482 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2484 conn->info_ident = l2cap_get_ident(conn);
2486 l2cap_send_cmd(conn, conn->info_ident,
2487 L2CAP_INFO_REQ, sizeof(req), &req);
2489 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2490 conn->info_ident = 0;
2492 l2cap_conn_start(conn);
2494 } else if (type == L2CAP_IT_FIXED_CHAN) {
2495 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2496 conn->info_ident = 0;
2498 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the ranges
 * the code enforces (interval 6..3200, timeout 10..3200, latency
 * bounded by the supervision timeout).  Return value lines are elided
 * in this view; callers treat non-zero as "reject". */
2504 static int inline l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2509 if (min > max || min < 6 || max > 3200)
2512 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the maximum interval. */
2515 if (max >= to_multiplier * 8)
2518 max_latency = (to_multiplier * 8 / max) - 1;
2519 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed parameters and send an accept/reject response. */
2525 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2526 struct l2cap_cmd_hdr *cmd, u8 *data)
2528 struct hci_conn *hcon = conn->hcon;
2529 struct l2cap_conn_param_update_req *req;
2530 struct l2cap_conn_param_update_rsp rsp;
2531 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on a parameter-update request. */
2533 if (!(hcon->link_mode & HCI_LM_MASTER))
2536 cmd_len = __le16_to_cpu(cmd->len);
2537 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2540 req = (struct l2cap_conn_param_update_req *) data;
2541 min = __le16_to_cpu(req->min);
2542 max = __le16_to_cpu(req->max);
2543 latency = __le16_to_cpu(req->latency);
2544 to_multiplier = __le16_to_cpu(req->to_multiplier);
2546 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2547 min, max, latency, to_multiplier);
2549 memset(&rsp, 0, sizeof(rsp));
/* Non-zero from the checker means the parameters are out of range. */
2550 if (l2cap_check_conn_param(min, max, latency, to_multiplier))
2551 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2553 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Dispatch one BR/EDR signalling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged. */
2561 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2562 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2566 switch (cmd->code) {
2567 case L2CAP_COMMAND_REJ:
2568 l2cap_command_rej(conn, cmd, data);
2571 case L2CAP_CONN_REQ:
2572 err = l2cap_connect_req(conn, cmd, data);
2575 case L2CAP_CONN_RSP:
2576 err = l2cap_connect_rsp(conn, cmd, data);
2579 case L2CAP_CONF_REQ:
2580 err = l2cap_config_req(conn, cmd, cmd_len, data);
2583 case L2CAP_CONF_RSP:
2584 err = l2cap_config_rsp(conn, cmd, data);
2587 case L2CAP_DISCONN_REQ:
2588 err = l2cap_disconnect_req(conn, cmd, data);
2591 case L2CAP_DISCONN_RSP:
2592 err = l2cap_disconnect_rsp(conn, cmd, data);
2595 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back to the sender. */
2596 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2599 case L2CAP_ECHO_RSP:
2602 case L2CAP_INFO_REQ:
2603 err = l2cap_information_req(conn, cmd, data);
2606 case L2CAP_INFO_RSP:
2607 err = l2cap_information_rsp(conn, cmd, data);
2611 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command; only the connection-parameter
 * update request is actively handled here. */
2619 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2620 struct l2cap_cmd_hdr *cmd, u8 *data)
2622 switch (cmd->code) {
2623 case L2CAP_COMMAND_REJ:
2626 case L2CAP_CONN_PARAM_UPDATE_REQ:
2627 return l2cap_conn_param_update_req(conn, cmd, data);
2629 case L2CAP_CONN_PARAM_UPDATE_RSP:
2633 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signalling-channel skb: iterate over the embedded command
 * headers, validate each command's length/ident, dispatch to the LE or
 * BR/EDR handler, and send a Command Reject on handler failure. */
2638 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2639 struct sk_buff *skb)
2641 u8 *data = skb->data;
2643 struct l2cap_cmd_hdr cmd;
/* Let raw (SOCK_RAW) listeners see the signalling traffic too. */
2646 l2cap_raw_recv(conn, skb);
2648 while (len >= L2CAP_CMD_HDR_SIZE) {
2650 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2651 data += L2CAP_CMD_HDR_SIZE;
2652 len -= L2CAP_CMD_HDR_SIZE;
2654 cmd_len = le16_to_cpu(cmd.len);
2656 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Command payload must fit in what's left, and ident 0 is invalid. */
2658 if (cmd_len > len || !cmd.ident) {
2659 BT_DBG("corrupted command");
2663 if (conn->hcon->type == LE_LINK)
2664 err = l2cap_le_sig_cmd(conn, &cmd, data);
2666 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2669 struct l2cap_cmd_rej rej;
2670 BT_DBG("error %d", err);
2672 /* FIXME: Map err to a valid reason */
2673 rej.reason = cpu_to_le16(0);
2674 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/Streaming frame:
 * trim the 2 FCS bytes, recompute over L2CAP header + control + data,
 * and compare.  No-op when FCS is not in use. */
2684 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
2686 u16 our_fcs, rcv_fcs;
/* FCS covers the L2CAP header plus the 2-byte control field. */
2687 int hdr_size = L2CAP_HDR_SIZE + 2;
2689 if (pi->fcs == L2CAP_FCS_CRC16) {
2690 skb_trim(skb, skb->len - 2);
/* After the trim, skb->data + skb->len points at the FCS bytes. */
2691 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2692 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2694 if (our_fcs != rcv_fcs)
/* After a poll/final exchange, send whatever the ERTM state requires:
 * RNR if locally busy, retransmissions/I-frames otherwise, and a bare
 * RR when nothing was sent so the peer still gets an ack. */
2700 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
2702 struct l2cap_pinfo *pi = l2cap_pi(sk);
2705 pi->frames_sent = 0;
2707 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2709 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2710 control |= L2CAP_SUPER_RCV_NOT_READY;
2711 l2cap_send_sframe(pi, control);
2712 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2715 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
2716 l2cap_retransmit_frames(sk);
2718 l2cap_ertm_send(sk);
/* Nothing went out and we're not busy: ack with a plain RR. */
2720 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2721 pi->frames_sent == 0) {
2722 control |= L2CAP_SUPER_RCV_READY;
2723 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64
 * sequence space).  Duplicate tx_seq frames are detected. */
2727 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
2729 struct sk_buff *next_skb;
2730 struct l2cap_pinfo *pi = l2cap_pi(sk);
2731 int tx_seq_offset, next_tx_seq_offset;
2733 bt_cb(skb)->tx_seq = tx_seq;
2734 bt_cb(skb)->sar = sar;
2736 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: nothing to order against. */
2738 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are distances from buffer_seq in the mod-64 window. */
2742 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
2743 if (tx_seq_offset < 0)
2744 tx_seq_offset += 64;
2747 if (bt_cb(next_skb)->tx_seq == tx_seq)
2750 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
2751 pi->buffer_seq) % 64;
2752 if (next_tx_seq_offset < 0)
2753 next_tx_seq_offset += 64;
2755 if (next_tx_seq_offset > tx_seq_offset) {
2756 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
2760 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
2763 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
2765 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an ERTM SDU from its SAR-tagged I-frames.  Unsegmented
 * frames go straight to the socket queue; START allocates pi->sdu,
 * CONTINUE appends, END finishes and delivers a clone.  The SAR_RETRY
 * flag lets delivery be retried after a local-busy condition without
 * re-copying already-appended data.  Error paths (label names, frees,
 * the disconnect on protocol violation) are partially elided here. */
2770 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2772 struct l2cap_pinfo *pi = l2cap_pi(sk);
2773 struct sk_buff *_skb;
2776 switch (control & L2CAP_CTRL_SAR) {
2777 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame while a segmented SDU is open is a violation. */
2778 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
2781 err = sock_queue_rcv_skb(sk, skb);
2787 case L2CAP_SDU_START:
2788 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the SDU length. */
2791 pi->sdu_len = get_unaligned_le16(skb->data);
2793 if (pi->sdu_len > pi->imtu)
2796 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2800 /* pull sdu_len bytes only after alloc, because of Local Busy
2801 * condition we have to be sure that this will be executed
2802 * only once, i.e., when alloc does not fail */
2805 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2807 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2808 pi->partial_sdu_len = skb->len;
2811 case L2CAP_SDU_CONTINUE:
2812 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2818 pi->partial_sdu_len += skb->len;
2819 if (pi->partial_sdu_len > pi->sdu_len)
2822 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* (END case) must have an SDU in progress. */
2827 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the append on retry — the data was copied the first time. */
2833 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
2834 pi->partial_sdu_len += skb->len;
2836 if (pi->partial_sdu_len > pi->imtu)
2839 if (pi->partial_sdu_len != pi->sdu_len)
2842 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2845 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2847 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2851 err = sock_queue_rcv_skb(sk, _skb);
/* Receive queue full: remember to retry delivery later. */
2854 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
2858 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
2859 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2873 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Drain the local-busy queue back into the reassembly path.  If a
 * frame still can't be delivered it is requeued and we stay busy; once
 * empty, exit local-busy (poll the peer with RR+P if we had sent RNR). */
2878 static int l2cap_try_push_rx_skb(struct sock *sk)
2880 struct l2cap_pinfo *pi = l2cap_pi(sk);
2881 struct sk_buff *skb;
2885 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
2886 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
2887 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Delivery failed again: put it back and remain busy. */
2889 skb_queue_head(BUSY_QUEUE(sk), skb);
2893 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2896 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We had told the peer "not ready"; poll it to resume (RR + P bit). */
2899 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2900 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
2901 l2cap_send_sframe(pi, control);
2902 l2cap_pi(sk)->retry_count = 1;
2904 del_timer(&pi->retrans_timer);
2905 __mod_monitor_timer();
2907 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
2910 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2911 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
2913 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition: repeatedly try to
 * push queued frames, sleeping between attempts, and disconnect after
 * L2CAP_LOCAL_BUSY_TRIES failures, a signal, or a socket error. */
2918 static void l2cap_busy_work(struct work_struct *work)
2920 DECLARE_WAITQUEUE(wait, current);
2921 struct l2cap_pinfo *pi =
2922 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast
 * recovers the socket from the protocol info. */
2923 struct sock *sk = (struct sock *)pi;
2924 int n_tries = 0, timeo = HZ/5, err;
2925 struct sk_buff *skb;
2929 add_wait_queue(sk_sleep(sk), &wait);
2930 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
2931 set_current_state(TASK_INTERRUPTIBLE);
2933 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
2935 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
2942 if (signal_pending(current)) {
2943 err = sock_intr_errno(timeo);
2948 timeo = schedule_timeout(timeo);
2951 err = sock_error(sk);
2955 if (l2cap_try_push_rx_skb(sk) == 0)
2959 set_current_state(TASK_RUNNING);
2960 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver one in-sequence ERTM I-frame.  If already in local busy,
 * just queue it behind the others; if delivery newly fails, enter
 * local busy: queue the frame, send RNR, stop acking, and schedule
 * the busy worker to retry. */
2965 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
2967 struct l2cap_pinfo *pi = l2cap_pi(sk);
2970 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
2971 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2972 __skb_queue_tail(BUSY_QUEUE(sk), skb);
2973 return l2cap_try_push_rx_skb(sk);
2978 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
2980 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
2984 /* Busy Condition */
2985 BT_DBG("sk %p, Enter local busy", sk);
2987 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2988 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
2989 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (Receiver Not Ready). */
2991 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2992 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
2993 l2cap_send_sframe(pi, sctrl);
2995 pi->conn_state |= L2CAP_CONN_RNR_SENT;
2997 del_timer(&pi->ack_timer);
2999 queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble a streaming-mode SDU from UNSEGMENTED/START/CONTINUE/END
 * fragments and queue the completed SDU on the socket.  Streaming mode
 * has no retransmission, so any inconsistency simply drops the
 * partially assembled SDU.
 */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* a START was pending: the peer abandoned that SDU */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {

		err = sock_queue_rcv_skb(sk, skb);

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {

		/* first two bytes of a START fragment carry the SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);

		/* announced SDU larger than our MTU: protocol violation */
		if (pi->sdu_len > pi->imtu) {

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		/* NOTE(review): no NULL check on bt_skb_alloc() is visible
		 * before the memcpy below — confirm the elided lines handle
		 * allocation failure. */
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;

	case L2CAP_SDU_CONTINUE:
		/* CONTINUE without a preceding START is a protocol error */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->partial_sdu_len += skb->len;
		/* more data than the START announced: drop the SDU */
		if (pi->partial_sdu_len > pi->sdu_len)

	/* remaining case: L2CAP_SDU_END fragment */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len += skb->len;

		if (pi->partial_sdu_len > pi->imtu)

		/* complete SDU: hand a private clone to the socket layer */
		if (pi->partial_sdu_len == pi->sdu_len) {
			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
			err = sock_queue_rcv_skb(sk, _skb);
3098 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3100 struct sk_buff *skb;
3103 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3104 if (bt_cb(skb)->tx_seq != tx_seq)
3107 skb = skb_dequeue(SREJ_QUEUE(sk));
3108 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3109 l2cap_ertm_reassembly_sdu(sk, skb, control);
3110 l2cap_pi(sk)->buffer_seq_srej =
3111 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3112 tx_seq = (tx_seq + 1) % 64;
/*
 * The peer re-polled while we are in SREJ recovery: walk the pending
 * SREJ list, re-sending a SELECT_REJECT for each outstanding entry and
 * rotating it to the tail, until the entry matching tx_seq is reached.
 */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;

	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		/* reached the requested sequence: done (elided lines
		 * presumably drop/free this entry) */
		if (l->tx_seq == tmp) {

		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);

		/* move the still-missing entry to the list tail */
		list_add_tail(&l->list, SREJ_LIST(sk));
3136 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3138 struct l2cap_pinfo *pi = l2cap_pi(sk);
3139 struct srej_list *new;
3142 while (tx_seq != pi->expected_tx_seq) {
3143 control = L2CAP_SUPER_SELECT_REJECT;
3144 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3145 l2cap_send_sframe(pi, control);
3147 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3148 new->tx_seq = pi->expected_tx_seq;
3149 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3150 list_add_tail(&new->list, SREJ_LIST(sk));
3152 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM I-frame receive path: ack bookkeeping, window validation,
 * duplicate detection, SREJ (selective reject) recovery, and delivery
 * of in-sequence frames to reassembly.  Also schedules a piggyback
 * acknowledgement once enough frames have accumulated.
 */
static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	/* ack after roughly a sixth of the transmit window */
	int num_to_ack = (pi->tx_win/6) + 1;

	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,

	/* F-bit answers our poll: stop monitoring, leave WAIT_F state */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&pi->monitor_timer);
		if (pi->unacked_frames > 0)
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_WAIT_F;

	/* req_seq acknowledges all frames sent before it */
	pi->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(sk);

	if (tx_seq == pi->expected_tx_seq)

	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= pi->tx_win) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);

	/* NOTE(review): '==' compares the whole conn_state bitmask against
	 * a single flag value; a '&' test looks intended — confirm against
	 * upstream before relying on this branch. */
	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(SREJ_LIST(sk),
				struct srej_list, list);
		/* this frame fills the oldest gap we SREJ'ed for */
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
			l2cap_check_srej_gap(sk, tx_seq);

			list_del(&first->list);

			/* all gaps filled: leave SREJ recovery */
			if (list_empty(SREJ_LIST(sk))) {
				pi->buffer_seq = pi->buffer_seq_srej;
				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;

				BT_DBG("sk %p, Exit SREJ_SENT", sk);

			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)

			/* already SREJ'ed for this seq: re-poll the peer */
			list_for_each_entry(l, SREJ_LIST(sk), list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(sk, tx_seq);

			l2cap_send_srejframe(sk, tx_seq);

		expected_tx_seq_offset =
			(pi->expected_tx_seq - pi->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)

		/* first out-of-sequence frame: enter SREJ recovery */
		pi->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("sk %p, Enter SREJ", sk);

		INIT_LIST_HEAD(SREJ_LIST(sk));
		pi->buffer_seq_srej = pi->buffer_seq;

		__skb_queue_head_init(SREJ_QUEUE(sk));
		__skb_queue_head_init(BUSY_QUEUE(sk));
		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);

		pi->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(sk, tx_seq);

		/* the SREJ itself carries the acknowledgement */
		del_timer(&pi->ack_timer);

	/* in-sequence frame: advance window and deliver */
	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* still recovering: buffer in order on the SREJ queue */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(SREJ_QUEUE(sk), skb);

	err = l2cap_push_rx_skb(sk, skb, rx_control);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

			l2cap_retransmit_frames(sk);

	/* piggyback an ack once num_to_ack frames have arrived */
	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
	if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver-Ready (RR) S-frame: acknowledge outstanding
 * frames and, depending on the P/F bits, answer a poll, close out a
 * REJ recovery cycle, or resume normal transmission.
 */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),

	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* peer polled us: our reply must carry the F-bit */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();

			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* in SREJ recovery: answer with the SREJ tail */
			l2cap_send_srejtail(sk);

			l2cap_send_i_or_rr_or_rnr(sk);

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* F-bit closes an active REJ cycle; otherwise retransmit */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

			l2cap_retransmit_frames(sk);

		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();

		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)

			l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) S-frame: the peer requests retransmission of
 * everything from req_seq onward.  REJ_ACT tracks whether a
 * retransmission cycle is already in flight so the F-bit reply can
 * close it instead of retransmitting twice.
 */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* req_seq acknowledges everything before it */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit: a retransmission cycle was in flight — close it */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;

			l2cap_retransmit_frames(sk);

		l2cap_retransmit_frames(sk);

		/* remember the cycle so the later F-bit doesn't re-send */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Select-Reject (SREJ) S-frame: the peer requests
 * retransmission of one specific frame.  P- and F-bit variants also
 * manage the poll/final handshake; SREJ_ACT plus srej_save_reqseq
 * guard against retransmitting the same frame twice.
 */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq */
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_ertm_send(sk);

		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit for a frame we already retransmitted: just clear */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;

			l2cap_retransmit_one_frame(sk, tx_seq);

		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver-Not-Ready (RNR) S-frame: the peer is flow-
 * controlling us.  Mark the remote busy, stop retransmitting unless
 * SREJ recovery is active, and answer a poll with the F-bit set.
 */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);

	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	/* the RNR still acknowledges frames up to its req_seq */
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);

	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* peer can't receive: pointless to run the retransmit timer */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);

	/* in SREJ recovery a poll is answered with the SREJ tail */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);

		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received ERTM S-frame (supervisory frame) to the RR /
 * REJ / SREJ / RNR handler after processing a shared F-bit reply to
 * an earlier poll of ours.
 */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);

	/* F-bit answers our poll: stop monitoring, leave WAIT_F state */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Validate and dispatch one ERTM PDU: check FCS, strip control/FCS
 * overhead, verify the payload fits the MPS and the req-seq lies
 * inside the unacknowledged window, then hand off to the I-frame or
 * S-frame handler.  Invalid PDUs disconnect the channel (ECONNRESET).
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))

	/* a SAR-start I-frame carries a 2-byte SDU length header */
	if (__is_sar_start(control) && __is_iframe(control))

	if (pi->fcs == L2CAP_FCS_CRC16)

	/* payload may not exceed the negotiated MPS */
	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);

	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);

	if (__is_iframe(control)) {
		/* I-frame with no payload left is malformed */
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);

		l2cap_data_channel_iframe(sk, control, skb);

		/* S-frame carrying payload is malformed */
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);

		l2cap_data_channel_sframe(sk, control, skb);
/*
 * Entry point for data arriving on a connection-oriented channel:
 * look up the socket by source CID and dispatch by channel mode
 * (basic / ERTM / streaming).  Frames for unknown CIDs or channels
 * not in BT_CONNECTED state are dropped.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	struct l2cap_pinfo *pi;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		BT_DBG("unknown cid 0x%4.4x", cid);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)

		if (!sock_queue_rcv_skb(sk, skb))

	case L2CAP_MODE_ERTM:
		/* user holds the socket lock: defer to the backlog */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);

			if (sk_add_backlog(sk, skb))

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);

		/* corrupted frames are silently dropped in streaming mode */
		if (l2cap_check_fcs(pi, skb))

		if (__is_sar_start(control))

		if (pi->fcs == L2CAP_FCS_CRC16)

		/* streaming mode carries I-frames only, bounded by MPS */
		if (len > pi->mps || len < 0 || __is_sframe(control))

		tx_seq = __get_txseq(control);

		/* no retransmission: simply resynchronise on any gap */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;

			pi->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(sk, skb, control);

		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (G-frame) payload: find a socket bound to
 * the PSM on our local address and queue the data if it fits the MTU;
 * otherwise drop silently (connectionless traffic is best-effort).
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)

	if (!sock_queue_rcv_skb(sk, skb))
/*
 * Demultiplex one complete L2CAP frame by channel ID: signalling,
 * connectionless, or a connection-oriented data channel.  Frames
 * whose header length disagrees with the skb length are discarded.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* header length must match the actual payload */
	if (len != skb->len) {

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);

	case L2CAP_CID_CONN_LESS:
		/* connectionless frames carry the PSM before the payload */
		psm = get_unaligned_le16(skb->data);

		l2cap_conless_channel(conn, psm, skb);

		/* any other CID is a connection-oriented data channel */
		l2cap_data_channel(conn, cid, skb);
3680 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should an incoming ACL connection from bdaddr be
 * accepted?  Scan listening L2CAP sockets; a socket bound to this
 * adapter's own address (exact match) takes precedence over
 * wildcard (BDADDR_ANY) listeners.  Returns HCI_LM_* flags.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	/* only ACL links carry L2CAP connections */
	if (type != ACL_LINK)

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)

		/* exact local-address match wins over wildcard */
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;

		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;

	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
/*
 * HCI callback: a baseband connection attempt has completed.  On
 * success, create/attach the L2CAP connection object and kick off
 * channel setup; on failure, tear down any pending channels.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	/* L2CAP runs over ACL and LE links only */
	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))

		conn = l2cap_conn_add(hcon, status);

			l2cap_conn_ready(conn);

		l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: supply the disconnect reason to report to the peer
 * when the link is about to go down.  conn->disc_reason was recorded
 * earlier by the signalling code.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	/* no L2CAP state on this link: elided line returns a default reason */
	if (hcon->type != ACL_LINK || !conn)

	return conn->disc_reason;
/*
 * HCI callback: the baseband link is gone — tear down the whole
 * L2CAP connection and every channel on it.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))

	l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to a change in link encryption for one channel.  Losing
 * encryption on a MEDIUM-security channel starts a 5s grace timer
 * (re-encryption may follow); on a HIGH-security channel it closes
 * the socket immediately.  Regaining encryption cancels the timer.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
	/* only connection-oriented sockets carry security requirements */
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
/*
 * HCI callback: an authentication/encryption procedure finished with
 * 'status'.  Walk every channel on the connection and move those that
 * were waiting on security forward: established channels get an
 * encryption check, BT_CONNECT channels (re)send their CONN_REQ, and
 * BT_CONNECT2 channels answer the pending incoming request.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

		/* channel not waiting on this security procedure */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);

		if (sk->sk_state == BT_CONNECT) {

				/* security done: issue the deferred CONN_REQ */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);

				/* security failed: short timer before close */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);

		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;

				/* accept the pending incoming connection */
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;

				/* refuse it: security block */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);
/*
 * HCI callback: raw ACL data arrived.  L2CAP frames may span several
 * ACL packets, so start fragments allocate conn->rx_skb sized from
 * the L2CAP header length and continuation fragments are appended
 * until rx_len reaches zero, at which point the complete frame is
 * handed to l2cap_recv_frame().  Any length inconsistency marks the
 * connection unreliable (ECOMM) and resets reassembly.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

		conn = l2cap_conn_add(hcon, 0);

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;

			/* a previous frame never finished: discard it */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;

			l2cap_conn_unreliable(conn, ECOMM);

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* sanity-check against the channel's MTU before buffering */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
						len, l2cap_pi(sk)->imtu);

			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* continuation fragment with no frame in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;

			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
/*
 * seq_file show callback: dump one line per L2CAP socket (addresses,
 * state, PSM, MTUs, security level) for the debugfs 'l2cap' file.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
	struct hlist_node *node;

	/* _bh: socket list is also touched from softirq context */
	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->imtu, pi->omtu, pi->sec_level,

	read_unlock_bh(&l2cap_sk_list.lock);
3981 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3983 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* file_operations for the debugfs 'l2cap' file, built on the
 * seq_file single_* helpers.
 * NOTE(review): a ".read = seq_read" initializer is not visible in
 * this view — confirm it exists in the elided line, otherwise reads
 * on the file would fail. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.llseek		= seq_lseek,
	.release	= single_release,
/* debugfs dentry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
/* Hooks through which the HCI core delivers link events and ACL data
 * to the L2CAP layer; registered via hci_register_proto(). */
static struct hci_proto l2cap_hci_proto = {
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
/*
 * Module init: register the socket family, create the single-threaded
 * busy-work workqueue, hook into the HCI core, and expose the debugfs
 * file.  Error paths (elided here) unwind in reverse order.
 */
int __init l2cap_init(void)
	err = l2cap_init_sockets();

	/* single-threaded: busy work per channel must not run concurrently */
	_busy_wq = create_singlethread_workqueue("l2cap");

	err = hci_register_proto(&l2cap_hci_proto);
		BT_ERR("L2CAP protocol registration failed");
		bt_sock_unregister(BTPROTO_L2CAP);

	l2cap_debugfs = debugfs_create_file("l2cap", 0444,
				bt_debugfs, NULL, &l2cap_debugfs_fops);
		/* debugfs failure is non-fatal: only log it */
		BT_ERR("Failed to create L2CAP debug file");

	BT_INFO("L2CAP socket layer initialized");

	/* error unwind */
	destroy_workqueue(_busy_wq);
	l2cap_cleanup_sockets();
/*
 * Module exit: undo l2cap_init() — remove debugfs, drain and destroy
 * the busy workqueue, unregister from HCI, and release the sockets.
 */
void l2cap_exit(void)
	debugfs_remove(l2cap_debugfs);

	/* flush before destroy so no busy work runs after unload */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
/* Allow disabling ERTM at load time (and via sysfs, mode 0644). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");