2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
52 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
53 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the socket-layer BDADDR_* encoding for LE
 * links: ADDR_LE_DEV_PUBLIC -> BDADDR_LE_PUBLIC, anything else is
 * treated as an LE random address.
 * NOTE(review): the non-LE (BR/EDR) return path is elided in this
 * extract — confirm against the full source.
 */
65 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
67 if (hcon->type == LE_LINK) {
68 if (type == ADDR_LE_DEV_PUBLIC)
69 return BDADDR_LE_PUBLIC;
71 return BDADDR_LE_RANDOM;
77 /* ---- L2CAP channels ---- */
/* Per-connection channel lookups. The __-prefixed variants walk
 * conn->chan_l without locking; callers must hold conn->chan_lock. */
79 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
84 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk, matching on the local (source) CID instead. */
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
96 list_for_each_entry(c, &conn->chan_l, list) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 struct l2cap_chan *c;
/* Hold chan_lock so the list cannot change while we search. */
110 mutex_lock(&conn->chan_lock);
111 c = __l2cap_get_chan_by_scid(conn, cid);
114 mutex_unlock(&conn->chan_lock);
119 /* Find channel with given DCID.
120 * Returns locked channel.
122 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_dcid(conn, cid);
131 mutex_unlock(&conn->chan_lock);
/* Unlocked lookup by the signalling-command identifier of an in-flight
 * request; caller must hold conn->chan_lock. */
136 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 struct l2cap_chan *c;
141 list_for_each_entry(c, &conn->chan_l, list) {
142 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
148 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 struct l2cap_chan *c;
153 mutex_lock(&conn->chan_lock);
154 c = __l2cap_get_chan_by_ident(conn, ident);
157 mutex_unlock(&conn->chan_lock);
/* Find a channel on the global list bound to the given PSM and source
 * address; caller must hold chan_list_lock. */
162 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
164 struct l2cap_chan *c;
166 list_for_each_entry(c, &chan_list, global_l) {
167 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. A caller-supplied nonzero PSM must not
 * already be bound for this source address; with psm == 0 a free
 * dynamic PSM is picked from the odd values in 0x1001..0x10ff. */
173 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
177 write_lock(&chan_list_lock);
179 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs: step by 2 so the low bit stays set. */
192 for (p = 0x1001; p < 0x1100; p += 2)
193 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
194 chan->psm = cpu_to_le16(p);
195 chan->sport = cpu_to_le16(p);
202 write_unlock(&chan_list_lock);
/* Record a fixed source CID on the channel, under the global list
 * write lock. */
206 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
208 write_lock(&chan_list_lock);
212 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection.
 * LE and BR/EDR links have different upper bounds for the dynamic
 * CID range. */
217 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 if (conn->hcon->type == LE_LINK)
222 dyn_end = L2CAP_CID_LE_DYN_END;
224 dyn_end = L2CAP_CID_DYN_END;
226 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
227 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition the channel to a new state and notify the owner via the
 * state_change op (err == 0). */
234 static void l2cap_state_change(struct l2cap_chan *chan, int state)
236 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
237 state_to_string(state));
240 chan->ops->state_change(chan, state, 0);
/* State transition that also delivers an error code to the owner. */
243 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
247 chan->ops->state_change(chan, chan->state, err);
/* Report an error on the channel without changing its state (the
 * current state is passed back unchanged). */
250 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
252 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer — but only when the monitor timer
 * is not already pending and a retransmit timeout is configured. */
255 static void __set_retrans_timer(struct l2cap_chan *chan)
257 if (!delayed_work_pending(&chan->monitor_timer) &&
258 chan->retrans_timeout) {
259 l2cap_set_timer(chan, &chan->retrans_timer,
260 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cancelled
 * first since only one of the two runs at a time. */
264 static void __set_monitor_timer(struct l2cap_chan *chan)
266 __clear_retrans_timer(chan);
267 if (chan->monitor_timeout) {
268 l2cap_set_timer(chan, &chan->monitor_timer,
269 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM sequence
 * number 'seq' in its control field. */
273 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
278 skb_queue_walk(head, skb) {
279 if (bt_cb(skb)->control.txseq == seq)
286 /* ---- L2CAP sequence number lists ---- */
288 /* For ERTM, ordered lists of sequence numbers must be tracked for
289 * SREJ requests that are received and for frames that are to be
290 * retransmitted. These seq_list functions implement a singly-linked
291 * list in an array, where membership in the list can also be checked
292 * in constant time. Items can also be added to the tail of the list
293 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array. Sizing it to the next power
 * of two lets (seq & mask) index a slot directly. */
297 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
299 size_t alloc_size, i;
301 /* Allocated size is a power of 2 to map sequence numbers
302 * (which may be up to 14 bits) in to a smaller array that is
303 * sized for the negotiated ERTM transmit windows.
305 alloc_size = roundup_pow_of_two(size);
307 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* mask converts a (possibly 14-bit) sequence number to a slot. */
311 seq_list->mask = alloc_size - 1;
312 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
313 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
314 for (i = 0; i < alloc_size; i++)
315 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
320 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
322 kfree(seq_list->list);
325 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
328 /* Constant-time check for list membership */
329 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove 'seq' from the list: O(1) when it is the head, otherwise a
 * walk from the head to locate its predecessor. Returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or 'seq' is absent. */
332 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
334 u16 mask = seq_list->mask;
336 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
337 /* In case someone tries to pop the head of an empty list */
338 return L2CAP_SEQ_LIST_CLEAR;
339 } else if (seq_list->head == seq) {
340 /* Head can be removed in constant time */
341 seq_list->head = seq_list->list[seq & mask];
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: the list is now fully empty. */
344 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
345 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
349 /* Walk the list to find the sequence number */
350 u16 prev = seq_list->head;
351 while (seq_list->list[prev & mask] != seq) {
352 prev = seq_list->list[prev & mask];
353 if (prev == L2CAP_SEQ_LIST_TAIL)
354 return L2CAP_SEQ_LIST_CLEAR;
357 /* Unlink the number from the list and clear it */
358 seq_list->list[prev & mask] = seq_list->list[seq & mask];
359 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
360 if (seq_list->tail == seq)
361 seq_list->tail = prev;
366 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
368 /* Remove the head in constant time */
369 return l2cap_seq_list_remove(seq_list, seq_list->head);
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
/* Nothing to do if the list is already empty. */
376 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
379 for (i = 0; i <= seq_list->mask; i++)
380 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append 'seq' at the tail in O(1). A slot that is already in use
 * means 'seq' is already a member, so duplicates are ignored. */
386 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
388 u16 mask = seq_list->mask;
390 /* All appends happen in constant time */
392 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* Appending to an empty list also establishes the head. */
395 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
396 seq_list->head = seq;
398 seq_list->list[seq_list->tail & mask] = seq;
400 seq_list->tail = seq;
401 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with
 * an errno derived from its current state, then drop the reference
 * that was held while the timer was pending. */
404 static void l2cap_chan_timeout(struct work_struct *work)
406 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
408 struct l2cap_conn *conn = chan->conn;
411 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: connection channel-list mutex first, then the channel. */
413 mutex_lock(&conn->chan_lock);
414 l2cap_chan_lock(chan);
416 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
417 reason = ECONNREFUSED;
418 else if (chan->state == BT_CONNECT &&
419 chan->sec_level != BT_SECURITY_SDP)
420 reason = ECONNREFUSED;
424 l2cap_chan_close(chan, reason);
426 l2cap_chan_unlock(chan);
/* Notify the owner after dropping the channel lock. */
428 chan->ops->close(chan);
429 mutex_unlock(&conn->chan_lock);
431 l2cap_chan_put(chan);
/* Allocate and initialise a new channel, add it to the global channel
 * list, and take the initial kref. */
434 struct l2cap_chan *l2cap_chan_create(void)
436 struct l2cap_chan *chan;
438 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
442 mutex_init(&chan->lock);
444 write_lock(&chan_list_lock);
445 list_add(&chan->global_l, &chan_list);
446 write_unlock(&chan_list_lock);
448 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
450 chan->state = BT_OPEN;
452 kref_init(&chan->kref);
454 /* This flag is cleared in l2cap_chan_ready() */
455 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
457 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free. */
462 static void l2cap_chan_destroy(struct kref *kref)
464 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
466 BT_DBG("chan %p", chan);
468 write_lock(&chan_list_lock);
469 list_del(&chan->global_l);
470 write_unlock(&chan_list_lock);
/* Take an extra reference on the channel. */
475 void l2cap_chan_hold(struct l2cap_chan *c)
477 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; the last put triggers l2cap_chan_destroy(). */
482 void l2cap_chan_put(struct l2cap_chan *c)
484 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
486 kref_put(&c->kref, l2cap_chan_destroy);
/* Initialise a channel with the default L2CAP parameters (FCS, TX
 * window sizes, security level) before configuration. */
489 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
491 chan->fcs = L2CAP_FCS_CRC16;
492 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
493 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
494 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
495 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
496 chan->sec_level = BT_SECURITY_LOW;
498 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Set up LE credit-based flow control state: we start with zero TX
 * credits (the peer grants them) and advertise le_max_credits for RX.
 * MPS is capped by both our MTU and the LE default. */
501 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
504 chan->sdu_last_frag = NULL;
506 chan->tx_credits = 0;
507 chan->rx_credits = le_max_credits;
508 chan->mps = min_t(u16, chan->imtu, L2CAP_LE_DEFAULT_MPS);
510 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: assign CIDs/MTUs according to the
 * channel type, set the default local QoS parameters, take refs on the
 * channel and the HCI connection, and link it into conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()). */
513 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
515 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
516 __le16_to_cpu(chan->psm), chan->dcid);
518 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
522 switch (chan->chan_type) {
523 case L2CAP_CHAN_CONN_ORIENTED:
/* On LE, ATT keeps its fixed CID; other channels get a dynamic one. */
524 if (conn->hcon->type == LE_LINK) {
525 if (chan->dcid == L2CAP_CID_ATT) {
526 chan->omtu = L2CAP_DEFAULT_MTU;
527 chan->scid = L2CAP_CID_ATT;
529 chan->scid = l2cap_alloc_cid(conn);
532 /* Alloc CID for connection-oriented socket */
533 chan->scid = l2cap_alloc_cid(conn);
534 chan->omtu = L2CAP_DEFAULT_MTU;
538 case L2CAP_CHAN_CONN_LESS:
539 /* Connectionless socket */
540 chan->scid = L2CAP_CID_CONN_LESS;
541 chan->dcid = L2CAP_CID_CONN_LESS;
542 chan->omtu = L2CAP_DEFAULT_MTU;
/* A2MP rides its fixed CID with its own (larger) default MTU. */
545 case L2CAP_CHAN_CONN_FIX_A2MP:
546 chan->scid = L2CAP_CID_A2MP;
547 chan->dcid = L2CAP_CID_A2MP;
548 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
549 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
553 /* Raw socket can send/recv signalling messages only */
554 chan->scid = L2CAP_CID_SIGNALING;
555 chan->dcid = L2CAP_CID_SIGNALING;
556 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default local QoS/EFS parameters (best-effort service). */
559 chan->local_id = L2CAP_BESTEFFORT_ID;
560 chan->local_stype = L2CAP_SERV_BESTEFFORT;
561 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
562 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
563 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
564 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* The connection's channel list holds a channel ref, and the channel
 * pins the underlying HCI connection. */
566 l2cap_chan_hold(chan);
568 hci_conn_hold(conn->hcon);
570 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
573 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
575 mutex_lock(&conn->chan_lock);
576 __l2cap_chan_add(conn, chan);
577 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer, unlink
 * it from conn->chan_l (dropping the list's refs), tear down any AMP
 * logical link, notify the owner, and purge per-mode queued data. */
580 void l2cap_chan_del(struct l2cap_chan *chan, int err)
582 struct l2cap_conn *conn = chan->conn;
584 __clear_chan_timer(chan);
586 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
589 struct amp_mgr *mgr = conn->hcon->amp_mgr;
590 /* Delete from channel list */
591 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add(). */
593 l2cap_chan_put(chan);
/* A2MP channels do not pin the ACL; all others do. */
597 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
598 hci_conn_drop(conn->hcon);
600 if (mgr && mgr->bredr_chan == chan)
601 mgr->bredr_chan = NULL;
/* If the channel was moved to an AMP controller, tear down the
 * high-speed logical link as well. */
604 if (chan->hs_hchan) {
605 struct hci_chan *hs_hchan = chan->hs_hchan;
607 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
608 amp_disconnect_logical_link(hs_hchan);
611 chan->ops->teardown(chan, err);
613 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Per-mode cleanup of queued traffic and ERTM bookkeeping. */
617 case L2CAP_MODE_BASIC:
620 case L2CAP_MODE_LE_FLOWCTL:
621 skb_queue_purge(&chan->tx_q);
624 case L2CAP_MODE_ERTM:
625 __clear_retrans_timer(chan);
626 __clear_monitor_timer(chan);
627 __clear_ack_timer(chan);
629 skb_queue_purge(&chan->srej_q);
631 l2cap_seq_list_free(&chan->srej_list);
632 l2cap_seq_list_free(&chan->retrans_list);
636 case L2CAP_MODE_STREAMING:
637 skb_queue_purge(&chan->tx_q);
/* Reject an incoming LE credit-based connection request: authorization
 * pending if the socket defers setup, otherwise "bad PSM". Moves the
 * channel to BT_DISCONN and answers the peer's request by ident. */
644 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
646 struct l2cap_conn *conn = chan->conn;
647 struct l2cap_le_conn_rsp rsp;
650 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
651 result = L2CAP_CR_AUTHORIZATION;
653 result = L2CAP_CR_BAD_PSM;
655 l2cap_state_change(chan, BT_DISCONN);
/* The response still advertises our CID/MTU/MPS/credits. */
657 rsp.dcid = cpu_to_le16(chan->scid);
658 rsp.mtu = cpu_to_le16(chan->imtu);
659 rsp.mps = cpu_to_le16(chan->mps);
660 rsp.credits = cpu_to_le16(chan->rx_credits);
661 rsp.result = cpu_to_le16(result);
663 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: reject a pending connection request with either
 * "security block" (deferred setup) or "bad PSM". */
667 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
669 struct l2cap_conn *conn = chan->conn;
670 struct l2cap_conn_rsp rsp;
673 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
674 result = L2CAP_CR_SEC_BLOCK;
676 result = L2CAP_CR_BAD_PSM;
678 l2cap_state_change(chan, BT_DISCONN);
680 rsp.scid = cpu_to_le16(chan->dcid);
681 rsp.dcid = cpu_to_le16(chan->scid);
682 rsp.result = cpu_to_le16(result);
683 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
685 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: send a disconnect
 * request for established dynamic channels, reject a half-open
 * incoming request, or simply tear down/delete otherwise. */
688 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
690 struct l2cap_conn *conn = chan->conn;
692 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
694 switch (chan->state) {
696 chan->ops->teardown(chan, 0);
701 /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
702 * check for chan->psm.
/* PSM-based (dynamic) channels need a signalling disconnect with a
 * bounded wait; fixed channels are just removed. */
704 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
705 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
706 l2cap_send_disconn_req(chan, reason);
708 l2cap_chan_del(chan, reason);
/* Incoming request not yet accepted: send the transport-appropriate
 * rejection before deleting the channel. */
712 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
713 if (conn->hcon->type == ACL_LINK)
714 l2cap_chan_connect_reject(chan);
715 else if (conn->hcon->type == LE_LINK)
716 l2cap_chan_le_connect_reject(chan);
719 l2cap_chan_del(chan, reason);
724 l2cap_chan_del(chan, reason);
728 chan->ops->teardown(chan, 0);
/* Translate the channel type + requested security level into an HCI
 * authentication requirement. SDP (and the 3DSP connectionless PSM)
 * is special-cased: it never requires bonding, and a LOW security
 * request is downgraded to the dedicated SDP level. */
733 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
735 switch (chan->chan_type) {
737 switch (chan->sec_level) {
738 case BT_SECURITY_HIGH:
739 return HCI_AT_DEDICATED_BONDING_MITM;
740 case BT_SECURITY_MEDIUM:
741 return HCI_AT_DEDICATED_BONDING;
743 return HCI_AT_NO_BONDING;
746 case L2CAP_CHAN_CONN_LESS:
747 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
748 if (chan->sec_level == BT_SECURITY_LOW)
749 chan->sec_level = BT_SECURITY_SDP;
751 if (chan->sec_level == BT_SECURITY_HIGH)
752 return HCI_AT_NO_BONDING_MITM;
754 return HCI_AT_NO_BONDING;
756 case L2CAP_CHAN_CONN_ORIENTED:
757 if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
758 if (chan->sec_level == BT_SECURITY_LOW)
759 chan->sec_level = BT_SECURITY_SDP;
761 if (chan->sec_level == BT_SECURITY_HIGH)
762 return HCI_AT_NO_BONDING_MITM;
764 return HCI_AT_NO_BONDING;
/* Remaining channel types map to general bonding requirements. */
768 switch (chan->sec_level) {
769 case BT_SECURITY_HIGH:
770 return HCI_AT_GENERAL_BONDING_MITM;
771 case BT_SECURITY_MEDIUM:
772 return HCI_AT_GENERAL_BONDING;
774 return HCI_AT_NO_BONDING;
780 /* Service level security */
781 int l2cap_chan_check_security(struct l2cap_chan *chan)
783 struct l2cap_conn *conn = chan->conn;
/* LE links use SMP; BR/EDR goes through HCI authentication. */
786 if (conn->hcon->type == LE_LINK)
787 return smp_conn_security(conn->hcon, chan->sec_level);
789 auth_type = l2cap_get_auth_type(chan);
791 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's 1..128 range under conn->lock. */
794 static u8 l2cap_get_ident(struct l2cap_conn *conn)
798 /* Get next available identificator.
799 * 1 - 128 are used by kernel.
800 * 129 - 199 are reserved.
801 * 200 - 254 are used by utilities like l2ping, etc.
804 spin_lock(&conn->lock);
806 if (++conn->tx_ident > 128)
811 spin_unlock(&conn->lock);
/* Build and transmit an L2CAP signalling command on the connection's
 * HCI channel at maximum priority. */
816 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
819 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
822 BT_DBG("code 0x%2.2x", code);
/* Signalling traffic is sent non-flushable when the controller
 * supports it. */
827 if (lmp_no_flush_capable(conn->hcon->hdev))
828 flags = ACL_START_NO_FLUSH;
832 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
833 skb->priority = HCI_PRIO_MAX;
835 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any state other than
 * stable or wait-prepare). */
838 static bool __chan_is_moving(struct l2cap_chan *chan)
840 return chan->move_state != L2CAP_MOVE_STABLE &&
841 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data frame for the channel: route it over the high-speed
 * (AMP) channel when one is attached and no move is in progress,
 * otherwise over the ACL link with flushability per channel flags. */
844 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
846 struct hci_conn *hcon = chan->conn->hcon;
849 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
852 if (chan->hs_hcon && !__chan_is_moving(chan)) {
854 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
/* Non-flushable only if the channel isn't marked flushable and the
 * controller supports the feature. */
861 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
862 lmp_no_flush_capable(hcon->hdev))
863 flags = ACL_START_NO_FLUSH;
867 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
868 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into l2cap_ctrl. The
 * frame-type bit distinguishes S-frames (poll/supervise) from
 * I-frames (sar/txseq). */
871 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
873 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
874 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
876 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
879 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
880 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
887 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
888 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit extended control field (extended window
 * sizes). */
895 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
897 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
898 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
900 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
903 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
904 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
911 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
912 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from an incoming skb; the width
 * depends on whether the channel negotiated extended control. */
919 static inline void __unpack_control(struct l2cap_chan *chan,
922 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
923 __unpack_extended_control(get_unaligned_le32(skb->data),
924 &bt_cb(skb)->control);
925 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
927 __unpack_enhanced_control(get_unaligned_le16(skb->data),
928 &bt_cb(skb)->control);
929 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of __unpack_extended_control(): encode l2cap_ctrl into the
 * 32-bit extended control word. */
933 static u32 __pack_extended_control(struct l2cap_ctrl *control)
937 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
938 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
940 if (control->sframe) {
941 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
942 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
943 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
945 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
946 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode l2cap_ctrl into the 16-bit enhanced control word. */
952 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
956 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
957 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
959 if (control->sframe) {
960 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
961 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
962 packed |= L2CAP_CTRL_FRAME_TYPE;
964 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
965 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing frame, just after
 * the basic L2CAP header; width chosen by FLAG_EXT_CTRL. */
971 static inline void __pack_control(struct l2cap_chan *chan,
972 struct l2cap_ctrl *control,
975 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
976 put_unaligned_le32(__pack_extended_control(control),
977 skb->data + L2CAP_HDR_SIZE);
979 put_unaligned_le16(__pack_enhanced_control(control),
980 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header: extended or enhanced control width. */
984 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
986 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
987 return L2CAP_EXT_HDR_SIZE;
989 return L2CAP_ENH_HDR_SIZE;
/* Build an ERTM supervisory (S-) frame PDU: basic header, the packed
 * control field, and an optional CRC16 FCS over the whole frame. */
992 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
996 struct l2cap_hdr *lh;
997 int hlen = __ertm_hdr_size(chan);
999 if (chan->fcs == L2CAP_FCS_CRC16)
1000 hlen += L2CAP_FCS_SIZE;
1002 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1005 return ERR_PTR(-ENOMEM);
1007 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1008 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1009 lh->cid = cpu_to_le16(chan->dcid);
1011 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1012 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1014 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS covers header + control as built so far. */
1016 if (chan->fcs == L2CAP_FCS_CRC16) {
1017 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1018 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1021 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame for the channel, maintaining RNR/F-bit bookkeeping:
 * RR clears CONN_RNR_SENT, RNR sets it, and any ack-bearing frame
 * (non-SREJ) updates last_acked_seq and stops the ack timer. */
1025 static void l2cap_send_sframe(struct l2cap_chan *chan,
1026 struct l2cap_ctrl *control)
1028 struct sk_buff *skb;
1031 BT_DBG("chan %p, control %p", chan, control);
1033 if (!control->sframe)
/* Suppress transmission while an AMP move is in progress. */
1036 if (__chan_is_moving(chan))
1039 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1043 if (control->super == L2CAP_SUPER_RR)
1044 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1045 else if (control->super == L2CAP_SUPER_RNR)
1046 set_bit(CONN_RNR_SENT, &chan->conn_state);
1048 if (control->super != L2CAP_SUPER_SREJ) {
1049 chan->last_acked_seq = control->reqseq;
1050 __clear_ack_timer(chan);
1053 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1054 control->final, control->poll, control->super);
1056 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1057 control_field = __pack_extended_control(control);
1059 control_field = __pack_enhanced_control(control);
1061 skb = l2cap_create_sframe_pdu(chan, control_field);
1063 l2cap_do_send(chan, skb);
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame acknowledging
 * buffer_seq; RNR is chosen while the local side is busy. */
1066 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1068 struct l2cap_ctrl control;
1070 BT_DBG("chan %p, poll %d", chan, poll);
1072 memset(&control, 0, sizeof(control));
1074 control.poll = poll;
1076 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1077 control.super = L2CAP_SUPER_RNR;
1079 control.super = L2CAP_SUPER_RR;
1081 control.reqseq = chan->buffer_seq;
1082 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding on the channel. */
1085 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1087 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel can/should use an AMP controller: high
 * speed must be enabled, the peer must support the A2MP fixed channel,
 * and at least one non-BR/EDR AMP controller must be up. */
1090 static bool __amp_capable(struct l2cap_chan *chan)
1092 struct l2cap_conn *conn = chan->conn;
1093 struct hci_dev *hdev;
1094 bool amp_available = false;
1096 if (!conn->hs_enabled)
1099 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1102 read_lock(&hci_dev_list_lock);
1103 list_for_each_entry(hdev, &hci_dev_list, list) {
1104 if (hdev->amp_type != AMP_TYPE_BREDR &&
1105 test_bit(HCI_UP, &hdev->flags)) {
1106 amp_available = true;
1110 read_unlock(&hci_dev_list_lock);
1112 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1113 return amp_available;
1118 static bool l2cap_check_efs(struct l2cap_chan *chan)
1120 /* Check EFS parameters */
/* Send an L2CAP Connection Request for the channel, marking a connect
 * as pending and recording the command identifier for the response. */
1124 void l2cap_send_conn_req(struct l2cap_chan *chan)
1126 struct l2cap_conn *conn = chan->conn;
1127 struct l2cap_conn_req req;
1129 req.scid = cpu_to_le16(chan->scid);
1130 req.psm = chan->psm;
1132 chan->ident = l2cap_get_ident(conn);
1134 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1136 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: Create Channel Request carrying the target AMP
 * controller id in addition to SCID and PSM. */
1139 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1141 struct l2cap_create_chan_req req;
1142 req.scid = cpu_to_le16(chan->scid);
1143 req.psm = chan->psm;
1144 req.amp_id = amp_id;
1146 chan->ident = l2cap_get_ident(chan->conn);
1148 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for a move between controllers: stop all
 * ERTM timers, mark queued frames for retransmission, reset SREJ/REJ
 * state and retransmit bookkeeping, and park the state machines. */
1152 static void l2cap_move_setup(struct l2cap_chan *chan)
1154 struct sk_buff *skb;
1156 BT_DBG("chan %p", chan);
/* Only ERTM channels carry state that needs this preparation. */
1158 if (chan->mode != L2CAP_MODE_ERTM)
1161 __clear_retrans_timer(chan);
1162 __clear_monitor_timer(chan);
1163 __clear_ack_timer(chan);
1165 chan->retry_count = 0;
1166 skb_queue_walk(&chan->tx_q, skb) {
1167 if (bt_cb(skb)->control.retries)
1168 bt_cb(skb)->control.retries = 1;
1173 chan->expected_tx_seq = chan->buffer_seq;
1175 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1176 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1177 l2cap_seq_list_clear(&chan->retrans_list);
1178 l2cap_seq_list_clear(&chan->srej_list);
1179 skb_queue_purge(&chan->srej_q);
1181 chan->tx_state = L2CAP_TX_STATE_XMIT;
1182 chan->rx_state = L2CAP_RX_STATE_MOVE;
/* Hold off transmission until the move completes. */
1184 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish a channel move: return to the stable state and, for ERTM,
 * resynchronise with the peer — the initiator polls (P=1) and waits
 * for F=1, the responder waits for the peer's poll. */
1187 static void l2cap_move_done(struct l2cap_chan *chan)
1189 u8 move_role = chan->move_role;
1190 BT_DBG("chan %p", chan);
1192 chan->move_state = L2CAP_MOVE_STABLE;
1193 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1195 if (chan->mode != L2CAP_MODE_ERTM)
1198 switch (move_role) {
1199 case L2CAP_MOVE_ROLE_INITIATOR:
1200 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1201 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1203 case L2CAP_MOVE_ROLE_RESPONDER:
1204 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel fully connected: clear configuration state and the
 * channel timer, suspend LE channels that have no TX credits yet, and
 * notify the owner. */
1209 static void l2cap_chan_ready(struct l2cap_chan *chan)
1211 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1212 chan->conf_state = 0;
1213 __clear_chan_timer(chan);
/* No credits yet: the owner must not transmit until the peer grants
 * some. */
1215 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1216 chan->ops->suspend(chan);
1218 chan->state = BT_CONNECTED;
1220 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request (once per channel —
 * guarded by FLAG_LE_CONN_REQ_SENT). */
1223 static void l2cap_le_connect(struct l2cap_chan *chan)
1225 struct l2cap_conn *conn = chan->conn;
1226 struct l2cap_le_conn_req req;
1228 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1231 req.psm = chan->psm;
1232 req.scid = cpu_to_le16(chan->scid);
1233 req.mtu = cpu_to_le16(chan->imtu);
1234 req.mps = cpu_to_le16(chan->mps);
1235 req.credits = cpu_to_le16(chan->rx_credits);
1237 chan->ident = l2cap_get_ident(conn);
1239 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Kick an LE channel forward once link security is satisfied; an
 * outgoing channel in BT_CONNECT proceeds to the LE connect request. */
1243 static void l2cap_le_start(struct l2cap_chan *chan)
1245 struct l2cap_conn *conn = chan->conn;
1247 if (!smp_conn_security(conn->hcon, chan->sec_level))
1251 l2cap_chan_ready(chan);
1255 if (chan->state == BT_CONNECT)
1256 l2cap_le_connect(chan);
/* Begin establishing the channel over the best available transport:
 * AMP discovery when AMP is usable, LE start on LE links, otherwise a
 * plain BR/EDR connection request. */
1259 static void l2cap_start_connection(struct l2cap_chan *chan)
1261 if (__amp_capable(chan)) {
1262 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1263 a2mp_discover_amp(chan);
1264 } else if (chan->conn->hcon->type == LE_LINK) {
1265 l2cap_le_start(chan);
1267 l2cap_send_conn_req(chan);
/* Start the channel: on BR/EDR the remote feature mask must be known
 * first — send an Information Request (with timeout) if it has not
 * been requested yet, otherwise proceed once security allows. */
1271 static void l2cap_do_start(struct l2cap_chan *chan)
1273 struct l2cap_conn *conn = chan->conn;
1275 if (conn->hcon->type == LE_LINK) {
1276 l2cap_le_start(chan);
1280 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1281 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1284 if (l2cap_chan_check_security(chan) &&
1285 __l2cap_no_conn_pending(chan)) {
1286 l2cap_start_connection(chan);
1289 struct l2cap_info_req req;
1290 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1292 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1293 conn->info_ident = l2cap_get_ident(conn);
1295 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1297 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Check whether an L2CAP mode is supported by both the local feature
 * mask and the peer's advertised feat_mask. */
1302 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1304 u32 local_feat_mask = l2cap_feat_mask;
1306 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1309 case L2CAP_MODE_ERTM:
1310 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1311 case L2CAP_MODE_STREAMING:
1312 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnection Request for the channel. ERTM timers are
 * stopped first on connected channels; A2MP fixed channels skip the
 * signalling exchange and just change state. The channel ends up in
 * BT_DISCONN with 'err' reported to the owner. */
1318 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1320 struct l2cap_conn *conn = chan->conn;
1321 struct l2cap_disconn_req req;
1326 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1327 __clear_retrans_timer(chan);
1328 __clear_monitor_timer(chan);
1329 __clear_ack_timer(chan);
/* A2MP has no disconnect request; state change only. */
1332 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1333 l2cap_state_change(chan, BT_DISCONN);
1337 req.dcid = cpu_to_le16(chan->dcid);
1338 req.scid = cpu_to_le16(chan->scid);
1339 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1342 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1345 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and push it
 * forward: start outgoing connects whose security is now satisfied
 * (closing those whose mode the peer cannot support), and answer
 * pending incoming connects (BT_CONNECT2) with success, pending, or
 * deferred-authorization responses, sending the first configuration
 * request on success. */
1346 static void l2cap_conn_start(struct l2cap_conn *conn)
1348 struct l2cap_chan *chan, *tmp;
1350 BT_DBG("conn %p", conn);
1352 mutex_lock(&conn->chan_lock);
1354 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1355 l2cap_chan_lock(chan);
1357 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1358 l2cap_chan_unlock(chan);
1362 if (chan->state == BT_CONNECT) {
1363 if (!l2cap_chan_check_security(chan) ||
1364 !__l2cap_no_conn_pending(chan)) {
1365 l2cap_chan_unlock(chan);
/* State-2 devices cannot renegotiate the mode; close if the
 * peer lacks support. */
1369 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1370 && test_bit(CONF_STATE2_DEVICE,
1371 &chan->conf_state)) {
1372 l2cap_chan_close(chan, ECONNRESET);
1373 l2cap_chan_unlock(chan);
1377 l2cap_start_connection(chan);
1379 } else if (chan->state == BT_CONNECT2) {
1380 struct l2cap_conn_rsp rsp;
1382 rsp.scid = cpu_to_le16(chan->dcid);
1383 rsp.dcid = cpu_to_le16(chan->scid);
1385 if (l2cap_chan_check_security(chan)) {
1386 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1387 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1388 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1389 chan->ops->defer(chan);
1392 l2cap_state_change(chan, BT_CONFIG);
1393 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1394 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report pending authentication. */
1397 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1398 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1401 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response that hasn't been configured yet
 * triggers the first configuration request. */
1404 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1405 rsp.result != L2CAP_CR_SUCCESS) {
1406 l2cap_chan_unlock(chan);
1410 set_bit(CONF_REQ_SENT, &chan->conf_state);
1411 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1412 l2cap_build_conf_req(chan, buf), buf);
1413 chan->num_conf_req++;
1416 l2cap_chan_unlock(chan);
1419 mutex_unlock(&conn->chan_lock);
1422 /* Find socket with cid and source/destination bdaddr.
1423 * Returns closest match, locked.
/* Lookup in the global channel list by source CID and address pair.
 * An exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match collected in c1 is returned. @state of 0 matches
 * any channel state.
 */
1425 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1429 struct l2cap_chan *c, *c1 = NULL;
1431 read_lock(&chan_list_lock);
1433 list_for_each_entry(c, &chan_list, global_l) {
1434 if (state && c->state != state)
1437 if (c->scid == cid) {
1438 int src_match, dst_match;
1439 int src_any, dst_any;
/* Exact match on both addresses beats any wildcard candidate. */
1442 src_match = !bacmp(&c->src, src);
1443 dst_match = !bacmp(&c->dst, dst);
1444 if (src_match && dst_match) {
1445 read_unlock(&chan_list_lock);
/* Remember partial/wildcard matches as the fallback result. */
1450 src_any = !bacmp(&c->src, BDADDR_ANY);
1451 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1452 if ((src_match && dst_any) || (src_any && dst_match) ||
1453 (src_any && dst_any))
1458 read_unlock(&chan_list_lock);
/* LE link became ready: if a listening ATT (CID 4) server channel exists
 * for this address pair, spawn a child channel for the new connection,
 * unless a client ATT channel already claimed the CID or the peer is
 * blacklisted.
 */
1463 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1465 struct hci_conn *hcon = conn->hcon;
1466 struct l2cap_chan *chan, *pchan;
1471 /* Check if we have socket listening on cid */
1472 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1473 &hcon->src, &hcon->dst);
1477 /* Client ATT sockets should override the server one */
1478 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1481 dst_type = bdaddr_type(hcon, hcon->dst_type);
1483 /* If device is blocked, do not create a channel for it */
1484 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1487 l2cap_chan_lock(pchan);
/* Let the listener's ops create the accepted child channel. */
1489 chan = pchan->ops->new_connection(pchan);
1493 chan->dcid = L2CAP_CID_ATT;
1495 bacpy(&chan->src, &hcon->src);
1496 bacpy(&chan->dst, &hcon->dst);
1497 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1498 chan->dst_type = dst_type;
1500 __l2cap_chan_add(conn, chan);
1503 l2cap_chan_unlock(pchan);
/* Underlying HCI connection is up: start SMP for outgoing LE pairing,
 * create the incoming LE ATT channel if needed, then push every existing
 * channel forward (LE start, immediate ready for fixed channels, or
 * classic connect sequence).
 */
1506 static void l2cap_conn_ready(struct l2cap_conn *conn)
1508 struct l2cap_chan *chan;
1509 struct hci_conn *hcon = conn->hcon;
1511 BT_DBG("conn %p", conn);
1513 /* For outgoing pairing which doesn't necessarily have an
1514 * associated socket (e.g. mgmt_pair_device).
1516 if (hcon->out && hcon->type == LE_LINK)
1517 smp_conn_security(hcon, hcon->pending_sec_level);
1519 mutex_lock(&conn->chan_lock);
1521 if (hcon->type == LE_LINK)
1522 l2cap_le_conn_ready(conn);
1524 list_for_each_entry(chan, &conn->chan_l, list) {
1526 l2cap_chan_lock(chan);
/* A2MP fixed channel manages its own state; skip it here. */
1528 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1529 l2cap_chan_unlock(chan);
1533 if (hcon->type == LE_LINK) {
1534 l2cap_le_start(chan);
1535 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Fixed/connectionless channels are ready as soon as the link is. */
1536 l2cap_chan_ready(chan);
1538 } else if (chan->state == BT_CONNECT) {
1539 l2cap_do_start(chan);
1542 l2cap_chan_unlock(chan);
1545 mutex_unlock(&conn->chan_lock);
1548 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that asked for reliable
 * delivery (FLAG_FORCE_RELIABLE); others are left untouched.
 */
1549 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1551 struct l2cap_chan *chan;
1553 BT_DBG("conn %p", conn);
1555 mutex_lock(&conn->chan_lock);
1557 list_for_each_entry(chan, &conn->chan_l, list) {
1558 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1559 l2cap_chan_set_err(chan, err);
1562 mutex_unlock(&conn->chan_lock);
/* Info Request timer expired: treat the feature-mask exchange as done
 * (with whatever we have) and resume normal channel setup.
 */
1565 static void l2cap_info_timeout(struct work_struct *work)
1567 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1570 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1571 conn->info_ident = 0;
1573 l2cap_conn_start(conn);
1578 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1579 * callback is called during registration. The ->remove callback is called
1580 * during unregistration.
1581 * An l2cap_user object can either be explicitly unregistered or when the
1582 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1583 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1584 * External modules must own a reference to the l2cap_conn object if they intend
1585 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1586 * any time if they don't.
/* Register an external l2cap_user on @conn and invoke its ->probe()
 * callback; fails if the user is already linked or if the conn has been
 * torn down (conn->hchan == NULL). Caller contract is described in the
 * comment block above.
 */
1589 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1591 struct hci_dev *hdev = conn->hcon->hdev;
1594 /* We need to check whether l2cap_conn is registered. If it is not, we
1595 * must not register the l2cap_user. l2cap_conn_del() unregisters
1596 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1597 * relies on the parent hci_conn object to be locked. This itself relies
1598 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered somewhere. */
1603 if (user->list.next || user->list.prev) {
1608 /* conn->hchan is NULL after l2cap_conn_del() was called */
1614 ret = user->probe(conn, user);
1618 list_add(&user->list, &conn->users);
1622 hci_dev_unlock(hdev);
1625 EXPORT_SYMBOL(l2cap_register_user);
/* Unlink a previously registered l2cap_user and call its ->remove()
 * callback under the hci_dev lock. The list pointers are NULLed so a
 * later register/unregister can detect (non-)membership.
 */
1627 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1629 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never registered: nothing to do. */
1633 if (!user->list.next || !user->list.prev)
1636 list_del(&user->list);
1637 user->list.next = NULL;
1638 user->list.prev = NULL;
1639 user->remove(conn, user);
1642 hci_dev_unlock(hdev);
1644 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users on connection teardown, invoking each user's
 * ->remove() callback. Same unlink/NULL discipline as
 * l2cap_unregister_user(), but without taking the hci_dev lock here.
 */
1646 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1648 struct l2cap_user *user;
1650 while (!list_empty(&conn->users)) {
1651 user = list_first_entry(&conn->users, struct l2cap_user, list);
1652 list_del(&user->list);
1653 user->list.next = NULL;
1654 user->list.prev = NULL;
1655 user->remove(conn, user);
/* Tear down the l2cap_conn attached to @hcon: notify users, delete every
 * channel with error @err, drop the HCI channel, cancel pending timers,
 * destroy any pending SMP context and drop the conn reference.
 */
1659 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1661 struct l2cap_conn *conn = hcon->l2cap_data;
1662 struct l2cap_chan *chan, *l;
1667 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1669 kfree_skb(conn->rx_skb);
1671 l2cap_unregister_all_users(conn);
1673 mutex_lock(&conn->chan_lock);
1676 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref across del/close so the channel survives being unlinked. */
1677 l2cap_chan_hold(chan);
1678 l2cap_chan_lock(chan);
1680 l2cap_chan_del(chan, err);
1682 l2cap_chan_unlock(chan);
1684 chan->ops->close(chan);
1685 l2cap_chan_put(chan);
1688 mutex_unlock(&conn->chan_lock);
1690 hci_chan_del(conn->hchan);
/* Only cancel the info timer if an Info Request was actually sent. */
1692 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1693 cancel_delayed_work_sync(&conn->info_timer);
1695 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1696 cancel_delayed_work_sync(&conn->security_timer);
1697 smp_chan_destroy(conn);
1700 hcon->l2cap_data = NULL;
1702 l2cap_conn_put(conn);
/* LE security (SMP) procedure timed out: if SMP was still pending,
 * destroy its context and tear down the whole connection with ETIMEDOUT.
 */
1705 static void security_timeout(struct work_struct *work)
1707 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1708 security_timer.work);
1710 BT_DBG("conn %p", conn);
1712 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1713 smp_chan_destroy(conn);
1714 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocates the
 * conn and its HCI channel, sets the MTU from the link type, initializes
 * locks/lists and arms the appropriate delayed work (security timer for
 * LE, info timer otherwise). Returns NULL on allocation failure.
 */
1718 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1720 struct l2cap_conn *conn = hcon->l2cap_data;
1721 struct hci_chan *hchan;
1726 hchan = hci_chan_create(hcon);
1730 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the hci_chan taken above. */
1732 hci_chan_del(hchan);
1736 kref_init(&conn->ref);
1737 hcon->l2cap_data = conn;
/* conn holds a reference on its hci_conn for its whole lifetime. */
1739 hci_conn_get(conn->hcon);
1740 conn->hchan = hchan;
1742 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU depends on link type: LE-specific MTU if set, else ACL MTU. */
1744 switch (hcon->type) {
1746 if (hcon->hdev->le_mtu) {
1747 conn->mtu = hcon->hdev->le_mtu;
1752 conn->mtu = hcon->hdev->acl_mtu;
1756 conn->feat_mask = 0;
1758 if (hcon->type == ACL_LINK)
1759 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1760 &hcon->hdev->dev_flags);
1762 spin_lock_init(&conn->lock);
1763 mutex_init(&conn->chan_lock);
1765 INIT_LIST_HEAD(&conn->chan_l);
1766 INIT_LIST_HEAD(&conn->users);
1768 if (hcon->type == LE_LINK)
1769 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1771 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1773 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() when the last l2cap_conn reference goes away.
 */
1778 static void l2cap_conn_free(struct kref *ref)
1780 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1782 hci_conn_put(conn->hcon);
/* Take a reference on @conn (paired with l2cap_conn_put()). */
1786 void l2cap_conn_get(struct l2cap_conn *conn)
1788 kref_get(&conn->ref);
1790 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
1792 void l2cap_conn_put(struct l2cap_conn *conn)
1794 kref_put(&conn->ref, l2cap_conn_free);
1796 EXPORT_SYMBOL(l2cap_conn_put);
1798 /* ---- Socket interface ---- */
1800 /* Find socket with psm and source / destination bdaddr.
1801 * Returns closest match.
/* Lookup in the global channel list by PSM and address pair, filtered by
 * link type (BR/EDR vs LE source address type). Exact src+dst match is
 * returned immediately; otherwise the best wildcard match kept in c1.
 */
1803 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1808 struct l2cap_chan *c, *c1 = NULL;
1810 read_lock(&chan_list_lock);
1812 list_for_each_entry(c, &chan_list, global_l) {
1813 if (state && c->state != state)
/* Keep BR/EDR and LE channel namespaces separate. */
1816 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1819 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1822 if (c->psm == psm) {
1823 int src_match, dst_match;
1824 int src_any, dst_any;
/* Exact match on both addresses beats any wildcard candidate. */
1827 src_match = !bacmp(&c->src, src);
1828 dst_match = !bacmp(&c->dst, dst);
1829 if (src_match && dst_match) {
1830 read_unlock(&chan_list_lock);
/* Remember partial/wildcard matches as the fallback result. */
1835 src_any = !bacmp(&c->src, BDADDR_ANY);
1836 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1837 if ((src_match && dst_any) || (src_any && dst_match) ||
1838 (src_any && dst_any))
1843 read_unlock(&chan_list_lock);
/* Validate a PSM for connect(): LE destinations use the 8-bit LE PSM
 * space; BR/EDR PSMs must have the LSB set and bit 8 clear per the
 * L2CAP spec's odd-PSM encoding.
 */
1848 static bool is_valid_psm(u16 psm, u8 dst_type)
1853 if (bdaddr_type_is_le(dst_type))
1854 return (psm <= 0x00ff);
1856 /* PSM must be odd and lsb of upper byte must be 0 */
1857 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP connection on @chan to @dst: validates
 * PSM/CID and channel mode, creates or reuses the HCI link (LE or ACL),
 * attaches the channel to the l2cap_conn, and either completes or starts
 * the connect sequence depending on the link state. Returns 0 or a
 * negative errno.
 */
1860 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1861 bdaddr_t *dst, u8 dst_type)
1863 struct l2cap_conn *conn;
1864 struct hci_conn *hcon;
1865 struct hci_dev *hdev;
1869 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1870 dst_type, __le16_to_cpu(psm));
1872 hdev = hci_get_route(dst, &chan->src);
1874 return -EHOSTUNREACH;
1878 l2cap_chan_lock(chan);
/* Raw channels may skip PSM validation; others need valid psm or cid. */
1880 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
1881 chan->chan_type != L2CAP_CHAN_RAW) {
1886 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1891 switch (chan->mode) {
1892 case L2CAP_MODE_BASIC:
1894 case L2CAP_MODE_LE_FLOWCTL:
1895 l2cap_le_flowctl_init(chan);
1897 case L2CAP_MODE_ERTM:
1898 case L2CAP_MODE_STREAMING:
1907 switch (chan->state) {
1911 /* Already connecting */
1916 /* Already connected */
1930 /* Set destination address and psm */
1931 bacpy(&chan->dst, dst);
1932 chan->dst_type = dst_type;
1937 auth_type = l2cap_get_auth_type(chan);
/* Pick LE vs ACL link creation based on the destination address type. */
1939 if (bdaddr_type_is_le(dst_type))
1940 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1941 chan->sec_level, auth_type);
1943 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1944 chan->sec_level, auth_type);
1947 err = PTR_ERR(hcon);
1951 conn = l2cap_conn_add(hcon);
1953 hci_conn_drop(hcon);
/* Reject a fixed-CID connect if that CID is already in use. */
1958 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1959 hci_conn_drop(hcon);
1964 /* Update source addr of the socket */
1965 bacpy(&chan->src, &hcon->src);
1966 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan_add takes conn->chan_lock; drop the channel lock around it. */
1968 l2cap_chan_unlock(chan);
1969 l2cap_chan_add(conn, chan);
1970 l2cap_chan_lock(chan);
1972 /* l2cap_chan_add takes its own ref so we can drop this one */
1973 hci_conn_drop(hcon);
1975 l2cap_state_change(chan, BT_CONNECT);
1976 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1978 if (hcon->state == BT_CONNECTED) {
1979 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1980 __clear_chan_timer(chan);
1981 if (l2cap_chan_check_security(chan))
1982 l2cap_state_change(chan, BT_CONNECTED);
1984 l2cap_do_start(chan);
1990 l2cap_chan_unlock(chan);
1991 hci_dev_unlock(hdev);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine. The early unlock/put path (visible gap) handles a channel
 * that is no longer valid for the event.
 */
1996 static void l2cap_monitor_timeout(struct work_struct *work)
1998 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1999 monitor_timer.work);
2001 BT_DBG("chan %p", chan);
2003 l2cap_chan_lock(chan);
2006 l2cap_chan_unlock(chan);
2007 l2cap_chan_put(chan);
2011 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2013 l2cap_chan_unlock(chan);
2014 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine. Mirrors l2cap_monitor_timeout() structure.
 */
2017 static void l2cap_retrans_timeout(struct work_struct *work)
2019 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2020 retrans_timer.work);
2022 BT_DBG("chan %p", chan);
2024 l2cap_chan_lock(chan);
2027 l2cap_chan_unlock(chan);
2028 l2cap_chan_put(chan);
2032 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2033 l2cap_chan_unlock(chan);
2034 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to tx_q and send everything
 * immediately, stamping each frame with the next tx sequence number and
 * an optional CRC16 FCS. No retransmission state is kept.
 */
2037 static void l2cap_streaming_send(struct l2cap_chan *chan,
2038 struct sk_buff_head *skbs)
2040 struct sk_buff *skb;
2041 struct l2cap_ctrl *control;
2043 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
2045 if (__chan_is_moving(chan))
2048 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2050 while (!skb_queue_empty(&chan->tx_q)) {
2052 skb = skb_dequeue(&chan->tx_q);
2054 bt_cb(skb)->control.retries = 1;
2055 control = &bt_cb(skb)->control;
2057 control->reqseq = 0;
2058 control->txseq = chan->next_tx_seq;
2060 __pack_control(chan, control, skb);
/* Append FCS over header+payload when CRC16 is negotiated. */
2062 if (chan->fcs == L2CAP_FCS_CRC16) {
2063 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2064 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2067 l2cap_do_send(chan, skb);
2069 BT_DBG("Sent txseq %u", control->txseq);
2071 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2072 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while the
 * remote tx window has room and we are in the XMIT state. Each frame is
 * sequence-stamped, optionally FCS-protected, cloned (originals stay
 * queued for retransmission) and counted as unacked.
 */
2076 static int l2cap_ertm_send(struct l2cap_chan *chan)
2078 struct sk_buff *skb, *tx_skb;
2079 struct l2cap_ctrl *control;
2082 BT_DBG("chan %p", chan);
2084 if (chan->state != BT_CONNECTED)
/* Remote signalled busy (RNR): hold transmission. */
2087 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2090 if (__chan_is_moving(chan))
2093 while (chan->tx_send_head &&
2094 chan->unacked_frames < chan->remote_tx_win &&
2095 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2097 skb = chan->tx_send_head;
2099 bt_cb(skb)->control.retries = 1;
2100 control = &bt_cb(skb)->control;
2102 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Each I-frame piggybacks an ack up to buffer_seq. */
2105 control->reqseq = chan->buffer_seq;
2106 chan->last_acked_seq = chan->buffer_seq;
2107 control->txseq = chan->next_tx_seq;
2109 __pack_control(chan, control, skb);
2111 if (chan->fcs == L2CAP_FCS_CRC16) {
2112 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2113 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2116 /* Clone after data has been modified. Data is assumed to be
2117 read-only (for locking purposes) on cloned sk_buffs.
2119 tx_skb = skb_clone(skb, GFP_KERNEL);
2124 __set_retrans_timer(chan);
2126 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2127 chan->unacked_frames++;
2128 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent. */
2131 if (skb_queue_is_last(&chan->tx_q, skb))
2132 chan->tx_send_head = NULL;
2134 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2136 l2cap_do_send(chan, tx_skb);
2137 BT_DBG("Sent txseq %u", control->txseq);
2140 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2141 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: look the
 * frame up in tx_q, enforce the max_tx retry limit (disconnecting on
 * overrun), refresh its control field, re-append the FCS and send a
 * fresh clone/copy.
 */
2146 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2148 struct l2cap_ctrl control;
2149 struct sk_buff *skb;
2150 struct sk_buff *tx_skb;
2153 BT_DBG("chan %p", chan);
2155 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2158 if (__chan_is_moving(chan))
2161 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2162 seq = l2cap_seq_list_pop(&chan->retrans_list);
2164 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2166 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2171 bt_cb(skb)->control.retries++;
2172 control = bt_cb(skb)->control;
/* max_tx == 0 means unlimited retries; otherwise enforce the cap. */
2174 if (chan->max_tx != 0 &&
2175 bt_cb(skb)->control.retries > chan->max_tx) {
2176 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2177 l2cap_send_disconn_req(chan, ECONNRESET);
2178 l2cap_seq_list_clear(&chan->retrans_list);
2182 control.reqseq = chan->buffer_seq;
2183 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2188 if (skb_cloned(skb)) {
2189 /* Cloned sk_buffs are read-only, so we need a
/* Full copy when the frame is cloned (data must be writable). */
2192 tx_skb = skb_copy(skb, GFP_KERNEL);
2194 tx_skb = skb_clone(skb, GFP_KERNEL);
2198 l2cap_seq_list_clear(&chan->retrans_list);
2202 /* Update skb contents */
2203 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2204 put_unaligned_le32(__pack_extended_control(&control),
2205 tx_skb->data + L2CAP_HDR_SIZE);
2207 put_unaligned_le16(__pack_enhanced_control(&control),
2208 tx_skb->data + L2CAP_HDR_SIZE);
/* FCS must be recomputed since the control field changed. */
2211 if (chan->fcs == L2CAP_FCS_CRC16) {
2212 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2213 put_unaligned_le16(fcs, skb_put(tx_skb,
2217 l2cap_do_send(chan, tx_skb);
2219 BT_DBG("Resent txseq %d", control.txseq);
2221 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by control->reqseq (SREJ path). */
2225 static void l2cap_retransmit(struct l2cap_chan *chan,
2226 struct l2cap_ctrl *control)
2228 BT_DBG("chan %p, control %p", chan, control);
2230 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2231 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head (REJ / poll-response path): rebuild
 * retrans_list from tx_q and kick l2cap_ertm_resend().
 */
2234 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2235 struct l2cap_ctrl *control)
2237 struct sk_buff *skb;
2239 BT_DBG("chan %p, control %p", chan, control);
2242 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2244 l2cap_seq_list_clear(&chan->retrans_list);
2246 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2249 if (chan->unacked_frames) {
/* First locate the frame matching reqseq (or the send head). */
2250 skb_queue_walk(&chan->tx_q, skb) {
2251 if (bt_cb(skb)->control.txseq == control->reqseq ||
2252 skb == chan->tx_send_head)
/* Then queue everything from there up to the send head. */
2256 skb_queue_walk_from(&chan->tx_q, skb) {
2257 if (skb == chan->tx_send_head)
2260 l2cap_seq_list_append(&chan->retrans_list,
2261 bt_cb(skb)->control.txseq);
2264 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR while locally busy, piggyback
 * the ack on pending I-frames when possible, send an explicit RR once
 * the unacked window is 3/4 full, else (re)arm the ack timer.
 */
2268 static void l2cap_send_ack(struct l2cap_chan *chan)
2270 struct l2cap_ctrl control;
2271 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2272 chan->last_acked_seq);
2275 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2276 chan, chan->last_acked_seq, chan->buffer_seq);
2278 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to hold off with an RNR S-frame. */
2281 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2282 chan->rx_state == L2CAP_RX_STATE_RECV) {
2283 __clear_ack_timer(chan);
2284 control.super = L2CAP_SUPER_RNR;
2285 control.reqseq = chan->buffer_seq;
2286 l2cap_send_sframe(chan, &control);
2288 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2289 l2cap_ertm_send(chan);
2290 /* If any i-frames were sent, they included an ack */
2291 if (chan->buffer_seq == chan->last_acked_seq)
2295 /* Ack now if the window is 3/4ths full.
2296 * Calculate without mul or div
/* threshold = ack_win * 3 (later shifted; shift lines not visible). */
2298 threshold = chan->ack_win;
2299 threshold += threshold << 1;
2302 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2305 if (frames_to_ack >= threshold) {
2306 __clear_ack_timer(chan);
2307 control.super = L2CAP_SUPER_RR;
2308 control.reqseq = chan->buffer_seq;
2309 l2cap_send_sframe(chan, &control);
2314 __set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the skb head, the remainder into a chain of continuation
 * fragments each at most conn->mtu bytes. Returns 0 or a negative errno.
 */
2318 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2319 struct msghdr *msg, int len,
2320 int count, struct sk_buff *skb)
2322 struct l2cap_conn *conn = chan->conn;
2323 struct sk_buff **frag;
2326 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2332 /* Continuation fragments (no L2CAP header) */
2333 frag = &skb_shinfo(skb)->frag_list;
2335 struct sk_buff *tmp;
2337 count = min_t(unsigned int, conn->mtu, len);
2339 tmp = chan->ops->alloc_skb(chan, count,
2340 msg->msg_flags & MSG_DONTWAIT);
2342 return PTR_ERR(tmp);
2346 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2349 (*frag)->priority = skb->priority;
/* Account fragment bytes against the parent skb's totals. */
2354 skb->len += (*frag)->len;
2355 skb->data_len += (*frag)->len;
2357 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the PSM
 * prefix, then the user payload. Returns the skb or ERR_PTR on failure.
 */
2363 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2364 struct msghdr *msg, size_t len,
2367 struct l2cap_conn *conn = chan->conn;
2368 struct sk_buff *skb;
2369 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2370 struct l2cap_hdr *lh;
2372 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2373 __le16_to_cpu(chan->psm), len, priority);
/* Head skb carries at most one HCI-MTU worth; rest goes to frag_list. */
2375 count = min_t(unsigned int, (conn->mtu - hlen), len);
2377 skb = chan->ops->alloc_skb(chan, count + hlen,
2378 msg->msg_flags & MSG_DONTWAIT);
2382 skb->priority = priority;
2384 /* Create L2CAP header */
2385 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2386 lh->cid = cpu_to_le16(chan->dcid);
2387 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2388 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2390 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2391 if (unlikely(err < 0)) {
2393 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload. Returns the skb or ERR_PTR on failure.
 */
2398 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2399 struct msghdr *msg, size_t len,
2402 struct l2cap_conn *conn = chan->conn;
2403 struct sk_buff *skb;
2405 struct l2cap_hdr *lh;
2407 BT_DBG("chan %p len %zu", chan, len);
2409 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2411 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2412 msg->msg_flags & MSG_DONTWAIT);
2416 skb->priority = priority;
2418 /* Create L2CAP header */
2419 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2420 lh->cid = cpu_to_le16(chan->dcid);
2421 lh->len = cpu_to_le16(len);
2423 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2424 if (unlikely(err < 0)) {
2426 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at send time), optional SDU-length prefix, payload,
 * with room reserved for a CRC16 FCS. Returns the skb or ERR_PTR.
 */
2431 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2432 struct msghdr *msg, size_t len,
2435 struct l2cap_conn *conn = chan->conn;
2436 struct sk_buff *skb;
2437 int err, count, hlen;
2438 struct l2cap_hdr *lh;
2440 BT_DBG("chan %p len %zu", chan, len);
2443 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field. */
2445 hlen = __ertm_hdr_size(chan);
2448 hlen += L2CAP_SDULEN_SIZE;
2450 if (chan->fcs == L2CAP_FCS_CRC16)
2451 hlen += L2CAP_FCS_SIZE;
2453 count = min_t(unsigned int, (conn->mtu - hlen), len);
2455 skb = chan->ops->alloc_skb(chan, count + hlen,
2456 msg->msg_flags & MSG_DONTWAIT);
2460 /* Create L2CAP header */
2461 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2462 lh->cid = cpu_to_le16(chan->dcid);
2463 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2465 /* Control header is populated later */
2466 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2467 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2469 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2472 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2474 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2475 if (unlikely(err < 0)) {
2477 return ERR_PTR(err);
2480 bt_cb(skb)->control.fcs = chan->fcs;
2481 bt_cb(skb)->control.retries = 0;
/* Segment an SDU into ERTM/streaming PDUs on @seg_queue: PDU size is
 * derived from the HCI MTU (capped for BR/EDR and by the remote MPS),
 * and each fragment is tagged with the proper SAR value
 * (UNSEGMENTED / START / CONTINUE / END). Returns 0 or negative errno.
 */
2485 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2486 struct sk_buff_head *seg_queue,
2487 struct msghdr *msg, size_t len)
2489 struct sk_buff *skb;
2494 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2496 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2497 * so fragmented skbs are not used. The HCI layer's handling
2498 * of fragmented skbs is not compatible with ERTM's queueing.
2501 /* PDU size is derived from the HCI MTU */
2502 pdu_len = chan->conn->mtu;
2504 /* Constrain PDU size for BR/EDR connections */
2506 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2508 /* Adjust for largest possible L2CAP overhead. */
2510 pdu_len -= L2CAP_FCS_SIZE;
2512 pdu_len -= __ertm_hdr_size(chan);
2514 /* Remote device may have requested smaller PDUs */
2515 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2517 if (len <= pdu_len) {
2518 sar = L2CAP_SAR_UNSEGMENTED;
/* First segment of a multi-PDU SDU carries the SDU length field. */
2522 sar = L2CAP_SAR_START;
2524 pdu_len -= L2CAP_SDULEN_SIZE;
2528 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2531 __skb_queue_purge(seg_queue);
2532 return PTR_ERR(skb);
2535 bt_cb(skb)->control.sar = sar;
2536 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments regain the SDULEN bytes subtracted above. */
2541 pdu_len += L2CAP_SDULEN_SIZE;
2544 if (len <= pdu_len) {
2545 sar = L2CAP_SAR_END;
2548 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow-control PDU: L2CAP header, optional
 * SDU-length prefix on the first segment, then payload. Returns the skb
 * or ERR_PTR on failure.
 */
2555 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2557 size_t len, u16 sdulen)
2559 struct l2cap_conn *conn = chan->conn;
2560 struct sk_buff *skb;
2561 int err, count, hlen;
2562 struct l2cap_hdr *lh;
2564 BT_DBG("chan %p len %zu", chan, len);
2567 return ERR_PTR(-ENOTCONN);
2569 hlen = L2CAP_HDR_SIZE;
2572 hlen += L2CAP_SDULEN_SIZE;
2574 count = min_t(unsigned int, (conn->mtu - hlen), len);
2576 skb = chan->ops->alloc_skb(chan, count + hlen,
2577 msg->msg_flags & MSG_DONTWAIT);
2581 /* Create L2CAP header */
2582 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2583 lh->cid = cpu_to_le16(chan->dcid);
2584 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2587 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2589 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2590 if (unlikely(err < 0)) {
2592 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs on @seg_queue; PDU size is
 * the HCI MTU minus header, capped by the remote MPS, and only the first
 * PDU carries the SDU length. Returns 0 or a negative errno.
 */
2598 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2599 struct sk_buff_head *seg_queue,
2600 struct msghdr *msg, size_t len)
2602 struct sk_buff *skb;
2606 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2608 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2610 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First PDU reserves room for the SDU length field. */
2613 pdu_len -= L2CAP_SDULEN_SIZE;
2619 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2621 __skb_queue_purge(seg_queue);
2622 return PTR_ERR(skb);
2625 __skb_queue_tail(seg_queue, skb);
/* Later PDUs regain the SDULEN bytes reserved for the first. */
2631 pdu_len += L2CAP_SDULEN_SIZE;
/* Top-level send entry point: dispatch on channel type/mode.
 * Connectionless channels get a single G-frame; LE flow-control segments
 * and sends as credits allow; basic mode sends one B-frame; ERTM and
 * streaming segment first and then hand off to the tx state machine or
 * streaming sender. Returns bytes sent or a negative errno.
 */
2638 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2641 struct sk_buff *skb;
2643 struct sk_buff_head seg_queue;
2648 /* Connectionless channel */
2649 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2650 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2652 return PTR_ERR(skb);
2654 l2cap_do_send(chan, skb);
2658 switch (chan->mode) {
2659 case L2CAP_MODE_LE_FLOWCTL:
2660 /* Check outgoing MTU */
2661 if (len > chan->omtu)
/* No credits left: cannot transmit until the peer grants more. */
2664 if (!chan->tx_credits)
2667 __skb_queue_head_init(&seg_queue);
2669 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have closed while segmenting blocked on memory. */
2671 if (chan->state != BT_CONNECTED) {
2672 __skb_queue_purge(&seg_queue);
2679 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2681 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2682 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: ask the owner to stop feeding us data. */
2686 if (!chan->tx_credits)
2687 chan->ops->suspend(chan);
2693 case L2CAP_MODE_BASIC:
2694 /* Check outgoing MTU */
2695 if (len > chan->omtu)
2698 /* Create a basic PDU */
2699 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2701 return PTR_ERR(skb);
2703 l2cap_do_send(chan, skb);
2707 case L2CAP_MODE_ERTM:
2708 case L2CAP_MODE_STREAMING:
2709 /* Check outgoing MTU */
2710 if (len > chan->omtu) {
2715 __skb_queue_head_init(&seg_queue);
2717 /* Do segmentation before calling in to the state machine,
2718 * since it's possible to block while waiting for memory
2721 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2723 /* The channel could have been closed while segmenting,
2724 * check that it is still connected.
2726 if (chan->state != BT_CONNECTED) {
2727 __skb_queue_purge(&seg_queue);
2734 if (chan->mode == L2CAP_MODE_ERTM)
2735 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2737 l2cap_streaming_send(chan, &seg_queue);
2741 /* If the skbs were not queued for sending, they'll still be in
2742 * seg_queue and need to be purged.
2744 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode. */
2748 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every expected sequence number up to @txseq
 * that is not already buffered in srej_q, recording each on srej_list,
 * then advance expected_tx_seq past @txseq.
 */
2755 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2757 struct l2cap_ctrl control;
2760 BT_DBG("chan %p, txseq %u", chan, txseq);
2762 memset(&control, 0, sizeof(control));
2764 control.super = L2CAP_SUPER_SREJ;
2766 for (seq = chan->expected_tx_seq; seq != txseq;
2767 seq = __next_seq(chan, seq)) {
2768 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2769 control.reqseq = seq;
2770 l2cap_send_sframe(chan, &control);
2771 l2cap_seq_list_append(&chan->srej_list, seq);
2775 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (tail) outstanding sequence number on
 * srej_list; no-op when the list is empty.
 */
2778 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2780 struct l2cap_ctrl control;
2782 BT_DBG("chan %p", chan);
2784 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2787 memset(&control, 0, sizeof(control));
2789 control.super = L2CAP_SUPER_SREJ;
2790 control.reqseq = chan->srej_list.tail;
2791 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every sequence still on srej_list except @txseq,
 * rotating entries back onto the list; the captured initial head bounds
 * the walk to a single pass.
 */
2794 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2796 struct l2cap_ctrl control;
2800 BT_DBG("chan %p, txseq %u", chan, txseq);
2802 memset(&control, 0, sizeof(control));
2804 control.super = L2CAP_SUPER_SREJ;
2806 /* Capture initial list head to allow only one pass through the list. */
2807 initial_head = chan->srej_list.head;
2810 seq = l2cap_seq_list_pop(&chan->srej_list);
2811 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2814 control.reqseq = seq;
2815 l2cap_send_sframe(chan, &control);
2816 l2cap_seq_list_append(&chan->srej_list, seq);
2817 } while (chan->srej_list.head != initial_head);
/* Process a received ack (reqseq): free every acknowledged frame from
 * tx_q between expected_ack_seq and @reqseq, update the counters and
 * stop the retransmission timer once nothing is outstanding.
 */
2820 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2822 struct sk_buff *acked_skb;
2825 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or duplicate ack: nothing to do. */
2827 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2830 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2831 chan->expected_ack_seq, chan->unacked_frames);
2833 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2834 ackseq = __next_seq(chan, ackseq)) {
2836 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2838 skb_unlink(acked_skb, &chan->tx_q);
2839 kfree_skb(acked_skb);
2840 chan->unacked_frames--;
2844 chan->expected_ack_seq = reqseq;
2846 if (chan->unacked_frames == 0)
2847 __clear_retrans_timer(chan);
2849 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: reset the expected sequence to
 * buffer_seq, drop all pending SREJ bookkeeping and buffered
 * out-of-order frames, and return the rx state machine to RECV.
 */
2852 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2854 BT_DBG("chan %p", chan);
2856 chan->expected_tx_seq = chan->buffer_seq;
2857 l2cap_seq_list_clear(&chan->srej_list);
2858 skb_queue_purge(&chan->srej_q);
2859 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: queue and send new data, enter or
 * leave local-busy, process incoming acks, and transition to WAIT_F on
 * an explicit poll or retransmission timeout.
 */
2862 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2863 struct l2cap_ctrl *control,
2864 struct sk_buff_head *skbs, u8 event)
2866 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2870 case L2CAP_EV_DATA_REQUEST:
2871 if (chan->tx_send_head == NULL)
2872 chan->tx_send_head = skb_peek(skbs);
2874 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2875 l2cap_ertm_send(chan);
2877 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2878 BT_DBG("Enter LOCAL_BUSY");
2879 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2881 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2882 /* The SREJ_SENT state must be aborted if we are to
2883 * enter the LOCAL_BUSY state.
2885 l2cap_abort_rx_srej_sent(chan);
/* send_ack emits the RNR while CONN_LOCAL_BUSY is set. */
2888 l2cap_send_ack(chan);
2891 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2892 BT_DBG("Exit LOCAL_BUSY");
2893 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* We previously sent RNR: poll the peer with RR(P=1) to resync. */
2895 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2896 struct l2cap_ctrl local_control;
2898 memset(&local_control, 0, sizeof(local_control));
2899 local_control.sframe = 1;
2900 local_control.super = L2CAP_SUPER_RR;
2901 local_control.poll = 1;
2902 local_control.reqseq = chan->buffer_seq;
2903 l2cap_send_sframe(chan, &local_control);
2905 chan->retry_count = 1;
2906 __set_monitor_timer(chan);
2907 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2910 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2911 l2cap_process_reqseq(chan, control->reqseq);
2913 case L2CAP_EV_EXPLICIT_POLL:
2914 l2cap_send_rr_or_rnr(chan, 1);
2915 chan->retry_count = 1;
2916 __set_monitor_timer(chan);
2917 __clear_ack_timer(chan);
2918 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2920 case L2CAP_EV_RETRANS_TO:
2921 l2cap_send_rr_or_rnr(chan, 1);
2922 chan->retry_count = 1;
2923 __set_monitor_timer(chan);
2924 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2926 case L2CAP_EV_RECV_FBIT:
2927 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state (awaiting an F-bit response to
 * our poll): queue data without sending, handle local-busy transitions,
 * return to XMIT when the F-bit arrives, and re-poll (bounded by
 * max_tx) on monitor timeout, disconnecting when the limit is hit.
 */
2934 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2935 struct l2cap_ctrl *control,
2936 struct sk_buff_head *skbs, u8 event)
2938 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2942 case L2CAP_EV_DATA_REQUEST:
2943 if (chan->tx_send_head == NULL)
2944 chan->tx_send_head = skb_peek(skbs);
2945 /* Queue data, but don't send. */
2946 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2948 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2949 BT_DBG("Enter LOCAL_BUSY");
2950 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2952 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2953 /* The SREJ_SENT state must be aborted if we are to
2954 * enter the LOCAL_BUSY state.
2956 l2cap_abort_rx_srej_sent(chan);
2959 l2cap_send_ack(chan);
2962 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2963 BT_DBG("Exit LOCAL_BUSY");
2964 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2966 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2967 struct l2cap_ctrl local_control;
2968 memset(&local_control, 0, sizeof(local_control));
2969 local_control.sframe = 1;
2970 local_control.super = L2CAP_SUPER_RR;
2971 local_control.poll = 1;
2972 local_control.reqseq = chan->buffer_seq;
2973 l2cap_send_sframe(chan, &local_control);
2975 chan->retry_count = 1;
2976 __set_monitor_timer(chan);
2977 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2980 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2981 l2cap_process_reqseq(chan, control->reqseq);
/* F-bit answers our poll: back to XMIT, rearm retrans if needed. */
2985 case L2CAP_EV_RECV_FBIT:
2986 if (control && control->final) {
2987 __clear_monitor_timer(chan);
2988 if (chan->unacked_frames > 0)
2989 __set_retrans_timer(chan);
2990 chan->retry_count = 0;
2991 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format looks like it should be "0x%2.2x". */
2992 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2995 case L2CAP_EV_EXPLICIT_POLL:
/* max_tx == 0 means poll forever; otherwise bound the retries. */
2998 case L2CAP_EV_MONITOR_TO:
2999 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3000 l2cap_send_rr_or_rnr(chan, 1);
3001 __set_monitor_timer(chan);
3002 chan->retry_count++;
3004 l2cap_send_disconn_req(chan, ECONNABORTED);
3012 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3013 struct sk_buff_head *skbs, u8 event)
3015 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3016 chan, control, skbs, event, chan->tx_state);
3018 switch (chan->tx_state) {
3019 case L2CAP_TX_STATE_XMIT:
3020 l2cap_tx_state_xmit(chan, control, skbs, event);
3022 case L2CAP_TX_STATE_WAIT_F:
3023 l2cap_tx_state_wait_f(chan, control, skbs, event);
3031 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3032 struct l2cap_ctrl *control)
3034 BT_DBG("chan %p, control %p", chan, control);
3035 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3038 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3039 struct l2cap_ctrl *control)
3041 BT_DBG("chan %p, control %p", chan, control);
3042 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3045 /* Copy frame to all raw sockets on that connection */
/* Walks conn->chan_l under conn->chan_lock, skips non-raw channels and
 * the channel the frame arrived on, clones the skb and hands each clone
 * to the channel's ->recv() callback.
 */
3046 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3048 struct sk_buff *nskb;
3049 struct l2cap_chan *chan;
3051 BT_DBG("conn %p", conn);
3053 mutex_lock(&conn->chan_lock);
3055 list_for_each_entry(chan, &conn->chan_l, list) {
3056 if (chan->chan_type != L2CAP_CHAN_RAW)
3059 /* Don't send frame to the channel it came from */
3060 if (bt_cb(skb)->chan == chan)
3063 nskb = skb_clone(skb, GFP_KERNEL);
/* NOTE(review): the NULL check on nskb and the kfree_skb() on ->recv()
 * failure are not visible in this listing - confirm against upstream.
 */
3066 if (chan->ops->recv(chan, nskb))
3070 mutex_unlock(&conn->chan_lock);
3073 /* ---- L2CAP signalling commands ---- */
/* l2cap_build_cmd - allocate and fill an skb for a signalling command.
 *
 * Builds the L2CAP header (CID chosen by link type: LE vs BR/EDR
 * signalling channel) plus the command header, copies up to one MTU of
 * payload into the first skb and chains any remainder as continuation
 * fragments on frag_list. Caller owns the returned skb.
 */
3074 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3075 u8 ident, u16 dlen, void *data)
3077 struct sk_buff *skb, **frag;
3078 struct l2cap_cmd_hdr *cmd;
3079 struct l2cap_hdr *lh;
3082 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3083 conn, code, ident, dlen);
/* the MTU must at least hold the two headers */
3085 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3088 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3089 count = min_t(unsigned int, conn->mtu, len);
3091 skb = bt_skb_alloc(count, GFP_KERNEL);
3095 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
3096 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3098 if (conn->hcon->type == LE_LINK)
3099 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3101 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
3103 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
3106 cmd->len = cpu_to_le16(dlen);
/* first fragment: whatever payload fits after the headers */
3109 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3110 memcpy(skb_put(skb, count), data, count);
3116 /* Continuation fragments (no L2CAP header) */
3117 frag = &skb_shinfo(skb)->frag_list;
3119 count = min_t(unsigned int, conn->mtu, len);
3121 *frag = bt_skb_alloc(count, GFP_KERNEL);
/* NOTE(review): the allocation NULL checks, the fail/unwind path and
 * the loop bookkeeping (len/data advance) are not visible in this
 * listing - confirm against upstream.
 */
3125 memcpy(skb_put(*frag, count), data, count);
3130 frag = &(*frag)->next;
/* l2cap_get_conf_opt - parse one configure option (TLV) at *ptr.
 *
 * Writes the option type/length to *type/*olen, decodes 1/2/4-byte
 * values into *val (otherwise *val is a pointer to the raw option
 * bytes), and returns the total bytes consumed so the caller can walk
 * the option list.
 */
3140 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3143 struct l2cap_conf_opt *opt = *ptr;
3146 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* NOTE(review): the switch on opt->len (1/2/4/default) is not visible
 * in this listing; the lines below are its case bodies.
 */
3154 *val = *((u8 *) opt->val);
3158 *val = get_unaligned_le16(opt->val);
3162 *val = get_unaligned_le32(opt->val);
/* default: hand back a pointer to the raw value bytes */
3166 *val = (unsigned long) opt->val;
3170 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* l2cap_add_conf_opt - append one configure option (TLV) at *ptr.
 *
 * 1/2/4-byte values are encoded inline; any other length treats val as
 * a pointer and copies len bytes. *ptr is advanced past the option.
 */
3174 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3176 struct l2cap_conf_opt *opt = *ptr;
3178 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3185 *((u8 *) opt->val) = val;
3189 put_unaligned_le16(val, opt->val);
3193 put_unaligned_le32(val, opt->val);
/* default: val is a pointer to len bytes of payload */
3197 memcpy(opt->val, (void *) val, len);
3201 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* l2cap_add_opt_efs - append an Extended Flow Spec option for chan.
 *
 * ERTM advertises the channel's local flow spec with the default access
 * latency and EFS flush timeout; streaming advertises best-effort. The
 * filled struct is appended via l2cap_add_conf_opt().
 */
3204 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3206 struct l2cap_conf_efs efs;
3208 switch (chan->mode) {
3209 case L2CAP_MODE_ERTM:
3210 efs.id = chan->local_id;
3211 efs.stype = chan->local_stype;
3212 efs.msdu = cpu_to_le16(chan->local_msdu);
3213 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3214 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3215 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3218 case L2CAP_MODE_STREAMING:
/* NOTE(review): the streaming-case id/acc_lat/flush_to assignments and
 * the default: return are not visible in this listing - confirm.
 */
3220 efs.stype = L2CAP_SERV_BESTEFFORT;
3221 efs.msdu = cpu_to_le16(chan->local_msdu);
3222 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3231 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3232 (unsigned long) &efs);
/* l2cap_ack_timeout - delayed-work handler for the ERTM ack timer.
 *
 * If received frames are still unacknowledged (buffer_seq has advanced
 * past last_acked_seq) an RR/RNR is sent to acknowledge them. Drops the
 * channel reference taken when the timer was armed.
 */
3235 static void l2cap_ack_timeout(struct work_struct *work)
3237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3241 BT_DBG("chan %p", chan);
3243 l2cap_chan_lock(chan);
3245 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3246 chan->last_acked_seq);
/* send the ack only when something is outstanding */
3249 l2cap_send_rr_or_rnr(chan, 0);
3251 l2cap_chan_unlock(chan);
3252 l2cap_chan_put(chan);
/* l2cap_ertm_init - reset sequencing state and timers for a channel.
 *
 * Zeroes all ERTM sequence counters, resets the AMP move state to
 * BR/EDR, and (for ERTM mode only) initialises the rx/tx state
 * machines, the retransmit/monitor/ack work items and the srej/retrans
 * sequence lists. Returns 0 or a -errno from the seq-list allocations.
 */
3255 int l2cap_ertm_init(struct l2cap_chan *chan)
3259 chan->next_tx_seq = 0;
3260 chan->expected_tx_seq = 0;
3261 chan->expected_ack_seq = 0;
3262 chan->unacked_frames = 0;
3263 chan->buffer_seq = 0;
3264 chan->frames_sent = 0;
3265 chan->last_acked_seq = 0;
3267 chan->sdu_last_frag = NULL;
3270 skb_queue_head_init(&chan->tx_q);
3272 chan->local_amp_id = AMP_ID_BREDR;
3273 chan->move_id = AMP_ID_BREDR;
3274 chan->move_state = L2CAP_MOVE_STABLE;
3275 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* everything below applies to ERTM only */
3277 if (chan->mode != L2CAP_MODE_ERTM)
3280 chan->rx_state = L2CAP_RX_STATE_RECV;
3281 chan->tx_state = L2CAP_TX_STATE_XMIT;
3283 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3284 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3285 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3287 skb_queue_head_init(&chan->srej_q);
3289 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* on retrans_list failure the srej_list is torn down again */
3293 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3295 l2cap_seq_list_free(&chan->srej_list);
3300 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3303 case L2CAP_MODE_STREAMING:
3304 case L2CAP_MODE_ERTM:
3305 if (l2cap_mode_supported(mode, remote_feat_mask))
3309 return L2CAP_MODE_BASIC;
3313 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3315 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3318 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3320 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* __l2cap_set_ertm_timeouts - pick retransmit/monitor timeouts.
 *
 * On an AMP controller the timeouts are derived from the controller's
 * best-effort flush timeout (converted to ms, scaled, clamped to the
 * 16-bit field); on BR/EDR the spec defaults are used.
 */
3323 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3324 struct l2cap_conf_rfc *rfc)
3326 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3327 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3329 /* Class 1 devices have must have ERTM timeouts
3330 * exceeding the Link Supervision Timeout. The
3331 * default Link Supervision Timeout for AMP
3332 * controllers is 10 seconds.
3334 * Class 1 devices use 0xffffffff for their
3335 * best-effort flush timeout, so the clamping logic
3336 * will result in a timeout that meets the above
3337 * requirement. ERTM timeouts are 16-bit values, so
3338 * the maximum timeout is 65.535 seconds.
3341 /* Convert timeout to milliseconds and round */
3342 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3344 /* This is the recommended formula for class 2 devices
3345 * that start ERTM timers when packets are sent to the
3348 ertm_to = 3 * ertm_to + 500;
/* clamp to the 16-bit RFC field; NOTE(review): the assignment inside
 * the clamp branch is not visible in this listing - confirm.
 */
3350 if (ertm_to > 0xffff)
3353 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3354 rfc->monitor_timeout = rfc->retrans_timeout;
/* BR/EDR path: spec default timeouts */
3356 rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3357 rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3361 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3363 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3364 __l2cap_ews_supported(chan->conn)) {
3365 /* use extended control field */
3366 set_bit(FLAG_EXT_CTRL, &chan->flags);
3367 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3369 chan->tx_win = min_t(u16, chan->tx_win,
3370 L2CAP_DEFAULT_TX_WINDOW);
3371 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3373 chan->ack_win = chan->tx_win;
/* l2cap_build_conf_req - build our outgoing Configure Request in data.
 *
 * On the first request the mode may be downgraded to one the remote's
 * feature mask supports. Emits the MTU option (if non-default), the RFC
 * option, and - mode permitting - EFS, EWS and FCS options. Returns the
 * total request length.
 */
3376 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3378 struct l2cap_conf_req *req = data;
3379 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3380 void *ptr = req->data;
3383 BT_DBG("chan %p", chan);
/* only negotiate the mode on the very first request */
3385 if (chan->num_conf_req || chan->num_conf_rsp)
3388 switch (chan->mode) {
3389 case L2CAP_MODE_STREAMING:
3390 case L2CAP_MODE_ERTM:
3391 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3394 if (__l2cap_efs_supported(chan->conn))
3395 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* fall back to a mode the remote advertises */
3399 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3404 if (chan->imtu != L2CAP_DEFAULT_MTU)
3405 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3407 switch (chan->mode) {
3408 case L2CAP_MODE_BASIC:
/* basic mode needs no RFC option unless the peer knows ERTM/streaming */
3409 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3410 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3413 rfc.mode = L2CAP_MODE_BASIC;
3415 rfc.max_transmit = 0;
3416 rfc.retrans_timeout = 0;
3417 rfc.monitor_timeout = 0;
3418 rfc.max_pdu_size = 0;
3420 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3421 (unsigned long) &rfc);
3424 case L2CAP_MODE_ERTM:
3425 rfc.mode = L2CAP_MODE_ERTM;
3426 rfc.max_transmit = chan->max_tx;
3428 __l2cap_set_ertm_timeouts(chan, &rfc);
/* cap the PDU size so a full PDU always fits in the HCI MTU */
3430 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3431 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3433 rfc.max_pdu_size = cpu_to_le16(size);
3435 l2cap_txwin_setup(chan);
3437 rfc.txwin_size = min_t(u16, chan->tx_win,
3438 L2CAP_DEFAULT_TX_WINDOW);
3440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3441 (unsigned long) &rfc);
3443 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3444 l2cap_add_opt_efs(&ptr, chan);
/* an extended window travels in its own EWS option */
3446 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3450 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3451 if (chan->fcs == L2CAP_FCS_NONE ||
3452 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3453 chan->fcs = L2CAP_FCS_NONE;
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3459 case L2CAP_MODE_STREAMING:
3460 l2cap_txwin_setup(chan);
3461 rfc.mode = L2CAP_MODE_STREAMING;
3463 rfc.max_transmit = 0;
3464 rfc.retrans_timeout = 0;
3465 rfc.monitor_timeout = 0;
3467 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3468 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3470 rfc.max_pdu_size = cpu_to_le16(size);
3472 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3473 (unsigned long) &rfc);
3475 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3476 l2cap_add_opt_efs(&ptr, chan);
3478 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3479 if (chan->fcs == L2CAP_FCS_NONE ||
3480 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3481 chan->fcs = L2CAP_FCS_NONE;
3482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3488 req->dcid = cpu_to_le16(chan->dcid);
3489 req->flags = __constant_cpu_to_le16(0);
/* l2cap_parse_conf_req - parse the peer's buffered Configure Request
 * (chan->conf_req/conf_len) and build our Configure Response in data.
 *
 * First pass walks the option TLVs recording MTU/flush-to/RFC/FCS/EFS/
 * EWS; unknown non-hint options are echoed back with CONF_UNKNOWN.
 * Then the requested mode is validated against ours and the accepted
 * values (txwin, max_tx, MPS, timeouts, EFS) are written into the
 * response. Returns the response length or -ECONNREFUSED.
 */
3494 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3496 struct l2cap_conf_rsp *rsp = data;
3497 void *ptr = rsp->data;
3498 void *req = chan->conf_req;
3499 int len = chan->conf_len;
3500 int type, hint, olen;
3502 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3503 struct l2cap_conf_efs efs;
3505 u16 mtu = L2CAP_DEFAULT_MTU;
3506 u16 result = L2CAP_CONF_SUCCESS;
3509 BT_DBG("chan %p", chan);
3511 while (len >= L2CAP_CONF_OPT_SIZE) {
3512 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3514 hint = type & L2CAP_CONF_HINT;
3515 type &= L2CAP_CONF_MASK;
3518 case L2CAP_CONF_MTU:
3522 case L2CAP_CONF_FLUSH_TO:
3523 chan->flush_to = val;
3526 case L2CAP_CONF_QOS:
3529 case L2CAP_CONF_RFC:
3530 if (olen == sizeof(rfc))
3531 memcpy(&rfc, (void *) val, olen);
3534 case L2CAP_CONF_FCS:
3535 if (val == L2CAP_FCS_NONE)
3536 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
/* NOTE(review): efs is a stack local with no initializer in this
 * listing; if olen != sizeof(efs) it is used below with indeterminate
 * contents. Upstream guards the later uses with a remote_efs flag that
 * is not visible here - confirm.
 */
3539 case L2CAP_CONF_EFS:
3541 if (olen == sizeof(efs))
3542 memcpy(&efs, (void *) val, olen);
3545 case L2CAP_CONF_EWS:
3546 if (!chan->conn->hs_enabled)
3547 return -ECONNREFUSED;
3549 set_bit(FLAG_EXT_CTRL, &chan->flags);
3550 set_bit(CONF_EWS_RECV, &chan->conf_state);
3551 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3552 chan->remote_tx_win = val;
/* unknown non-hint option: reject and echo the offending type */
3559 result = L2CAP_CONF_UNKNOWN;
3560 *((u8 *) ptr++) = type;
3565 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3568 switch (chan->mode) {
3569 case L2CAP_MODE_STREAMING:
3570 case L2CAP_MODE_ERTM:
/* state-2 devices keep their configured mode; others adapt */
3571 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3572 chan->mode = l2cap_select_mode(rfc.mode,
3573 chan->conn->feat_mask);
3578 if (__l2cap_efs_supported(chan->conn))
3579 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3581 return -ECONNREFUSED;
3584 if (chan->mode != rfc.mode)
3585 return -ECONNREFUSED;
3591 if (chan->mode != rfc.mode) {
3592 result = L2CAP_CONF_UNACCEPT;
3593 rfc.mode = chan->mode;
3595 if (chan->num_conf_rsp == 1)
3596 return -ECONNREFUSED;
3598 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3599 (unsigned long) &rfc);
3602 if (result == L2CAP_CONF_SUCCESS) {
3603 /* Configure output options and let the other side know
3604 * which ones we don't like. */
3606 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3607 result = L2CAP_CONF_UNACCEPT;
3610 set_bit(CONF_MTU_DONE, &chan->conf_state);
3612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* the remote's EFS service type must be compatible with ours */
3615 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3616 efs.stype != L2CAP_SERV_NOTRAFIC &&
3617 efs.stype != chan->local_stype) {
3619 result = L2CAP_CONF_UNACCEPT;
3621 if (chan->num_conf_req >= 1)
3622 return -ECONNREFUSED;
3624 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3626 (unsigned long) &efs);
3628 /* Send PENDING Conf Rsp */
3629 result = L2CAP_CONF_PENDING;
3630 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3635 case L2CAP_MODE_BASIC:
3636 chan->fcs = L2CAP_FCS_NONE;
3637 set_bit(CONF_MODE_DONE, &chan->conf_state);
3640 case L2CAP_MODE_ERTM:
3641 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3642 chan->remote_tx_win = rfc.txwin_size;
3644 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3646 chan->remote_max_tx = rfc.max_transmit;
/* cap the remote MPS so PDUs fit in our HCI MTU */
3648 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3649 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3650 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3651 rfc.max_pdu_size = cpu_to_le16(size);
3652 chan->remote_mps = size;
3654 __l2cap_set_ertm_timeouts(chan, &rfc);
3656 set_bit(CONF_MODE_DONE, &chan->conf_state);
3658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3659 sizeof(rfc), (unsigned long) &rfc);
3661 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3662 chan->remote_id = efs.id;
3663 chan->remote_stype = efs.stype;
3664 chan->remote_msdu = le16_to_cpu(efs.msdu);
3665 chan->remote_flush_to =
3666 le32_to_cpu(efs.flush_to);
3667 chan->remote_acc_lat =
3668 le32_to_cpu(efs.acc_lat);
3669 chan->remote_sdu_itime =
3670 le32_to_cpu(efs.sdu_itime);
3671 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3673 (unsigned long) &efs);
3677 case L2CAP_MODE_STREAMING:
3678 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3679 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3680 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3681 rfc.max_pdu_size = cpu_to_le16(size);
3682 chan->remote_mps = size;
3684 set_bit(CONF_MODE_DONE, &chan->conf_state);
3686 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3687 (unsigned long) &rfc);
/* any other mode: reject with an RFC reflecting our mode */
3692 result = L2CAP_CONF_UNACCEPT;
3694 memset(&rfc, 0, sizeof(rfc));
3695 rfc.mode = chan->mode;
3698 if (result == L2CAP_CONF_SUCCESS)
3699 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3701 rsp->scid = cpu_to_le16(chan->dcid);
3702 rsp->result = cpu_to_le16(result);
3703 rsp->flags = __constant_cpu_to_le16(0);
/* l2cap_parse_conf_rsp - digest the peer's Configure Response and
 * build the follow-up Configure Request in data.
 *
 * Walks the response option TLVs, adopting or pushing back on each
 * (MTU floor, flush timeout, RFC mode check, EWS window, EFS service
 * type, FCS). On success/pending, applies the negotiated ERTM or
 * streaming parameters. Returns the new request length or
 * -ECONNREFUSED.
 */
3708 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3709 void *data, u16 *result)
3711 struct l2cap_conf_req *req = data;
3712 void *ptr = req->data;
3715 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3716 struct l2cap_conf_efs efs;
3718 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3720 while (len >= L2CAP_CONF_OPT_SIZE) {
3721 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3724 case L2CAP_CONF_MTU:
/* never accept an MTU below the spec minimum */
3725 if (val < L2CAP_DEFAULT_MIN_MTU) {
3726 *result = L2CAP_CONF_UNACCEPT;
3727 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3730 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3733 case L2CAP_CONF_FLUSH_TO:
3734 chan->flush_to = val;
3735 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3739 case L2CAP_CONF_RFC:
3740 if (olen == sizeof(rfc))
3741 memcpy(&rfc, (void *)val, olen);
/* state-2 devices may not switch away from their mode */
3743 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3744 rfc.mode != chan->mode)
3745 return -ECONNREFUSED;
3749 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3750 sizeof(rfc), (unsigned long) &rfc);
3753 case L2CAP_CONF_EWS:
3754 chan->ack_win = min_t(u16, val, chan->ack_win);
3755 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* NOTE(review): efs has no initializer in this listing; when
 * olen != sizeof(efs) the stype comparison below reads indeterminate
 * stack data. Later upstream hardening rejects mismatched olen
 * outright - confirm.
 */
3759 case L2CAP_CONF_EFS:
3760 if (olen == sizeof(efs))
3761 memcpy(&efs, (void *)val, olen);
3763 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3764 efs.stype != L2CAP_SERV_NOTRAFIC &&
3765 efs.stype != chan->local_stype)
3766 return -ECONNREFUSED;
3768 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3769 (unsigned long) &efs);
3772 case L2CAP_CONF_FCS:
3773 if (*result == L2CAP_CONF_PENDING)
3774 if (val == L2CAP_FCS_NONE)
3775 set_bit(CONF_RECV_NO_FCS,
/* a basic-mode channel cannot be reconfigured to another mode */
3781 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3782 return -ECONNREFUSED;
3784 chan->mode = rfc.mode;
3786 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3788 case L2CAP_MODE_ERTM:
3789 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3790 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3791 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3792 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3793 chan->ack_win = min_t(u16, chan->ack_win,
3796 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3797 chan->local_msdu = le16_to_cpu(efs.msdu);
3798 chan->local_sdu_itime =
3799 le32_to_cpu(efs.sdu_itime);
3800 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3801 chan->local_flush_to =
3802 le32_to_cpu(efs.flush_to);
3806 case L2CAP_MODE_STREAMING:
3807 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3811 req->dcid = cpu_to_le16(chan->dcid);
3812 req->flags = __constant_cpu_to_le16(0);
/* l2cap_build_conf_rsp - fill a bare Configure Response header
 * (scid/result/flags) into data and return its length.
 */
3817 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3818 u16 result, u16 flags)
3820 struct l2cap_conf_rsp *rsp = data;
3821 void *ptr = rsp->data;
3823 BT_DBG("chan %p", chan);
3825 rsp->scid = cpu_to_le16(chan->dcid);
3826 rsp->result = cpu_to_le16(result);
3827 rsp->flags = cpu_to_le16(flags);
/* NOTE(review): the trailing 'return ptr - data;' is not visible in
 * this listing - confirm against upstream.
 */
3832 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3834 struct l2cap_le_conn_rsp rsp;
3835 struct l2cap_conn *conn = chan->conn;
3837 BT_DBG("chan %p", chan);
3839 rsp.dcid = cpu_to_le16(chan->scid);
3840 rsp.mtu = cpu_to_le16(chan->imtu);
3841 rsp.mps = cpu_to_le16(chan->mps);
3842 rsp.credits = cpu_to_le16(chan->rx_credits);
3843 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3845 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* __l2cap_connect_rsp_defer - send the deferred Connect Response
 * (success/no-info) once authorisation completes, then kick off
 * configuration with our first Configure Request.
 */
3849 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3851 struct l2cap_conn_rsp rsp;
3852 struct l2cap_conn *conn = chan->conn;
3856 rsp.scid = cpu_to_le16(chan->dcid);
3857 rsp.dcid = cpu_to_le16(chan->scid);
3858 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3859 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
/* NOTE(review): the condition choosing CREATE_CHAN_RSP (AMP) over
 * CONN_RSP is not visible in this listing - upstream tests the
 * channel's AMP id; confirm.
 */
3862 rsp_code = L2CAP_CREATE_CHAN_RSP;
3864 rsp_code = L2CAP_CONN_RSP;
3866 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3868 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* send the first Configure Request only once */
3870 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3873 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3874 l2cap_build_conf_req(chan, buf), buf);
3875 chan->num_conf_req++;
/* l2cap_conf_rfc_get - extract RFC/EWS values from a successful
 * Configure Response and apply the negotiated ERTM/streaming
 * parameters (timeouts, MPS, ack window) to the channel.
 */
3878 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3882 /* Use sane default values in case a misbehaving remote device
3883 * did not send an RFC or extended window size option.
3885 u16 txwin_ext = chan->ack_win;
3886 struct l2cap_conf_rfc rfc = {
3888 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3889 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3890 .max_pdu_size = cpu_to_le16(chan->imtu),
3891 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3894 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* nothing to pull out for basic mode */
3896 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3899 while (len >= L2CAP_CONF_OPT_SIZE) {
3900 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3903 case L2CAP_CONF_RFC:
3904 if (olen == sizeof(rfc))
3905 memcpy(&rfc, (void *)val, olen);
3907 case L2CAP_CONF_EWS:
/* NOTE(review): the EWS case body (capturing val into txwin_ext) is
 * not visible in this listing - confirm.
 */
3914 case L2CAP_MODE_ERTM:
3915 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3916 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3917 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3918 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3919 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3921 chan->ack_win = min_t(u16, chan->ack_win,
3924 case L2CAP_MODE_STREAMING:
3925 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* l2cap_command_rej - handle an incoming Command Reject.
 *
 * A "not understood" reject matching our outstanding Info Request means
 * the remote has no extended features: finish feature discovery and
 * start any channels that were waiting on it.
 */
3929 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3930 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3933 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3935 if (cmd_len < sizeof(*rej))
3938 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3941 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3942 cmd->ident == conn->info_ident) {
3943 cancel_delayed_work(&conn->info_timer);
3945 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3946 conn->info_ident = 0;
3948 l2cap_conn_start(conn);
/* l2cap_connect - handle an incoming Connect Request / Create Channel.
 *
 * Looks up a listening channel for the PSM, enforces link security
 * (except for SDP), allocates the child channel, and replies with
 * success/pending/reject. May also fire an Info Request for the
 * remote feature mask and, on immediate success, the first Configure
 * Request. Returns the new channel or NULL.
 */
3954 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3955 struct l2cap_cmd_hdr *cmd,
3956 u8 *data, u8 rsp_code, u8 amp_id)
3958 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3959 struct l2cap_conn_rsp rsp;
3960 struct l2cap_chan *chan = NULL, *pchan;
3961 int result, status = L2CAP_CS_NO_INFO;
3963 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3964 __le16 psm = req->psm;
3966 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3968 /* Check if we have socket listening on psm */
3969 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3970 &conn->hcon->dst, ACL_LINK);
3972 result = L2CAP_CR_BAD_PSM;
3976 mutex_lock(&conn->chan_lock);
3977 l2cap_chan_lock(pchan);
3979 /* Check if the ACL is secure enough (if not SDP) */
3980 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3981 !hci_conn_check_link_mode(conn->hcon)) {
3982 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3983 result = L2CAP_CR_SEC_BLOCK;
3987 result = L2CAP_CR_NO_MEM;
3989 /* Check if we already have channel with that dcid */
3990 if (__l2cap_get_chan_by_dcid(conn, scid))
3993 chan = pchan->ops->new_connection(pchan);
3997 /* For certain devices (ex: HID mouse), support for authentication,
3998 * pairing and bonding is optional. For such devices, inorder to avoid
3999 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4000 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4002 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4004 bacpy(&chan->src, &conn->hcon->src);
4005 bacpy(&chan->dst, &conn->hcon->dst);
4006 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
4007 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
4010 chan->local_amp_id = amp_id;
4012 __l2cap_chan_add(conn, chan);
4016 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4018 chan->ident = cmd->ident;
4020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4021 if (l2cap_chan_check_security(chan)) {
4022 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4023 l2cap_state_change(chan, BT_CONNECT2);
4024 result = L2CAP_CR_PEND;
4025 status = L2CAP_CS_AUTHOR_PEND;
4026 chan->ops->defer(chan);
4028 /* Force pending result for AMP controllers.
4029 * The connection will succeed after the
4030 * physical link is up.
4032 if (amp_id == AMP_ID_BREDR) {
4033 l2cap_state_change(chan, BT_CONFIG);
4034 result = L2CAP_CR_SUCCESS;
4036 l2cap_state_change(chan, BT_CONNECT2);
4037 result = L2CAP_CR_PEND;
4039 status = L2CAP_CS_NO_INFO;
/* security check failed: wait for authentication */
4042 l2cap_state_change(chan, BT_CONNECT2);
4043 result = L2CAP_CR_PEND;
4044 status = L2CAP_CS_AUTHEN_PEND;
/* feature mask still unknown: result stays pending */
4047 l2cap_state_change(chan, BT_CONNECT2);
4048 result = L2CAP_CR_PEND;
4049 status = L2CAP_CS_NO_INFO;
4053 l2cap_chan_unlock(pchan);
4054 mutex_unlock(&conn->chan_lock);
4057 rsp.scid = cpu_to_le16(scid);
4058 rsp.dcid = cpu_to_le16(dcid);
4059 rsp.result = cpu_to_le16(result);
4060 rsp.status = cpu_to_le16(status);
4061 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* no feature mask yet: request it now and finish setup later */
4063 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4064 struct l2cap_info_req info;
4065 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4067 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4068 conn->info_ident = l2cap_get_ident(conn);
4070 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4072 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4073 sizeof(info), &info);
4076 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4077 result == L2CAP_CR_SUCCESS) {
4079 set_bit(CONF_REQ_SENT, &chan->conf_state);
4080 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4081 l2cap_build_conf_req(chan, buf), buf);
4082 chan->num_conf_req++;
/* l2cap_connect_req - validate an incoming Connect Request PDU,
 * notify mgmt of the (first) device connection, and dispatch to
 * l2cap_connect().
 */
4088 static int l2cap_connect_req(struct l2cap_conn *conn,
4089 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4091 struct hci_dev *hdev = conn->hcon->hdev;
4092 struct hci_conn *hcon = conn->hcon;
4094 if (cmd_len < sizeof(struct l2cap_conn_req))
/* NOTE(review): the matching hci_dev_lock(hdev) before the mgmt
 * notification is not visible in this listing (only the unlock below
 * is) - confirm against upstream.
 */
4098 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
4099 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4100 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
4101 hcon->dst_type, 0, NULL, 0,
4103 hci_dev_unlock(hdev);
4105 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* l2cap_connect_create_rsp - handle a Connect/Create Channel Response.
 *
 * Locates the channel by scid (or, failing that, by the command ident),
 * then on success moves it to BT_CONFIG and sends the first Configure
 * Request; a pending result just marks the channel, anything else tears
 * the channel down with ECONNREFUSED.
 */
4109 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4110 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4113 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4114 u16 scid, dcid, result, status;
4115 struct l2cap_chan *chan;
4119 if (cmd_len < sizeof(*rsp))
4122 scid = __le16_to_cpu(rsp->scid);
4123 dcid = __le16_to_cpu(rsp->dcid);
4124 result = __le16_to_cpu(rsp->result);
4125 status = __le16_to_cpu(rsp->status);
4127 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4128 dcid, scid, result, status);
4130 mutex_lock(&conn->chan_lock);
4133 chan = __l2cap_get_chan_by_scid(conn, scid);
/* fall back to the ident when scid is zero/unknown */
4139 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4148 l2cap_chan_lock(chan);
4151 case L2CAP_CR_SUCCESS:
4152 l2cap_state_change(chan, BT_CONFIG);
4155 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4157 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4160 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4161 l2cap_build_conf_req(chan, req), req);
4162 chan->num_conf_req++;
/* NOTE(review): the L2CAP_CR_PEND and default case labels are not
 * visible in this listing; the two statements below are their bodies -
 * confirm.
 */
4166 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4170 l2cap_chan_del(chan, ECONNREFUSED);
4174 l2cap_chan_unlock(chan);
4177 mutex_unlock(&conn->chan_lock);
4182 static inline void set_default_fcs(struct l2cap_chan *chan)
4184 /* FCS is enabled only in ERTM or streaming mode, if one or both
4187 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4188 chan->fcs = L2CAP_FCS_NONE;
4189 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4190 chan->fcs = L2CAP_FCS_CRC16;
4193 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4194 u8 ident, u16 flags)
4196 struct l2cap_conn *conn = chan->conn;
4198 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4201 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4202 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4204 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4205 l2cap_build_conf_rsp(chan, data,
4206 L2CAP_CONF_SUCCESS, flags), data);
4209 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4212 struct l2cap_cmd_rej_cid rej;
4214 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4215 rej.scid = __cpu_to_le16(scid);
4216 rej.dcid = __cpu_to_le16(dcid);
4218 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* l2cap_config_req - handle an incoming Configure Request.
 *
 * Accumulates (possibly continued) option data in chan->conf_req, then
 * parses the complete request, sends our Configure Response, and - once
 * both directions are configured - initialises ERTM and marks the
 * channel ready. Also sends our own first Configure Request if it has
 * not gone out yet.
 */
4221 static inline int l2cap_config_req(struct l2cap_conn *conn,
4222 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4225 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4228 struct l2cap_chan *chan;
4231 if (cmd_len < sizeof(*req))
4234 dcid = __le16_to_cpu(req->dcid);
4235 flags = __le16_to_cpu(req->flags);
4237 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4239 chan = l2cap_get_chan_by_scid(conn, dcid);
4241 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* configuration only makes sense in BT_CONFIG / BT_CONNECT2 */
4245 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4246 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4251 /* Reject if config buffer is too small. */
4252 len = cmd_len - sizeof(*req);
4253 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4254 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4255 l2cap_build_conf_rsp(chan, rsp,
4256 L2CAP_CONF_REJECT, flags), rsp);
4261 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4262 chan->conf_len += len;
4264 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4265 /* Incomplete config. Send empty response. */
4266 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4267 l2cap_build_conf_rsp(chan, rsp,
4268 L2CAP_CONF_SUCCESS, flags), rsp);
4272 /* Complete config. */
4273 len = l2cap_parse_conf_req(chan, rsp);
4275 l2cap_send_disconn_req(chan, ECONNRESET);
4279 chan->ident = cmd->ident;
4280 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4281 chan->num_conf_rsp++;
4283 /* Reset config buffer. */
4286 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* both directions done: bring the channel up */
4289 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4290 set_default_fcs(chan);
4292 if (chan->mode == L2CAP_MODE_ERTM ||
4293 chan->mode == L2CAP_MODE_STREAMING)
4294 err = l2cap_ertm_init(chan);
4297 l2cap_send_disconn_req(chan, -err);
4299 l2cap_chan_ready(chan);
4304 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4306 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4307 l2cap_build_conf_req(chan, buf), buf);
4308 chan->num_conf_req++;
4311 /* Got Conf Rsp PENDING from remote side and asume we sent
4312 Conf Rsp PENDING in the code above */
4313 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4314 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4316 /* check compatibility */
4318 /* Send rsp for BR/EDR channel */
/* NOTE(review): the hs_hcon condition choosing between the immediate
 * BR/EDR response and deferring (storing ident) for AMP is not visible
 * in this listing - confirm.
 */
4320 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4322 chan->ident = cmd->ident;
4326 l2cap_chan_unlock(chan);
/* l2cap_config_rsp - handle an incoming Configure Response.
 *
 * Success applies the negotiated RFC values; pending may trigger the
 * EFS response or AMP logical-link creation; unacceptable parameters
 * cause a bounded renegotiation (up to L2CAP_CONF_MAX_CONF_RSP tries);
 * anything else disconnects. Once both directions are configured the
 * channel is initialised for ERTM/streaming and marked ready.
 */
4330 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4331 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4334 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4335 u16 scid, flags, result;
4336 struct l2cap_chan *chan;
4337 int len = cmd_len - sizeof(*rsp);
4340 if (cmd_len < sizeof(*rsp))
4343 scid = __le16_to_cpu(rsp->scid);
4344 flags = __le16_to_cpu(rsp->flags);
4345 result = __le16_to_cpu(rsp->result);
4347 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4350 chan = l2cap_get_chan_by_scid(conn, scid);
4355 case L2CAP_CONF_SUCCESS:
4356 l2cap_conf_rfc_get(chan, rsp->data, len);
4357 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4360 case L2CAP_CONF_PENDING:
4361 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4363 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4366 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4369 l2cap_send_disconn_req(chan, ECONNRESET);
4373 if (!chan->hs_hcon) {
4374 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4377 if (l2cap_check_efs(chan)) {
4378 amp_create_logical_link(chan);
4379 chan->ident = cmd->ident;
4385 case L2CAP_CONF_UNACCEPT:
4386 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* NOTE(review): req is a fixed-size stack buffer whose declaration is
 * not visible in this listing; the length guard below must hold before
 * l2cap_parse_conf_rsp() writes into it - confirm the buffer size.
 */
4389 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4390 l2cap_send_disconn_req(chan, ECONNRESET);
4394 /* throw out any old stored conf requests */
4395 result = L2CAP_CONF_SUCCESS;
4396 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4399 l2cap_send_disconn_req(chan, ECONNRESET);
4403 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4404 L2CAP_CONF_REQ, len, req);
4405 chan->num_conf_req++;
4406 if (result != L2CAP_CONF_SUCCESS)
/* default: unrecoverable result - tear the channel down */
4412 l2cap_chan_set_err(chan, ECONNRESET);
4414 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4415 l2cap_send_disconn_req(chan, ECONNRESET);
/* more options to come in a continuation packet */
4419 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4422 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4424 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4425 set_default_fcs(chan);
4427 if (chan->mode == L2CAP_MODE_ERTM ||
4428 chan->mode == L2CAP_MODE_STREAMING)
4429 err = l2cap_ertm_init(chan);
4432 l2cap_send_disconn_req(chan, -err);
4434 l2cap_chan_ready(chan);
4438 l2cap_chan_unlock(chan);
4442 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4443 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4446 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4447 struct l2cap_disconn_rsp rsp;
4449 struct l2cap_chan *chan;
4451 if (cmd_len != sizeof(*req))
4454 scid = __le16_to_cpu(req->scid);
4455 dcid = __le16_to_cpu(req->dcid);
4457 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4459 mutex_lock(&conn->chan_lock);
4461 chan = __l2cap_get_chan_by_scid(conn, dcid);
4463 mutex_unlock(&conn->chan_lock);
4464 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4468 l2cap_chan_lock(chan);
4470 rsp.dcid = cpu_to_le16(chan->scid);
4471 rsp.scid = cpu_to_le16(chan->dcid);
4472 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4474 chan->ops->set_shutdown(chan);
4476 l2cap_chan_hold(chan);
4477 l2cap_chan_del(chan, ECONNRESET);
4479 l2cap_chan_unlock(chan);
4481 chan->ops->close(chan);
4482 l2cap_chan_put(chan);
4484 mutex_unlock(&conn->chan_lock);
4489 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4490 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4493 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4495 struct l2cap_chan *chan;
4497 if (cmd_len != sizeof(*rsp))
4500 scid = __le16_to_cpu(rsp->scid);
4501 dcid = __le16_to_cpu(rsp->dcid);
4503 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4505 mutex_lock(&conn->chan_lock);
4507 chan = __l2cap_get_chan_by_scid(conn, scid);
4509 mutex_unlock(&conn->chan_lock);
4513 l2cap_chan_lock(chan);
4515 l2cap_chan_hold(chan);
4516 l2cap_chan_del(chan, 0);
4518 l2cap_chan_unlock(chan);
4520 chan->ops->close(chan);
4521 l2cap_chan_put(chan);
4523 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request (code 0x0a).
 *
 * Answers L2CAP_IT_FEAT_MASK with our feature mask (ERTM/streaming
 * always; extended flow/window only when high-speed is enabled) and
 * L2CAP_IT_FIXED_CHAN with the fixed-channel bitmap (A2MP bit toggled
 * by hs_enabled).  Unknown types get an L2CAP_IR_NOTSUPP response.
 *
 * NOTE(review): lines appear to have been dropped here (the local
 * 'buf' arrays, 'u16 type' declaration, else branches and returns
 * are missing) — restore against the upstream file before building.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;

	/* Reject malformed PDU lengths */
	if (cmd_len != sizeof(*req))

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		/* ERTM and streaming are always supported */
		feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* Advertise the A2MP fixed channel only with HS enabled */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
		/* Unknown information type: not supported */
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response (code 0x0b).
 *
 * Completes our two-step information exchange: a FEAT_MASK answer may
 * trigger a follow-up FIXED_CHAN request; once the exchange is done the
 * pending channels on this connection are started via
 * l2cap_conn_start().
 *
 * NOTE(review): 'u16 type, result' declarations, 'switch (type)',
 * break/return statements and closing braces appear to have been
 * dropped — restore against the upstream file before building.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;

	if (cmd_len < sizeof(*rsp))

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused: treat the exchange as finished */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by querying the fixed-channel map */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
			/* No fixed-channel support: exchange complete */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP, code 0x0c).
 *
 * amp_id 0 (AMP_ID_BREDR) degenerates to an ordinary BR/EDR connect.
 * Otherwise the AMP controller id is validated and, on success, the new
 * channel is bound to the AMP manager and its high-speed hci_conn; on
 * any failure a Create Channel Response with L2CAP_CR_BAD_AMP is sent.
 *
 * NOTE(review): 'psm'/'scid' declarations, error-path labels, returns
 * and closing braces appear to have been dropped — restore against the
 * upstream file before building.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;

	if (cmd_len != sizeof(*req))

	/* AMP signalling is only valid when high speed is enabled */
	if (!conn->hs_enabled)

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
			/* No AMP link to the peer: reject with the CID pair */
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links carry their own CRC; no L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;

	/* Failure path: tell the remote the AMP id was bad */
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4723 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4725 struct l2cap_move_chan_req req;
4728 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4730 ident = l2cap_get_ident(chan->conn);
4731 chan->ident = ident;
4733 req.icid = cpu_to_le16(chan->scid);
4734 req.dest_amp_id = dest_amp_id;
4736 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4739 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4742 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4744 struct l2cap_move_chan_rsp rsp;
4746 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4748 rsp.icid = cpu_to_le16(chan->dcid);
4749 rsp.result = cpu_to_le16(result);
4751 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4755 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4757 struct l2cap_move_chan_cfm cfm;
4759 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4761 chan->ident = l2cap_get_ident(chan->conn);
4763 cfm.icid = cpu_to_le16(chan->scid);
4764 cfm.result = cpu_to_le16(result);
4766 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4769 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4772 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4774 struct l2cap_move_chan_cfm cfm;
4776 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4778 cfm.icid = cpu_to_le16(icid);
4779 cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4781 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4785 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4788 struct l2cap_move_chan_cfm_rsp rsp;
4790 BT_DBG("icid 0x%4.4x", icid);
4792 rsp.icid = cpu_to_le16(icid);
4793 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4796 static void __release_logical_link(struct l2cap_chan *chan)
4798 chan->hs_hchan = NULL;
4799 chan->hs_hcon = NULL;
4801 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup for @chan.
 *
 * A failure before the channel is connected aborts channel creation;
 * after connection it aborts an in-progress move, with responder and
 * initiator roles cleaned up differently.
 *
 * NOTE(review): break/return statements and closing braces appear to
 * have been dropped — restore against the upstream file.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator we cannot host the channel */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			l2cap_move_done(chan);

		/* Other amp move states imply that the move
		 * has already aborted
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4835 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4836 struct hci_chan *hchan)
4838 struct l2cap_conf_rsp rsp;
4840 chan->hs_hchan = hchan;
4841 chan->hs_hcon->l2cap_data = chan->conn;
4843 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4845 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4848 set_default_fcs(chan);
4850 err = l2cap_ertm_init(chan);
4852 l2cap_send_disconn_req(chan, -err);
4854 l2cap_chan_ready(chan);
/* Complete an AMP channel move once the logical link @hchan is up,
 * advancing the move state machine according to which side initiated
 * the move and whether the remote has already answered.
 *
 * NOTE(review): 'break' statements, the 'default:' label and closing
 * braces appear to have been dropped — restore against upstream.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);
		chan->move_state = L2CAP_MOVE_STABLE;
4892 /* Call with chan locked */
4893 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4896 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4899 l2cap_logical_fail(chan);
4900 __release_logical_link(chan);
4904 if (chan->state != BT_CONNECTED) {
4905 /* Ignore logical link if channel is on BR/EDR */
4906 if (chan->local_amp_id != AMP_ID_BREDR)
4907 l2cap_logical_finish_create(chan, hchan);
4909 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan between BR/EDR and an AMP controller.
 *
 * From BR/EDR: only proceed when policy prefers AMP; physical link
 * setup is still a placeholder.  From AMP: set up the move and request
 * a transfer back to controller 0 (BR/EDR).
 *
 * NOTE(review): the early 'return', the 'else' branch and closing
 * braces appear to have been dropped — restore against upstream.
 */
void l2cap_move_start(struct l2cap_chan *chan)
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Only move off BR/EDR if policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
		/* Moving back to BR/EDR (controller id 0) */
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after AMP physical-link setup completes.
 *
 * Outgoing (BT_CONNECT): on success issue a Create Channel Request on
 * the AMP; on failure fall back to a plain BR/EDR Connect Request.
 * Incoming: answer the pending Create Channel Request with success or
 * L2CAP_CR_NO_MEM, and on success start configuration.
 *
 * NOTE(review): returns, closing braces and the 'u8 buf[128]'
 * declaration appear to have been dropped — restore against upstream.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links provide their own CRC; no L2CAP FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,

		if (result == L2CAP_CR_SUCCESS) {
			/* Success: move to configuration phase */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
4984 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4987 l2cap_move_setup(chan);
4988 chan->move_id = local_amp_id;
4989 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4991 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a Move Channel Request once our physical link
 * state is known.  If the logical link is already connected the move is
 * accepted immediately; otherwise we wait for the logical-link
 * confirmation, or refuse with L2CAP_MR_NOT_ALLOWED.
 *
 * NOTE(review): hchan acquisition is a placeholder (left NULL here) and
 * several branch/brace lines appear to have been dropped — restore
 * against upstream; as shown, 'hchan->state' would dereference NULL.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan->state == BT_CONNECTED) {
		/* Logical link is ready to go */
		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;
		chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		/* Wait for logical link to be ready */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;

	/* Logical link not available */
	l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5019 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5021 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5023 if (result == -EINVAL)
5024 rsp_result = L2CAP_MR_BAD_ID;
5026 rsp_result = L2CAP_MR_NOT_ALLOWED;
5028 l2cap_send_move_chan_rsp(chan, rsp_result);
5031 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5032 chan->move_state = L2CAP_MOVE_STABLE;
5034 /* Restart data transmission */
5035 l2cap_ertm_send(chan);
/* Invoke with locked chan */

/* Physical-link completion callback from the AMP layer.
 *
 * A dying channel is ignored; a not-yet-connected channel continues
 * creation; a failed result cancels the move; otherwise the move
 * proceeds per our role (initiator/responder).
 *
 * NOTE(review): 'return', 'break' statements and closing braces appear
 * to have been dropped — restore against the upstream file.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going down: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			/* Unexpected role: abort the move */
			l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request (AMP, code 0x0e).
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming
 * mode, policy permits), that the destination controller id exists and
 * is up, and detects move collisions (larger bd_addr wins).  Replies
 * with SUCCESS/PEND or an error result accordingly.
 *
 * NOTE(review): 'u16 icid', returns, 'hci_dev_put', labels and closing
 * braces appear to have been dropped — restore against upstream.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only legal when high speed is enabled */
	if (!conn->hs_enabled)

	chan = l2cap_get_chan_by_dcid(conn, icid);
		/* Unknown ICID: refuse without a channel */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,

	chan->ident = cmd->ident;

	/* Fixed channels and non-ERTM/streaming modes cannot move */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		/* Moving to an AMP: physical link must be prepared first */
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;

	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
/* Continue the initiator's move state machine on a SUCCESS or PEND
 * Move Channel Response for @icid.
 *
 * NOTE(review): 'break' statements, 'default:' label, goto targets and
 * closing braces appear to have been dropped — restore against
 * upstream; as shown, 'hchan->state' would dereference the NULL
 * placeholder.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
		/* No such channel: confirm unconfirmed by bare ICID */
		l2cap_send_move_chan_cfm_icid(conn, icid);

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* PEND extends the guard time (ERTX) */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			/* Logical link is up or moving to BR/EDR,
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);

	case L2CAP_MOVE_WAIT_RSP:
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
			/* Both logical link and move success
			 * are required to confirm
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;

		/* Placeholder - get hci_chan for logical link */
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		if (hchan->state != BT_CONNECTED)

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
			/* Now only need move success
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);

		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
5260 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5263 struct l2cap_chan *chan;
5265 chan = l2cap_get_chan_by_ident(conn, ident);
5267 /* Could not locate channel, icid is best guess */
5268 l2cap_send_move_chan_cfm_icid(conn, icid);
5272 __clear_chan_timer(chan);
5274 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5275 if (result == L2CAP_MR_COLLISION) {
5276 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5278 /* Cleanup - cancel move */
5279 chan->move_id = chan->local_amp_id;
5280 l2cap_move_done(chan);
5284 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5286 l2cap_chan_unlock(chan);
5289 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5290 struct l2cap_cmd_hdr *cmd,
5291 u16 cmd_len, void *data)
5293 struct l2cap_move_chan_rsp *rsp = data;
5296 if (cmd_len != sizeof(*rsp))
5299 icid = le16_to_cpu(rsp->icid);
5300 result = le16_to_cpu(rsp->result);
5302 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5304 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5305 l2cap_move_continue(conn, icid, result);
5307 l2cap_move_fail(conn, cmd->ident, icid, result);
5312 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5313 struct l2cap_cmd_hdr *cmd,
5314 u16 cmd_len, void *data)
5316 struct l2cap_move_chan_cfm *cfm = data;
5317 struct l2cap_chan *chan;
5320 if (cmd_len != sizeof(*cfm))
5323 icid = le16_to_cpu(cfm->icid);
5324 result = le16_to_cpu(cfm->result);
5326 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5328 chan = l2cap_get_chan_by_dcid(conn, icid);
5330 /* Spec requires a response even if the icid was not found */
5331 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5335 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5336 if (result == L2CAP_MC_CONFIRMED) {
5337 chan->local_amp_id = chan->move_id;
5338 if (chan->local_amp_id == AMP_ID_BREDR)
5339 __release_logical_link(chan);
5341 chan->move_id = chan->local_amp_id;
5344 l2cap_move_done(chan);
5347 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5349 l2cap_chan_unlock(chan);
5354 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5355 struct l2cap_cmd_hdr *cmd,
5356 u16 cmd_len, void *data)
5358 struct l2cap_move_chan_cfm_rsp *rsp = data;
5359 struct l2cap_chan *chan;
5362 if (cmd_len != sizeof(*rsp))
5365 icid = le16_to_cpu(rsp->icid);
5367 BT_DBG("icid 0x%4.4x", icid);
5369 chan = l2cap_get_chan_by_scid(conn, icid);
5373 __clear_chan_timer(chan);
5375 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5376 chan->local_amp_id = chan->move_id;
5378 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5379 __release_logical_link(chan);
5381 l2cap_move_done(chan);
5384 l2cap_chan_unlock(chan);
5389 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5394 if (min > max || min < 6 || max > 3200)
5397 if (to_multiplier < 10 || to_multiplier > 3200)
5400 if (max >= to_multiplier * 8)
5403 max_latency = (to_multiplier * 8 / max) - 1;
5404 if (latency > 499 || latency > max_latency)
5410 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5411 struct l2cap_cmd_hdr *cmd,
5412 u16 cmd_len, u8 *data)
5414 struct hci_conn *hcon = conn->hcon;
5415 struct l2cap_conn_param_update_req *req;
5416 struct l2cap_conn_param_update_rsp rsp;
5417 u16 min, max, latency, to_multiplier;
5420 if (!(hcon->link_mode & HCI_LM_MASTER))
5423 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5426 req = (struct l2cap_conn_param_update_req *) data;
5427 min = __le16_to_cpu(req->min);
5428 max = __le16_to_cpu(req->max);
5429 latency = __le16_to_cpu(req->latency);
5430 to_multiplier = __le16_to_cpu(req->to_multiplier);
5432 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5433 min, max, latency, to_multiplier);
5435 memset(&rsp, 0, sizeof(rsp));
5437 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5439 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5441 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5443 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5447 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Handle an incoming LE Credit Based Connection Response (code 0x15).
 *
 * Matches the pending channel by command ident; SUCCESS stores the
 * remote's dcid/MTU/MPS/initial credits and readies the channel, any
 * other result deletes it with ECONNREFUSED.  MTU/MPS below the LE
 * minimum of 23 are rejected outright.
 *
 * NOTE(review): 'int err', the 'switch (result)' line, goto labels,
 * returns and closing braces appear to have been dropped — restore
 * against the upstream file.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rsp))

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the spec-defined minimum for LE MTU and MPS */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);

	l2cap_chan_lock(chan);

	case L2CAP_CR_SUCCESS:
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);

		/* Any failure result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);

	l2cap_chan_unlock(chan);

	mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signalling command to its handler.
 *
 * Echo Requests are answered inline by mirroring the payload; unknown
 * codes return -EINVAL so the caller sends a Command Reject.
 *
 * NOTE(review): 'int err = 0;', 'break' statements, 'err = -EINVAL;',
 * 'return err;' and closing braces appear to have been dropped —
 * restore against the upstream file.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);

	case L2CAP_ECHO_RSP:

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request (code 0x14).
 *
 * Finds a listening channel for the PSM, enforces the security level
 * and minimum MTU/MPS (23), rejects duplicate source CIDs, then spawns
 * a new channel with LE flow control.  DEFER_SETUP listeners answer
 * with L2CAP_CR_PEND and no immediate response; everything else gets an
 * LE Connection Response carrying our dcid/MTU/MPS/credits and result.
 *
 * NOTE(review): 'psm'/'dcid'/'result' handling lines, goto labels,
 * returns and closing braces appear to have been dropped — restore
 * against the upstream file.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;

	if (cmd_len != sizeof(*req))

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	/* 23 is the spec-defined minimum for LE MTU and MPS */
	if (mtu < 23 || mps < 23)

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
		result = L2CAP_CR_BAD_PSM;

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Listener's security level must already be satisfied */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;

		goto response_unlock;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;

		goto response_unlock;

	chan = pchan->ops->new_connection(pchan);
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);

	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);

	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Userspace will accept/reject later; reply PEND */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;

	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* PEND means no response PDU yet */
	if (result == L2CAP_CR_PEND)

	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5703 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5704 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5707 struct l2cap_le_credits *pkt;
5708 struct l2cap_chan *chan;
5711 if (cmd_len != sizeof(*pkt))
5714 pkt = (struct l2cap_le_credits *) data;
5715 cid = __le16_to_cpu(pkt->cid);
5716 credits = __le16_to_cpu(pkt->credits);
5718 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5720 chan = l2cap_get_chan_by_dcid(conn, cid);
5724 chan->tx_credits += credits;
5726 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5727 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5731 if (chan->tx_credits)
5732 chan->ops->resume(chan);
5734 l2cap_chan_unlock(chan);
5739 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5740 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5743 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5744 struct l2cap_chan *chan;
5746 if (cmd_len < sizeof(*rej))
5749 mutex_lock(&conn->chan_lock);
5751 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5755 l2cap_chan_lock(chan);
5756 l2cap_chan_del(chan, ECONNREFUSED);
5757 l2cap_chan_unlock(chan);
5760 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signalling command to its handler.
 *
 * When LE CoC support is disabled (enable_lecoc module option), the
 * credit-based connection and disconnect opcodes are rejected before
 * dispatch.  Unknown codes return -EINVAL so the caller sends a
 * Command Reject.
 *
 * NOTE(review): 'int err = 0;', the not-supported 'return -EINVAL;',
 * 'break' statements and closing braces appear to have been dropped —
 * restore against the upstream file.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,

	if (!enable_lecoc) {
		switch (cmd->code) {
		case L2CAP_LE_CONN_REQ:
		case L2CAP_LE_CONN_RSP:
		case L2CAP_LE_CREDITS:
		case L2CAP_DISCONN_REQ:
		case L2CAP_DISCONN_RSP:

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);

	case L2CAP_CONN_PARAM_UPDATE_RSP:

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);

		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one inbound PDU on the LE signaling channel (CID 0x0005).
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command.
 * Malformed PDUs (bad length, ident 0) are dropped; a handler error
 * triggers an L2CAP Command Reject back to the peer.
 */
5822 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5823 struct sk_buff *skb)
5825 struct hci_conn *hcon = conn->hcon;
5826 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5830 if (hcon->type != LE_LINK)
5833 if (skb->len < L2CAP_CMD_HDR_SIZE)
5836 cmd = (void *) skb->data;
5837 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5839 len = le16_to_cpu(cmd->len);
5841 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Command length must exactly match the remaining data and the
 * identifier must be non-zero (0 is reserved by the spec). */
5843 if (len != skb->len || !cmd->ident) {
5844 BT_DBG("corrupted command");
5848 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5850 struct l2cap_cmd_rej_unk rej;
5852 BT_ERR("Wrong link type (%d)", err);
/* Tell the peer we did not understand the command. */
5854 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5855 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process inbound PDUs on the BR/EDR signaling channel (CID 0x0001).
 * A single ACL-U signaling PDU may concatenate several commands, hence
 * the loop over the buffer.  Raw sockets get a copy of the whole PDU
 * first via l2cap_raw_recv().
 */
5863 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5864 struct sk_buff *skb)
5866 struct hci_conn *hcon = conn->hcon;
5867 u8 *data = skb->data;
5869 struct l2cap_cmd_hdr cmd;
5872 l2cap_raw_recv(conn, skb);
/* BR/EDR signaling only valid on ACL links. */
5874 if (hcon->type != ACL_LINK)
/* Walk the PDU, one command header + payload at a time. */
5877 while (len >= L2CAP_CMD_HDR_SIZE) {
5879 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5880 data += L2CAP_CMD_HDR_SIZE;
5881 len -= L2CAP_CMD_HDR_SIZE;
5883 cmd_len = le16_to_cpu(cmd.len);
5885 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop on a command that claims more data than remains, or has a
 * reserved zero identifier. */
5888 if (cmd_len > len || !cmd.ident) {
5889 BT_DBG("corrupted command");
5893 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5895 struct l2cap_cmd_rej_unk rej;
5897 BT_ERR("Wrong link type (%d)", err);
5899 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5900 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 Frame Check Sequence on a received ERTM/streaming
 * frame.  The FCS covers the L2CAP header (which has already been
 * pulled off, hence the "skb->data - hdr_size" back-reference) plus the
 * payload.  On CRC16 channels the trailing FCS bytes are trimmed from
 * the skb before comparison.  Returns non-zero on mismatch (return
 * statements elided in this truncated listing).
 */
5912 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5914 u16 our_fcs, rcv_fcs;
/* Extended control field means a larger frame header is covered. */
5917 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5918 hdr_size = L2CAP_EXT_HDR_SIZE;
5920 hdr_size = L2CAP_ENH_HDR_SIZE;
5922 if (chan->fcs == L2CAP_FCS_CRC16) {
5923 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, skb->data + skb->len points at the received FCS. */
5924 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5925 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5927 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send an RNR if we are locally
 * busy, otherwise flush pending I-frames, and if none of those carried
 * the required F-bit, follow up with an RR S-frame so the poll is
 * acknowledged either way.
 */
5933 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5935 struct l2cap_ctrl control;
5937 BT_DBG("chan %p", chan);
5939 memset(&control, 0, sizeof(control));
5942 control.reqseq = chan->buffer_seq;
/* The response to a poll must carry the Final bit. */
5943 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5945 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5946 control.super = L2CAP_SUPER_RNR;
5947 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy condition: restart the retransmission
 * timer if frames are still unacknowledged. */
5950 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5951 chan->unacked_frames > 0)
5952 __set_retrans_timer(chan);
5954 /* Send pending iframes */
5955 l2cap_ertm_send(chan);
5957 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5958 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5959 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5962 control.super = L2CAP_SUPER_RR;
5963 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list without copying data.  The
 * caller tracks the list tail in *@last_frag so appends stay O(1).
 * skb's aggregate accounting (len/data_len/truesize) is updated to
 * include the new fragment.
 */
5967 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5968 struct sk_buff **last_frag)
5970 /* skb->len reflects data in skb as well as all fragments
5971 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * remembered tail. */
5973 if (!skb_has_frag_list(skb))
5974 skb_shinfo(skb)->frag_list = new_frag;
5976 new_frag->next = NULL;
5978 (*last_frag)->next = new_frag;
5979 *last_frag = new_frag;
5981 skb->len += new_frag->len;
5982 skb->data_len += new_frag->len;
5983 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * (Segmentation And Reassembly) bits in @control.  Unsegmented frames
 * go straight to the channel's recv op; START frames open a new SDU
 * whose length prefix is validated against imtu; CONTINUE/END frames
 * are appended until the announced sdu_len is reached.  Error paths
 * that free a partial SDU are partly elided in this truncated listing.
 */
5986 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5987 struct l2cap_ctrl *control)
5991 switch (control->sar) {
5992 case L2CAP_SAR_UNSEGMENTED:
5996 err = chan->ops->recv(chan, skb);
5999 case L2CAP_SAR_START:
/* SDU length prefix precedes the payload in a START frame. */
6003 chan->sdu_len = get_unaligned_le16(skb->data);
6004 skb_pull(skb, L2CAP_SDULEN_SIZE);
6006 if (chan->sdu_len > chan->imtu) {
/* A START frame must not already contain the whole SDU. */
6011 if (skb->len >= chan->sdu_len)
6015 chan->sdu_last_frag = skb;
6021 case L2CAP_SAR_CONTINUE:
6025 append_skb_frag(chan->sdu, skb,
6026 &chan->sdu_last_frag);
/* Overflow past the announced SDU length is a protocol error. */
6029 if (chan->sdu->len >= chan->sdu_len)
6039 append_skb_frag(chan->sdu, skb,
6040 &chan->sdu_last_frag);
/* END frame: total must match sdu_len exactly. */
6043 if (chan->sdu->len != chan->sdu_len)
6046 err = chan->ops->recv(chan, chan->sdu);
6049 /* Reassembly complete */
6051 chan->sdu_last_frag = NULL;
/* Failure path: discard the partial SDU. */
6059 kfree_skb(chan->sdu);
6061 chan->sdu_last_frag = NULL;
/* Re-segment queued data after an AMP channel move changes the MPS.
 * NOTE(review): the body is elided from this truncated listing —
 * consult the full source.
 */
6068 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM state machine that the local receiver became busy
 * (@busy != 0) or recovered.  Feeds LOCAL_BUSY_DETECTED/CLEAR events
 * into l2cap_tx(); a no-op for non-ERTM channels.
 */
6074 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6078 if (chan->mode != L2CAP_MODE_ERTM)
6081 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6082 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames to
 * reassembly, advancing buffer_seq, until a sequence gap appears or we
 * go locally busy.  Once the queue empties the channel drops back to
 * the normal RECV state and acknowledges.
 */
6085 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6088 /* Pass sequential frames to l2cap_reassemble_sdu()
6089 * until a gap is encountered.
6092 BT_DBG("chan %p", chan);
6094 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6095 struct sk_buff *skb;
6096 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6097 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6099 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6104 skb_unlink(skb, &chan->srej_q);
6105 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6106 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All gaps filled: resume normal reception and ack. */
6111 if (skb_queue_empty(&chan->srej_q)) {
6112 chan->rx_state = L2CAP_RX_STATE_RECV;
6113 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for (control->reqseq).  Invalid reqseq or exceeding the
 * per-frame retry limit (max_tx) disconnects the channel.  The P/F bit
 * handling guards against retransmitting twice for the same poll
 * (CONN_SREJ_ACT + srej_save_reqseq).
 */
6119 static void l2cap_handle_srej(struct l2cap_chan *chan,
6120 struct l2cap_ctrl *control)
6122 struct sk_buff *skb;
6124 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next unsent seq is nonsensical — protocol violation. */
6126 if (control->reqseq == chan->next_tx_seq) {
6127 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6128 l2cap_send_disconn_req(chan, ECONNRESET);
6132 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6135 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries. */
6140 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
6141 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6142 l2cap_send_disconn_req(chan, ECONNRESET);
6146 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6148 if (control->poll) {
6149 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmission must carry the F-bit in response. */
6151 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6152 l2cap_retransmit(chan, control);
6153 l2cap_ertm_send(chan);
6155 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6156 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6157 chan->srej_save_reqseq = control->reqseq;
6160 l2cap_pass_to_tx_fbit(chan, control);
6162 if (control->final) {
/* Only retransmit if this F-bit does not answer the SREJ we
 * already acted on for the same reqseq. */
6163 if (chan->srej_save_reqseq != control->reqseq ||
6164 !test_and_clear_bit(CONN_SREJ_ACT,
6166 l2cap_retransmit(chan, control);
6168 l2cap_retransmit(chan, control);
6169 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6170 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6171 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: the peer requests retransmission of
 * everything from control->reqseq onward.  Invalid reqseq or a frame
 * past its retry limit disconnects.  CONN_REJ_ACT prevents a duplicate
 * retransmit-all when the F-bit answers a REJ we already served.
 */
6177 static void l2cap_handle_rej(struct l2cap_chan *chan,
6178 struct l2cap_ctrl *control)
6180 struct sk_buff *skb;
6182 BT_DBG("chan %p, control %p", chan, control);
/* REJ of the next unsent seq is a protocol violation. */
6184 if (control->reqseq == chan->next_tx_seq) {
6185 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6186 l2cap_send_disconn_req(chan, ECONNRESET);
6190 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6192 if (chan->max_tx && skb &&
6193 bt_cb(skb)->control.retries >= chan->max_tx) {
6194 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6195 l2cap_send_disconn_req(chan, ECONNRESET);
6199 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6201 l2cap_pass_to_tx(chan, control);
6203 if (control->final) {
/* F-bit: retransmit only if not already handled for this REJ. */
6204 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6205 l2cap_retransmit_all(chan, control);
6207 l2cap_retransmit_all(chan, control);
6208 l2cap_ertm_send(chan);
6209 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6210 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's TxSeq relative to the receive window:
 * EXPECTED, DUPLICATE, UNEXPECTED (gap -> SREJ), the SREJ-recovery
 * variants, or INVALID / INVALID_IGNORE.  The distinction between
 * "invalid, disconnect" and "invalid, safe to ignore" depends on
 * whether tx_win exceeds half the sequence space (see the double-poll
 * comment inline).
 */
6214 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6216 BT_DBG("chan %p, txseq %d", chan, txseq);
6218 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6219 chan->expected_tx_seq);
/* While SREJs are outstanding, sequence numbers are judged against
 * the SREJ bookkeeping rather than the plain window. */
6221 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6222 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6224 /* See notes below regarding "double poll" and
6227 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6228 BT_DBG("Invalid/Ignore - after SREJ");
6229 return L2CAP_TXSEQ_INVALID_IGNORE;
6231 BT_DBG("Invalid - in window after SREJ sent");
6232 return L2CAP_TXSEQ_INVALID;
6236 if (chan->srej_list.head == txseq) {
6237 BT_DBG("Expected SREJ");
6238 return L2CAP_TXSEQ_EXPECTED_SREJ;
6241 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6242 BT_DBG("Duplicate SREJ - txseq already stored");
6243 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6246 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6247 BT_DBG("Unexpected SREJ - not requested");
6248 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6252 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls outside tx_win. */
6253 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6255 BT_DBG("Invalid - txseq outside tx window");
6256 return L2CAP_TXSEQ_INVALID;
6259 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod sequence space) => replay. */
6263 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6264 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6265 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6266 return L2CAP_TXSEQ_DUPLICATE;
6269 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6270 /* A source of invalid packets is a "double poll" condition,
6271 * where delays cause us to send multiple poll packets. If
6272 * the remote stack receives and processes both polls,
6273 * sequence numbers can wrap around in such a way that a
6274 * resent frame has a sequence number that looks like new data
6275 * with a sequence gap. This would trigger an erroneous SREJ
6278 * Fortunately, this is impossible with a tx window that's
6279 * less than half of the maximum sequence number, which allows
6280 * invalid frames to be safely ignored.
6282 * With tx window sizes greater than half of the tx window
6283 * maximum, the frame is invalid and cannot be ignored. This
6284 * causes a disconnect.
6287 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6288 BT_DBG("Invalid/Ignore - txseq outside tx window");
6289 return L2CAP_TXSEQ_INVALID_IGNORE;
6291 BT_DBG("Invalid - txseq outside tx window");
6292 return L2CAP_TXSEQ_INVALID;
6295 BT_DBG("Unexpected - txseq indicates missing frames");
6296 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV (normal) state.  Dispatches on the
 * incoming event: I-frames are classified by TxSeq and either
 * reassembled, queued behind an SREJ, or dropped; RR/RNR/REJ/SREJ
 * S-frames update the transmit side.  Frames not consumed
 * (skb_in_use stays false) are freed at the end.
 * NOTE(review): break statements and some branches are elided in this
 * truncated listing.
 */
6300 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6301 struct l2cap_ctrl *control,
6302 struct sk_buff *skb, u8 event)
6305 bool skb_in_use = false;
6307 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6311 case L2CAP_EV_RECV_IFRAME:
6312 switch (l2cap_classify_txseq(chan, control->txseq)) {
6313 case L2CAP_TXSEQ_EXPECTED:
6314 l2cap_pass_to_tx(chan, control);
/* Local busy: frame is dropped; recovery fetches it later. */
6316 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6317 BT_DBG("Busy, discarding expected seq %d",
6322 chan->expected_tx_seq = __next_seq(chan,
6325 chan->buffer_seq = chan->expected_tx_seq;
6328 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit acknowledges an earlier REJ/poll exchange. */
6332 if (control->final) {
6333 if (!test_and_clear_bit(CONN_REJ_ACT,
6334 &chan->conn_state)) {
6336 l2cap_retransmit_all(chan, control);
6337 l2cap_ertm_send(chan);
6341 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6342 l2cap_send_ack(chan);
6344 case L2CAP_TXSEQ_UNEXPECTED:
6345 l2cap_pass_to_tx(chan, control);
6347 /* Can't issue SREJ frames in the local busy state.
6348 * Drop this frame, it will be seen as missing
6349 * when local busy is exited.
6351 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6352 BT_DBG("Busy, discarding unexpected seq %d",
6357 /* There was a gap in the sequence, so an SREJ
6358 * must be sent for each missing frame. The
6359 * current frame is stored for later use.
6361 skb_queue_tail(&chan->srej_q, skb);
6363 BT_DBG("Queued %p (queue len %d)", skb,
6364 skb_queue_len(&chan->srej_q));
6366 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6367 l2cap_seq_list_clear(&chan->srej_list);
6368 l2cap_send_srej(chan, control->txseq);
/* Enter SREJ recovery until the gap is filled. */
6370 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6372 case L2CAP_TXSEQ_DUPLICATE:
6373 l2cap_pass_to_tx(chan, control);
6375 case L2CAP_TXSEQ_INVALID_IGNORE:
6377 case L2CAP_TXSEQ_INVALID:
6379 l2cap_send_disconn_req(chan, ECONNRESET);
6383 case L2CAP_EV_RECV_RR:
6384 l2cap_pass_to_tx(chan, control);
6385 if (control->final) {
6386 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6388 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6389 !__chan_is_moving(chan)) {
6391 l2cap_retransmit_all(chan, control);
6394 l2cap_ertm_send(chan);
6395 } else if (control->poll) {
6396 l2cap_send_i_or_rr_or_rnr(chan);
6398 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6399 &chan->conn_state) &&
6400 chan->unacked_frames)
6401 __set_retrans_timer(chan);
6403 l2cap_ertm_send(chan);
6406 case L2CAP_EV_RECV_RNR:
/* Peer is busy: hold retransmissions. */
6407 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6408 l2cap_pass_to_tx(chan, control);
6409 if (control && control->poll) {
6410 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6411 l2cap_send_rr_or_rnr(chan, 0);
6413 __clear_retrans_timer(chan);
6414 l2cap_seq_list_clear(&chan->retrans_list);
6416 case L2CAP_EV_RECV_REJ:
6417 l2cap_handle_rej(chan, control);
6419 case L2CAP_EV_RECV_SREJ:
6420 l2cap_handle_srej(chan, control);
/* Frame was not stored anywhere — release it. */
6426 if (skb && !skb_in_use) {
6427 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT (recovery) state: SREJs are
 * outstanding and out-of-order frames are parked on srej_q.  Frames
 * answering an SREJ are queued and the hold queue drained in order;
 * further gaps generate more SREJs.  S-frames are handled much as in
 * the RECV state but replies use l2cap_send_srej_tail().
 * NOTE(review): break statements and the return are elided in this
 * truncated listing.
 */
6434 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6435 struct l2cap_ctrl *control,
6436 struct sk_buff *skb, u8 event)
6439 u16 txseq = control->txseq;
6440 bool skb_in_use = false;
6442 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6446 case L2CAP_EV_RECV_IFRAME:
6447 switch (l2cap_classify_txseq(chan, txseq)) {
6448 case L2CAP_TXSEQ_EXPECTED:
6449 /* Keep frame for reassembly later */
6450 l2cap_pass_to_tx(chan, control);
6451 skb_queue_tail(&chan->srej_q, skb);
6453 BT_DBG("Queued %p (queue len %d)", skb,
6454 skb_queue_len(&chan->srej_q));
6456 chan->expected_tx_seq = __next_seq(chan, txseq);
6458 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for first. */
6459 l2cap_seq_list_pop(&chan->srej_list);
6461 l2cap_pass_to_tx(chan, control);
6462 skb_queue_tail(&chan->srej_q, skb);
6464 BT_DBG("Queued %p (queue len %d)", skb,
6465 skb_queue_len(&chan->srej_q));
/* Deliver any now-contiguous frames from the hold queue. */
6467 err = l2cap_rx_queued_iframes(chan);
6472 case L2CAP_TXSEQ_UNEXPECTED:
6473 /* Got a frame that can't be reassembled yet.
6474 * Save it for later, and send SREJs to cover
6475 * the missing frames.
6477 skb_queue_tail(&chan->srej_q, skb);
6479 BT_DBG("Queued %p (queue len %d)", skb,
6480 skb_queue_len(&chan->srej_q));
6482 l2cap_pass_to_tx(chan, control);
6483 l2cap_send_srej(chan, control->txseq);
6485 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6486 /* This frame was requested with an SREJ, but
6487 * some expected retransmitted frames are
6488 * missing. Request retransmission of missing
6491 skb_queue_tail(&chan->srej_q, skb);
6493 BT_DBG("Queued %p (queue len %d)", skb,
6494 skb_queue_len(&chan->srej_q));
6496 l2cap_pass_to_tx(chan, control);
6497 l2cap_send_srej_list(chan, control->txseq);
6499 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6500 /* We've already queued this frame. Drop this copy. */
6501 l2cap_pass_to_tx(chan, control);
6503 case L2CAP_TXSEQ_DUPLICATE:
6504 /* Expecting a later sequence number, so this frame
6505 * was already received. Ignore it completely.
6508 case L2CAP_TXSEQ_INVALID_IGNORE:
6510 case L2CAP_TXSEQ_INVALID:
6512 l2cap_send_disconn_req(chan, ECONNRESET);
6516 case L2CAP_EV_RECV_RR:
6517 l2cap_pass_to_tx(chan, control);
6518 if (control->final) {
6519 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6521 if (!test_and_clear_bit(CONN_REJ_ACT,
6522 &chan->conn_state)) {
6524 l2cap_retransmit_all(chan, control);
6527 l2cap_ertm_send(chan);
6528 } else if (control->poll) {
6529 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6530 &chan->conn_state) &&
6531 chan->unacked_frames) {
6532 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the newest missing seq. */
6535 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6536 l2cap_send_srej_tail(chan);
6538 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6539 &chan->conn_state) &&
6540 chan->unacked_frames)
6541 __set_retrans_timer(chan);
6543 l2cap_send_ack(chan);
6546 case L2CAP_EV_RECV_RNR:
6547 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6548 l2cap_pass_to_tx(chan, control);
6549 if (control->poll) {
6550 l2cap_send_srej_tail(chan);
6552 struct l2cap_ctrl rr_control;
6553 memset(&rr_control, 0, sizeof(rr_control));
6554 rr_control.sframe = 1;
6555 rr_control.super = L2CAP_SUPER_RR;
6556 rr_control.reqseq = chan->buffer_seq;
6557 l2cap_send_sframe(chan, &rr_control);
6561 case L2CAP_EV_RECV_REJ:
6562 l2cap_handle_rej(chan, control);
6564 case L2CAP_EV_RECV_SREJ:
6565 l2cap_handle_srej(chan, control);
/* Frame was not stored anywhere — release it. */
6569 if (skb && !skb_in_use) {
6570 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX state machine to RECV,
 * adopt the MTU of whichever controller now carries the channel
 * (AMP block MTU vs. BR/EDR ACL MTU — the hs_hcon test line is elided
 * in this truncated listing), then re-segment pending data.
 */
6577 static int l2cap_finish_move(struct l2cap_chan *chan)
6579 BT_DBG("chan %p", chan);
6581 chan->rx_state = L2CAP_RX_STATE_RECV;
6584 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6586 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6588 return l2cap_resegment(chan);
/* RX state machine, WAIT_P state (used during a channel move): wait
 * for a frame with the Poll bit.  When it arrives, rewind the transmit
 * side to the peer's reqseq, finish the move, answer the poll with the
 * F-bit, and replay non-I-frame events through the RECV handler.
 */
6591 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6592 struct l2cap_ctrl *control,
6593 struct sk_buff *skb, u8 event)
6597 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6603 l2cap_process_reqseq(chan, control->reqseq);
6605 if (!skb_queue_empty(&chan->tx_q))
6606 chan->tx_send_head = skb_peek(&chan->tx_q);
6608 chan->tx_send_head = NULL;
6610 /* Rewind next_tx_seq to the point expected
6613 chan->next_tx_seq = control->reqseq;
6614 chan->unacked_frames = 0;
6616 err = l2cap_finish_move(chan);
6620 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6621 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frame data itself is not consumed in this state. */
6623 if (event == L2CAP_EV_RECV_IFRAME)
6626 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state machine, WAIT_F state (channel move): wait for a frame with
 * the Final bit.  On arrival, resume RECV, rewind the transmit side to
 * the peer's reqseq, adopt the new controller's MTU, re-segment, and
 * then process the triggering frame through the RECV handler.
 */
6629 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6630 struct l2cap_ctrl *control,
6631 struct sk_buff *skb, u8 event)
/* Ignore everything until the F-bit arrives. */
6635 if (!control->final)
6638 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6640 chan->rx_state = L2CAP_RX_STATE_RECV;
6641 l2cap_process_reqseq(chan, control->reqseq);
6643 if (!skb_queue_empty(&chan->tx_q))
6644 chan->tx_send_head = skb_peek(&chan->tx_q);
6646 chan->tx_send_head = NULL;
6648 /* Rewind next_tx_seq to the point expected
6651 chan->next_tx_seq = control->reqseq;
6652 chan->unacked_frames = 0;
/* MTU follows the controller now carrying the channel (hs_hcon test
 * line elided in this truncated listing). */
6655 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6657 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6659 err = l2cap_resegment(chan);
6662 err = l2cap_rx_state_recv(chan, control, skb, event);
6667 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6669 /* Make sure reqseq is for a packet that has been sent but not acked */
6672 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6673 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's ReqSeq, then
 * dispatch to the handler for the channel's current RX state.  An
 * invalid ReqSeq is a protocol violation and disconnects the channel.
 */
6676 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6677 struct sk_buff *skb, u8 event)
6681 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6682 control, skb, event, chan->rx_state);
6684 if (__valid_reqseq(chan, control->reqseq)) {
6685 switch (chan->rx_state) {
6686 case L2CAP_RX_STATE_RECV:
6687 err = l2cap_rx_state_recv(chan, control, skb, event);
6689 case L2CAP_RX_STATE_SREJ_SENT:
6690 err = l2cap_rx_state_srej_sent(chan, control, skb,
6693 case L2CAP_RX_STATE_WAIT_P:
6694 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6696 case L2CAP_RX_STATE_WAIT_F:
6697 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6704 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6705 control->reqseq, chan->next_tx_seq,
6706 chan->expected_ack_seq);
6707 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only the exactly-expected TxSeq is
 * reassembled; anything else flushes any partial SDU and is dropped
 * (streaming mode has no retransmission).  Sequence bookkeeping is
 * advanced for every frame regardless.
 */
6713 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6714 struct sk_buff *skb)
6718 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6721 if (l2cap_classify_txseq(chan, control->txseq) ==
6722 L2CAP_TXSEQ_EXPECTED) {
6723 l2cap_pass_to_tx(chan, control);
6725 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6726 __next_seq(chan, chan->buffer_seq));
6728 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6730 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: a partial SDU can never complete now. */
6733 kfree_skb(chan->sdu);
6736 chan->sdu_last_frag = NULL;
6740 BT_DBG("Freeing %p", skb);
6745 chan->last_acked_seq = control->txseq;
6746 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for data frames on ERTM/streaming channels: unpack the
 * control field, verify the FCS, validate the payload length against
 * MPS, then route I-frames into l2cap_rx()/l2cap_stream_rx() and
 * S-frames into l2cap_rx() via the super->event lookup table.
 * Protocol violations disconnect with ECONNRESET.
 */
6751 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6753 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6757 __unpack_control(chan, skb);
6762 * We can just drop the corrupted I-frame here.
6763 * Receiver will miss it and start proper recovery
6764 * procedures and ask for retransmission.
6766 if (l2cap_check_fcs(chan, skb))
/* The SDU-length prefix and the FCS are not payload for the MPS
 * check. */
6769 if (!control->sframe && control->sar == L2CAP_SAR_START)
6770 len -= L2CAP_SDULEN_SIZE;
6772 if (chan->fcs == L2CAP_FCS_CRC16)
6773 len -= L2CAP_FCS_SIZE;
6775 if (len > chan->mps) {
6776 l2cap_send_disconn_req(chan, ECONNRESET);
6780 if (!control->sframe) {
6783 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6784 control->sar, control->reqseq, control->final,
6787 /* Validate F-bit - F=0 always valid, F=1 only
6788 * valid in TX WAIT_F
6790 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6793 if (chan->mode != L2CAP_MODE_STREAMING) {
6794 event = L2CAP_EV_RECV_IFRAME;
6795 err = l2cap_rx(chan, control, skb, event);
6797 err = l2cap_stream_rx(chan, control, skb);
6801 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit super field to the corresponding RX event. */
6803 const u8 rx_func_to_event[4] = {
6804 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6805 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6808 /* Only I-frames are expected in streaming mode */
6809 if (chan->mode == L2CAP_MODE_STREAMING)
6812 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6813 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftovers are a protocol error. */
6817 BT_ERR("Trailing bytes: %d in sframe", len);
6818 l2cap_send_disconn_req(chan, ECONNRESET);
6822 /* Validate F and P bits */
6823 if (control->final && (control->poll ||
6824 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6827 event = rx_func_to_event[control->super];
6828 if (l2cap_rx(chan, control, skb, event))
6829 l2cap_send_disconn_req(chan, ECONNRESET);
/* LE credit-based flow control: once our receive-credit count drops
 * below half of le_max_credits, top the sender back up to the maximum
 * by issuing an LE Flow Control Credit packet for the difference.
 */
6839 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6841 struct l2cap_conn *conn = chan->conn;
6842 struct l2cap_le_credits pkt;
6845 /* We return more credits to the sender only after the amount of
6846 * credits falls below half of the initial amount.
6848 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6851 return_credits = le_max_credits - chan->rx_credits;
6853 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6855 chan->rx_credits += return_credits;
6857 pkt.cid = cpu_to_le16(chan->scid);
6858 pkt.credits = cpu_to_le16(return_credits);
6860 chan->ident = l2cap_get_ident(conn);
6862 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive a PDU on an LE credit-based (CoC) channel: consume a credit,
 * enforce MTU limits, and reassemble SDUs whose first PDU carries a
 * 16-bit SDU-length prefix.  Frees any partial SDU on the error path
 * and returns 0 in that case so the caller does not double-free the
 * skb (see comment at the bottom).
 */
6865 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Data without credits is a protocol violation by the sender. */
6869 if (!chan->rx_credits) {
6870 BT_ERR("No credits to receive LE L2CAP data");
6874 if (chan->imtu < skb->len) {
6875 BT_ERR("Too big LE L2CAP PDU");
6880 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
/* Possibly replenish the peer's send credits. */
6882 l2cap_chan_le_send_credits(chan);
6889 sdu_len = get_unaligned_le16(skb->data);
6890 skb_pull(skb, L2CAP_SDULEN_SIZE);
6892 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6893 sdu_len, skb->len, chan->imtu);
6895 if (sdu_len > chan->imtu) {
6896 BT_ERR("Too big LE L2CAP SDU length received");
6901 if (skb->len > sdu_len) {
6902 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU fits in one PDU: deliver straight away. */
6907 if (skb->len == sdu_len)
6908 return chan->ops->recv(chan, skb);
6911 chan->sdu_len = sdu_len;
6912 chan->sdu_last_frag = skb;
6917 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6918 chan->sdu->len, skb->len, chan->sdu_len);
6920 if (chan->sdu->len + skb->len > chan->sdu_len) {
6921 BT_ERR("Too much LE L2CAP data received");
6926 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6929 if (chan->sdu->len == chan->sdu_len) {
6930 err = chan->ops->recv(chan, chan->sdu);
6933 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU. */
6941 kfree_skb(chan->sdu);
6943 chan->sdu_last_frag = NULL;
6947 /* We can't return an error here since we took care of the skb
6948 * freeing internally. An error return would cause the caller to
6949 * do a double-free of the skb.
/* Route an inbound data frame to the channel owning @cid, then hand it
 * to the mode-specific receive path (LE CoC, basic, or ERTM/streaming).
 * A frame for the A2MP CID with no existing channel may create one.
 * Unknown CIDs and non-connected channels drop the frame.
 */
6954 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6955 struct sk_buff *skb)
6957 struct l2cap_chan *chan;
6959 chan = l2cap_get_chan_by_scid(conn, cid);
6961 if (cid == L2CAP_CID_A2MP) {
6962 chan = a2mp_channel_create(conn, skb);
6968 l2cap_chan_lock(chan);
6970 BT_DBG("unknown cid 0x%4.4x", cid);
6971 /* Drop packet and return */
6977 BT_DBG("chan %p, len %d", chan, skb->len);
6979 if (chan->state != BT_CONNECTED)
6982 switch (chan->mode) {
6983 case L2CAP_MODE_LE_FLOWCTL:
6984 if (l2cap_le_data_rcv(chan, skb) < 0)
6989 case L2CAP_MODE_BASIC:
6990 /* If socket recv buffers overflows we drop data here
6991 * which is *bad* because L2CAP has to be reliable.
6992 * But we don't have any other choice. L2CAP doesn't
6993 * provide flow control mechanism. */
/* Oversized basic-mode frames are dropped. */
6995 if (chan->imtu < skb->len)
6998 if (!chan->ops->recv(chan, skb))
7002 case L2CAP_MODE_ERTM:
7003 case L2CAP_MODE_STREAMING:
7004 l2cap_data_rcv(chan, skb);
7008 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7016 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global listener
 * registered for @psm on this address pair.  The remote address and
 * PSM are stashed in the skb control block so the socket layer can
 * fill in msg_name.  BR/EDR (ACL) links only.
 */
7019 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7020 struct sk_buff *skb)
7022 struct hci_conn *hcon = conn->hcon;
7023 struct l2cap_chan *chan;
7025 if (hcon->type != ACL_LINK)
7028 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7033 BT_DBG("chan %p, len %d", chan, skb->len);
7035 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7038 if (chan->imtu < skb->len)
7041 /* Store remote BD_ADDR and PSM for msg_name */
7042 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7043 bt_cb(skb)->psm = psm;
7045 if (!chan->ops->recv(chan, skb))
/* Deliver a frame on the fixed ATT CID to the global ATT channel for
 * this address pair.  Frames from blacklisted remote devices and
 * frames exceeding the channel imtu are dropped.  LE links only.
 */
7052 static void l2cap_att_channel(struct l2cap_conn *conn,
7053 struct sk_buff *skb)
7055 struct hci_conn *hcon = conn->hcon;
7056 struct l2cap_chan *chan;
7058 if (hcon->type != LE_LINK)
7061 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7062 &hcon->src, &hcon->dst);
7066 BT_DBG("chan %p, len %d", chan, skb->len);
7068 if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7071 if (chan->imtu < skb->len)
7074 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signaling, connectionless,
 * ATT, LE signaling, SMP, or an ordinary data channel.  The basic
 * L2CAP header length must match the skb payload exactly.
 */
7081 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7083 struct l2cap_hdr *lh = (void *) skb->data;
7087 skb_pull(skb, L2CAP_HDR_SIZE);
7088 cid = __le16_to_cpu(lh->cid);
7089 len = __le16_to_cpu(lh->len);
/* Header length and actual payload length must agree. */
7091 if (len != skb->len) {
7096 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7099 case L2CAP_CID_SIGNALING:
7100 l2cap_sig_channel(conn, skb);
7103 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the header. */
7104 psm = get_unaligned((__le16 *) skb->data);
7105 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7106 l2cap_conless_channel(conn, psm, skb);
7110 l2cap_att_channel(conn, skb);
7113 case L2CAP_CID_LE_SIGNALING:
7114 l2cap_le_sig_channel(conn, skb);
/* SMP failure tears down the whole connection. */
7118 if (smp_sig_channel(conn, skb))
7119 l2cap_conn_del(conn->hcon, EACCES);
7123 l2cap_data_channel(conn, cid, skb);
7128 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and build the link
 * mode the HCI layer should accept for @bdaddr.  lm1 accumulates modes
 * for listeners bound to this adapter's address, lm2 for wildcard
 * (BDADDR_ANY) listeners; an exact match takes precedence (the line
 * setting "exact" is elided from this truncated listing).
 */
7130 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7132 int exact = 0, lm1 = 0, lm2 = 0;
7133 struct l2cap_chan *c;
7135 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7137 /* Find listening sockets and check their link_mode */
7138 read_lock(&chan_list_lock);
7139 list_for_each_entry(c, &chan_list, global_l) {
7140 if (c->state != BT_LISTEN)
7143 if (!bacmp(&c->src, &hdev->bdaddr)) {
7144 lm1 |= HCI_LM_ACCEPT;
7145 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7146 lm1 |= HCI_LM_MASTER;
7148 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7149 lm2 |= HCI_LM_ACCEPT;
7150 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7151 lm2 |= HCI_LM_MASTER;
7154 read_unlock(&chan_list_lock);
7156 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success create/fetch the l2cap_conn for
 * @hcon and mark it ready; on failure tear the connection down with
 * the mapped errno.
 */
7159 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7161 struct l2cap_conn *conn;
7163 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7166 conn = l2cap_conn_add(hcon);
7168 l2cap_conn_ready(conn);
7170 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the HCI reason code we want for
 * the disconnect.  Without L2CAP state, default to "remote user
 * terminated"; otherwise use the reason recorded on the connection.
 */
7174 int l2cap_disconn_ind(struct hci_conn *hcon)
7176 struct l2cap_conn *conn = hcon->l2cap_data;
7178 BT_DBG("hcon %p", hcon);
7181 return HCI_ERROR_REMOTE_USER_TERM;
7182 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection,
 * translating the HCI reason into an errno for the channels.
 */
7185 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7187 BT_DBG("hcon %p reason %d", hcon, reason);
7189 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer for MEDIUM security and
 * closes HIGH-security channels outright; regaining it cancels the
 * MEDIUM-security timer.
 */
7192 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7194 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7197 if (encrypt == 0x00) {
7198 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7199 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7200 } else if (chan->sec_level == BT_SECURITY_HIGH)
7201 l2cap_chan_close(chan, ECONNREFUSED);
7203 if (chan->sec_level == BT_SECURITY_MEDIUM)
7204 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) event: walk every channel
 * on the connection and advance the ones that were waiting on security
 * — resuming connected channels, (re)starting pending connects, and
 * answering deferred incoming connects with the appropriate
 * Connect Response.  LE links hand off to SMP key distribution
 * instead.  NOTE(review): several branch/return lines are elided in
 * this truncated listing.
 */
7208 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7210 struct l2cap_conn *conn = hcon->l2cap_data;
7211 struct l2cap_chan *chan;
7216 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links: successful encryption triggers SMP key distribution. */
7218 if (hcon->type == LE_LINK) {
7219 if (!status && encrypt)
7220 smp_distribute_keys(conn, 0);
7221 cancel_delayed_work(&conn->security_timer);
7224 mutex_lock(&conn->chan_lock);
7226 list_for_each_entry(chan, &conn->chan_l, list) {
7227 l2cap_chan_lock(chan);
7229 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7230 state_to_string(chan->state));
/* A2MP fixed channels manage their own security. */
7232 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7233 l2cap_chan_unlock(chan);
7237 if (chan->scid == L2CAP_CID_ATT) {
7238 if (!status && encrypt) {
7239 chan->sec_level = hcon->sec_level;
7240 l2cap_chan_ready(chan);
7243 l2cap_chan_unlock(chan);
/* Skip channels not waiting on a security procedure. */
7247 if (!__l2cap_no_conn_pending(chan)) {
7248 l2cap_chan_unlock(chan);
7252 if (!status && (chan->state == BT_CONNECTED ||
7253 chan->state == BT_CONFIG)) {
7254 chan->ops->resume(chan);
7255 l2cap_check_encryption(chan, encrypt);
7256 l2cap_chan_unlock(chan);
/* Outgoing connect was gated on security: start or abort it. */
7260 if (chan->state == BT_CONNECT) {
7262 l2cap_start_connection(chan);
7264 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7265 } else if (chan->state == BT_CONNECT2) {
7266 struct l2cap_conn_rsp rsp;
/* Incoming connect deferred on security: answer it now. */
7270 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7271 res = L2CAP_CR_PEND;
7272 stat = L2CAP_CS_AUTHOR_PEND;
7273 chan->ops->defer(chan);
7275 l2cap_state_change(chan, BT_CONFIG);
7276 res = L2CAP_CR_SUCCESS;
7277 stat = L2CAP_CS_NO_INFO;
7280 l2cap_state_change(chan, BT_DISCONN);
7281 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7282 res = L2CAP_CR_SEC_BLOCK;
7283 stat = L2CAP_CS_NO_INFO;
7286 rsp.scid = cpu_to_le16(chan->dcid);
7287 rsp.dcid = cpu_to_le16(chan->scid);
7288 rsp.result = cpu_to_le16(res);
7289 rsp.status = cpu_to_le16(stat);
7290 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Successful accept: kick off configuration immediately. */
7293 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7294 res == L2CAP_CR_SUCCESS) {
7296 set_bit(CONF_REQ_SENT, &chan->conf_state);
7297 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7299 l2cap_build_conf_req(chan, buf),
7301 chan->num_conf_req++;
7305 l2cap_chan_unlock(chan);
7308 mutex_unlock(&conn->chan_lock);
/* Receive ACL data from HCI and reassemble fragmented L2CAP frames.
 * A start fragment that contains the whole frame is delivered at once;
 * otherwise an rx_skb is allocated for the announced length and
 * continuation fragments are appended until rx_len reaches zero, at
 * which point the completed frame is passed to l2cap_recv_frame()
 * (which takes ownership).  Length inconsistencies mark the connection
 * unreliable.
 */
7313 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7315 struct l2cap_conn *conn = hcon->l2cap_data;
7316 struct l2cap_hdr *hdr;
7319 /* For AMP controller do not create l2cap conn */
7320 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7324 conn = l2cap_conn_add(hcon);
7329 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7333 case ACL_START_NO_FLUSH:
/* A new start while a reassembly is pending: discard the old one. */
7336 BT_ERR("Unexpected start frame (len %d)", skb->len);
7337 kfree_skb(conn->rx_skb);
7338 conn->rx_skb = NULL;
7340 l2cap_conn_unreliable(conn, ECOMM);
7343 /* Start fragment always begin with Basic L2CAP header */
7344 if (skb->len < L2CAP_HDR_SIZE) {
7345 BT_ERR("Frame is too short (len %d)", skb->len);
7346 l2cap_conn_unreliable(conn, ECOMM);
7350 hdr = (struct l2cap_hdr *) skb->data;
7351 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7353 if (len == skb->len) {
7354 /* Complete frame received */
7355 l2cap_recv_frame(conn, skb);
7359 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7361 if (skb->len > len) {
7362 BT_ERR("Frame is too long (len %d, expected len %d)",
7364 l2cap_conn_unreliable(conn, ECOMM);
7368 /* Allocate skb for the complete frame (with header) */
7369 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7373 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still expected from continuations. */
7375 conn->rx_len = len - skb->len;
7379 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7381 if (!conn->rx_len) {
7382 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7383 l2cap_conn_unreliable(conn, ECOMM);
7387 if (skb->len > conn->rx_len) {
7388 BT_ERR("Fragment is too long (len %d, expected %d)",
7389 skb->len, conn->rx_len);
7390 kfree_skb(conn->rx_skb);
7391 conn->rx_skb = NULL;
7393 l2cap_conn_unreliable(conn, ECOMM);
7397 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7399 conn->rx_len -= skb->len;
7401 if (!conn->rx_len) {
7402 /* Complete frame received. l2cap_recv_frame
7403 * takes ownership of the skb so set the global
7404 * rx_skb pointer to NULL first.
7406 struct sk_buff *rx_skb = conn->rx_skb;
7407 conn->rx_skb = NULL;
7408 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap.
 * Walks the global channel list under the read lock and prints one line
 * per channel: src/dst bdaddr, state, PSM, SCID/DCID, IMTU/OMTU,
 * security level and mode.  (The extract omits the address arguments on
 * the seq_printf line and the final return — numbering gap.)
 */
7418 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7420 struct l2cap_chan *c;
7422 read_lock(&chan_list_lock);
7424 list_for_each_entry(c, &chan_list, global_l) {
7425 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7427 c->state, __le16_to_cpu(c->psm),
7428 c->scid, c->dcid, c->imtu, c->omtu,
7429 c->sec_level, c->mode);
7432 read_unlock(&chan_list_lock);
/* debugfs open hook: standard single_open() wiring for the show callback. */
7437 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7439 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry.
 * NOTE(review): no .read is visible here (numbering gap at 7444);
 * seq_read would be expected for a single_open-based file — confirm
 * against the full source.
 */
7442 static const struct file_operations l2cap_debugfs_fops = {
7443 .open = l2cap_debugfs_open,
7445 .llseek = seq_lseek,
7446 .release = single_release,
/* Dentry for the l2cap debugfs file; created in l2cap_init(), removed in
 * l2cap_exit().
 */
7449 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then (if the bluetooth
 * debugfs root exists) create the "l2cap" dump file plus two tunables for
 * LE flow control.  (The error check after l2cap_init_sockets() and the
 * u16-variable arguments to debugfs_create_u16 are missing from this
 * extract — numbering gaps.)
 *
 * NOTE(review): mode 0466 on the two tunables is odd — owner read-only
 * yet group/other writable; the conventional debugfs mode is 0644.
 * Likely a typo — confirm against upstream before relying on it.
 */
7451 int __init l2cap_init(void)
7455 err = l2cap_init_sockets();
7459 if (IS_ERR_OR_NULL(bt_debugfs))
7462 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7463 NULL, &l2cap_debugfs_fops);
7465 debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7467 debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
/* Module teardown: remove the debugfs entry (and, per debugfs semantics,
 * the tunables live under bt_debugfs, not this dentry) and unregister the
 * L2CAP socket layer.  Mirrors l2cap_init() in reverse order.
 */
7473 void l2cap_exit(void)
7475 debugfs_remove(l2cap_debugfs)
7476 l2cap_cleanup_sockets();
/* Runtime knob (writable at 0644): disables Enhanced Retransmission Mode.
 * The disable_ertm bool itself is defined elsewhere in this file.
 */
7479 module_param(disable_ertm, bool, 0644);
7480 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");