2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
/* Map an HCI address type to the BDADDR_* type exposed to sockets.
 * NOTE(review): this fragment is missing lines (no opening brace and no
 * BR/EDR fallthrough visible) — confirm against the full source.
 */
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
/* Any LE address that is not public is reported as random. */
73 return BDADDR_LE_RANDOM;
79 /* ---- L2CAP channels ---- */
/* Per-connection channel lookups.  The double-underscore variants walk
 * conn->chan_l and must be called with conn->chan_lock held; the plain
 * variants take the lock themselves.
 * NOTE(review): loop bodies and returns are partially missing from this
 * fragment — the locked variants presumably also lock the found channel
 * (see "Returns locked channel" comment); verify in the full source.
 */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
86 list_for_each_entry(c, &conn->chan_l, list) {
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
98 list_for_each_entry(c, &conn->chan_l, list) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
127 struct l2cap_chan *c;
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
133 mutex_unlock(&conn->chan_lock);
/* Lookup by signalling-command identifier rather than CID. */
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 struct l2cap_chan *c;
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
159 mutex_unlock(&conn->chan_lock);
/* Global channel registration: PSM/SCID binding against chan_list,
 * protected by chan_list_lock.
 * NOTE(review): error paths and returns are missing from this fragment.
 */
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
/* Match on the source PSM and exact source address. */
169 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a PSM to the channel; when psm is 0 a dynamic PSM in the
 * 0x1001..0x10ff odd range is auto-allocated.
 */
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock(&chan_list_lock);
/* Reject an explicit PSM that is already bound on this address. */
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock(&chan_list_lock);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
215 write_unlock(&chan_list_lock);
/* Allocate a free dynamic CID on this connection; the dynamic range end
 * differs between LE and BR/EDR links.
 */
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
/* State-change helpers: all funnel through the channel ops'
 * state_change callback.  l2cap_chan_set_err and
 * l2cap_state_change_and_error report an error without (visibly here)
 * changing chan->state — they pass the current state back.
 */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
243 chan->ops->state_change(chan, state, 0);
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 chan->ops->state_change(chan, chan->state, err);
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
/* ERTM timer management.  Retransmission and monitor timers are
 * mutually exclusive: the retransmission timer is only armed while the
 * monitor timer is not pending, and arming the monitor timer first
 * clears the retransmission timer.
 */
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of a queue for the skb whose control txseq matches. */
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
289 /* ---- L2CAP sequence number lists ---- */
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292 * SREJ requests that are received and for frames that are to be
293 * retransmitted. These seq_list functions implement a singly-linked
294 * list in an array, where membership in the list can also be checked
295 * in constant time. Items can also be added to the tail of the list
296 * and removed from the head in constant time, without further memory
/* Allocate and initialise a seq_list.  The backing array is rounded up
 * to a power of two so (seq & mask) maps any 14-bit sequence number
 * into the array.  Every slot starts as L2CAP_SEQ_LIST_CLEAR.
 */
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list);
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head of the list; an emptied list resets both
 * head and tail to CLEAR.
 */
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Reset the whole list; no-op when already empty. */
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail in O(1); duplicates are ignored because a
 * non-CLEAR slot means the seq is already a member.
 */
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with
 * a reason derived from its current state, then drops the reference
 * taken when the timer was armed.  Takes conn->chan_lock and the
 * channel lock; ops->close() is called after the channel lock is
 * released.
 */
383 static void l2cap_chan_timeout(struct work_struct *work)
385 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
387 struct l2cap_conn *conn = chan->conn;
390 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
392 mutex_lock(&conn->chan_lock);
393 l2cap_chan_lock(chan);
/* Established or configuring channels time out as "refused"; a
 * connect beyond the SDP security level likewise.
 */
395 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
396 reason = ECONNREFUSED;
397 else if (chan->state == BT_CONNECT &&
398 chan->sec_level != BT_SECURITY_SDP)
399 reason = ECONNREFUSED;
403 l2cap_chan_close(chan, reason);
405 l2cap_chan_unlock(chan);
407 chan->ops->close(chan);
408 mutex_unlock(&conn->chan_lock);
410 l2cap_chan_put(chan);
/* Allocate a new channel, add it to the global chan_list and
 * initialise its lock, timer and refcount.  Returned channel is in
 * BT_OPEN with CONF_NOT_COMPLETE set (cleared by l2cap_chan_ready()).
 * NOTE(review): the NULL check after kzalloc is not visible in this
 * fragment — presumed present in the full source.
 */
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
440 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free. */
442 static void l2cap_chan_destroy(struct kref *kref)
444 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
446 BT_DBG("chan %p", chan);
448 write_lock(&chan_list_lock);
449 list_del(&chan->global_l);
450 write_unlock(&chan_list_lock);
/* Reference counting wrappers around chan->kref. */
455 void l2cap_chan_hold(struct l2cap_chan *c)
457 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
462 void l2cap_chan_put(struct l2cap_chan *c)
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
466 kref_put(&c->kref, l2cap_chan_destroy);
468 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to the spec-default ERTM/security parameters and
 * clear all configuration state.
 */
470 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
472 chan->fcs = L2CAP_FCS_CRC16;
473 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
474 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
476 chan->remote_max_tx = chan->max_tx;
477 chan->remote_tx_win = chan->tx_win;
478 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->sec_level = BT_SECURITY_LOW;
480 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
481 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
482 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
483 chan->conf_state = 0;
485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
487 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control: no TX credits until the
 * peer grants them, RX credits from the module default, MPS clamped
 * to the incoming MTU.
 */
489 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
492 chan->sdu_last_frag = NULL;
494 chan->tx_credits = 0;
495 chan->rx_credits = le_max_credits;
496 chan->mps = min_t(u16, chan->imtu, le_default_mps);
498 skb_queue_head_init(&chan->tx_q);
/* Attach a channel to a connection: assign CIDs/MTU according to the
 * channel type, take channel and hci_conn references, and link into
 * conn->chan_l.  Caller must hold conn->chan_lock (see l2cap_chan_add).
 */
501 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
503 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
504 __le16_to_cpu(chan->psm), chan->dcid);
506 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
510 switch (chan->chan_type) {
511 case L2CAP_CHAN_CONN_ORIENTED:
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 if (conn->hcon->type == ACL_LINK)
515 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_FIXED:
526 /* Caller will set CID and CID specific MTU values */
530 /* Raw socket can send/recv signalling messages only */
531 chan->scid = L2CAP_CID_SIGNALING;
532 chan->dcid = L2CAP_CID_SIGNALING;
533 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort EFS defaults for all channel types. */
536 chan->local_id = L2CAP_BESTEFFORT_ID;
537 chan->local_stype = L2CAP_SERV_BESTEFFORT;
538 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
539 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
540 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
541 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
543 l2cap_chan_hold(chan);
545 hci_conn_hold(conn->hcon);
547 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
550 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
552 mutex_lock(&conn->chan_lock);
553 __l2cap_chan_add(conn, chan);
554 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear it down: stop timers,
 * unlink, drop references, disconnect any AMP logical link, and purge
 * mode-specific queues (ERTM also frees the SREJ/retrans seq lists).
 * NOTE(review): several guard conditions (e.g. "if (conn)") are not
 * visible in this fragment — verify against the full source.
 */
557 void l2cap_chan_del(struct l2cap_chan *chan, int err)
559 struct l2cap_conn *conn = chan->conn;
561 __clear_chan_timer(chan);
563 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
566 struct amp_mgr *mgr = conn->hcon->amp_mgr;
567 /* Delete from channel list */
568 list_del(&chan->list);
570 l2cap_chan_put(chan);
/* A2MP channels do not hold an hci_conn reference. */
574 if (chan->scid != L2CAP_CID_A2MP)
575 hci_conn_drop(conn->hcon);
577 if (mgr && mgr->bredr_chan == chan)
578 mgr->bredr_chan = NULL;
581 if (chan->hs_hchan) {
582 struct hci_chan *hs_hchan = chan->hs_hchan;
584 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
585 amp_disconnect_logical_link(hs_hchan);
588 chan->ops->teardown(chan, err);
590 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of queued data. */
594 case L2CAP_MODE_BASIC:
597 case L2CAP_MODE_LE_FLOWCTL:
598 skb_queue_purge(&chan->tx_q);
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
606 skb_queue_purge(&chan->srej_q);
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
620 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Propagate an updated hcon destination address (e.g. after LE identity
 * resolution) to every channel on the connection.
 */
622 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
624 struct l2cap_conn *conn = hcon->l2cap_data;
625 struct l2cap_chan *chan;
627 mutex_lock(&conn->chan_lock);
629 list_for_each_entry(chan, &conn->chan_l, list) {
630 l2cap_chan_lock(chan);
631 bacpy(&chan->dst, &hcon->dst);
632 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
633 l2cap_chan_unlock(chan);
636 mutex_unlock(&conn->chan_lock);
/* Send a negative connect response for an LE credit-based channel.
 * Deferred-setup channels are rejected with "authorization pending",
 * otherwise with "bad PSM"; the channel moves to BT_DISCONN.
 */
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
641 struct l2cap_conn *conn = chan->conn;
642 struct l2cap_le_conn_rsp rsp;
645 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
646 result = L2CAP_CR_AUTHORIZATION;
648 result = L2CAP_CR_BAD_PSM;
650 l2cap_state_change(chan, BT_DISCONN);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.mtu = cpu_to_le16(chan->imtu);
654 rsp.mps = cpu_to_le16(chan->mps);
655 rsp.credits = cpu_to_le16(chan->rx_credits);
656 rsp.result = cpu_to_le16(result);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: security-block for deferred setup, bad PSM
 * otherwise.
 */
662 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
664 struct l2cap_conn *conn = chan->conn;
665 struct l2cap_conn_rsp rsp;
668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
669 result = L2CAP_CR_SEC_BLOCK;
671 result = L2CAP_CR_BAD_PSM;
673 l2cap_state_change(chan, BT_DISCONN);
675 rsp.scid = cpu_to_le16(chan->dcid);
676 rsp.dcid = cpu_to_le16(chan->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
680 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: established
 * connection-oriented channels send a disconnect request; half-open
 * ones (BT_CONNECT2) send the appropriate reject; everything else is
 * deleted or torn down directly.
 * NOTE(review): the switch's case labels are missing from this
 * fragment — the branch boundaries below are inferred.
 */
683 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
685 struct l2cap_conn *conn = chan->conn;
687 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
689 switch (chan->state) {
691 chan->ops->teardown(chan, 0);
696 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
697 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
698 l2cap_send_disconn_req(chan, reason);
700 l2cap_chan_del(chan, reason);
704 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
705 if (conn->hcon->type == ACL_LINK)
706 l2cap_chan_connect_reject(chan);
707 else if (conn->hcon->type == LE_LINK)
708 l2cap_chan_le_connect_reject(chan);
711 l2cap_chan_del(chan, reason);
716 l2cap_chan_del(chan, reason);
720 chan->ops->teardown(chan, 0);
724 EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement from the channel type,
 * PSM and security level.  SDP-bound channels at BT_SECURITY_LOW are
 * downgraded to BT_SECURITY_SDP (no authentication over SDP).
 */
726 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
728 switch (chan->chan_type) {
730 switch (chan->sec_level) {
731 case BT_SECURITY_HIGH:
732 case BT_SECURITY_FIPS:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
737 return HCI_AT_NO_BONDING;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
745 if (chan->sec_level == BT_SECURITY_HIGH ||
746 chan->sec_level == BT_SECURITY_FIPS)
747 return HCI_AT_NO_BONDING_MITM;
749 return HCI_AT_NO_BONDING;
751 case L2CAP_CHAN_CONN_ORIENTED:
752 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
753 if (chan->sec_level == BT_SECURITY_LOW)
754 chan->sec_level = BT_SECURITY_SDP;
756 if (chan->sec_level == BT_SECURITY_HIGH ||
757 chan->sec_level == BT_SECURITY_FIPS)
758 return HCI_AT_NO_BONDING_MITM;
760 return HCI_AT_NO_BONDING;
/* Default (e.g. raw channels): general bonding by security level. */
764 switch (chan->sec_level) {
765 case BT_SECURITY_HIGH:
766 case BT_SECURITY_FIPS:
767 return HCI_AT_GENERAL_BONDING_MITM;
768 case BT_SECURITY_MEDIUM:
769 return HCI_AT_GENERAL_BONDING;
771 return HCI_AT_NO_BONDING;
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan *chan)
780 struct l2cap_conn *conn = chan->conn;
/* LE links use SMP; BR/EDR use HCI security with the derived
 * authentication type.
 */
783 if (conn->hcon->type == LE_LINK)
784 return smp_conn_security(conn->hcon, chan->sec_level);
786 auth_type = l2cap_get_auth_type(chan);
788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range.
 */
791 static u8 l2cap_get_ident(struct l2cap_conn *conn)
795 /* Get next available identificator.
796 * 1 - 128 are used by kernel.
797 * 129 - 199 are reserved.
798 * 200 - 254 are used by utilities like l2ping, etc.
801 spin_lock(&conn->lock);
803 if (++conn->tx_ident > 128)
808 spin_unlock(&conn->lock);
/* Build and transmit a signalling command at maximum priority over the
 * connection's HCI channel, non-flushable when the controller
 * supports it.
 */
813 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
816 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
819 BT_DBG("code 0x%2.2x", code);
824 if (lmp_no_flush_capable(conn->hcon->hdev))
825 flags = ACL_START_NO_FLUSH;
829 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
830 skb->priority = HCI_PRIO_MAX;
832 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than STABLE or WAIT_PREPARE).
 */
835 static bool __chan_is_moving(struct l2cap_chan *chan)
837 return chan->move_state != L2CAP_MOVE_STABLE &&
838 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb: over the high-speed (AMP) channel when one is
 * attached and not mid-move, otherwise over the ACL link, marking the
 * frame non-flushable unless FLAG_FLUSHABLE is set.
 */
841 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
843 struct hci_conn *hcon = chan->conn->hcon;
846 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
849 if (chan->hs_hcon && !__chan_is_moving(chan)) {
851 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
858 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
859 lmp_no_flush_capable(hcon->hdev))
860 flags = ACL_START_NO_FLUSH;
864 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
865 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced ERTM control field into l2cap_ctrl.
 * S-frames carry poll/supervise bits; I-frames carry SAR/txseq.
 */
868 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
870 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
871 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
873 if (enh & L2CAP_CTRL_FRAME_TYPE) {
876 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
877 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
884 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
885 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
892 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
894 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
897 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
900 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
901 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
908 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
909 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Dispatch on FLAG_EXT_CTRL and strip the control field from the skb. */
916 static inline void __unpack_control(struct l2cap_chan *chan,
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
920 __unpack_extended_control(get_unaligned_le32(skb->data),
921 &bt_cb(skb)->control);
922 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
924 __unpack_enhanced_control(get_unaligned_le16(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode l2cap_ctrl into the 32-bit extended control field — the exact
 * inverse of __unpack_extended_control().
 */
930 static u32 __pack_extended_control(struct l2cap_ctrl *control)
934 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
935 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
937 if (control->sframe) {
938 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
939 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
940 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
942 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
943 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* 16-bit enhanced control field encoding. */
949 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
953 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
954 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
956 if (control->sframe) {
957 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
958 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
959 packed |= L2CAP_CTRL_FRAME_TYPE;
961 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
962 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field directly after the basic L2CAP
 * header in an already-built skb.
 */
968 static inline void __pack_control(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control,
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
973 put_unaligned_le32(__pack_extended_control(control),
974 skb->data + L2CAP_HDR_SIZE);
976 put_unaligned_le16(__pack_enhanced_control(control),
977 skb->data + L2CAP_HDR_SIZE);
/* ERTM header size depends on whether extended control is in use. */
981 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
983 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
984 return L2CAP_EXT_HDR_SIZE;
986 return L2CAP_ENH_HDR_SIZE;
/* Build a complete S-frame PDU: basic header, (enhanced or extended)
 * control field, and optional CRC16 FCS over the whole frame.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
989 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
993 struct l2cap_hdr *lh;
994 int hlen = __ertm_hdr_size(chan);
996 if (chan->fcs == L2CAP_FCS_CRC16)
997 hlen += L2CAP_FCS_SIZE;
999 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1002 return ERR_PTR(-ENOMEM);
1004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1005 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1006 lh->cid = cpu_to_le16(chan->dcid);
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1009 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1011 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1013 if (chan->fcs == L2CAP_FCS_CRC16) {
1014 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1015 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1018 skb->priority = HCI_PRIO_MAX;
/* Send an ERTM supervisory frame.  Updates RNR bookkeeping, records
 * the acknowledged sequence for non-SREJ frames (cancelling the
 * pending ack timer), then packs and transmits the control field.
 * Skipped while an AMP channel move is in progress.
 */
1022 static void l2cap_send_sframe(struct l2cap_chan *chan,
1023 struct l2cap_ctrl *control)
1025 struct sk_buff *skb;
1028 BT_DBG("chan %p, control %p", chan, control);
1030 if (!control->sframe)
1033 if (__chan_is_moving(chan))
/* Piggyback a pending F-bit when one is owed to the peer. */
1036 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1040 if (control->super == L2CAP_SUPER_RR)
1041 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1042 else if (control->super == L2CAP_SUPER_RNR)
1043 set_bit(CONN_RNR_SENT, &chan->conn_state);
1045 if (control->super != L2CAP_SUPER_SREJ) {
1046 chan->last_acked_seq = control->reqseq;
1047 __clear_ack_timer(chan);
1050 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1051 control->final, control->poll, control->super);
1053 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1054 control_field = __pack_extended_control(control);
1056 control_field = __pack_enhanced_control(control);
1058 skb = l2cap_create_sframe_pdu(chan, control_field);
1060 l2cap_do_send(chan, skb);
/* Convenience wrapper: send RNR while locally busy, RR otherwise,
 * acknowledging up to buffer_seq.
 */
1063 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1065 struct l2cap_ctrl control;
1067 BT_DBG("chan %p, poll %d", chan, poll);
1069 memset(&control, 0, sizeof(control));
1071 control.poll = poll;
1073 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1074 control.super = L2CAP_SUPER_RNR;
1076 control.super = L2CAP_SUPER_RR;
1078 control.reqseq = chan->buffer_seq;
1079 l2cap_send_sframe(chan, &control);
/* True when no connect request is pending configuration. */
1082 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1084 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: high-speed
 * must be enabled, the peer must advertise the A2MP fixed channel,
 * and at least one non-BR/EDR AMP controller must be up locally.
 */
1087 static bool __amp_capable(struct l2cap_chan *chan)
1089 struct l2cap_conn *conn = chan->conn;
1090 struct hci_dev *hdev;
1091 bool amp_available = false;
1093 if (!conn->hs_enabled)
1096 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1099 read_lock(&hci_dev_list_lock);
1100 list_for_each_entry(hdev, &hci_dev_list, list) {
1101 if (hdev->amp_type != AMP_TYPE_BREDR &&
1102 test_bit(HCI_UP, &hdev->flags)) {
1103 amp_available = true;
1107 read_unlock(&hci_dev_list_lock);
1109 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1110 return amp_available;
1115 static bool l2cap_check_efs(struct l2cap_chan *chan)
1117 /* Check EFS parameters */
/* Send an L2CAP connect request for this channel, allocating a fresh
 * signalling ident and marking the connect as pending.
 */
1121 void l2cap_send_conn_req(struct l2cap_chan *chan)
1123 struct l2cap_conn *conn = chan->conn;
1124 struct l2cap_conn_req req;
1126 req.scid = cpu_to_le16(chan->scid);
1127 req.psm = chan->psm;
1129 chan->ident = l2cap_get_ident(conn);
1131 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1133 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant: create-channel request targeting a specific AMP
 * controller id.
 */
1136 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1138 struct l2cap_create_chan_req req;
1139 req.scid = cpu_to_le16(chan->scid);
1140 req.psm = chan->psm;
1141 req.amp_id = amp_id;
1143 chan->ident = l2cap_get_ident(chan->conn);
1145 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark every
 * queued frame as needing retransmission, reset SREJ/retrans state and
 * put RX/TX state machines into their move states.  No-op for
 * non-ERTM modes.
 */
1149 static void l2cap_move_setup(struct l2cap_chan *chan)
1151 struct sk_buff *skb;
1153 BT_DBG("chan %p", chan);
1155 if (chan->mode != L2CAP_MODE_ERTM)
1158 __clear_retrans_timer(chan);
1159 __clear_monitor_timer(chan);
1160 __clear_ack_timer(chan);
1162 chan->retry_count = 0;
1163 skb_queue_walk(&chan->tx_q, skb) {
1164 if (bt_cb(skb)->control.retries)
1165 bt_cb(skb)->control.retries = 1;
1170 chan->expected_tx_seq = chan->buffer_seq;
1172 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1173 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1174 l2cap_seq_list_clear(&chan->retrans_list);
1175 l2cap_seq_list_clear(&chan->srej_list);
1176 skb_queue_purge(&chan->srej_q);
1178 chan->tx_state = L2CAP_TX_STATE_XMIT;
1179 chan->rx_state = L2CAP_RX_STATE_MOVE;
1181 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to stable state and, for ERTM, resume
 * the RX state machine as initiator (poll for F-bit) or responder
 * (wait for P-bit).
 */
1184 static void l2cap_move_done(struct l2cap_chan *chan)
1186 u8 move_role = chan->move_role;
1187 BT_DBG("chan %p", chan);
1189 chan->move_state = L2CAP_MOVE_STABLE;
1190 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1192 if (chan->mode != L2CAP_MODE_ERTM)
1195 switch (move_role) {
1196 case L2CAP_MOVE_ROLE_INITIATOR:
1197 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1198 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1200 case L2CAP_MOVE_ROLE_RESPONDER:
1201 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark a channel fully connected: clear all conf flags and the channel
 * timer, suspend LE-flowctl channels that have no TX credits yet, then
 * notify the owner via ops->ready().
 */
1206 static void l2cap_chan_ready(struct l2cap_chan *chan)
1208 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1209 chan->conf_state = 0;
1210 __clear_chan_timer(chan);
1212 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1213 chan->ops->suspend(chan);
1215 chan->state = BT_CONNECTED;
1217 chan->ops->ready(chan);
/* Send an LE credit-based connection request; FLAG_LE_CONN_REQ_SENT
 * guards against sending it twice.
 */
1220 static void l2cap_le_connect(struct l2cap_chan *chan)
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_le_conn_req req;
1225 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1228 req.psm = chan->psm;
1229 req.scid = cpu_to_le16(chan->scid);
1230 req.mtu = cpu_to_le16(chan->imtu);
1231 req.mps = cpu_to_le16(chan->mps);
1232 req.credits = cpu_to_le16(chan->rx_credits);
1234 chan->ident = l2cap_get_ident(conn);
1236 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* LE start path: ensure SMP security first, then either mark the
 * channel ready or issue the LE connect request.
 * NOTE(review): branch structure is partially missing in this fragment.
 */
1240 static void l2cap_le_start(struct l2cap_chan *chan)
1242 struct l2cap_conn *conn = chan->conn;
1244 if (!smp_conn_security(conn->hcon, chan->sec_level))
1248 l2cap_chan_ready(chan);
1252 if (chan->state == BT_CONNECT)
1253 l2cap_le_connect(chan);
/* Choose the connect mechanism: AMP discovery, LE start, or a plain
 * BR/EDR connect request.
 */
1256 static void l2cap_start_connection(struct l2cap_chan *chan)
1258 if (__amp_capable(chan)) {
1259 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1260 a2mp_discover_amp(chan);
1261 } else if (chan->conn->hcon->type == LE_LINK) {
1262 l2cap_le_start(chan);
1264 l2cap_send_conn_req(chan);
/* Kick off channel establishment.  On BR/EDR the information request
 * (feature mask exchange) must complete before connecting; if it has
 * not been sent yet, send it and arm the info timer.
 */
1268 static void l2cap_do_start(struct l2cap_chan *chan)
1270 struct l2cap_conn *conn = chan->conn;
1272 if (conn->hcon->type == LE_LINK) {
1273 l2cap_le_start(chan);
1277 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1278 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1281 if (l2cap_chan_check_security(chan) &&
1282 __l2cap_no_conn_pending(chan)) {
1283 l2cap_start_connection(chan);
1286 struct l2cap_info_req req;
1287 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1289 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1290 conn->info_ident = l2cap_get_ident(conn);
1292 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1294 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* A mode is supported when both the local feature mask and the peer's
 * advertised feature mask include it.
 */
1299 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1301 u32 local_feat_mask = l2cap_feat_mask;
1303 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1306 case L2CAP_MODE_ERTM:
1307 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1308 case L2CAP_MODE_STREAMING:
1309 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request (skipped for A2MP channels, which only
 * change state) and report the error to the channel owner.
 */
1315 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1317 struct l2cap_conn *conn = chan->conn;
1318 struct l2cap_disconn_req req;
1323 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1324 __clear_retrans_timer(chan);
1325 __clear_monitor_timer(chan);
1326 __clear_ack_timer(chan);
1329 if (chan->scid == L2CAP_CID_A2MP) {
1330 l2cap_state_change(chan, BT_DISCONN);
1334 req.dcid = cpu_to_le16(chan->dcid);
1335 req.scid = cpu_to_le16(chan->scid);
1336 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1339 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1342 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and advance its
 * state machine: BT_CONNECT channels are (re)started or closed if
 * their mode is unsupported; BT_CONNECT2 channels get a connect
 * response (pending/authorization/success) and, on success, the first
 * configuration request.
 */
1343 static void l2cap_conn_start(struct l2cap_conn *conn)
1345 struct l2cap_chan *chan, *tmp;
1347 BT_DBG("conn %p", conn);
1349 mutex_lock(&conn->chan_lock);
1351 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1352 l2cap_chan_lock(chan);
1354 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1355 l2cap_chan_unlock(chan);
1359 if (chan->state == BT_CONNECT) {
1360 if (!l2cap_chan_check_security(chan) ||
1361 !__l2cap_no_conn_pending(chan)) {
1362 l2cap_chan_unlock(chan);
/* Close channels whose configured mode the peer cannot do
 * and which may not fall back (state-2 device).
 */
1366 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1367 && test_bit(CONF_STATE2_DEVICE,
1368 &chan->conf_state)) {
1369 l2cap_chan_close(chan, ECONNRESET);
1370 l2cap_chan_unlock(chan);
1374 l2cap_start_connection(chan);
1376 } else if (chan->state == BT_CONNECT2) {
1377 struct l2cap_conn_rsp rsp;
1379 rsp.scid = cpu_to_le16(chan->dcid);
1380 rsp.dcid = cpu_to_le16(chan->scid);
1382 if (l2cap_chan_check_security(chan)) {
1383 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1386 chan->ops->defer(chan);
1389 l2cap_state_change(chan, BT_CONFIG);
1390 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1391 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1394 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1395 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1398 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1401 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1402 rsp.result != L2CAP_CR_SUCCESS) {
1403 l2cap_chan_unlock(chan);
1407 set_bit(CONF_REQ_SENT, &chan->conf_state);
1408 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1409 l2cap_build_conf_req(chan, buf), buf);
1410 chan->num_conf_req++;
1413 l2cap_chan_unlock(chan);
1416 mutex_unlock(&conn->chan_lock);
1419 /* Find socket with cid and source/destination bdaddr.
1420 * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match found during the scan is returned.
 * NOTE(review): the fallback assignment to c1 and final return are not
 * visible in this fragment.
 */
1422 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1426 struct l2cap_chan *c, *c1 = NULL;
1428 read_lock(&chan_list_lock);
1430 list_for_each_entry(c, &chan_list, global_l) {
1431 if (state && c->state != state)
1434 if (c->scid == cid) {
1435 int src_match, dst_match;
1436 int src_any, dst_any;
1439 src_match = !bacmp(&c->src, src);
1440 dst_match = !bacmp(&c->dst, dst);
1441 if (src_match && dst_match) {
1442 read_unlock(&chan_list_lock);
1447 src_any = !bacmp(&c->src, BDADDR_ANY);
1448 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1449 if ((src_match && dst_any) || (src_any && dst_match) ||
1450 (src_any && dst_any))
1455 read_unlock(&chan_list_lock);
/* LE link came up: if an ATT server socket is listening and the peer is not
 * blacklisted, spawn a new channel from it and add it to the connection.
 * Also requests a connection-parameter update on slave links whose interval
 * is outside the configured range.
 * NOTE(review): extract drops the NULL checks for pchan/chan and the error
 * labels; treat the flow below as abbreviated.
 */
1460 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1462 struct hci_conn *hcon = conn->hcon;
1463 struct hci_dev *hdev = hcon->hdev;
1464 struct l2cap_chan *chan, *pchan;
1469 /* Check if we have socket listening on cid */
1470 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1471 &hcon->src, &hcon->dst);
1475 /* Client ATT sockets should override the server one */
1476 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1479 dst_type = bdaddr_type(hcon, hcon->dst_type);
1481 /* If device is blocked, do not create a channel for it */
1482 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
1485 /* For LE slave connections, make sure the connection interval
1486 * is in the range of the minimum and maximum interval that has
1487 * been configured for this connection. If not, then trigger
1488 * the connection update procedure.
/* Only the slave may send L2CAP_CONN_PARAM_UPDATE_REQ */
1490 if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
1491 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1492 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1493 struct l2cap_conn_param_update_req req;
1495 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1496 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1497 req.latency = cpu_to_le16(hcon->le_conn_latency);
1498 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1500 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1501 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1504 l2cap_chan_lock(pchan);
/* Clone a child channel from the listening (parent) channel */
1506 chan = pchan->ops->new_connection(pchan);
1510 bacpy(&chan->src, &hcon->src);
1511 bacpy(&chan->dst, &hcon->dst);
1512 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1513 chan->dst_type = dst_type;
1515 __l2cap_chan_add(conn, chan);
1518 l2cap_chan_unlock(pchan);
/* Link-level connection is fully up: start security for outgoing LE pairing,
 * create the LE ATT channel if needed, then advance every existing channel
 * (LE start / mark ready / send connect request) and release queued RX work.
 */
1521 static void l2cap_conn_ready(struct l2cap_conn *conn)
1523 struct l2cap_chan *chan;
1524 struct hci_conn *hcon = conn->hcon;
1526 BT_DBG("conn %p", conn);
1528 /* For outgoing pairing which doesn't necessarily have an
1529 * associated socket (e.g. mgmt_pair_device).
1531 if (hcon->out && hcon->type == LE_LINK)
1532 smp_conn_security(hcon, hcon->pending_sec_level);
1534 mutex_lock(&conn->chan_lock);
1536 if (hcon->type == LE_LINK)
1537 l2cap_le_conn_ready(conn);
1539 list_for_each_entry(chan, &conn->chan_l, list) {
1541 l2cap_chan_lock(chan);
/* A2MP channel has its own setup path; skip it here */
1543 if (chan->scid == L2CAP_CID_A2MP) {
1544 l2cap_chan_unlock(chan);
1548 if (hcon->type == LE_LINK) {
1549 l2cap_le_start(chan);
1550 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1551 l2cap_chan_ready(chan);
1553 } else if (chan->state == BT_CONNECT) {
1554 l2cap_do_start(chan);
1557 l2cap_chan_unlock(chan);
1560 mutex_unlock(&conn->chan_lock);
/* Process any frames that arrived before the conn was ready */
1562 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1565 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that set FLAG_FORCE_RELIABLE,
 * so sockets insisting on reliable delivery see the failure.
 */
1566 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1568 struct l2cap_chan *chan;
1570 BT_DBG("conn %p", conn);
1572 mutex_lock(&conn->chan_lock);
1574 list_for_each_entry(chan, &conn->chan_l, list) {
1575 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1576 l2cap_chan_set_err(chan, err);
1579 mutex_unlock(&conn->chan_lock);
/* Delayed-work handler: the information request timed out, so mark the
 * feature-mask exchange as done (with whatever we have) and let pending
 * channels proceed via l2cap_conn_start().
 */
1582 static void l2cap_info_timeout(struct work_struct *work)
1584 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1588 conn->info_ident = 0;
1590 l2cap_conn_start(conn);
1595 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1596 * callback is called during registration. The ->remove callback is called
1597 * during unregistration.
1598 * An l2cap_user object can either be explicitly unregistered or when the
1599 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1600 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1601 * External modules must own a reference to the l2cap_conn object if they intend
1602 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1603 * any time if they don't.
/* Register an external l2cap_user on @conn. Fails if the user is already
 * linked or the conn has been torn down (conn->hchan == NULL). Calls the
 * user's ->probe() and, on success, links it into conn->users.
 * Returns 0 on success or a negative errno (error paths elided by extract).
 */
1606 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1608 struct hci_dev *hdev = conn->hcon->hdev;
1611 /* We need to check whether l2cap_conn is registered. If it is not, we
1612 * must not register the l2cap_user. l2cap_conn_del() unregisters
1613 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1614 * relies on the parent hci_conn object to be locked. This itself relies
1615 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean the user is already registered somewhere */
1620 if (user->list.next || user->list.prev) {
1625 /* conn->hchan is NULL after l2cap_conn_del() was called */
1631 ret = user->probe(conn, user);
1635 list_add(&user->list, &conn->users);
1639 hci_dev_unlock(hdev);
1642 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it, clear its list
 * pointers (the "not registered" marker checked by l2cap_register_user) and
 * invoke its ->remove() callback. Locking mirrors l2cap_register_user().
 */
1644 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1646 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers: never registered (or already removed) -- nothing to do */
1650 if (!user->list.next || !user->list.prev)
1653 list_del(&user->list);
1654 user->list.next = NULL;
1655 user->list.prev = NULL;
1656 user->remove(conn, user);
1659 hci_dev_unlock(hdev);
1661 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from @conn, calling each ->remove().
 * Used during connection teardown; clears list pointers so a later
 * l2cap_unregister_user() on the same user is a no-op.
 */
1663 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1665 struct l2cap_user *user;
1667 while (!list_empty(&conn->users)) {
1668 user = list_first_entry(&conn->users, struct l2cap_user, list);
1669 list_del(&user->list);
1670 user->list.next = NULL;
1671 user->list.prev = NULL;
1672 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon, delivering @err to every
 * channel. Frees pending RX state, unregisters users, deletes all channels
 * (hold/lock/del/unlock/close/put per channel to survive list removal),
 * cancels timers, detaches from the hci_conn and drops the conn reference.
 * Must be called with the parent hci_conn (and hci_dev) locked -- see the
 * comment in l2cap_register_user().
 */
1676 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1678 struct l2cap_conn *conn = hcon->l2cap_data;
1679 struct l2cap_chan *chan, *l;
1684 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1686 kfree_skb(conn->rx_skb);
1688 skb_queue_purge(&conn->pending_rx);
1690 /* We can not call flush_work(&conn->pending_rx_work) here since we
1691 * might block if we are running on a worker from the same workqueue
1692 * pending_rx_work is waiting on.
1694 if (work_pending(&conn->pending_rx_work))
1695 cancel_work_sync(&conn->pending_rx_work);
1697 l2cap_unregister_all_users(conn);
1699 mutex_lock(&conn->chan_lock);
1702 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref so the channel survives l2cap_chan_del() unlinking it */
1703 l2cap_chan_hold(chan);
1704 l2cap_chan_lock(chan);
1706 l2cap_chan_del(chan, err);
1708 l2cap_chan_unlock(chan);
/* ->close() is called unlocked; it may sleep / take the channel lock */
1710 chan->ops->close(chan);
1711 l2cap_chan_put(chan);
1714 mutex_unlock(&conn->chan_lock);
1716 hci_chan_del(conn->hchan);
1718 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1719 cancel_delayed_work_sync(&conn->info_timer);
1721 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1722 cancel_delayed_work_sync(&conn->security_timer);
1723 smp_chan_destroy(conn);
/* Detach before dropping the ref so nobody finds a dying conn via hcon */
1726 hcon->l2cap_data = NULL;
1728 l2cap_conn_put(conn);
/* Delayed-work handler: SMP security procedure timed out. If the SMP-pending
 * flag was still set, destroy the SMP context and drop the whole connection
 * with ETIMEDOUT. test_and_clear avoids racing a concurrent completion.
 */
1731 static void security_timeout(struct work_struct *work)
1733 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1734 security_timer.work);
1736 BT_DBG("conn %p", conn);
1738 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1739 smp_chan_destroy(conn);
1740 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* kref release callback: last reference to the l2cap_conn is gone.
 * Drops the hci_conn reference (kfree of conn elided by extract).
 */
1744 static void l2cap_conn_free(struct kref *ref)
1746 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1748 hci_conn_put(conn->hcon);
/* Take an additional reference on @conn. Pairs with l2cap_conn_put(). */
1752 void l2cap_conn_get(struct l2cap_conn *conn)
1754 kref_get(&conn->ref);
1756 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() on last put. */
1758 void l2cap_conn_put(struct l2cap_conn *conn)
1760 kref_put(&conn->ref, l2cap_conn_free);
1762 EXPORT_SYMBOL(l2cap_conn_put);
1764 /* ---- Socket interface ---- */
1766 /* Find socket with psm and source / destination bdaddr.
1767 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM, and additionally
 * filtered by link type: BR/EDR links only match BR/EDR source channels and
 * LE links exclude them. Exact src+dst match returns immediately; otherwise
 * the best wildcard (BDADDR_ANY) match is kept.
 * NOTE(review): extract is missing the c1 bookkeeping and final return.
 */
1769 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1774 struct l2cap_chan *c, *c1 = NULL;
1776 read_lock(&chan_list_lock);
1778 list_for_each_entry(c, &chan_list, global_l) {
1779 if (state && c->state != state)
1782 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1785 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1788 if (c->psm == psm) {
1789 int src_match, dst_match;
1790 int src_any, dst_any;
/* Exact match wins immediately */
1793 src_match = !bacmp(&c->src, src);
1794 dst_match = !bacmp(&c->dst, dst);
1795 if (src_match && dst_match) {
1796 read_unlock(&chan_list_lock);
/* Otherwise remember the closest wildcard match */
1801 src_any = !bacmp(&c->src, BDADDR_ANY);
1802 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1803 if ((src_match && dst_any) || (src_any && dst_match) ||
1804 (src_any && dst_any))
1809 read_unlock(&chan_list_lock);
/* ERTM monitor-timer expiry: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine. The early unlock/put path (condition elided by extract,
 * presumably a dead-channel check) bails out without raising the event.
 * The timer holds a channel reference; it is dropped on both paths.
 */
1814 static void l2cap_monitor_timeout(struct work_struct *work)
1816 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1817 monitor_timer.work);
1819 BT_DBG("chan %p", chan);
1821 l2cap_chan_lock(chan);
1824 l2cap_chan_unlock(chan);
1825 l2cap_chan_put(chan);
1829 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1831 l2cap_chan_unlock(chan);
1832 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine. Structure mirrors l2cap_monitor_timeout(): early bail-out
 * path (guard elided by extract), then event dispatch, unlock and ref drop.
 */
1835 static void l2cap_retrans_timeout(struct work_struct *work)
1837 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1838 retrans_timer.work);
1840 BT_DBG("chan %p", chan);
1842 l2cap_chan_lock(chan);
1845 l2cap_chan_unlock(chan);
1846 l2cap_chan_put(chan);
1850 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1851 l2cap_chan_unlock(chan);
1852 l2cap_chan_put(chan);
/* Streaming mode TX: append @skbs to the channel's tx queue and transmit
 * everything immediately (no acknowledgements or retransmission in this
 * mode). Stamps each I-frame's control fields and appends an FCS when
 * CRC16 is negotiated. Suppressed while an AMP channel move is in progress.
 */
1855 static void l2cap_streaming_send(struct l2cap_chan *chan,
1856 struct sk_buff_head *skbs)
1858 struct sk_buff *skb;
1859 struct l2cap_ctrl *control;
1861 BT_DBG("chan %p, skbs %p", chan, skbs);
1863 if (__chan_is_moving(chan))
1866 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1868 while (!skb_queue_empty(&chan->tx_q)) {
1870 skb = skb_dequeue(&chan->tx_q);
1872 bt_cb(skb)->control.retries = 1;
1873 control = &bt_cb(skb)->control;
/* Streaming mode never acks, so reqseq stays 0 */
1875 control->reqseq = 0;
1876 control->txseq = chan->next_tx_seq;
1878 __pack_control(chan, control, skb);
1880 if (chan->fcs == L2CAP_FCS_CRC16) {
1881 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1882 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1885 l2cap_do_send(chan, skb);
1887 BT_DBG("Sent txseq %u", control->txseq);
1889 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1890 chan->frames_sent++;
/* ERTM TX pump: transmit queued I-frames starting at tx_send_head while the
 * remote window has room and the state machine allows transmission.
 * Each frame keeps its original skb in tx_q for possible retransmission and
 * sends a clone. Returns the number of frames sent (per the final BT_DBG);
 * early-return error paths are elided by the extract.
 */
1894 static int l2cap_ertm_send(struct l2cap_chan *chan)
1896 struct sk_buff *skb, *tx_skb;
1897 struct l2cap_ctrl *control;
1900 BT_DBG("chan %p", chan);
1902 if (chan->state != BT_CONNECTED)
1905 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1908 if (__chan_is_moving(chan))
/* Stop when the queue is drained, the remote window fills, or we leave
 * the XMIT state (e.g. enter WAIT_F) */
1911 while (chan->tx_send_head &&
1912 chan->unacked_frames < chan->remote_tx_win &&
1913 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1915 skb = chan->tx_send_head;
1917 bt_cb(skb)->control.retries = 1;
1918 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is owed (body elided by extract) */
1920 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame carries an ack for everything we have received */
1923 control->reqseq = chan->buffer_seq;
1924 chan->last_acked_seq = chan->buffer_seq;
1925 control->txseq = chan->next_tx_seq;
1927 __pack_control(chan, control, skb);
1929 if (chan->fcs == L2CAP_FCS_CRC16) {
1930 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1931 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1934 /* Clone after data has been modified. Data is assumed to be
1935 read-only (for locking purposes) on cloned sk_buffs.
1937 tx_skb = skb_clone(skb, GFP_KERNEL);
1942 __set_retrans_timer(chan);
1944 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1945 chan->unacked_frames++;
1946 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent */
1949 if (skb_queue_is_last(&chan->tx_q, skb))
1950 chan->tx_send_head = NULL;
1952 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1954 l2cap_do_send(chan, tx_skb);
1955 BT_DBG("Sent txseq %u", control->txseq);
1958 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1959 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * For each frame: enforce the max_tx retry limit (disconnect on breach),
 * refresh reqseq/F-bit in a private copy of the control field, rewrite the
 * control word (and FCS if CRC16) in a writable skb, then send it.
 * skb_copy() is used when the original is cloned, since clones share
 * read-only data; otherwise a clone suffices.
 */
1966 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1966 struct l2cap_ctrl control;
1967 struct sk_buff *skb;
1968 struct sk_buff *tx_skb;
1971 BT_DBG("chan %p", chan);
1973 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1976 if (__chan_is_moving(chan))
1979 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1980 seq = l2cap_seq_list_pop(&chan->retrans_list);
1982 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq)
1984 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1989 bt_cb(skb)->control.retries++;
1990 control = bt_cb(skb)->control;
/* Give up and disconnect once the negotiated retry limit is exceeded */
1992 if (chan->max_tx != 0 &&
1993 bt_cb(skb)->control.retries > chan->max_tx) {
1994 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1995 l2cap_send_disconn_req(chan, ECONNRESET);
1996 l2cap_seq_list_clear(&chan->retrans_list);
/* Retransmission still acks the current receive position */
2000 control.reqseq = chan->buffer_seq;
2001 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2006 if (skb_cloned(skb)) {
2007 /* Cloned sk_buffs are read-only, so we need a
2010 tx_skb = skb_copy(skb, GFP_KERNEL);
2012 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure path: drop the whole retransmit batch */
2016 l2cap_seq_list_clear(&chan->retrans_list);
2020 /* Update skb contents */
2021 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2022 put_unaligned_le32(__pack_extended_control(&control),
2023 tx_skb->data + L2CAP_HDR_SIZE);
2025 put_unaligned_le16(__pack_enhanced_control(&control),
2026 tx_skb->data + L2CAP_HDR_SIZE);
2029 if (chan->fcs == L2CAP_FCS_CRC16) {
2030 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2031 put_unaligned_le16(fcs, skb_put(tx_skb,
2035 l2cap_do_send(chan, tx_skb);
2037 BT_DBG("Resent txseq %d", control.txseq);
2039 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame named by @control->reqseq (SREJ handling):
 * queue that one sequence number and run the resend engine.
 */
2043 static void l2cap_retransmit(struct l2cap_chan *chan,
2044 struct l2cap_ctrl *control)
2046 BT_DBG("chan %p, control %p", chan, control);
2048 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2049 l2cap_ertm_resend(chan);
/* REJ handling: retransmit every unacked frame from @control->reqseq up to
 * (but not including) tx_send_head. Walks tx_q to find the starting frame,
 * then appends each txseq to retrans_list and runs the resend engine.
 * An F-bit request is latched first (condition elided by extract,
 * presumably control->final / poll).
 */
2052 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2053 struct l2cap_ctrl *control)
2055 struct sk_buff *skb;
2057 BT_DBG("chan %p, control %p", chan, control);
2060 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Rebuild the retransmit list from scratch */
2062 l2cap_seq_list_clear(&chan->retrans_list);
2064 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2067 if (chan->unacked_frames) {
/* Locate the first frame at/after reqseq that is still unsent-ack'd */
2068 skb_queue_walk(&chan->tx_q, skb) {
2069 if (bt_cb(skb)->control.txseq == control->reqseq ||
2070 skb == chan->tx_send_head)
2074 skb_queue_walk_from(&chan->tx_q, skb) {
2075 if (skb == chan->tx_send_head)
2078 l2cap_seq_list_append(&chan->retrans_list,
2079 bt_cb(skb)->control.txseq);
2082 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. If we are locally busy, send RNR instead.
 * Otherwise try to piggyback the ack on outgoing I-frames; if none carried
 * it and the unacked receive window is at least 3/4 full, send an explicit
 * RR now, else (re)arm the ack timer to batch a later ack.
 */
2086 static void l2cap_send_ack(struct l2cap_chan *chan)
2088 struct l2cap_ctrl control;
2089 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2090 chan->last_acked_seq);
2093 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2094 chan, chan->last_acked_seq, chan->buffer_seq);
2096 memset(&control, 0, sizeof(control));
/* Local busy while receiving: tell the peer to stop with RNR */
2099 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2100 chan->rx_state == L2CAP_RX_STATE_RECV) {
2101 __clear_ack_timer(chan);
2102 control.super = L2CAP_SUPER_RNR;
2103 control.reqseq = chan->buffer_seq;
2104 l2cap_send_sframe(chan, &control);
2106 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2107 l2cap_ertm_send(chan);
2108 /* If any i-frames were sent, they included an ack */
2109 if (chan->buffer_seq == chan->last_acked_seq)
2113 /* Ack now if the window is 3/4ths full.
2114 * Calculate without mul or div
/* threshold = ack_win * 3; divided by 4 on the line elided by extract */
2116 threshold = chan->ack_win;
2117 threshold += threshold << 1;
2120 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2123 if (frames_to_ack >= threshold) {
2124 __clear_ack_timer(chan);
2125 control.super = L2CAP_SUPER_RR;
2126 control.reqseq = chan->buffer_seq;
2127 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack via the ack timer */
2132 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes
 * go into skb's linear area; the remainder is split into MTU-sized
 * continuation fragments chained on skb's frag_list. Returns 0 on success
 * or a negative errno (loop condition and some error returns elided by
 * extract).
 */
2136 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2137 struct msghdr *msg, int len,
2138 int count, struct sk_buff *skb)
2140 struct l2cap_conn *conn = chan->conn;
2141 struct sk_buff **frag;
2144 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2145 msg->msg_iov, count))
2151 /* Continuation fragments (no L2CAP header) */
2152 frag = &skb_shinfo(skb)->frag_list;
2154 struct sk_buff *tmp;
/* Each fragment is capped at the HCI connection MTU */
2156 count = min_t(unsigned int, conn->mtu, len);
2158 tmp = chan->ops->alloc_skb(chan, 0, count,
2159 msg->msg_flags & MSG_DONTWAIT);
2161 return PTR_ERR(tmp);
2165 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2166 msg->msg_iov, count))
/* Keep the head skb's accounting in sync with the fragment chain */
2172 skb->len += (*frag)->len;
2173 skb->data_len += (*frag)->len;
2175 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus a 2-byte
 * PSM field, followed by @len bytes of user data pulled from @msg.
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
2181 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2182 struct msghdr *msg, size_t len)
2184 struct l2cap_conn *conn = chan->conn;
2185 struct sk_buff *skb;
2186 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2187 struct l2cap_hdr *lh;
2189 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2190 __le16_to_cpu(chan->psm), len);
/* Linear part is limited by the HCI MTU; rest goes to frag_list */
2192 count = min_t(unsigned int, (conn->mtu - hlen), len);
2194 skb = chan->ops->alloc_skb(chan, hlen, count,
2195 msg->msg_flags & MSG_DONTWAIT);
2199 /* Create L2CAP header */
2200 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2201 lh->cid = cpu_to_le16(chan->dcid);
/* Payload length includes the PSM prefix */
2202 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2203 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2205 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2206 if (unlikely(err < 0)) {
2208 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by @len
 * bytes of user data from @msg. Returns the skb or an ERR_PTR.
 */
2213 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2214 struct msghdr *msg, size_t len)
2216 struct l2cap_conn *conn = chan->conn;
2217 struct sk_buff *skb;
2219 struct l2cap_hdr *lh;
2221 BT_DBG("chan %p len %zu", chan, len);
2223 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2225 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2226 msg->msg_flags & MSG_DONTWAIT);
2230 /* Create L2CAP header */
2231 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2232 lh->cid = cpu_to_le16(chan->dcid);
2233 lh->len = cpu_to_le16(len);
2235 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2236 if (unlikely(err < 0)) {
2238 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU. Header layout: L2CAP header, a
 * zeroed control field (16- or 32-bit per FLAG_EXT_CTRL, populated later by
 * __pack_control), an optional SDU-length field for the first segment of a
 * segmented SDU, then the payload; room for FCS is accounted in hlen.
 * Returns the skb (with control.fcs/retries pre-set) or an ERR_PTR.
 * NOTE(review): the @sdulen parameter and its guard lines are partially
 * elided by the extract.
 */
2243 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2244 struct msghdr *msg, size_t len,
2247 struct l2cap_conn *conn = chan->conn;
2248 struct sk_buff *skb;
2249 int err, count, hlen;
2250 struct l2cap_hdr *lh;
2252 BT_DBG("chan %p len %zu", chan, len);
2255 return ERR_PTR(-ENOTCONN);
2257 hlen = __ertm_hdr_size(chan);
2260 hlen += L2CAP_SDULEN_SIZE;
2262 if (chan->fcs == L2CAP_FCS_CRC16)
2263 hlen += L2CAP_FCS_SIZE;
2265 count = min_t(unsigned int, (conn->mtu - hlen), len);
2267 skb = chan->ops->alloc_skb(chan, hlen, count,
2268 msg->msg_flags & MSG_DONTWAIT);
2272 /* Create L2CAP header */
2273 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2274 lh->cid = cpu_to_le16(chan->dcid);
/* lh->len covers control + optional SDU length + payload (not the L2CAP hdr) */
2275 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2277 /* Control header is populated later */
2278 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2279 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2281 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2284 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2286 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2287 if (unlikely(err < 0)) {
2289 return ERR_PTR(err);
2292 bt_cb(skb)->control.fcs = chan->fcs;
2293 bt_cb(skb)->control.retries = 0;
/* Segment an SDU of @len bytes from @msg into ERTM/streaming I-frame PDUs
 * queued on @seg_queue. PDU size is bounded by the HCI MTU (and the BR/EDR
 * max payload), minus worst-case L2CAP overhead, and by the remote MPS.
 * SAR marking: UNSEGMENTED for a single PDU, else START (carrying the SDU
 * length) / CONTINUE / END. Returns 0 or a negative errno; on failure the
 * queue is purged. Loop structure is partially elided by the extract.
 */
2297 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2298 struct sk_buff_head *seg_queue,
2299 struct msghdr *msg, size_t len)
2301 struct sk_buff *skb;
2306 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2308 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2309 * so fragmented skbs are not used. The HCI layer's handling
2310 * of fragmented skbs is not compatible with ERTM's queueing.
2313 /* PDU size is derived from the HCI MTU */
2314 pdu_len = chan->conn->mtu;
2316 /* Constrain PDU size for BR/EDR connections */
2318 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2320 /* Adjust for largest possible L2CAP overhead. */
2322 pdu_len -= L2CAP_FCS_SIZE;
2324 pdu_len -= __ertm_hdr_size(chan);
2326 /* Remote device may have requested smaller PDUs */
2327 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2329 if (len <= pdu_len) {
2330 sar = L2CAP_SAR_UNSEGMENTED;
2334 sar = L2CAP_SAR_START;
/* The START PDU also carries the 2-byte SDU length field */
2336 pdu_len -= L2CAP_SDULEN_SIZE;
2340 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2343 __skb_queue_purge(seg_queue);
2344 return PTR_ERR(skb);
2347 bt_cb(skb)->control.sar = sar;
2348 __skb_queue_tail(seg_queue, skb);
/* After the START PDU, later segments regain the SDU-length bytes */
2353 pdu_len += L2CAP_SDULEN_SIZE;
2356 if (len <= pdu_len) {
2357 sar = L2CAP_SAR_END;
2360 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, an optional
 * 2-byte SDU length (first segment only, guard elided by extract), then
 * payload from @msg. Returns the skb or an ERR_PTR.
 */
2367 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2369 size_t len, u16 sdulen)
2371 struct l2cap_conn *conn = chan->conn;
2372 struct sk_buff *skb;
2373 int err, count, hlen;
2374 struct l2cap_hdr *lh;
2376 BT_DBG("chan %p len %zu", chan, len);
2379 return ERR_PTR(-ENOTCONN);
2381 hlen = L2CAP_HDR_SIZE;
2384 hlen += L2CAP_SDULEN_SIZE;
2386 count = min_t(unsigned int, (conn->mtu - hlen), len);
2388 skb = chan->ops->alloc_skb(chan, hlen, count,
2389 msg->msg_flags & MSG_DONTWAIT);
2393 /* Create L2CAP header */
2394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2395 lh->cid = cpu_to_le16(chan->dcid);
2396 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2399 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2401 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2402 if (unlikely(err < 0)) {
2404 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs queued on @seg_queue. PDU size
 * is MTU minus L2CAP header, capped by remote MPS; the first segment gives
 * up 2 bytes for the SDU length, later segments get them back.
 * Returns 0 or a negative errno (loop control elided by extract).
 */
2410 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2411 struct sk_buff_head *seg_queue,
2412 struct msghdr *msg, size_t len)
2414 struct sk_buff *skb;
2418 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2420 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2422 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First PDU carries the SDU length field */
2425 pdu_len -= L2CAP_SDULEN_SIZE;
2431 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2433 __skb_queue_purge(seg_queue);
2434 return PTR_ERR(skb);
2437 __skb_queue_tail(seg_queue, skb);
2443 pdu_len += L2CAP_SDULEN_SIZE;
/* Public TX entry point: send @len bytes from @msg on @chan, dispatching on
 * channel type and mode. Connectionless channels send a single G-frame;
 * LE_FLOWCTL segments and sends while credits last (suspending the socket
 * when they run out); BASIC sends one B-frame; ERTM/STREAMING segment first
 * and then hand the queue to the state machine / streaming sender.
 * Because the channel lock is dropped while allocating skbs, the state is
 * re-checked after each allocation. Returns bytes sent or negative errno.
 */
2450 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2452 struct sk_buff *skb;
2454 struct sk_buff_head seg_queue;
2459 /* Connectionless channel */
2460 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2461 skb = l2cap_create_connless_pdu(chan, msg, len);
2463 return PTR_ERR(skb);
2465 /* Channel lock is released before requesting new skb and then
2466 * reacquired thus we need to recheck channel state.
2468 if (chan->state != BT_CONNECTED) {
2473 l2cap_do_send(chan, skb);
2477 switch (chan->mode) {
2478 case L2CAP_MODE_LE_FLOWCTL:
2479 /* Check outgoing MTU */
2480 if (len > chan->omtu)
/* No credits: cannot send anything right now */
2483 if (!chan->tx_credits)
2486 __skb_queue_head_init(&seg_queue);
2488 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2490 if (chan->state != BT_CONNECTED) {
2491 __skb_queue_purge(&seg_queue);
2498 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send as many queued PDUs as the remote's credits allow */
2500 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2501 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2505 if (!chan->tx_credits)
2506 chan->ops->suspend(chan);
2512 case L2CAP_MODE_BASIC:
2513 /* Check outgoing MTU */
2514 if (len > chan->omtu)
2517 /* Create a basic PDU */
2518 skb = l2cap_create_basic_pdu(chan, msg, len);
2520 return PTR_ERR(skb);
2522 /* Channel lock is released before requesting new skb and then
2523 * reacquired thus we need to recheck channel state.
2525 if (chan->state != BT_CONNECTED) {
2530 l2cap_do_send(chan, skb);
2534 case L2CAP_MODE_ERTM:
2535 case L2CAP_MODE_STREAMING:
2536 /* Check outgoing MTU */
2537 if (len > chan->omtu) {
2542 __skb_queue_head_init(&seg_queue);
2544 /* Do segmentation before calling in to the state machine,
2545 * since it's possible to block while waiting for memory
2548 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2550 /* The channel could have been closed while segmenting,
2551 * check that it is still connected.
2553 if (chan->state != BT_CONNECTED) {
2554 __skb_queue_purge(&seg_queue);
2561 if (chan->mode == L2CAP_MODE_ERTM)
2562 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2564 l2cap_streaming_send(chan, &seg_queue);
2568 /* If the skbs were not queued for sending, they'll still be in
2569 * seg_queue and need to be purged.
2571 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode --
 * wording is misleading; value printed is the unsupported mode. */
2575 BT_DBG("bad state %1.1x", chan->mode);
2581 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every sequence number between expected_tx_seq and
 * @txseq that is not already buffered in srej_q, recording each on
 * srej_list; then advance expected_tx_seq past @txseq.
 */
2583 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2585 struct l2cap_ctrl control;
2588 BT_DBG("chan %p, txseq %u", chan, txseq);
2590 memset(&control, 0, sizeof(control));
2592 control.super = L2CAP_SUPER_SREJ;
2594 for (seq = chan->expected_tx_seq; seq != txseq;
2595 seq = __next_seq(chan, seq)) {
/* Frames already held in srej_q were received out of order; skip them */
2596 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2597 control.reqseq = seq;
2598 l2cap_send_sframe(chan, &control);
2599 l2cap_seq_list_append(&chan->srej_list, seq);
2603 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number,
 * if the SREJ list is non-empty. Used to re-poll for a still-missing frame.
 */
2606 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2608 struct l2cap_ctrl control;
2610 BT_DBG("chan %p", chan);
2612 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2615 memset(&control, 0, sizeof(control));
2617 control.super = L2CAP_SUPER_SREJ;
2618 control.reqseq = chan->srej_list.tail;
2619 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding sequence number up to (excluding)
 * @txseq. Each entry is popped, re-sent and re-appended, so the list is
 * rotated exactly once; the captured initial head terminates the pass.
 */
2622 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2624 struct l2cap_ctrl control;
2628 BT_DBG("chan %p, txseq %u", chan, txseq);
2630 memset(&control, 0, sizeof(control));
2632 control.super = L2CAP_SUPER_SREJ;
2634 /* Capture initial list head to allow only one pass through the list. */
2635 initial_head = chan->srej_list.head;
2638 seq = l2cap_seq_list_pop(&chan->srej_list);
2639 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2642 control.reqseq = seq;
2643 l2cap_send_sframe(chan, &control);
2644 l2cap_seq_list_append(&chan->srej_list, seq);
2645 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every tx_q frame with a txseq in
 * [expected_ack_seq, reqseq), decrement unacked_frames accordingly, record
 * the new expected_ack_seq and stop the retransmission timer once nothing
 * remains unacked.
 */
2648 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2650 struct sk_buff *acked_skb;
2653 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or ack repeats what we already processed */
2655 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2658 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2659 chan->expected_ack_seq, chan->unacked_frames);
2661 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2662 ackseq = __next_seq(chan, ackseq)) {
2664 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2666 skb_unlink(acked_skb, &chan->tx_q);
2667 kfree_skb(acked_skb);
2668 chan->unacked_frames--;
2672 chan->expected_ack_seq = reqseq;
2674 if (chan->unacked_frames == 0)
2675 __clear_retrans_timer(chan);
2677 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to buffer_seq,
 * discard the SREJ bookkeeping and any buffered out-of-order frames, and
 * return the RX state machine to plain RECV.
 */
2680 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2682 BT_DBG("chan %p", chan);
2684 chan->expected_tx_seq = chan->buffer_seq;
2685 l2cap_seq_list_clear(&chan->srej_list);
2686 skb_queue_purge(&chan->srej_q);
2687 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine handler for the XMIT state. Queues and immediately
 * pumps new data; enters/leaves LOCAL_BUSY; processes incoming acks; and on
 * an explicit poll or retransmission timeout sends RR/RNR with the P-bit,
 * arms the monitor timer and transitions to WAIT_F.
 * NOTE(review): break statements between cases are elided by the extract;
 * the case labels themselves are intact.
 */
2690 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2691 struct l2cap_ctrl *control,
2692 struct sk_buff_head *skbs, u8 event)
2694 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2698 case L2CAP_EV_DATA_REQUEST:
2699 if (chan->tx_send_head == NULL)
2700 chan->tx_send_head = skb_peek(skbs);
2702 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2703 l2cap_ertm_send(chan);
2705 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2706 BT_DBG("Enter LOCAL_BUSY");
2707 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2709 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2710 /* The SREJ_SENT state must be aborted if we are to
2711 * enter the LOCAL_BUSY state.
2713 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits RNR while LOCAL_BUSY is set */
2716 l2cap_send_ack(chan);
2719 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2720 BT_DBG("Exit LOCAL_BUSY");
2721 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we previously sent RNR, poll the peer with RR + P-bit and
 * wait for the F-bit in WAIT_F */
2723 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2724 struct l2cap_ctrl local_control;
2726 memset(&local_control, 0, sizeof(local_control));
2727 local_control.sframe = 1;
2728 local_control.super = L2CAP_SUPER_RR;
2729 local_control.poll = 1;
2730 local_control.reqseq = chan->buffer_seq;
2731 l2cap_send_sframe(chan, &local_control);
2733 chan->retry_count = 1;
2734 __set_monitor_timer(chan);
2735 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2738 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2739 l2cap_process_reqseq(chan, control->reqseq);
2741 case L2CAP_EV_EXPLICIT_POLL:
2742 l2cap_send_rr_or_rnr(chan, 1);
2743 chan->retry_count = 1;
2744 __set_monitor_timer(chan);
2745 __clear_ack_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2748 case L2CAP_EV_RETRANS_TO:
2749 l2cap_send_rr_or_rnr(chan, 1);
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2754 case L2CAP_EV_RECV_FBIT:
2755 /* Nothing to process */
/* ERTM TX state machine handler for the WAIT_F state (a poll is outstanding
 * and we are waiting for a frame with the F-bit). New data is queued but not
 * sent; LOCAL_BUSY transitions mirror the XMIT state; receiving the F-bit
 * returns to XMIT; EXPLICIT_POLL is ignored (a poll is already pending); a
 * monitor timeout re-polls up to max_tx times, then disconnects.
 * NOTE(review): break statements between cases are elided by the extract.
 */
2762 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2763 struct l2cap_ctrl *control,
2764 struct sk_buff_head *skbs, u8 event)
2766 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2770 case L2CAP_EV_DATA_REQUEST:
2771 if (chan->tx_send_head == NULL)
2772 chan->tx_send_head = skb_peek(skbs);
2773 /* Queue data, but don't send. */
2774 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2776 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2777 BT_DBG("Enter LOCAL_BUSY");
2778 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2780 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2781 /* The SREJ_SENT state must be aborted if we are to
2782 * enter the LOCAL_BUSY state.
2784 l2cap_abort_rx_srej_sent(chan);
2787 l2cap_send_ack(chan);
2790 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2791 BT_DBG("Exit LOCAL_BUSY");
2792 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2794 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2795 struct l2cap_ctrl local_control;
2796 memset(&local_control, 0, sizeof(local_control));
2797 local_control.sframe = 1;
2798 local_control.super = L2CAP_SUPER_RR;
2799 local_control.poll = 1;
2800 local_control.reqseq = chan->buffer_seq;
2801 l2cap_send_sframe(chan, &local_control);
2803 chan->retry_count = 1;
2804 __set_monitor_timer(chan);
2805 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2808 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2809 l2cap_process_reqseq(chan, control->reqseq);
2813 case L2CAP_EV_RECV_FBIT:
/* The awaited F-bit arrived: stop monitoring, resume retrans timer
 * if frames remain unacked, and go back to XMIT */
2814 if (control && control->final) {
2815 __clear_monitor_timer(chan);
2816 if (chan->unacked_frames > 0)
2817 __set_retrans_timer(chan);
2818 chan->retry_count = 0;
2819 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" is malformed -- the '%' is
 * misplaced; it should read "0x%2.2x". Debug-only output. */
2820 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2823 case L2CAP_EV_EXPLICIT_POLL:
/* Ignore: a poll is already outstanding */
2826 case L2CAP_EV_MONITOR_TO:
2827 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2828 l2cap_send_rr_or_rnr(chan, 1);
2829 __set_monitor_timer(chan);
2830 chan->retry_count++;
2832 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX state-machine event to the handler for the channel's
 * current tx_state (XMIT or WAIT_F). @control and @skbs may be NULL
 * depending on the event.
 */
2840 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2841 struct sk_buff_head *skbs, u8 event)
2843 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2844 chan, control, skbs, event, chan->tx_state);
2846 switch (chan->tx_state) {
2847 case L2CAP_TX_STATE_XMIT:
2848 l2cap_tx_state_xmit(chan, control, skbs, event);
2850 case L2CAP_TX_STATE_WAIT_F:
2851 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq (and F-bit) to the TX state machine
 * as an RECV_REQSEQ_AND_FBIT event so outstanding frames get acked.
 */
2859 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2860 struct l2cap_ctrl *control)
2862 BT_DBG("chan %p, control %p", chan, control);
2863 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the TX state machine
 * (RECV_FBIT event) -- used when reqseq must not be processed again.
 */
2866 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2867 struct l2cap_ctrl *control)
2869 BT_DBG("chan %p, control %p", chan, control);
2870 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2873 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every RAW-type channel on @conn except the one
 * the frame arrived on. Clone-allocation failure handling is elided by the
 * extract; ->recv() taking ownership failure frees the clone (presumably).
 */
2874 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2876 struct sk_buff *nskb;
2877 struct l2cap_chan *chan;
2879 BT_DBG("conn %p", conn);
2881 mutex_lock(&conn->chan_lock);
2883 list_for_each_entry(chan, &conn->chan_l, list) {
2884 if (chan->chan_type != L2CAP_CHAN_RAW)
2887 /* Don't send frame to the channel it came from */
2888 if (bt_cb(skb)->chan == chan)
2891 nskb = skb_clone(skb, GFP_KERNEL);
2894 if (chan->ops->recv(chan, nskb))
2898 mutex_unlock(&conn->chan_lock);
2901 /* ---- L2CAP signalling commands ---- */
2902 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2903 u8 ident, u16 dlen, void *data)
2905 struct sk_buff *skb, **frag;
2906 struct l2cap_cmd_hdr *cmd;
2907 struct l2cap_hdr *lh;
2910 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2911 conn, code, ident, dlen);
2913 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2916 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2917 count = min_t(unsigned int, conn->mtu, len);
2919 skb = bt_skb_alloc(count, GFP_KERNEL);
2923 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2924 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2926 if (conn->hcon->type == LE_LINK)
2927 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2929 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2931 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2934 cmd->len = cpu_to_le16(dlen);
2937 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2938 memcpy(skb_put(skb, count), data, count);
2944 /* Continuation fragments (no L2CAP header) */
2945 frag = &skb_shinfo(skb)->frag_list;
2947 count = min_t(unsigned int, conn->mtu, len);
2949 *frag = bt_skb_alloc(count, GFP_KERNEL);
2953 memcpy(skb_put(*frag, count), data, count);
2958 frag = &(*frag)->next;
2968 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2971 struct l2cap_conf_opt *opt = *ptr;
2974 len = L2CAP_CONF_OPT_SIZE + opt->len;
2982 *val = *((u8 *) opt->val);
2986 *val = get_unaligned_le16(opt->val);
2990 *val = get_unaligned_le32(opt->val);
2994 *val = (unsigned long) opt->val;
2998 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3002 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3004 struct l2cap_conf_opt *opt = *ptr;
3006 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3013 *((u8 *) opt->val) = val;
3017 put_unaligned_le16(val, opt->val);
3021 put_unaligned_le32(val, opt->val);
3025 memcpy(opt->val, (void *) val, len);
3029 *ptr += L2CAP_CONF_OPT_SIZE + len;
3032 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3034 struct l2cap_conf_efs efs;
3036 switch (chan->mode) {
3037 case L2CAP_MODE_ERTM:
3038 efs.id = chan->local_id;
3039 efs.stype = chan->local_stype;
3040 efs.msdu = cpu_to_le16(chan->local_msdu);
3041 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3042 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3043 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3046 case L2CAP_MODE_STREAMING:
3048 efs.stype = L2CAP_SERV_BESTEFFORT;
3049 efs.msdu = cpu_to_le16(chan->local_msdu);
3050 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3059 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3060 (unsigned long) &efs);
3063 static void l2cap_ack_timeout(struct work_struct *work)
3065 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3069 BT_DBG("chan %p", chan);
3071 l2cap_chan_lock(chan);
3073 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3074 chan->last_acked_seq);
3077 l2cap_send_rr_or_rnr(chan, 0);
3079 l2cap_chan_unlock(chan);
3080 l2cap_chan_put(chan);
3083 int l2cap_ertm_init(struct l2cap_chan *chan)
3087 chan->next_tx_seq = 0;
3088 chan->expected_tx_seq = 0;
3089 chan->expected_ack_seq = 0;
3090 chan->unacked_frames = 0;
3091 chan->buffer_seq = 0;
3092 chan->frames_sent = 0;
3093 chan->last_acked_seq = 0;
3095 chan->sdu_last_frag = NULL;
3098 skb_queue_head_init(&chan->tx_q);
3100 chan->local_amp_id = AMP_ID_BREDR;
3101 chan->move_id = AMP_ID_BREDR;
3102 chan->move_state = L2CAP_MOVE_STABLE;
3103 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3105 if (chan->mode != L2CAP_MODE_ERTM)
3108 chan->rx_state = L2CAP_RX_STATE_RECV;
3109 chan->tx_state = L2CAP_TX_STATE_XMIT;
3111 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3112 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3113 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3115 skb_queue_head_init(&chan->srej_q);
3117 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3121 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3123 l2cap_seq_list_free(&chan->srej_list);
3128 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3131 case L2CAP_MODE_STREAMING:
3132 case L2CAP_MODE_ERTM:
3133 if (l2cap_mode_supported(mode, remote_feat_mask))
3137 return L2CAP_MODE_BASIC;
3141 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3143 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3146 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3148 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3151 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3152 struct l2cap_conf_rfc *rfc)
3154 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3155 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3157 /* Class 1 devices have must have ERTM timeouts
3158 * exceeding the Link Supervision Timeout. The
3159 * default Link Supervision Timeout for AMP
3160 * controllers is 10 seconds.
3162 * Class 1 devices use 0xffffffff for their
3163 * best-effort flush timeout, so the clamping logic
3164 * will result in a timeout that meets the above
3165 * requirement. ERTM timeouts are 16-bit values, so
3166 * the maximum timeout is 65.535 seconds.
3169 /* Convert timeout to milliseconds and round */
3170 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3172 /* This is the recommended formula for class 2 devices
3173 * that start ERTM timers when packets are sent to the
3176 ertm_to = 3 * ertm_to + 500;
3178 if (ertm_to > 0xffff)
3181 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3182 rfc->monitor_timeout = rfc->retrans_timeout;
3184 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3185 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3189 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3191 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3192 __l2cap_ews_supported(chan->conn)) {
3193 /* use extended control field */
3194 set_bit(FLAG_EXT_CTRL, &chan->flags);
3195 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3197 chan->tx_win = min_t(u16, chan->tx_win,
3198 L2CAP_DEFAULT_TX_WINDOW);
3199 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3201 chan->ack_win = chan->tx_win;
3204 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3206 struct l2cap_conf_req *req = data;
3207 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3208 void *ptr = req->data;
3211 BT_DBG("chan %p", chan);
3213 if (chan->num_conf_req || chan->num_conf_rsp)
3216 switch (chan->mode) {
3217 case L2CAP_MODE_STREAMING:
3218 case L2CAP_MODE_ERTM:
3219 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3222 if (__l2cap_efs_supported(chan->conn))
3223 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3227 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3232 if (chan->imtu != L2CAP_DEFAULT_MTU)
3233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3235 switch (chan->mode) {
3236 case L2CAP_MODE_BASIC:
3240 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3241 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3244 rfc.mode = L2CAP_MODE_BASIC;
3246 rfc.max_transmit = 0;
3247 rfc.retrans_timeout = 0;
3248 rfc.monitor_timeout = 0;
3249 rfc.max_pdu_size = 0;
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3252 (unsigned long) &rfc);
3255 case L2CAP_MODE_ERTM:
3256 rfc.mode = L2CAP_MODE_ERTM;
3257 rfc.max_transmit = chan->max_tx;
3259 __l2cap_set_ertm_timeouts(chan, &rfc);
3261 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3262 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3264 rfc.max_pdu_size = cpu_to_le16(size);
3266 l2cap_txwin_setup(chan);
3268 rfc.txwin_size = min_t(u16, chan->tx_win,
3269 L2CAP_DEFAULT_TX_WINDOW);
3271 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3272 (unsigned long) &rfc);
3274 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3275 l2cap_add_opt_efs(&ptr, chan);
3277 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3278 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3281 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3282 if (chan->fcs == L2CAP_FCS_NONE ||
3283 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3284 chan->fcs = L2CAP_FCS_NONE;
3285 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3290 case L2CAP_MODE_STREAMING:
3291 l2cap_txwin_setup(chan);
3292 rfc.mode = L2CAP_MODE_STREAMING;
3294 rfc.max_transmit = 0;
3295 rfc.retrans_timeout = 0;
3296 rfc.monitor_timeout = 0;
3298 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3299 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3301 rfc.max_pdu_size = cpu_to_le16(size);
3303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3304 (unsigned long) &rfc);
3306 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3307 l2cap_add_opt_efs(&ptr, chan);
3309 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3310 if (chan->fcs == L2CAP_FCS_NONE ||
3311 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3312 chan->fcs = L2CAP_FCS_NONE;
3313 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3319 req->dcid = cpu_to_le16(chan->dcid);
3320 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored on the channel
 * and build the Configuration Response into data: accept, adjust or
 * reject each option and configure the channel's remote-side
 * parameters accordingly.
 *
 * Returns the length of the response, or -ECONNREFUSED when the
 * request cannot be reconciled with the local configuration.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires a high-speed link */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hints may be silently ignored; anything else
			 * unknown is echoed back as unaccepted.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the remote one retry with the corrected mode */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3539 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3540 void *data, u16 *result)
3542 struct l2cap_conf_req *req = data;
3543 void *ptr = req->data;
3546 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3547 struct l2cap_conf_efs efs;
3549 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3551 while (len >= L2CAP_CONF_OPT_SIZE) {
3552 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3555 case L2CAP_CONF_MTU:
3556 if (val < L2CAP_DEFAULT_MIN_MTU) {
3557 *result = L2CAP_CONF_UNACCEPT;
3558 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3564 case L2CAP_CONF_FLUSH_TO:
3565 chan->flush_to = val;
3566 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3570 case L2CAP_CONF_RFC:
3571 if (olen == sizeof(rfc))
3572 memcpy(&rfc, (void *)val, olen);
3574 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3575 rfc.mode != chan->mode)
3576 return -ECONNREFUSED;
3580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3581 sizeof(rfc), (unsigned long) &rfc);
3584 case L2CAP_CONF_EWS:
3585 chan->ack_win = min_t(u16, val, chan->ack_win);
3586 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3590 case L2CAP_CONF_EFS:
3591 if (olen == sizeof(efs))
3592 memcpy(&efs, (void *)val, olen);
3594 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3595 efs.stype != L2CAP_SERV_NOTRAFIC &&
3596 efs.stype != chan->local_stype)
3597 return -ECONNREFUSED;
3599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3600 (unsigned long) &efs);
3603 case L2CAP_CONF_FCS:
3604 if (*result == L2CAP_CONF_PENDING)
3605 if (val == L2CAP_FCS_NONE)
3606 set_bit(CONF_RECV_NO_FCS,
3612 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3613 return -ECONNREFUSED;
3615 chan->mode = rfc.mode;
3617 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3619 case L2CAP_MODE_ERTM:
3620 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3621 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3622 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3623 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3624 chan->ack_win = min_t(u16, chan->ack_win,
3627 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3628 chan->local_msdu = le16_to_cpu(efs.msdu);
3629 chan->local_sdu_itime =
3630 le32_to_cpu(efs.sdu_itime);
3631 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3632 chan->local_flush_to =
3633 le32_to_cpu(efs.flush_to);
3637 case L2CAP_MODE_STREAMING:
3638 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3642 req->dcid = cpu_to_le16(chan->dcid);
3643 req->flags = cpu_to_le16(0);
3648 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3649 u16 result, u16 flags)
3651 struct l2cap_conf_rsp *rsp = data;
3652 void *ptr = rsp->data;
3654 BT_DBG("chan %p", chan);
3656 rsp->scid = cpu_to_le16(chan->dcid);
3657 rsp->result = cpu_to_le16(result);
3658 rsp->flags = cpu_to_le16(flags);
3663 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3665 struct l2cap_le_conn_rsp rsp;
3666 struct l2cap_conn *conn = chan->conn;
3668 BT_DBG("chan %p", chan);
3670 rsp.dcid = cpu_to_le16(chan->scid);
3671 rsp.mtu = cpu_to_le16(chan->imtu);
3672 rsp.mps = cpu_to_le16(chan->mps);
3673 rsp.credits = cpu_to_le16(chan->rx_credits);
3674 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3676 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3680 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3682 struct l2cap_conn_rsp rsp;
3683 struct l2cap_conn *conn = chan->conn;
3687 rsp.scid = cpu_to_le16(chan->dcid);
3688 rsp.dcid = cpu_to_le16(chan->scid);
3689 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3690 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3693 rsp_code = L2CAP_CREATE_CHAN_RSP;
3695 rsp_code = L2CAP_CONN_RSP;
3697 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3699 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3701 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3704 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3705 l2cap_build_conf_req(chan, buf), buf);
3706 chan->num_conf_req++;
3709 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3713 /* Use sane default values in case a misbehaving remote device
3714 * did not send an RFC or extended window size option.
3716 u16 txwin_ext = chan->ack_win;
3717 struct l2cap_conf_rfc rfc = {
3719 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3720 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3721 .max_pdu_size = cpu_to_le16(chan->imtu),
3722 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3725 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3727 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3730 while (len >= L2CAP_CONF_OPT_SIZE) {
3731 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3734 case L2CAP_CONF_RFC:
3735 if (olen == sizeof(rfc))
3736 memcpy(&rfc, (void *)val, olen);
3738 case L2CAP_CONF_EWS:
3745 case L2CAP_MODE_ERTM:
3746 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3747 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3748 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3749 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3750 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3752 chan->ack_win = min_t(u16, chan->ack_win,
3755 case L2CAP_MODE_STREAMING:
3756 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3760 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3761 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3764 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3766 if (cmd_len < sizeof(*rej))
3769 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3772 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3773 cmd->ident == conn->info_ident) {
3774 cancel_delayed_work(&conn->info_timer);
3776 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3777 conn->info_ident = 0;
3779 l2cap_conn_start(conn);
3785 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3786 struct l2cap_cmd_hdr *cmd,
3787 u8 *data, u8 rsp_code, u8 amp_id)
3789 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3790 struct l2cap_conn_rsp rsp;
3791 struct l2cap_chan *chan = NULL, *pchan;
3792 int result, status = L2CAP_CS_NO_INFO;
3794 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3795 __le16 psm = req->psm;
3797 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3799 /* Check if we have socket listening on psm */
3800 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3801 &conn->hcon->dst, ACL_LINK);
3803 result = L2CAP_CR_BAD_PSM;
3807 mutex_lock(&conn->chan_lock);
3808 l2cap_chan_lock(pchan);
3810 /* Check if the ACL is secure enough (if not SDP) */
3811 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3812 !hci_conn_check_link_mode(conn->hcon)) {
3813 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3814 result = L2CAP_CR_SEC_BLOCK;
3818 result = L2CAP_CR_NO_MEM;
3820 /* Check if we already have channel with that dcid */
3821 if (__l2cap_get_chan_by_dcid(conn, scid))
3824 chan = pchan->ops->new_connection(pchan);
3828 /* For certain devices (ex: HID mouse), support for authentication,
3829 * pairing and bonding is optional. For such devices, inorder to avoid
3830 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3831 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3833 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3835 bacpy(&chan->src, &conn->hcon->src);
3836 bacpy(&chan->dst, &conn->hcon->dst);
3837 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3838 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3841 chan->local_amp_id = amp_id;
3843 __l2cap_chan_add(conn, chan);
3847 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3849 chan->ident = cmd->ident;
3851 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3852 if (l2cap_chan_check_security(chan)) {
3853 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3854 l2cap_state_change(chan, BT_CONNECT2);
3855 result = L2CAP_CR_PEND;
3856 status = L2CAP_CS_AUTHOR_PEND;
3857 chan->ops->defer(chan);
3859 /* Force pending result for AMP controllers.
3860 * The connection will succeed after the
3861 * physical link is up.
3863 if (amp_id == AMP_ID_BREDR) {
3864 l2cap_state_change(chan, BT_CONFIG);
3865 result = L2CAP_CR_SUCCESS;
3867 l2cap_state_change(chan, BT_CONNECT2);
3868 result = L2CAP_CR_PEND;
3870 status = L2CAP_CS_NO_INFO;
3873 l2cap_state_change(chan, BT_CONNECT2);
3874 result = L2CAP_CR_PEND;
3875 status = L2CAP_CS_AUTHEN_PEND;
3878 l2cap_state_change(chan, BT_CONNECT2);
3879 result = L2CAP_CR_PEND;
3880 status = L2CAP_CS_NO_INFO;
3884 l2cap_chan_unlock(pchan);
3885 mutex_unlock(&conn->chan_lock);
3888 rsp.scid = cpu_to_le16(scid);
3889 rsp.dcid = cpu_to_le16(dcid);
3890 rsp.result = cpu_to_le16(result);
3891 rsp.status = cpu_to_le16(status);
3892 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3894 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3895 struct l2cap_info_req info;
3896 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3898 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3899 conn->info_ident = l2cap_get_ident(conn);
3901 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3903 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3904 sizeof(info), &info);
3907 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3908 result == L2CAP_CR_SUCCESS) {
3910 set_bit(CONF_REQ_SENT, &chan->conf_state);
3911 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3912 l2cap_build_conf_req(chan, buf), buf);
3913 chan->num_conf_req++;
3919 static int l2cap_connect_req(struct l2cap_conn *conn,
3920 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3922 struct hci_dev *hdev = conn->hcon->hdev;
3923 struct hci_conn *hcon = conn->hcon;
3925 if (cmd_len < sizeof(struct l2cap_conn_req))
3929 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3930 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3931 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3932 hcon->dst_type, 0, NULL, 0,
3934 hci_dev_unlock(hdev);
3936 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3940 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3941 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3944 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3945 u16 scid, dcid, result, status;
3946 struct l2cap_chan *chan;
3950 if (cmd_len < sizeof(*rsp))
3953 scid = __le16_to_cpu(rsp->scid);
3954 dcid = __le16_to_cpu(rsp->dcid);
3955 result = __le16_to_cpu(rsp->result);
3956 status = __le16_to_cpu(rsp->status);
3958 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3959 dcid, scid, result, status);
3961 mutex_lock(&conn->chan_lock);
3964 chan = __l2cap_get_chan_by_scid(conn, scid);
3970 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3979 l2cap_chan_lock(chan);
3982 case L2CAP_CR_SUCCESS:
3983 l2cap_state_change(chan, BT_CONFIG);
3986 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3988 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3991 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3992 l2cap_build_conf_req(chan, req), req);
3993 chan->num_conf_req++;
3997 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4001 l2cap_chan_del(chan, ECONNREFUSED);
4005 l2cap_chan_unlock(chan);
4008 mutex_unlock(&conn->chan_lock);
4013 static inline void set_default_fcs(struct l2cap_chan *chan)
4015 /* FCS is enabled only in ERTM or streaming mode, if one or both
4018 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4019 chan->fcs = L2CAP_FCS_NONE;
4020 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4021 chan->fcs = L2CAP_FCS_CRC16;
4024 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4025 u8 ident, u16 flags)
4027 struct l2cap_conn *conn = chan->conn;
4029 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4032 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4033 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4035 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4036 l2cap_build_conf_rsp(chan, data,
4037 L2CAP_CONF_SUCCESS, flags), data);
4040 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4043 struct l2cap_cmd_rej_cid rej;
4045 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4046 rej.scid = __cpu_to_le16(scid);
4047 rej.dcid = __cpu_to_le16(dcid);
4049 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4052 static inline int l2cap_config_req(struct l2cap_conn *conn,
4053 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4056 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4059 struct l2cap_chan *chan;
4062 if (cmd_len < sizeof(*req))
4065 dcid = __le16_to_cpu(req->dcid);
4066 flags = __le16_to_cpu(req->flags);
4068 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4070 chan = l2cap_get_chan_by_scid(conn, dcid);
4072 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4076 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4077 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4082 /* Reject if config buffer is too small. */
4083 len = cmd_len - sizeof(*req);
4084 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4085 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4086 l2cap_build_conf_rsp(chan, rsp,
4087 L2CAP_CONF_REJECT, flags), rsp);
4092 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4093 chan->conf_len += len;
4095 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4096 /* Incomplete config. Send empty response. */
4097 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4098 l2cap_build_conf_rsp(chan, rsp,
4099 L2CAP_CONF_SUCCESS, flags), rsp);
4103 /* Complete config. */
4104 len = l2cap_parse_conf_req(chan, rsp);
4106 l2cap_send_disconn_req(chan, ECONNRESET);
4110 chan->ident = cmd->ident;
4111 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4112 chan->num_conf_rsp++;
4114 /* Reset config buffer. */
4117 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4120 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4121 set_default_fcs(chan);
4123 if (chan->mode == L2CAP_MODE_ERTM ||
4124 chan->mode == L2CAP_MODE_STREAMING)
4125 err = l2cap_ertm_init(chan);
4128 l2cap_send_disconn_req(chan, -err);
4130 l2cap_chan_ready(chan);
4135 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4137 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4138 l2cap_build_conf_req(chan, buf), buf);
4139 chan->num_conf_req++;
4142 /* Got Conf Rsp PENDING from remote side and asume we sent
4143 Conf Rsp PENDING in the code above */
4144 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4145 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4147 /* check compatibility */
4149 /* Send rsp for BR/EDR channel */
4151 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4153 chan->ident = cmd->ident;
4157 l2cap_chan_unlock(chan);
4161 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4162 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4165 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4166 u16 scid, flags, result;
4167 struct l2cap_chan *chan;
4168 int len = cmd_len - sizeof(*rsp);
4171 if (cmd_len < sizeof(*rsp))
4174 scid = __le16_to_cpu(rsp->scid);
4175 flags = __le16_to_cpu(rsp->flags);
4176 result = __le16_to_cpu(rsp->result);
4178 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4181 chan = l2cap_get_chan_by_scid(conn, scid);
4186 case L2CAP_CONF_SUCCESS:
4187 l2cap_conf_rfc_get(chan, rsp->data, len);
4188 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4191 case L2CAP_CONF_PENDING:
4192 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4194 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4197 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4200 l2cap_send_disconn_req(chan, ECONNRESET);
4204 if (!chan->hs_hcon) {
4205 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4208 if (l2cap_check_efs(chan)) {
4209 amp_create_logical_link(chan);
4210 chan->ident = cmd->ident;
4216 case L2CAP_CONF_UNACCEPT:
4217 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4220 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4221 l2cap_send_disconn_req(chan, ECONNRESET);
4225 /* throw out any old stored conf requests */
4226 result = L2CAP_CONF_SUCCESS;
4227 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4230 l2cap_send_disconn_req(chan, ECONNRESET);
4234 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4235 L2CAP_CONF_REQ, len, req);
4236 chan->num_conf_req++;
4237 if (result != L2CAP_CONF_SUCCESS)
4243 l2cap_chan_set_err(chan, ECONNRESET);
4245 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4246 l2cap_send_disconn_req(chan, ECONNRESET);
4250 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4253 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4255 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4256 set_default_fcs(chan);
4258 if (chan->mode == L2CAP_MODE_ERTM ||
4259 chan->mode == L2CAP_MODE_STREAMING)
4260 err = l2cap_ertm_init(chan);
4263 l2cap_send_disconn_req(chan, -err);
4265 l2cap_chan_ready(chan);
4269 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: look up the target
 * channel by our destination CID, echo a Disconnection Response, then
 * tear the channel down with ECONNRESET.
 * NOTE(review): this excerpt is line-sampled; returns/braces between the
 * numbered lines are elided — verify against upstream l2cap_core.c.
 */
4273 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4274 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4277 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4278 struct l2cap_disconn_rsp rsp;
4280 struct l2cap_chan *chan;
/* Reject malformed requests whose payload size is wrong. */
4282 if (cmd_len != sizeof(*req))
4285 scid = __le16_to_cpu(req->scid);
4286 dcid = __le16_to_cpu(req->dcid);
4288 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4290 mutex_lock(&conn->chan_lock);
/* The remote's dcid is a CID local to us, so look up by scid. */
4292 chan = __l2cap_get_chan_by_scid(conn, dcid);
4294 mutex_unlock(&conn->chan_lock);
4295 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4299 l2cap_chan_lock(chan);
4301 rsp.dcid = cpu_to_le16(chan->scid);
4302 rsp.scid = cpu_to_le16(chan->dcid);
4303 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4305 chan->ops->set_shutdown(chan);
/* Hold a reference across chan_del/close so ->close() sees a live chan. */
4307 l2cap_chan_hold(chan);
4308 l2cap_chan_del(chan, ECONNRESET);
4310 l2cap_chan_unlock(chan);
4312 chan->ops->close(chan);
4313 l2cap_chan_put(chan);
4315 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so remove the channel (error 0 = clean close).
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4320 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4321 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4324 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4326 struct l2cap_chan *chan;
/* Reject malformed responses whose payload size is wrong. */
4328 if (cmd_len != sizeof(*rsp))
4331 scid = __le16_to_cpu(rsp->scid);
4332 dcid = __le16_to_cpu(rsp->dcid);
4334 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4336 mutex_lock(&conn->chan_lock);
4338 chan = __l2cap_get_chan_by_scid(conn, scid);
4340 mutex_unlock(&conn->chan_lock);
4344 l2cap_chan_lock(chan);
/* Hold a reference across chan_del/close, mirroring disconnect_req. */
4346 l2cap_chan_hold(chan);
4347 l2cap_chan_del(chan, 0);
4349 l2cap_chan_unlock(chan);
4351 chan->ops->close(chan);
4352 l2cap_chan_put(chan);
4354 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Supported query types:
 * feature mask (extended with ERTM/streaming, plus extended flow/window
 * when high-speed is enabled) and the fixed-channel bitmap; anything
 * else is answered with L2CAP_IR_NOTSUPP.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4359 static inline int l2cap_information_req(struct l2cap_conn *conn,
4360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4363 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4366 if (cmd_len != sizeof(*req))
4369 type = __le16_to_cpu(req->type);
4371 BT_DBG("type 0x%4.4x", type);
4373 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add per-connection bits. */
4375 u32 feat_mask = l2cap_feat_mask;
4376 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4377 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4378 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4380 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised with high speed. */
4382 if (conn->hs_enabled)
4383 feat_mask |= L2CAP_FEAT_EXT_FLOW
4384 | L2CAP_FEAT_EXT_WINDOW;
4386 put_unaligned_le32(feat_mask, rsp->data);
4387 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4389 } else if (type == L2CAP_IT_FIXED_CHAN) {
4391 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* The A2MP fixed channel is only exposed when high speed is on. */
4393 if (conn->hs_enabled)
4394 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4396 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4398 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4399 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4400 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4401 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4404 struct l2cap_info_rsp rsp;
4405 rsp.type = cpu_to_le16(type);
4406 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4407 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response during connection
 * bring-up: record the remote feature mask (chaining a fixed-channel
 * query when supported) or the fixed-channel mask, then kick
 * l2cap_conn_start() once the info exchange is complete.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4414 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4415 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4418 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4421 if (cmd_len < sizeof(*rsp))
4424 type = __le16_to_cpu(rsp->type);
4425 result = __le16_to_cpu(rsp->result);
4427 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4429 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4430 if (cmd->ident != conn->info_ident ||
4431 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4434 cancel_delayed_work(&conn->info_timer);
/* On failure, give up on the info exchange and start channels anyway. */
4436 if (result != L2CAP_IR_SUCCESS) {
4437 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4438 conn->info_ident = 0;
4440 l2cap_conn_start(conn);
4446 case L2CAP_IT_FEAT_MASK:
4447 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If fixed channels are supported, follow up with a second query. */
4449 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4450 struct l2cap_info_req req;
4451 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4453 conn->info_ident = l2cap_get_ident(conn);
4455 l2cap_send_cmd(conn, conn->info_ident,
4456 L2CAP_INFO_REQ, sizeof(req), &req);
4458 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4459 conn->info_ident = 0;
4461 l2cap_conn_start(conn);
4465 case L2CAP_IT_FIXED_CHAN:
4466 conn->fixed_chan_mask = rsp->data[0];
4467 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4468 conn->info_ident = 0;
4470 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP). amp_id 0 means
 * a plain BR/EDR connect; otherwise the AMP controller id is validated
 * and the BR/EDR channel is associated with the AMP manager/logical
 * link. Invalid controllers get L2CAP_CR_BAD_AMP.
 * NOTE(review): excerpt is line-sampled; error paths/returns elided.
 */
4477 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4478 struct l2cap_cmd_hdr *cmd,
4479 u16 cmd_len, void *data)
4481 struct l2cap_create_chan_req *req = data;
4482 struct l2cap_create_chan_rsp rsp;
4483 struct l2cap_chan *chan;
4484 struct hci_dev *hdev;
4487 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when high speed is enabled. */
4490 if (!conn->hs_enabled)
4493 psm = le16_to_cpu(req->psm);
4494 scid = le16_to_cpu(req->scid);
4496 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4498 /* For controller id 0 make BR/EDR connection */
4499 if (req->amp_id == AMP_ID_BREDR) {
4500 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4505 /* Validate AMP controller id */
4506 hdev = hci_dev_get(req->amp_id);
4510 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4515 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4518 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4519 struct hci_conn *hs_hcon;
4521 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4525 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4530 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
/* Bind the new channel to the AMP manager and its high-speed link;
 * FCS is not used on AMP links.
 */
4532 mgr->bredr_chan = chan;
4533 chan->hs_hcon = hs_hcon;
4534 chan->fcs = L2CAP_FCS_NONE;
4535 conn->mtu = hdev->block_mtu;
/* Failure path: report a bad AMP controller id to the initiator. */
4544 rsp.scid = cpu_to_le16(scid);
4545 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4546 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4548 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan toward controller @dest_amp_id,
 * remembering the command ident for matching the response and arming
 * the move timeout timer.
 */
4554 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4556 struct l2cap_move_chan_req req;
4559 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4561 ident = l2cap_get_ident(chan->conn);
4562 chan->ident = ident;
4564 req.icid = cpu_to_le16(chan->scid);
4565 req.dest_amp_id = dest_amp_id;
4567 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4570 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, reusing the ident saved
 * from the request we are answering.
 */
4573 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4575 struct l2cap_move_chan_rsp rsp;
4577 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4579 rsp.icid = cpu_to_le16(chan->dcid);
4580 rsp.result = cpu_to_le16(result);
4582 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result under a fresh ident and arm
 * the move timeout while waiting for the confirm response.
 */
4586 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4588 struct l2cap_move_chan_cfm cfm;
4590 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4592 chan->ident = l2cap_get_ident(chan->conn);
4594 cfm.icid = cpu_to_le16(chan->scid);
4595 cfm.result = cpu_to_le16(result);
4597 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4600 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when
 * no channel object could be located for the move.
 */
4603 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4605 struct l2cap_move_chan_cfm cfm;
4607 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4609 cfm.icid = cpu_to_le16(icid);
4610 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4612 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response for @icid using the requester's
 * @ident, completing the move confirm handshake.
 */
4616 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4619 struct l2cap_move_chan_cfm_rsp rsp;
4621 BT_DBG("icid 0x%4.4x", icid);
4623 rsp.icid = cpu_to_le16(icid);
4624 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link. The actual link
 * release is still a placeholder (see comment below).
 */
4627 static void __release_logical_link(struct l2cap_chan *chan)
4629 chan->hs_hchan = NULL;
4630 chan->hs_hcon = NULL;
4632 /* Placeholder - release the logical link */
/* React to a failed logical link setup: disconnect channels that never
 * reached BT_CONNECTED, otherwise unwind an in-progress move according
 * to this side's move role.
 * NOTE(review): excerpt is line-sampled; break/return statements elided.
 */
4635 static void l2cap_logical_fail(struct l2cap_chan *chan)
4637 /* Logical link setup failed */
4638 if (chan->state != BT_CONNECTED) {
4639 /* Create channel failure, disconnect */
4640 l2cap_send_disconn_req(chan, ECONNRESET);
4644 switch (chan->move_role) {
4645 case L2CAP_MOVE_ROLE_RESPONDER:
4646 l2cap_move_done(chan);
4647 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4649 case L2CAP_MOVE_ROLE_INITIATOR:
4650 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4651 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4652 /* Remote has only sent pending or
4653 * success responses, clean up
4655 l2cap_move_done(chan);
4658 /* Other amp move states imply that the move
4659 * has already aborted
4661 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the logical link is up: attach the
 * hci_chan, send the EFS config response, and if configuration already
 * finished, initialise ERTM and mark the channel ready.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4666 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4667 struct hci_chan *hchan)
4669 struct l2cap_conf_rsp rsp;
4671 chan->hs_hchan = hchan;
4672 chan->hs_hcon->l2cap_data = chan->conn;
4674 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4676 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4679 set_default_fcs(chan);
4681 err = l2cap_ertm_init(chan);
/* -err converts the negative errno into a positive disconnect reason. */
4683 l2cap_send_disconn_req(chan, -err);
4685 l2cap_chan_ready(chan);
/* Advance the channel-move state machine once the logical link on the
 * destination controller is ready, issuing the confirm or response the
 * current move state calls for.
 * NOTE(review): excerpt is line-sampled; break statements elided.
 */
4689 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4690 struct hci_chan *hchan)
4692 chan->hs_hcon = hchan->conn;
4693 chan->hs_hcon->l2cap_data = chan->conn;
4695 BT_DBG("move_state %d", chan->move_state);
4697 switch (chan->move_state) {
4698 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4699 /* Move confirm will be sent after a success
4700 * response is received
4702 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4704 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Defer while locally busy; otherwise confirm (initiator) or
 * respond success (responder).
 */
4705 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4706 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4707 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4708 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4709 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4710 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4711 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4712 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4716 /* Move was not in expected state, free the channel */
4717 __release_logical_link(chan);
4719 chan->move_state = L2CAP_MOVE_STABLE;
4723 /* Call with chan locked */
/* Logical link confirmation entry point: on failure, unwind the move
 * and drop the link; on success, finish either a channel create (not
 * yet BT_CONNECTED and not on BR/EDR) or a channel move.
 * NOTE(review): excerpt is line-sampled; the status check and returns
 * between the numbered lines are elided.
 */
4724 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4727 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4730 l2cap_logical_fail(chan);
4731 __release_logical_link(chan);
4735 if (chan->state != BT_CONNECTED) {
4736 /* Ignore logical link if channel is on BR/EDR */
4737 if (chan->local_amp_id != AMP_ID_BREDR)
4738 l2cap_logical_finish_create(chan, hchan);
4740 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel as initiator. From BR/EDR (only if the policy
 * prefers AMP) the physical link must be prepared first; from an AMP
 * controller the move request toward BR/EDR (id 0) is sent directly.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4744 void l2cap_move_start(struct l2cap_chan *chan)
4746 BT_DBG("chan %p", chan);
4748 if (chan->local_amp_id == AMP_ID_BREDR) {
4749 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4751 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4752 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4753 /* Placeholder - start physical link setup */
4755 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4756 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4758 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: moving back to BR/EDR. */
4759 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after the physical link attempt. Outgoing
 * channels (BT_CONNECT) either proceed on the AMP or fall back to a
 * BR/EDR connect; incoming channels get a Create Channel Response and,
 * on success, start configuration.
 * NOTE(review): excerpt is line-sampled; returns/braces elided.
 */
4763 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4764 u8 local_amp_id, u8 remote_amp_id)
4766 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4767 local_amp_id, remote_amp_id);
/* AMP links do not use an FCS. */
4769 chan->fcs = L2CAP_FCS_NONE;
4771 /* Outgoing channel on AMP */
4772 if (chan->state == BT_CONNECT) {
4773 if (result == L2CAP_CR_SUCCESS) {
4774 chan->local_amp_id = local_amp_id;
4775 l2cap_send_create_chan_req(chan, remote_amp_id);
4777 /* Revert to BR/EDR connect */
4778 l2cap_send_conn_req(chan);
4784 /* Incoming channel on AMP */
4785 if (__l2cap_no_conn_pending(chan)) {
4786 struct l2cap_conn_rsp rsp;
4788 rsp.scid = cpu_to_le16(chan->dcid);
4789 rsp.dcid = cpu_to_le16(chan->scid);
4791 if (result == L2CAP_CR_SUCCESS) {
4792 /* Send successful response */
4793 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4796 /* Send negative response */
4797 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4798 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4801 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, move to BT_CONFIG and kick off the config exchange. */
4804 if (result == L2CAP_CR_SUCCESS) {
4805 l2cap_state_change(chan, BT_CONFIG);
4806 set_bit(CONF_REQ_SENT, &chan->conf_state);
4807 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4809 l2cap_build_conf_req(chan, buf), buf);
4810 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving, record the target
 * controller id, and send the Move Channel Request.
 */
4815 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4818 l2cap_move_setup(chan);
4819 chan->move_id = local_amp_id;
4820 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4822 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once the physical link outcome is
 * known. If the logical link is already connected, attach it and reply
 * success; otherwise wait for it — or reject if unavailable.
 * NOTE(review): excerpt is line-sampled; the hci_chan lookup is still a
 * placeholder and some branches are elided.
 */
4825 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4827 struct hci_chan *hchan = NULL;
4829 /* Placeholder - get hci_chan for logical link */
4832 if (hchan->state == BT_CONNECTED) {
4833 /* Logical link is ready to go */
4834 chan->hs_hcon = hchan->conn;
4835 chan->hs_hcon->l2cap_data = chan->conn;
4836 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4837 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4839 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4841 /* Wait for logical link to be ready */
4842 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4845 /* Logical link not available */
4846 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder, send a negative response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED); then reset the move
 * state to stable and restart ERTM transmission.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
4850 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4852 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4854 if (result == -EINVAL)
4855 rsp_result = L2CAP_MR_BAD_ID;
4857 rsp_result = L2CAP_MR_NOT_ALLOWED;
4859 l2cap_send_move_chan_rsp(chan, rsp_result);
4862 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4863 chan->move_state = L2CAP_MOVE_STABLE;
4865 /* Restart data transmission */
4866 l2cap_ertm_send(chan);
4869 /* Invoke with locked chan */
/* Physical link confirmation: bail out for dying channels, dispatch a
 * create continuation for channels not yet connected, cancel the move
 * on failure, otherwise continue per this side's move role.
 * NOTE(review): excerpt is line-sampled; returns/break statements elided.
 */
4870 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4872 u8 local_amp_id = chan->local_amp_id;
4873 u8 remote_amp_id = chan->remote_amp_id;
4875 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4876 chan, result, local_amp_id, remote_amp_id);
4878 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4879 l2cap_chan_unlock(chan);
4883 if (chan->state != BT_CONNECTED) {
4884 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4885 } else if (result != L2CAP_MR_SUCCESS) {
4886 l2cap_do_move_cancel(chan, result);
4888 switch (chan->move_role) {
4889 case L2CAP_MOVE_ROLE_INITIATOR:
4890 l2cap_do_move_initiate(chan, local_amp_id,
4893 case L2CAP_MOVE_ROLE_RESPONDER:
4894 l2cap_do_move_respond(chan, result);
4897 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request. Validates the channel mode,
 * policy, and destination controller, detects move collisions (larger
 * bd_addr wins), and answers with success/pending or an error result.
 * NOTE(review): excerpt is line-sampled; returns/else branches elided.
 */
4903 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4904 struct l2cap_cmd_hdr *cmd,
4905 u16 cmd_len, void *data)
4907 struct l2cap_move_chan_req *req = data;
4908 struct l2cap_move_chan_rsp rsp;
4909 struct l2cap_chan *chan;
4911 u16 result = L2CAP_MR_NOT_ALLOWED;
4913 if (cmd_len != sizeof(*req))
4916 icid = le16_to_cpu(req->icid);
4918 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves are only legal when high speed is enabled. */
4920 if (!conn->hs_enabled)
4923 chan = l2cap_get_chan_by_dcid(conn, icid);
/* No such channel: reply NOT_ALLOWED directly with the raw icid. */
4925 rsp.icid = cpu_to_le16(icid);
4926 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4927 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4932 chan->ident = cmd->ident;
/* Only dynamic, ERTM/streaming channels without a BR/EDR-only policy
 * may move.
 */
4934 if (chan->scid < L2CAP_CID_DYN_START ||
4935 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4936 (chan->mode != L2CAP_MODE_ERTM &&
4937 chan->mode != L2CAP_MODE_STREAMING)) {
4938 result = L2CAP_MR_NOT_ALLOWED;
4939 goto send_move_response;
4942 if (chan->local_amp_id == req->dest_amp_id) {
4943 result = L2CAP_MR_SAME_ID;
4944 goto send_move_response;
4947 if (req->dest_amp_id != AMP_ID_BREDR) {
4948 struct hci_dev *hdev;
4949 hdev = hci_dev_get(req->dest_amp_id);
4950 if (!hdev || hdev->dev_type != HCI_AMP ||
4951 !test_bit(HCI_UP, &hdev->flags)) {
4955 result = L2CAP_MR_BAD_ID;
4956 goto send_move_response;
4961 /* Detect a move collision. Only send a collision response
4962 * if this side has "lost", otherwise proceed with the move.
4963 * The winner has the larger bd_addr.
4965 if ((__chan_is_moving(chan) ||
4966 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4967 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4968 result = L2CAP_MR_COLLISION;
4969 goto send_move_response;
4972 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4973 l2cap_move_setup(chan);
4974 chan->move_id = req->dest_amp_id;
4977 if (req->dest_amp_id == AMP_ID_BREDR) {
4978 /* Moving to BR/EDR */
4979 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4980 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4981 result = L2CAP_MR_PEND;
4983 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4984 result = L2CAP_MR_SUCCESS;
4987 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4988 /* Placeholder - uncomment when amp functions are available */
4989 /*amp_accept_physical(chan, req->dest_amp_id);*/
4990 result = L2CAP_MR_PEND;
4994 l2cap_send_move_chan_rsp(chan, result);
4996 l2cap_chan_unlock(chan);
/* Continue an initiator-side move after a success/pending Move Channel
 * Response: advance the move state machine, attaching the logical link
 * and sending the confirm when both link and response permit it.
 * NOTE(review): excerpt is line-sampled; break/else statements elided.
 */
5001 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5003 struct l2cap_chan *chan;
5004 struct hci_chan *hchan = NULL;
5006 chan = l2cap_get_chan_by_scid(conn, icid)
/* Unknown channel: answer with an unconfirmed confirm for the icid. */;
5008 l2cap_send_move_chan_cfm_icid(conn, icid);
/* Re-arm the longer ERTX timeout while the move remains pending. */
5012 __clear_chan_timer(chan);
5013 if (result == L2CAP_MR_PEND)
5014 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5016 switch (chan->move_state) {
5017 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5018 /* Move confirm will be sent when logical link
5021 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5023 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5024 if (result == L2CAP_MR_PEND) {
5026 } else if (test_bit(CONN_LOCAL_BUSY,
5027 &chan->conn_state)) {
5028 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5030 /* Logical link is up or moving to BR/EDR,
5033 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5034 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5037 case L2CAP_MOVE_WAIT_RSP:
5039 if (result == L2CAP_MR_SUCCESS) {
5040 /* Remote is ready, send confirm immediately
5041 * after logical link is ready
5043 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5045 /* Both logical link and move success
5046 * are required to confirm
5048 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5051 /* Placeholder - get hci_chan for logical link */
5053 /* Logical link not available */
5054 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5058 /* If the logical link is not yet connected, do not
5059 * send confirmation.
5061 if (hchan->state != BT_CONNECTED)
5064 /* Logical link is already ready to go */
5066 chan->hs_hcon = hchan->conn;
5067 chan->hs_hcon->l2cap_data = chan->conn;
5069 if (result == L2CAP_MR_SUCCESS) {
5070 /* Can confirm now */
5071 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5073 /* Now only need move success
5076 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5079 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5082 /* Any other amp move state means the move failed. */
5083 chan->move_id = chan->local_amp_id;
5084 l2cap_move_done(chan);
5085 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5088 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response: on collision the initiator
 * flips to responder and lets the peer's move proceed; otherwise the
 * move is cancelled and an unconfirmed confirm is sent.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5091 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5094 struct l2cap_chan *chan;
5096 chan = l2cap_get_chan_by_ident(conn, ident);
5098 /* Could not locate channel, icid is best guess */
5099 l2cap_send_move_chan_cfm_icid(conn, icid);
5103 __clear_chan_timer(chan);
5105 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5106 if (result == L2CAP_MR_COLLISION) {
5107 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5109 /* Cleanup - cancel move */
5110 chan->move_id = chan->local_amp_id;
5111 l2cap_move_done(chan);
5115 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5117 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: dispatch to the continue
 * path on success/pending results, otherwise to the failure path.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5120 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5121 struct l2cap_cmd_hdr *cmd,
5122 u16 cmd_len, void *data)
5124 struct l2cap_move_chan_rsp *rsp = data;
5127 if (cmd_len != sizeof(*rsp))
5130 icid = le16_to_cpu(rsp->icid);
5131 result = le16_to_cpu(rsp->result);
5133 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5135 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5136 l2cap_move_continue(conn, icid, result);
5138 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit (or roll back) the
 * controller switch and always answer with a confirm response — the
 * spec requires one even for an unknown icid.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5143 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5144 struct l2cap_cmd_hdr *cmd,
5145 u16 cmd_len, void *data)
5147 struct l2cap_move_chan_cfm *cfm = data;
5148 struct l2cap_chan *chan;
5151 if (cmd_len != sizeof(*cfm))
5154 icid = le16_to_cpu(cfm->icid);
5155 result = le16_to_cpu(cfm->result);
5157 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5159 chan = l2cap_get_chan_by_dcid(conn, icid);
5161 /* Spec requires a response even if the icid was not found */
5162 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5166 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5167 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; drop the logical link
 * when landing back on BR/EDR.
 */
5168 chan->local_amp_id = chan->move_id;
5169 if (chan->local_amp_id == AMP_ID_BREDR)
5170 __release_logical_link(chan);
5172 chan->move_id = chan->local_amp_id;
5175 l2cap_move_done(chan);
5178 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5180 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: the final message
 * of a move — adopt the new controller id, release the logical link if
 * we moved back to BR/EDR, and mark the move done.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5185 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5186 struct l2cap_cmd_hdr *cmd,
5187 u16 cmd_len, void *data)
5189 struct l2cap_move_chan_cfm_rsp *rsp = data;
5190 struct l2cap_chan *chan;
5193 if (cmd_len != sizeof(*rsp))
5196 icid = le16_to_cpu(rsp->icid);
5198 BT_DBG("icid 0x%4.4x", icid);
5200 chan = l2cap_get_chan_by_scid(conn, icid);
5204 __clear_chan_timer(chan);
5206 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5207 chan->local_amp_id = chan->move_id;
5209 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5210 __release_logical_link(chan);
5212 l2cap_move_done(chan);
5215 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request (master role only):
 * validate the proposed interval/latency/timeout, send accept/reject,
 * and on accept apply the update and inform the management layer.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5220 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5221 struct l2cap_cmd_hdr *cmd,
5222 u16 cmd_len, u8 *data)
5224 struct hci_conn *hcon = conn->hcon;
5225 struct l2cap_conn_param_update_req *req;
5226 struct l2cap_conn_param_update_rsp rsp;
5227 u16 min, max, latency, to_multiplier;
/* Only the master may accept parameter update requests. */
5230 if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
5233 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5236 req = (struct l2cap_conn_param_update_req *) data;
5237 min = __le16_to_cpu(req->min);
5238 max = __le16_to_cpu(req->max);
5239 latency = __le16_to_cpu(req->latency);
5240 to_multiplier = __le16_to_cpu(req->to_multiplier);
5242 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5243 min, max, latency, to_multiplier);
5245 memset(&rsp, 0, sizeof(rsp));
5247 err = hci_check_conn_params(min, max, latency, to_multiplier);
5249 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5251 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5253 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: perform the HCI connection update and record the new
 * parameters with the management interface.
 */
5259 store_hint = hci_le_conn_update(hcon, min, max, latency,
5261 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5262 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response: locate the pending
 * channel by command ident, and on success record the peer's dcid,
 * MTU/MPS and initial credits; on failure delete the channel.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5270 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5271 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5274 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5275 u16 dcid, mtu, mps, credits, result;
5276 struct l2cap_chan *chan;
5279 if (cmd_len < sizeof(*rsp))
5282 dcid = __le16_to_cpu(rsp->dcid);
5283 mtu = __le16_to_cpu(rsp->mtu);
5284 mps = __le16_to_cpu(rsp->mps);
5285 credits = __le16_to_cpu(rsp->credits);
5286 result = __le16_to_cpu(rsp->result);
/* 23 is the LE minimum for both MTU and MPS; smaller is invalid. */
5288 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5291 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5292 dcid, mtu, mps, credits, result);
5294 mutex_lock(&conn->chan_lock);
5296 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5304 l2cap_chan_lock(chan);
5307 case L2CAP_CR_SUCCESS:
5311 chan->remote_mps = mps;
5312 chan->tx_credits = credits;
5313 l2cap_chan_ready(chan);
5317 l2cap_chan_del(chan, ECONNREFUSED);
5321 l2cap_chan_unlock(chan);
5324 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged.
 * NOTE(review): excerpt is line-sampled; break statements elided.
 */
5329 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5330 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5335 switch (cmd->code) {
5336 case L2CAP_COMMAND_REJ:
5337 l2cap_command_rej(conn, cmd, cmd_len, data);
5340 case L2CAP_CONN_REQ:
5341 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5344 case L2CAP_CONN_RSP:
5345 case L2CAP_CREATE_CHAN_RSP:
5346 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5349 case L2CAP_CONF_REQ:
5350 err = l2cap_config_req(conn, cmd, cmd_len, data);
5353 case L2CAP_CONF_RSP:
5354 l2cap_config_rsp(conn, cmd, cmd_len, data);
5357 case L2CAP_DISCONN_REQ:
5358 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5361 case L2CAP_DISCONN_RSP:
5362 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5365 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back to the sender. */
5366 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5369 case L2CAP_ECHO_RSP:
5372 case L2CAP_INFO_REQ:
5373 err = l2cap_information_req(conn, cmd, cmd_len, data);
5376 case L2CAP_INFO_RSP:
5377 l2cap_information_rsp(conn, cmd, cmd_len, data);
5380 case L2CAP_CREATE_CHAN_REQ:
5381 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5384 case L2CAP_MOVE_CHAN_REQ:
5385 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5388 case L2CAP_MOVE_CHAN_RSP:
5389 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5392 case L2CAP_MOVE_CHAN_CFM:
5393 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5396 case L2CAP_MOVE_CHAN_CFM_RSP:
5397 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5401 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listener for
 * the PSM, check security and duplicate dcid, create and initialise
 * the flow-control channel, then send the connection response (unless
 * setup is deferred, which replies PEND via the defer callback).
 * NOTE(review): excerpt is line-sampled; gotos/labels elided.
 */
5409 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5410 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5413 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5414 struct l2cap_le_conn_rsp rsp;
5415 struct l2cap_chan *chan, *pchan;
5416 u16 dcid, scid, credits, mtu, mps;
5420 if (cmd_len != sizeof(*req))
5423 scid = __le16_to_cpu(req->scid);
5424 mtu = __le16_to_cpu(req->mtu);
5425 mps = __le16_to_cpu(req->mps);
/* Enforce the LE minimum of 23 for both MTU and MPS. */
5430 if (mtu < 23 || mps < 23)
5433 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5436 /* Check if we have socket listening on psm */
5437 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5438 &conn->hcon->dst, LE_LINK);
5440 result = L2CAP_CR_BAD_PSM;
5445 mutex_lock(&conn->chan_lock);
5446 l2cap_chan_lock(pchan);
5448 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5449 result = L2CAP_CR_AUTHENTICATION;
5451 goto response_unlock;
5454 /* Check if we already have channel with that dcid */
5455 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5456 result = L2CAP_CR_NO_MEM;
5458 goto response_unlock;
5461 chan = pchan->ops->new_connection(pchan);
5463 result = L2CAP_CR_NO_MEM;
5464 goto response_unlock;
5467 l2cap_le_flowctl_init(chan);
5469 bacpy(&chan->src, &conn->hcon->src);
5470 bacpy(&chan->dst, &conn->hcon->dst);
5471 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5472 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5476 chan->remote_mps = mps;
5477 chan->tx_credits = __le16_to_cpu(req->credits);
5479 __l2cap_chan_add(conn, chan);
5481 credits = chan->rx_credits;
5483 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5485 chan->ident = cmd->ident;
/* Deferred setup: report PEND and let userspace accept later. */
5487 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5488 l2cap_state_change(chan, BT_CONNECT2);
5489 result = L2CAP_CR_PEND;
5490 chan->ops->defer(chan);
5492 l2cap_chan_ready(chan);
5493 result = L2CAP_CR_SUCCESS;
5497 l2cap_chan_unlock(pchan);
5498 mutex_unlock(&conn->chan_lock);
5500 if (result == L2CAP_CR_PEND)
5505 rsp.mtu = cpu_to_le16(chan->imtu);
5506 rsp.mps = cpu_to_le16(chan->mps);
5512 rsp.dcid = cpu_to_le16(dcid);
5513 rsp.credits = cpu_to_le16(credits);
5514 rsp.result = cpu_to_le16(result);
5516 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel (disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS),
 * then drain queued frames and resume the channel if credits remain.
 * NOTE(review): excerpt is line-sampled; returns elided.
 */
5521 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5522 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5525 struct l2cap_le_credits *pkt;
5526 struct l2cap_chan *chan;
5527 u16 cid, credits, max_credits;
5529 if (cmd_len != sizeof(*pkt))
5532 pkt = (struct l2cap_le_credits *) data;
5533 cid = __le16_to_cpu(pkt->cid);
5534 credits = __le16_to_cpu(pkt->credits);
5536 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5538 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Reject credit grants that would push tx_credits past the cap. */
5542 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5543 if (credits > max_credits) {
5544 BT_ERR("LE credits overflow");
5545 l2cap_send_disconn_req(chan, ECONNRESET);
5547 /* Return 0 so that we don't trigger an unnecessary
5548 * command reject packet.
5553 chan->tx_credits += credits;
/* Flush as many queued frames as the new credits allow. */
5555 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5556 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5560 if (chan->tx_credits)
5561 chan->ops->resume(chan);
5563 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: the peer rejected a command we sent, so
 * find the pending channel by ident and delete it as refused.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5568 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5569 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5572 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5573 struct l2cap_chan *chan;
5575 if (cmd_len < sizeof(*rej))
5578 mutex_lock(&conn->chan_lock);
5580 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5584 l2cap_chan_lock(chan);
5585 l2cap_chan_del(chan, ECONNREFUSED);
5586 l2cap_chan_unlock(chan);
5589 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler by opcode; unknown
 * opcodes are logged as errors.
 * NOTE(review): excerpt is line-sampled; break statements elided.
 */
5593 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5594 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5599 switch (cmd->code) {
5600 case L2CAP_COMMAND_REJ:
5601 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5604 case L2CAP_CONN_PARAM_UPDATE_REQ:
5605 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5608 case L2CAP_CONN_PARAM_UPDATE_RSP:
5611 case L2CAP_LE_CONN_RSP:
5612 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5615 case L2CAP_LE_CONN_REQ:
5616 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5619 case L2CAP_LE_CREDITS:
5620 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5623 case L2CAP_DISCONN_REQ:
5624 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5627 case L2CAP_DISCONN_RSP:
5628 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5632 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: LE carries one
 * command per PDU, so parse a single header, validate length/ident,
 * dispatch it, and send a Command Reject if the handler errored.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5640 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5641 struct sk_buff *skb)
5643 struct hci_conn *hcon = conn->hcon;
5644 struct l2cap_cmd_hdr *cmd;
5648 if (hcon->type != LE_LINK)
5651 if (skb->len < L2CAP_CMD_HDR_SIZE)
5654 cmd = (void *) skb->data;
5655 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5657 len = le16_to_cpu(cmd->len);
5659 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Ident 0 is reserved; a length mismatch means a corrupt PDU. */
5661 if (len != skb->len || !cmd->ident) {
5662 BT_DBG("corrupted command");
5666 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5668 struct l2cap_cmd_rej_unk rej;
5670 BT_ERR("Wrong link type (%d)", err);
5672 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5673 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel: a single
 * PDU may carry several commands, so iterate header by header,
 * dispatching each and sending a Command Reject on handler errors.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5681 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5682 struct sk_buff *skb)
5684 struct hci_conn *hcon = conn->hcon;
5685 u8 *data = skb->data;
5687 struct l2cap_cmd_hdr cmd;
/* Mirror the raw signaling traffic to any raw listeners. */
5690 l2cap_raw_recv(conn, skb);
5692 if (hcon->type != ACL_LINK)
5695 while (len >= L2CAP_CMD_HDR_SIZE) {
5697 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5698 data += L2CAP_CMD_HDR_SIZE;
5699 len -= L2CAP_CMD_HDR_SIZE;
5701 cmd_len = le16_to_cpu(cmd.len);
5703 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop on a command claiming more payload than remains, or ident 0. */
5706 if (cmd_len > len || !cmd.ident) {
5707 BT_DBG("corrupted command");
5711 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5713 struct l2cap_cmd_rej_unk rej;
5715 BT_ERR("Wrong link type (%d)", err);
5717 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5718 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame:
 * trim the FCS off the skb, then recompute the CRC over header+payload
 * (skb->data - hdr_size reaches back to the already-pulled header) and
 * compare against the received value.
 * NOTE(review): excerpt is line-sampled; returns elided.
 */
5730 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5732 u16 our_fcs, rcv_fcs;
5735 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5736 hdr_size = L2CAP_EXT_HDR_SIZE;
5738 hdr_size = L2CAP_ENH_HDR_SIZE;
5740 if (chan->fcs == L2CAP_FCS_CRC16) {
5741 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5742 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5743 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5745 if (our_fcs != rcv_fcs)
/* Send the ERTM frame that carries the F-bit acknowledgement: RNR when
 * locally busy, otherwise pending I-frames, falling back to an RR
 * S-frame when no frame has carried the F-bit yet.
 * NOTE(review): excerpt is line-sampled; some statements elided.
 */
5751 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5753 struct l2cap_ctrl control;
5755 BT_DBG("chan %p", chan);
5757 memset(&control, 0, sizeof(control));
5760 control.reqseq = chan->buffer_seq;
5761 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5763 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5764 control.super = L2CAP_SUPER_RNR;
5765 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy state; restart the retransmit
 * timer if frames are still unacknowledged.
 */
5768 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5769 chan->unacked_frames > 0)
5770 __set_retrans_timer(chan);
5772 /* Send pending iframes */
5773 l2cap_ertm_send(chan);
5775 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5776 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5777 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5780 control.super = L2CAP_SUPER_RR;
5781 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the list tail through
 * *last_frag so appends are O(1). Updates skb's aggregate length and
 * accounting fields to include the new fragment.
 */
5785 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5786 struct sk_buff **last_frag)
5788 /* skb->len reflects data in skb as well as all fragments
5789 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list; otherwise link after the tail */
5791 if (!skb_has_frag_list(skb))
5792 skb_shinfo(skb)->frag_list = new_frag;
5794 new_frag->next = NULL;
5796 (*last_frag)->next = new_frag;
5797 *last_frag = new_frag;
5799 skb->len += new_frag->len;
5800 skb->data_len += new_frag->len;
5801 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from ERTM/streaming I-frames,
 * driven by the SAR bits in the control field. Complete SDUs are passed
 * up via chan->ops->recv(); on error the partial SDU is discarded
 * (kfree_skb at the elided error label).
 */
5804 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5805 struct l2cap_ctrl *control)
5809 switch (control->sar) {
5810 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly */
5814 err = chan->ops->recv(chan, skb);
5817 case L2CAP_SAR_START:
/* First segment carries the total SDU length prefix */
5821 chan->sdu_len = get_unaligned_le16(skb->data);
5822 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation */
5824 if (chan->sdu_len > chan->imtu) {
5829 if (skb->len >= chan->sdu_len)
5833 chan->sdu_last_frag = skb;
5839 case L2CAP_SAR_CONTINUE:
5843 append_skb_frag(chan->sdu, skb,
5844 &chan->sdu_last_frag);
/* A CONTINUE segment must not complete (or overflow) the SDU */
5847 if (chan->sdu->len >= chan->sdu_len)
/* L2CAP_SAR_END (case label elided): append the final fragment */
5857 append_skb_frag(chan->sdu, skb,
5858 &chan->sdu_last_frag);
5861 if (chan->sdu->len != chan->sdu_len)
5864 err = chan->ops->recv(chan, chan->sdu);
5867 /* Reassembly complete */
5869 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state */
5877 kfree_skb(chan->sdu);
5879 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an AMP channel move changes the MTU.
 * Body elided in this view — appears to be a stub/placeholder here.
 */
5886 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (receive buffers full / cleared) into the
 * ERTM transmit state machine. No-op for non-ERTM channels.
 */
5892 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5896 if (chan->mode != L2CAP_MODE_ERTM)
5899 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5900 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-sequenced frames starting
 * at buffer_seq until a gap (missing txseq) is hit or local busy is set.
 * When the queue empties, fall back to the RECV state and ack the peer.
 */
5903 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5906 /* Pass sequential frames to l2cap_reassemble_sdu()
5907 * until a gap is encountered.
5910 BT_DBG("chan %p", chan);
5912 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5913 struct sk_buff *skb;
5914 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5915 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5917 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-order frame: consume it and advance buffer_seq */
5922 skb_unlink(skb, &chan->srej_q);
5923 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5924 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5929 if (skb_queue_empty(&chan->srej_q)) {
5930 chan->rx_state = L2CAP_RX_STATE_RECV;
5931 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: the peer is asking for selective
 * retransmission of the frame at control->reqseq. Disconnects on
 * protocol violations (reqseq for an unsent frame, retry limit hit).
 */
5937 static void l2cap_handle_srej(struct l2cap_chan *chan,
5938 struct l2cap_ctrl *control)
5940 struct sk_buff *skb;
5942 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq means the peer SREJ'd a frame we never sent */
5944 if (control->reqseq == chan->next_tx_seq) {
5945 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5946 l2cap_send_disconn_req(chan, ECONNRESET);
5950 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5953 BT_DBG("Seq %d not available for retransmission",
/* Give up if this frame already hit the configured retry limit */
5958 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5959 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5960 l2cap_send_disconn_req(chan, ECONNRESET);
5964 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5966 if (control->poll) {
5967 l2cap_pass_to_tx(chan, control);
/* P=1: our retransmission must carry the F-bit in response */
5969 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5970 l2cap_retransmit(chan, control);
5971 l2cap_ertm_send(chan);
5973 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5974 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5975 chan->srej_save_reqseq = control->reqseq;
5978 l2cap_pass_to_tx_fbit(chan, control);
5980 if (control->final) {
/* Only retransmit if this isn't the SREJ we already acted on */
5981 if (chan->srej_save_reqseq != control->reqseq ||
5982 !test_and_clear_bit(CONN_SREJ_ACT,
5984 l2cap_retransmit(chan, control);
5986 l2cap_retransmit(chan, control);
5987 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5988 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5989 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: the peer rejects everything from
 * control->reqseq onward, so retransmit all unacked frames from there.
 * Disconnects on invalid reqseq or when the retry limit is exceeded.
 */
5995 static void l2cap_handle_rej(struct l2cap_chan *chan,
5996 struct l2cap_ctrl *control)
5998 struct sk_buff *skb;
6000 BT_DBG("chan %p, control %p", chan, control);
/* REJ of a never-sent sequence number is a protocol violation */
6002 if (control->reqseq == chan->next_tx_seq) {
6003 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6004 l2cap_send_disconn_req(chan, ECONNRESET);
6008 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6010 if (chan->max_tx && skb &&
6011 bt_cb(skb)->control.retries >= chan->max_tx) {
6012 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6013 l2cap_send_disconn_req(chan, ECONNRESET);
6017 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6019 l2cap_pass_to_tx(chan, control);
6021 if (control->final) {
/* F=1: only retransmit if we haven't already acted on this REJ */
6022 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6023 l2cap_retransmit_all(chan, control);
6025 l2cap_retransmit_all(chan, control);
6026 l2cap_ertm_send(chan);
/* Remember we acted, so the later F-bit response isn't re-handled */
6027 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6028 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected, duplicate, unexpected (gap), or invalid — with SREJ-specific
 * sub-cases while in the SREJ_SENT state. The classification drives the
 * ERTM receive state machines below.
 */
6032 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6034 BT_DBG("chan %p, txseq %d", chan, txseq);
6036 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6037 chan->expected_tx_seq);
6039 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
/* Out-of-window while SREJs are outstanding */
6040 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6042 /* See notes below regarding "double poll" and
/* Small tx window: safe to ignore; large window: must disconnect */
6045 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6046 BT_DBG("Invalid/Ignore - after SREJ");
6047 return L2CAP_TXSEQ_INVALID_IGNORE;
6049 BT_DBG("Invalid - in window after SREJ sent");
6050 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: this is the retransmission we asked for */
6054 if (chan->srej_list.head == txseq) {
6055 BT_DBG("Expected SREJ");
6056 return L2CAP_TXSEQ_EXPECTED_SREJ;
6059 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6060 BT_DBG("Duplicate SREJ - txseq already stored");
6061 return L2CAP_TXSEQ_DUPLICATE_SREJ;
/* Requested via SREJ but arrived out of the expected order */
6064 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6065 BT_DBG("Unexpected SREJ - not requested");
6066 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6070 if (chan->expected_tx_seq == txseq) {
6071 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6073 BT_DBG("Invalid - txseq outside tx window");
6074 return L2CAP_TXSEQ_INVALID;
6077 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (modulo window) => already received */
6081 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6082 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6083 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6084 return L2CAP_TXSEQ_DUPLICATE;
6087 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6088 /* A source of invalid packets is a "double poll" condition,
6089 * where delays cause us to send multiple poll packets. If
6090 * the remote stack receives and processes both polls,
6091 * sequence numbers can wrap around in such a way that a
6092 * resent frame has a sequence number that looks like new data
6093 * with a sequence gap. This would trigger an erroneous SREJ
6096 * Fortunately, this is impossible with a tx window that's
6097 * less than half of the maximum sequence number, which allows
6098 * invalid frames to be safely ignored.
6100 * With tx window sizes greater than half of the tx window
6101 * maximum, the frame is invalid and cannot be ignored. This
6102 * causes a disconnect.
6105 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6106 BT_DBG("Invalid/Ignore - txseq outside tx window");
6107 return L2CAP_TXSEQ_INVALID_IGNORE;
6109 BT_DBG("Invalid - txseq outside tx window");
6110 return L2CAP_TXSEQ_INVALID;
/* In window, ahead of expected: frames were lost in between */
6113 BT_DBG("Unexpected - txseq indicates missing frames");
6114 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: RECV (normal) state handler.
 * Processes incoming I-frames and S-frames (RR/REJ/RNR/SREJ events),
 * switching to SREJ_SENT when a sequence gap is detected. Frames not
 * consumed (skb_in_use stays false) are freed at the elided exit path.
 */
6118 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6119 struct l2cap_ctrl *control,
6120 struct sk_buff *skb, u8 event)
6123 bool skb_in_use = false;
6125 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6129 case L2CAP_EV_RECV_IFRAME:
6130 switch (l2cap_classify_txseq(chan, control->txseq)) {
6131 case L2CAP_TXSEQ_EXPECTED:
6132 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop now, recover via SREJ after busy clears */
6134 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6135 BT_DBG("Busy, discarding expected seq %d",
6140 chan->expected_tx_seq = __next_seq(chan,
6143 chan->buffer_seq = chan->expected_tx_seq;
6146 err = l2cap_reassemble_sdu(chan, skb, control);
6150 if (control->final) {
/* F=1 without a pending REJ ack: treat as ack + resend */
6151 if (!test_and_clear_bit(CONN_REJ_ACT,
6152 &chan->conn_state)) {
6154 l2cap_retransmit_all(chan, control);
6155 l2cap_ertm_send(chan);
6159 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6160 l2cap_send_ack(chan);
6162 case L2CAP_TXSEQ_UNEXPECTED:
6163 l2cap_pass_to_tx(chan, control);
6165 /* Can't issue SREJ frames in the local busy state.
6166 * Drop this frame, it will be seen as missing
6167 * when local busy is exited.
6169 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6170 BT_DBG("Busy, discarding unexpected seq %d",
6175 /* There was a gap in the sequence, so an SREJ
6176 * must be sent for each missing frame. The
6177 * current frame is stored for later use.
6179 skb_queue_tail(&chan->srej_q, skb);
6181 BT_DBG("Queued %p (queue len %d)", skb,
6182 skb_queue_len(&chan->srej_q));
6184 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6185 l2cap_seq_list_clear(&chan->srej_list);
6186 l2cap_send_srej(chan, control->txseq);
/* Enter selective-reject recovery state */
6188 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6190 case L2CAP_TXSEQ_DUPLICATE:
6191 l2cap_pass_to_tx(chan, control);
6193 case L2CAP_TXSEQ_INVALID_IGNORE:
6195 case L2CAP_TXSEQ_INVALID:
6197 l2cap_send_disconn_req(chan, ECONNRESET);
6201 case L2CAP_EV_RECV_RR:
6202 l2cap_pass_to_tx(chan, control);
6203 if (control->final) {
6204 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit during an AMP channel move */
6206 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6207 !__chan_is_moving(chan)) {
6209 l2cap_retransmit_all(chan, control);
6212 l2cap_ertm_send(chan);
6213 } else if (control->poll) {
6214 l2cap_send_i_or_rr_or_rnr(chan);
6216 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6217 &chan->conn_state) &&
6218 chan->unacked_frames)
6219 __set_retrans_timer(chan);
6221 l2cap_ertm_send(chan);
6224 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop sending until it clears */
6225 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6226 l2cap_pass_to_tx(chan, control);
6227 if (control && control->poll) {
6228 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6229 l2cap_send_rr_or_rnr(chan, 0);
6231 __clear_retrans_timer(chan);
6232 l2cap_seq_list_clear(&chan->retrans_list);
6234 case L2CAP_EV_RECV_REJ:
6235 l2cap_handle_rej(chan, control);
6237 case L2CAP_EV_RECV_SREJ:
6238 l2cap_handle_srej(chan, control);
/* skb not queued/consumed above: free it here */
6244 if (skb && !skb_in_use) {
6245 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT state handler.
 * We have outstanding selective-reject requests; incoming I-frames are
 * queued in srej_q until the gaps fill, then delivered in order by
 * l2cap_rx_queued_iframes(). S-frame events are handled similarly to
 * RECV but interact with the SREJ list.
 */
6252 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6253 struct l2cap_ctrl *control,
6254 struct sk_buff *skb, u8 event)
6257 u16 txseq = control->txseq;
6258 bool skb_in_use = false;
6260 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6264 case L2CAP_EV_RECV_IFRAME:
6265 switch (l2cap_classify_txseq(chan, txseq)) {
6266 case L2CAP_TXSEQ_EXPECTED:
6267 /* Keep frame for reassembly later */
6268 l2cap_pass_to_tx(chan, control);
6269 skb_queue_tail(&chan->srej_q, skb);
6271 BT_DBG("Queued %p (queue len %d)", skb,
6272 skb_queue_len(&chan->srej_q));
6274 chan->expected_tx_seq = __next_seq(chan, txseq);
6276 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for: clear it from the SREJ list */
6277 l2cap_seq_list_pop(&chan->srej_list);
6279 l2cap_pass_to_tx(chan, control);
6280 skb_queue_tail(&chan->srej_q, skb);
6282 BT_DBG("Queued %p (queue len %d)", skb,
6283 skb_queue_len(&chan->srej_q));
/* Try delivering now-contiguous frames */
6285 err = l2cap_rx_queued_iframes(chan);
6290 case L2CAP_TXSEQ_UNEXPECTED:
6291 /* Got a frame that can't be reassembled yet.
6292 * Save it for later, and send SREJs to cover
6293 * the missing frames.
6295 skb_queue_tail(&chan->srej_q, skb);
6297 BT_DBG("Queued %p (queue len %d)", skb,
6298 skb_queue_len(&chan->srej_q));
6300 l2cap_pass_to_tx(chan, control);
6301 l2cap_send_srej(chan, control->txseq);
6303 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6304 /* This frame was requested with an SREJ, but
6305 * some expected retransmitted frames are
6306 * missing. Request retransmission of missing
6309 skb_queue_tail(&chan->srej_q, skb);
6311 BT_DBG("Queued %p (queue len %d)", skb,
6312 skb_queue_len(&chan->srej_q));
6314 l2cap_pass_to_tx(chan, control);
6315 l2cap_send_srej_list(chan, control->txseq);
6317 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6318 /* We've already queued this frame. Drop this copy. */
6319 l2cap_pass_to_tx(chan, control);
6321 case L2CAP_TXSEQ_DUPLICATE:
6322 /* Expecting a later sequence number, so this frame
6323 * was already received. Ignore it completely.
6326 case L2CAP_TXSEQ_INVALID_IGNORE:
6328 case L2CAP_TXSEQ_INVALID:
6330 l2cap_send_disconn_req(chan, ECONNRESET);
6334 case L2CAP_EV_RECV_RR:
6335 l2cap_pass_to_tx(chan, control);
6336 if (control->final) {
6337 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6339 if (!test_and_clear_bit(CONN_REJ_ACT,
6340 &chan->conn_state)) {
6342 l2cap_retransmit_all(chan, control);
6345 l2cap_ertm_send(chan);
6346 } else if (control->poll) {
6347 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6348 &chan->conn_state) &&
6349 chan->unacked_frames) {
6350 __set_retrans_timer(chan);
/* P=1 while SREJs outstanding: answer with tail SREJ + F-bit */
6353 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6354 l2cap_send_srej_tail(chan);
6356 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6357 &chan->conn_state) &&
6358 chan->unacked_frames)
6359 __set_retrans_timer(chan);
6361 l2cap_send_ack(chan);
6364 case L2CAP_EV_RECV_RNR:
6365 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6366 l2cap_pass_to_tx(chan, control);
6367 if (control->poll) {
6368 l2cap_send_srej_tail(chan);
/* No poll: just acknowledge with a plain RR */
6370 struct l2cap_ctrl rr_control;
6371 memset(&rr_control, 0, sizeof(rr_control));
6372 rr_control.sframe = 1;
6373 rr_control.super = L2CAP_SUPER_RR;
6374 rr_control.reqseq = chan->buffer_seq;
6375 l2cap_send_sframe(chan, &rr_control);
6379 case L2CAP_EV_RECV_REJ:
6380 l2cap_handle_rej(chan, control);
6382 case L2CAP_EV_RECV_SREJ:
6383 l2cap_handle_srej(chan, control);
/* skb not queued above: free it */
6387 if (skb && !skb_in_use) {
6388 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV state, adopt the new
 * controller's MTU (block MTU for high-speed, ACL MTU otherwise — the
 * selecting condition is elided here), and re-segment pending data.
 */
6395 static int l2cap_finish_move(struct l2cap_chan *chan)
6397 BT_DBG("chan %p", chan);
6399 chan->rx_state = L2CAP_RX_STATE_RECV;
6402 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6404 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6406 return l2cap_resegment(chan);
/* ERTM receive state: WAIT_P — waiting for a poll (P=1) from the peer
 * during a channel move. On poll, rewind the transmit queue to the
 * peer's reqseq, finish the move, and respond with the F-bit set.
 * Other events are forwarded to the RECV handler without the skb.
 */
6409 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6410 struct l2cap_ctrl *control,
6411 struct sk_buff *skb, u8 event)
6415 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6421 l2cap_process_reqseq(chan, control->reqseq);
6423 if (!skb_queue_empty(&chan->tx_q))
6424 chan->tx_send_head = skb_peek(&chan->tx_q);
6426 chan->tx_send_head = NULL;
6428 /* Rewind next_tx_seq to the point expected
6431 chan->next_tx_seq = control->reqseq;
6432 chan->unacked_frames = 0;
6434 err = l2cap_finish_move(chan);
6438 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6439 l2cap_send_i_or_rr_or_rnr(chan);
/* Non-I-frame events: re-run through the normal RECV handler */
6441 if (event == L2CAP_EV_RECV_IFRAME)
6444 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state: WAIT_F — waiting for the peer's final (F=1)
 * response during a channel move. On F=1, rewind the transmit queue,
 * adopt the new MTU, re-segment, and process the frame in RECV state.
 * Frames without F=1 are ignored (early exit elided).
 */
6447 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6448 struct l2cap_ctrl *control,
6449 struct sk_buff *skb, u8 event)
6453 if (!control->final)
6456 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6458 chan->rx_state = L2CAP_RX_STATE_RECV;
6459 l2cap_process_reqseq(chan, control->reqseq);
6461 if (!skb_queue_empty(&chan->tx_q))
6462 chan->tx_send_head = skb_peek(&chan->tx_q);
6464 chan->tx_send_head = NULL;
6466 /* Rewind next_tx_seq to the point expected
6469 chan->next_tx_seq = control->reqseq;
6470 chan->unacked_frames = 0;
/* Pick the MTU of whichever controller now carries the channel */
6473 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6475 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6477 err = l2cap_resegment(chan);
6480 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate that reqseq acknowledges a frame that has actually been sent
 * and not yet acked, using modular sequence-offset arithmetic.
 */
6485 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6487 /* Make sure reqseq is for a packet that has been sent but not acked */
6490 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6491 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatcher: validate reqseq, then route the
 * frame/event to the handler for the channel's current rx_state.
 * An invalid reqseq is a protocol violation and triggers a disconnect.
 */
6494 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6495 struct sk_buff *skb, u8 event)
6499 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6500 control, skb, event, chan->rx_state);
6502 if (__valid_reqseq(chan, control->reqseq)) {
6503 switch (chan->rx_state) {
6504 case L2CAP_RX_STATE_RECV:
6505 err = l2cap_rx_state_recv(chan, control, skb, event);
6507 case L2CAP_RX_STATE_SREJ_SENT:
6508 err = l2cap_rx_state_srej_sent(chan, control, skb,
6511 case L2CAP_RX_STATE_WAIT_P:
6512 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6514 case L2CAP_RX_STATE_WAIT_F:
6515 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6522 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6523 control->reqseq, chan->next_tx_seq,
6524 chan->expected_ack_seq);
6525 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: deliver in-sequence frames, silently drop
 * everything else (streaming mode has no retransmission). On a gap the
 * partial SDU is discarded and sequence state resynchronizes to txseq.
 */
6531 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6532 struct sk_buff *skb)
6536 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6539 if (l2cap_classify_txseq(chan, control->txseq) ==
6540 L2CAP_TXSEQ_EXPECTED) {
6541 l2cap_pass_to_tx(chan, control);
6543 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6544 __next_seq(chan, chan->buffer_seq));
6546 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6548 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence: abandon any partial SDU and drop the frame */
6551 kfree_skb(chan->sdu);
6554 chan->sdu_last_frag = NULL;
6558 BT_DBG("Freeing %p", skb);
/* Resync receive bookkeeping to the frame just seen */
6563 chan->last_acked_seq = control->txseq;
6564 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Receive entry point for ERTM and streaming channels: unpack and
 * validate the control field and FCS, then dispatch I-frames and
 * S-frames into the appropriate receive state machine.
 */
6569 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6571 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6575 __unpack_control(chan, skb);
6580 * We can just drop the corrupted I-frame here.
6581 * Receiver will miss it and start proper recovery
6582 * procedures and ask for retransmission.
6584 if (l2cap_check_fcs(chan, skb))
/* Compute payload length net of SDU-length prefix and FCS trailer */
6587 if (!control->sframe && control->sar == L2CAP_SAR_START)
6588 len -= L2CAP_SDULEN_SIZE;
6590 if (chan->fcs == L2CAP_FCS_CRC16)
6591 len -= L2CAP_FCS_SIZE;
/* Payload exceeding the negotiated MPS is a protocol violation */
6593 if (len > chan->mps) {
6594 l2cap_send_disconn_req(chan, ECONNRESET);
6598 if (!control->sframe) {
6601 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6602 control->sar, control->reqseq, control->final,
6605 /* Validate F-bit - F=0 always valid, F=1 only
6606 * valid in TX WAIT_F
6608 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6611 if (chan->mode != L2CAP_MODE_STREAMING) {
6612 event = L2CAP_EV_RECV_IFRAME;
6613 err = l2cap_rx(chan, control, skb, event);
6615 err = l2cap_stream_rx(chan, control, skb);
6619 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit supervisory function to a state-machine event */
6621 const u8 rx_func_to_event[4] = {
6622 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6623 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6626 /* Only I-frames are expected in streaming mode */
6627 if (chan->mode == L2CAP_MODE_STREAMING)
6630 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6631 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation */
6635 BT_ERR("Trailing bytes: %d in sframe", len);
6636 l2cap_send_disconn_req(chan, ECONNRESET);
6640 /* Validate F and P bits */
6641 if (control->final && (control->poll ||
6642 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6645 event = rx_func_to_event[control->super];
6646 if (l2cap_rx(chan, control, skb, event))
6647 l2cap_send_disconn_req(chan, ECONNRESET);
/* LE credit-based flow control: top up the sender's credits once our
 * remaining rx credits fall below half of the initial allowance, by
 * sending an LE Flow Control Credit packet.
 */
6657 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6659 struct l2cap_conn *conn = chan->conn;
6660 struct l2cap_le_credits pkt;
6663 /* We return more credits to the sender only after the amount of
6664 * credits falls below half of the initial amount.
6666 if (chan->rx_credits >= (le_max_credits + 1) / 2)
/* Restore the peer's credit count back to le_max_credits */
6669 return_credits = le_max_credits - chan->rx_credits;
6671 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6673 chan->rx_credits += return_credits;
6675 pkt.cid = cpu_to_le16(chan->scid);
6676 pkt.credits = cpu_to_le16(return_credits);
6678 chan->ident = l2cap_get_ident(conn);
6680 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive a PDU on an LE credit-based (LE_FLOWCTL) channel: enforce
 * credits and size limits, reassemble multi-PDU SDUs (first PDU carries
 * a 2-byte SDU length), and deliver complete SDUs via chan->ops->recv().
 * Always returns success-style 0 at the end because the skb's fate is
 * handled internally (see the trailing comment).
 */
6683 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Data without credits is a protocol violation — disconnect */
6687 if (!chan->rx_credits) {
6688 BT_ERR("No credits to receive LE L2CAP data");
6689 l2cap_send_disconn_req(chan, ECONNRESET);
6693 if (chan->imtu < skb->len) {
6694 BT_ERR("Too big LE L2CAP PDU");
/* Each received PDU consumes one of our credits (decrement elided) */
6699 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6701 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: read the SDU-length prefix */
6708 sdu_len = get_unaligned_le16(skb->data);
6709 skb_pull(skb, L2CAP_SDULEN_SIZE);
6711 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6712 sdu_len, skb->len, chan->imtu);
6714 if (sdu_len > chan->imtu) {
6715 BT_ERR("Too big LE L2CAP SDU length received");
6720 if (skb->len > sdu_len) {
6721 BT_ERR("Too much LE L2CAP data received");
/* Entire SDU fit in one PDU: deliver immediately */
6726 if (skb->len == sdu_len)
6727 return chan->ops->recv(chan, skb);
6730 chan->sdu_len = sdu_len;
6731 chan->sdu_last_frag = skb;
/* Continuation PDU: append to the SDU under reassembly */
6736 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6737 chan->sdu->len, skb->len, chan->sdu_len);
6739 if (chan->sdu->len + skb->len > chan->sdu_len) {
6740 BT_ERR("Too much LE L2CAP data received");
6745 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6748 if (chan->sdu->len == chan->sdu_len) {
6749 err = chan->ops->recv(chan, chan->sdu);
6752 chan->sdu_last_frag = NULL;
/* Error path: discard partial SDU */
6760 kfree_skb(chan->sdu);
6762 chan->sdu_last_frag = NULL;
6766 /* We can't return an error here since we took care of the skb
6767 * freeing internally. An error return would cause the caller to
6768 * do a double-free of the skb.
/* Route an incoming data frame to the channel registered for `cid` and
 * hand it to the mode-specific receive path. Unknown CIDs (except A2MP,
 * which may create a channel on demand) cause the frame to be dropped.
 */
6773 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6774 struct sk_buff *skb)
6776 struct l2cap_chan *chan;
6778 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channels can be created lazily on first data */
6780 if (cid == L2CAP_CID_A2MP) {
6781 chan = a2mp_channel_create(conn, skb);
6787 l2cap_chan_lock(chan);
6789 BT_DBG("unknown cid 0x%4.4x", cid);
6790 /* Drop packet and return */
6796 BT_DBG("chan %p, len %d", chan, skb->len);
/* Data is only accepted on fully connected channels */
6798 if (chan->state != BT_CONNECTED)
6801 switch (chan->mode) {
6802 case L2CAP_MODE_LE_FLOWCTL:
6803 if (l2cap_le_data_rcv(chan, skb) < 0)
6808 case L2CAP_MODE_BASIC:
6809 /* If socket recv buffers overflows we drop data here
6810 * which is *bad* because L2CAP has to be reliable.
6811 * But we don't have any other choice. L2CAP doesn't
6812 * provide flow control mechanism. */
6814 if (chan->imtu < skb->len) {
6815 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6819 if (!chan->ops->recv(chan, skb))
6823 case L2CAP_MODE_ERTM:
6824 case L2CAP_MODE_STREAMING:
6825 l2cap_data_rcv(chan, skb);
6829 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6837 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * the given PSM. The frame is dropped if no suitable channel exists,
 * the channel is in the wrong state, or the frame exceeds its MTU.
 */
6840 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6841 struct sk_buff *skb)
6843 struct hci_conn *hcon = conn->hcon;
6844 struct l2cap_chan *chan;
/* Connectionless data is a BR/EDR-only concept */
6846 if (hcon->type != ACL_LINK)
6849 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6854 BT_DBG("chan %p, len %d", chan, skb->len);
6856 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6859 if (chan->imtu < skb->len)
6862 /* Store remote BD_ADDR and PSM for msg_name */
6863 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6864 bt_cb(skb)->psm = psm;
6866 if (!chan->ops->recv(chan, skb))
/* Deliver a frame arriving on the fixed ATT CID (LE links only) to the
 * connected ATT channel matching this address pair; drop otherwise.
 */
6873 static void l2cap_att_channel(struct l2cap_conn *conn,
6874 struct sk_buff *skb)
6876 struct hci_conn *hcon = conn->hcon;
6877 struct l2cap_chan *chan;
/* ATT exists only on LE links */
6879 if (hcon->type != LE_LINK)
6882 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6883 &hcon->src, &hcon->dst);
6887 BT_DBG("chan %p, len %d", chan, skb->len);
6889 if (chan->imtu < skb->len)
6892 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by CID: signalling, connectionless,
 * ATT, LE signalling, SMP, or a dynamic data channel. Frames arriving
 * before the HCI connection is fully up are queued for later replay.
 */
6899 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6901 struct l2cap_hdr *lh = (void *) skb->data;
6902 struct hci_conn *hcon = conn->hcon;
/* Connection not yet established: park the frame in pending_rx */
6906 if (hcon->state != BT_CONNECTED) {
6907 BT_DBG("queueing pending rx skb");
6908 skb_queue_tail(&conn->pending_rx, skb);
6912 skb_pull(skb, L2CAP_HDR_SIZE);
6913 cid = __le16_to_cpu(lh->cid);
6914 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload */
6916 if (len != skb->len) {
6921 /* Since we can't actively block incoming LE connections we must
6922 * at least ensure that we ignore incoming data from them.
6924 if (hcon->type == LE_LINK &&
6925 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6926 bdaddr_type(hcon, hcon->dst_type))) {
6931 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6934 case L2CAP_CID_SIGNALING:
6935 l2cap_sig_channel(conn, skb);
6938 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM prefix before the payload */
6939 psm = get_unaligned((__le16 *) skb->data);
6940 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6941 l2cap_conless_channel(conn, psm, skb);
6945 l2cap_att_channel(conn, skb);
6948 case L2CAP_CID_LE_SIGNALING:
6949 l2cap_le_sig_channel(conn, skb);
/* SMP (case label elided): a failure tears down the connection */
6953 if (smp_sig_channel(conn, skb))
6954 l2cap_conn_del(conn->hcon, EACCES);
/* Default: dynamically allocated data channel */
6958 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI
 * connection reached BT_CONNECTED (queued by l2cap_recv_frame).
 */
6963 static void process_pending_rx(struct work_struct *work)
6965 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6967 struct sk_buff *skb;
6971 while ((skb = skb_dequeue(&conn->pending_rx)))
6972 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the per-HCI-connection L2CAP state
 * (struct l2cap_conn): HCI channel, MTU, locks, channel list, timers,
 * and the pending-rx machinery. Returns the new conn (NULL paths are
 * elided in this view).
 */
6975 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6977 struct l2cap_conn *conn = hcon->l2cap_data;
6978 struct hci_chan *hchan;
6983 hchan = hci_chan_create(hcon);
6987 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created */
6989 hci_chan_del(hchan);
6993 kref_init(&conn->ref);
6994 hcon->l2cap_data = conn;
/* conn holds a reference on the underlying hci_conn */
6996 hci_conn_get(conn->hcon);
6997 conn->hchan = hchan;
6999 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7001 switch (hcon->type) {
/* LE link (case label elided): prefer controller's LE MTU if set */
7003 if (hcon->hdev->le_mtu) {
7004 conn->mtu = hcon->hdev->le_mtu;
7009 conn->mtu = hcon->hdev->acl_mtu;
7013 conn->feat_mask = 0;
7015 if (hcon->type == ACL_LINK)
7016 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
7017 &hcon->hdev->dev_flags);
7019 spin_lock_init(&conn->lock);
7020 mutex_init(&conn->chan_lock);
7022 INIT_LIST_HEAD(&conn->chan_l);
7023 INIT_LIST_HEAD(&conn->users);
/* LE uses the SMP security timer, BR/EDR the info-request timer */
7025 if (hcon->type == LE_LINK)
7026 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
7028 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7030 skb_queue_head_init(&conn->pending_rx);
7031 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7033 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte (<= 0x00ff); BR/EDR PSMs must have an odd low byte and a clear
 * LSB in the upper byte, per the L2CAP spec.
 */
7038 static bool is_valid_psm(u16 psm, u8 dst_type) {
7042 if (bdaddr_type_is_le(dst_type))
7043 return (psm <= 0x00ff);
7045 /* PSM must be odd and lsb of upper byte must be 0 */
7046 return ((psm & 0x0101) == 0x0001);
/* Establish an outgoing L2CAP channel to `dst`: validates PSM/CID/mode,
 * creates (or reuses) the underlying HCI connection — LE or ACL
 * depending on the destination address type — attaches the channel to
 * the l2cap_conn, and kicks off the connect/config sequence.
 * Exported for users such as the socket layer and 6LoWPAN.
 */
7049 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7050 bdaddr_t *dst, u8 dst_type)
7052 struct l2cap_conn *conn;
7053 struct hci_conn *hcon;
7054 struct hci_dev *hdev;
7057 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7058 dst_type, __le16_to_cpu(psm));
/* Pick the local controller that routes to dst */
7060 hdev = hci_get_route(dst, &chan->src);
7062 return -EHOSTUNREACH;
7066 l2cap_chan_lock(chan);
/* RAW channels may connect without a valid PSM or CID */
7068 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7069 chan->chan_type != L2CAP_CHAN_RAW) {
7074 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7079 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7084 switch (chan->mode) {
7085 case L2CAP_MODE_BASIC:
7087 case L2CAP_MODE_LE_FLOWCTL:
7088 l2cap_le_flowctl_init(chan);
7090 case L2CAP_MODE_ERTM:
7091 case L2CAP_MODE_STREAMING:
7100 switch (chan->state) {
7104 /* Already connecting */
7109 /* Already connected */
7123 /* Set destination address and psm */
7124 bacpy(&chan->dst, dst);
7125 chan->dst_type = dst_type;
7130 if (bdaddr_type_is_le(dst_type)) {
7133 /* Convert from L2CAP channel address type to HCI address type
7135 if (dst_type == BDADDR_LE_PUBLIC)
7136 dst_type = ADDR_LE_DEV_PUBLIC;
7138 dst_type = ADDR_LE_DEV_RANDOM;
/* Initiate as master unless we are currently advertising */
7140 master = !test_bit(HCI_ADVERTISING, &hdev->dev_flags);
7142 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7143 HCI_LE_CONN_TIMEOUT, master);
7145 u8 auth_type = l2cap_get_auth_type(chan);
7146 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7150 err = PTR_ERR(hcon);
7154 conn = l2cap_conn_add(hcon);
7156 hci_conn_drop(hcon);
/* Refuse a duplicate fixed-channel CID on this connection */
7161 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7162 hci_conn_drop(hcon);
7167 /* Update source addr of the socket */
7168 bacpy(&chan->src, &hcon->src);
7169 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan must be unlocked across l2cap_chan_add (it takes conn locks) */
7171 l2cap_chan_unlock(chan);
7172 l2cap_chan_add(conn, chan);
7173 l2cap_chan_lock(chan);
7175 /* l2cap_chan_add takes its own ref so we can drop this one */
7176 hci_conn_drop(hcon);
7178 l2cap_state_change(chan, BT_CONNECT);
7179 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7181 /* Release chan->sport so that it can be reused by other
7182 * sockets (as it's only used for listening sockets).
7184 write_lock(&chan_list_lock);
7186 write_unlock(&chan_list_lock);
/* HCI link already up: proceed straight to security/config */
7188 if (hcon->state == BT_CONNECTED) {
7189 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7190 __clear_chan_timer(chan);
7191 if (l2cap_chan_check_security(chan))
7192 l2cap_state_change(chan, BT_CONNECTED);
7194 l2cap_do_start(chan);
7200 l2cap_chan_unlock(chan);
7201 hci_dev_unlock(hdev);
7205 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7207 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and compute the link-mode mask (accept / role-switch).
 * An exact local-address match (lm1) takes precedence over wildcard
 * BDADDR_ANY listeners (lm2).
 */
7209 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7211 int exact = 0, lm1 = 0, lm2 = 0;
7212 struct l2cap_chan *c;
7214 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7216 /* Find listening sockets and check their link_mode */
7217 read_lock(&chan_list_lock);
7218 list_for_each_entry(c, &chan_list, global_l) {
7219 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address */
7222 if (!bacmp(&c->src, &hdev->bdaddr)) {
7223 lm1 |= HCI_LM_ACCEPT;
7224 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7225 lm1 |= HCI_LM_MASTER;
/* Wildcard listener */
7227 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7228 lm2 |= HCI_LM_ACCEPT;
7229 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7230 lm2 |= HCI_LM_MASTER;
7233 read_unlock(&chan_list_lock);
7235 return exact ? lm1 : lm2;
/* HCI callback when a connection attempt completes: on success create
 * the l2cap_conn and mark it ready; on failure tear everything down
 * with the mapped errno.
 */
7238 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7240 struct l2cap_conn *conn;
7242 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7245 conn = l2cap_conn_add(hcon);
7247 l2cap_conn_ready(conn);
7249 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking for the disconnect reason to report to the peer.
 * Falls back to "remote user terminated" when no l2cap_conn exists.
 */
7253 int l2cap_disconn_ind(struct hci_conn *hcon)
7255 struct l2cap_conn *conn = hcon->l2cap_data;
7257 BT_DBG("hcon %p", hcon);
7260 return HCI_ERROR_REMOTE_USER_TERM;
7261 return conn->disc_reason;
/* HCI callback when the link has gone down: destroy the l2cap_conn,
 * propagating the HCI reason as an errno to all channels.
 */
7264 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7266 BT_DBG("hcon %p reason %d", hcon, reason);
7268 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * if encryption dropped, give MEDIUM-security channels a grace timer
 * and close HIGH/FIPS channels outright; if encryption is on, cancel
 * the grace timer for MEDIUM channels.
 */
7271 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7273 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7276 if (encrypt == 0x00) {
7277 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7278 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7279 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7280 chan->sec_level == BT_SECURITY_FIPS)
7281 l2cap_chan_close(chan, ECONNREFUSED);
7283 if (chan->sec_level == BT_SECURITY_MEDIUM)
7284 __clear_chan_timer(chan);
/* HCI callback for authentication/encryption results: walk every channel
 * on the connection and advance (or fail) its connection establishment
 * according to the security outcome. LE links route through SMP first.
 */
7288 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7290 struct l2cap_conn *conn = hcon->l2cap_data;
7291 struct l2cap_chan *chan;
7296 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7298 if (hcon->type == LE_LINK) {
/* LE: on successful encryption, hand off SMP key distribution */
7299 if (!status && encrypt)
7300 smp_distribute_keys(conn);
7301 cancel_delayed_work(&conn->security_timer);
7304 mutex_lock(&conn->chan_lock);
7306 list_for_each_entry(chan, &conn->chan_l, list) {
7307 l2cap_chan_lock(chan);
7309 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7310 state_to_string(chan->state));
/* A2MP fixed channel manages its own security */
7312 if (chan->scid == L2CAP_CID_A2MP) {
7313 l2cap_chan_unlock(chan);
7317 if (chan->scid == L2CAP_CID_ATT) {
7318 if (!status && encrypt) {
7319 chan->sec_level = hcon->sec_level;
7320 l2cap_chan_ready(chan);
7323 l2cap_chan_unlock(chan);
/* Skip channels whose connect request is still pending */
7327 if (!__l2cap_no_conn_pending(chan)) {
7328 l2cap_chan_unlock(chan);
7332 if (!status && (chan->state == BT_CONNECTED ||
7333 chan->state == BT_CONFIG)) {
7334 chan->ops->resume(chan);
7335 l2cap_check_encryption(chan, encrypt);
7336 l2cap_chan_unlock(chan);
7340 if (chan->state == BT_CONNECT) {
7342 l2cap_start_connection(chan);
7344 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7345 } else if (chan->state == BT_CONNECT2) {
/* Incoming connection waiting on security: send the deferred
 * Connect Response now that the result is known
 */
7346 struct l2cap_conn_rsp rsp;
7350 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7351 res = L2CAP_CR_PEND;
7352 stat = L2CAP_CS_AUTHOR_PEND;
7353 chan->ops->defer(chan);
7355 l2cap_state_change(chan, BT_CONFIG);
7356 res = L2CAP_CR_SUCCESS;
7357 stat = L2CAP_CS_NO_INFO;
/* Security failed: block the connection */
7360 l2cap_state_change(chan, BT_DISCONN);
7361 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7362 res = L2CAP_CR_SEC_BLOCK;
7363 stat = L2CAP_CS_NO_INFO;
7366 rsp.scid = cpu_to_le16(chan->dcid);
7367 rsp.dcid = cpu_to_le16(chan->scid);
7368 rsp.result = cpu_to_le16(res);
7369 rsp.status = cpu_to_le16(stat);
7370 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Accepted: immediately follow with our Configure Request */
7373 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7374 res == L2CAP_CR_SUCCESS) {
7376 set_bit(CONF_REQ_SENT, &chan->conf_state);
7377 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7379 l2cap_build_conf_req(chan, buf),
7381 chan->num_conf_req++;
7385 l2cap_chan_unlock(chan);
7388 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from the HCI core.
 *
 * Reassembles L2CAP PDUs that the controller fragmented across several
 * ACL packets: @flags distinguishes a start fragment (carrying the Basic
 * L2CAP header with the total length) from continuation fragments.
 * Partial frames accumulate in conn->rx_skb / conn->rx_len; a complete
 * frame is handed to l2cap_recv_frame(), which takes skb ownership.
 */
7393 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7395 struct l2cap_conn *conn = hcon->l2cap_data;
7396 struct l2cap_hdr *hdr;
7399 /* For AMP controller do not create l2cap conn */
7400 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7404 conn = l2cap_conn_add(hcon);
7409 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* NOTE(review): the `switch (flags)` statement and the ACL_START case
 * label are on lines not visible in this extract. */
7413 case ACL_START_NO_FLUSH:
/* A start fragment while a previous reassembly is still pending
 * means we lost fragments: drop the stale partial frame. */
7416 BT_ERR("Unexpected start frame (len %d)", skb->len);
7417 kfree_skb(conn->rx_skb);
7418 conn->rx_skb = NULL;
7420 l2cap_conn_unreliable(conn, ECOMM);
7423 /* Start fragment always begin with Basic L2CAP header */
7424 if (skb->len < L2CAP_HDR_SIZE) {
7425 BT_ERR("Frame is too short (len %d)", skb->len);
7426 l2cap_conn_unreliable(conn, ECOMM);
7430 hdr = (struct l2cap_hdr *) skb->data;
/* Total PDU size = payload length from the header + the header. */
7431 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7433 if (len == skb->len) {
7434 /* Complete frame received */
7435 l2cap_recv_frame(conn, skb);
7439 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7441 if (skb->len > len) {
7442 BT_ERR("Frame is too long (len %d, expected len %d)",
7444 l2cap_conn_unreliable(conn, ECOMM);
7448 /* Allocate skb for the complete frame (with header) */
7449 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
/* Copy the first fragment in; rx_len tracks how many bytes of the
 * PDU are still outstanding. */
7453 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7455 conn->rx_len = len - skb->len;
/* NOTE(review): the ACL_CONT case label is on a line not visible in
 * this extract -- everything below handles continuation fragments. */
7459 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol error. */
7461 if (!conn->rx_len) {
7462 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7463 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the announced PDU length: abandon it. */
7467 if (skb->len > conn->rx_len) {
7468 BT_ERR("Fragment is too long (len %d, expected %d)",
7469 skb->len, conn->rx_len);
7470 kfree_skb(conn->rx_skb);
7471 conn->rx_skb = NULL;
7473 l2cap_conn_unreliable(conn, ECOMM);
7477 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7479 conn->rx_len -= skb->len;
7481 if (!conn->rx_len) {
7482 /* Complete frame received. l2cap_recv_frame
7483 * takes ownership of the skb so set the global
7484 * rx_skb pointer to NULL first.
7486 struct sk_buff *rx_skb = conn->rx_skb;
7487 conn->rx_skb = NULL;
7488 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dumps
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level and mode). Takes the global channel-list read lock.
 */
7498 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7500 struct l2cap_chan *c;
7502 read_lock(&chan_list_lock);
7504 list_for_each_entry(c, &chan_list, global_l) {
7505 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
/* NOTE(review): the argument line carrying the two %pMR address
 * pointers (presumably &c->src, &c->dst) is not visible in this
 * extract. */
7507 c->state, __le16_to_cpu(c->psm),
7508 c->scid, c->dcid, c->imtu, c->omtu,
7509 c->sec_level, c->mode);
7512 read_unlock(&chan_list_lock);
/* debugfs open callback: standard single_open() wiring so that reads of
 * the file are served by l2cap_debugfs_show(). */
7517 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7519 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry, using the seq_file
 * single_open pattern.
 * NOTE(review): the `.read = seq_read,` initializer is on a line not
 * visible in this extract. */
7522 static const struct file_operations l2cap_debugfs_fops = {
7523 .open = l2cap_debugfs_open,
7525 .llseek = seq_lseek,
7526 .release = single_release,
/* Dentry for the debugfs "l2cap" file; created in l2cap_init() and
 * removed in l2cap_exit(). */
7529 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then (if the bluetooth
 * debugfs root exists) expose the channel dump and the two tunable LE
 * flow-control parameters under debugfs.
 * NOTE(review): the error-return check after l2cap_init_sockets() and
 * the final `return err;` are on lines not visible in this extract. */
7531 int __init l2cap_init(void)
7535 err = l2cap_init_sockets();
/* No debugfs root -- skip creating the debug entries entirely. */
7539 if (IS_ERR_OR_NULL(bt_debugfs))
7542 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7543 NULL, &l2cap_debugfs_fops);
/* Writable knobs backing le_max_credits / le_default_mps (the &var
 * argument lines are not visible in this extract). */
7545 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7547 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module teardown: undo l2cap_init() -- remove the debugfs entry and
 * unregister the socket layer. */
7553 void l2cap_exit(void)
7555 debugfs_remove(l2cap_debugfs);
7556 l2cap_cleanup_sockets();
/* Boot/runtime module parameter: lets administrators disable Enhanced
 * Retransmission Mode globally (world-readable, root-writable). */
7559 module_param(disable_ertm, bool, 0644);
7560 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");