/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 if (hcon->type == LE_LINK) {
70 if (type == ADDR_LE_DEV_PUBLIC)
71 return BDADDR_LE_PUBLIC;
73 return BDADDR_LE_RANDOM;
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
86 list_for_each_entry(c, &conn->chan_l, list) {
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
98 list_for_each_entry(c, &conn->chan_l, list) {
105 /* Find channel with given SCID.
106 * Returns locked channel. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
110 struct l2cap_chan *c;
112 mutex_lock(&conn->chan_lock);
113 c = __l2cap_get_chan_by_scid(conn, cid);
116 mutex_unlock(&conn->chan_lock);
121 /* Find channel with given DCID.
122 * Returns locked channel.
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
127 struct l2cap_chan *c;
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
133 mutex_unlock(&conn->chan_lock);
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
141 struct l2cap_chan *c;
143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153 struct l2cap_chan *c;
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
159 mutex_unlock(&conn->chan_lock);
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&c->src, src))
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock(&chan_list_lock);
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock(&chan_list_lock);
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
209 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
211 write_lock(&chan_list_lock);
215 write_unlock(&chan_list_lock);
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 if (conn->hcon->type == LE_LINK)
225 dyn_end = L2CAP_CID_LE_DYN_END;
227 dyn_end = L2CAP_CID_DYN_END;
229 for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 if (!__l2cap_get_chan_by_scid(conn, cid))
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
239 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
240 state_to_string(state));
243 chan->ops->state_change(chan, state, 0);
246 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 chan->ops->state_change(chan, chan->state, err);
253 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
255 chan->ops->state_change(chan, chan->state, err);
258 static void __set_retrans_timer(struct l2cap_chan *chan)
260 if (!delayed_work_pending(&chan->monitor_timer) &&
261 chan->retrans_timeout) {
262 l2cap_set_timer(chan, &chan->retrans_timer,
263 msecs_to_jiffies(chan->retrans_timeout));
267 static void __set_monitor_timer(struct l2cap_chan *chan)
269 __clear_retrans_timer(chan);
270 if (chan->monitor_timeout) {
271 l2cap_set_timer(chan, &chan->monitor_timer,
272 msecs_to_jiffies(chan->monitor_timeout));
276 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 skb_queue_walk(head, skb) {
282 if (bt_cb(skb)->control.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * overhead.
 */
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
302 size_t alloc_size, i;
304 /* Allocated size is a power of 2 to map sequence numbers
305 * (which may be up to 14 bits) in to a smaller array that is
306 * sized for the negotiated ERTM transmit windows.
308 alloc_size = roundup_pow_of_two(size);
310 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
314 seq_list->mask = alloc_size - 1;
315 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 for (i = 0; i < alloc_size; i++)
318 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
325 kfree(seq_list->list);
328 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
331 /* Constant-time check for list membership */
332 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
335 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
337 u16 seq = seq_list->head;
338 u16 mask = seq_list->mask;
340 seq_list->head = seq_list->list[seq & mask];
341 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
343 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
344 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
358 for (i = 0; i <= seq_list->mask; i++)
359 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
365 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
367 u16 mask = seq_list->mask;
369 /* All appends happen in constant time */
371 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
374 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
375 seq_list->head = seq;
377 seq_list->list[seq_list->tail & mask] = seq;
379 seq_list->tail = seq;
380 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with
 * a reason derived from its current state, then drop the timer's
 * reference to the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
413 struct l2cap_chan *l2cap_chan_create(void)
415 struct l2cap_chan *chan;
417 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
421 mutex_init(&chan->lock);
423 write_lock(&chan_list_lock);
424 list_add(&chan->global_l, &chan_list);
425 write_unlock(&chan_list_lock);
427 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
429 chan->state = BT_OPEN;
431 kref_init(&chan->kref);
433 /* This flag is cleared in l2cap_chan_ready() */
434 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
436 BT_DBG("chan %p", chan);
440 EXPORT_SYMBOL_GPL(l2cap_chan_create);
442 static void l2cap_chan_destroy(struct kref *kref)
444 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
446 BT_DBG("chan %p", chan);
448 write_lock(&chan_list_lock);
449 list_del(&chan->global_l);
450 write_unlock(&chan_list_lock);
455 void l2cap_chan_hold(struct l2cap_chan *c)
457 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
462 void l2cap_chan_put(struct l2cap_chan *c)
464 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
466 kref_put(&c->kref, l2cap_chan_destroy);
468 EXPORT_SYMBOL_GPL(l2cap_chan_put);
470 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
472 chan->fcs = L2CAP_FCS_CRC16;
473 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
474 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
475 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
476 chan->remote_max_tx = chan->max_tx;
477 chan->remote_tx_win = chan->tx_win;
478 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
479 chan->sec_level = BT_SECURITY_LOW;
480 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
481 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
482 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
483 chan->conf_state = 0;
485 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
487 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
489 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
492 chan->sdu_last_frag = NULL;
494 chan->tx_credits = 0;
495 chan->rx_credits = le_max_credits;
496 chan->mps = min_t(u16, chan->imtu, le_default_mps);
498 skb_queue_head_init(&chan->tx_q);
501 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
503 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
504 __le16_to_cpu(chan->psm), chan->dcid);
506 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
510 switch (chan->chan_type) {
511 case L2CAP_CHAN_CONN_ORIENTED:
512 /* Alloc CID for connection-oriented socket */
513 chan->scid = l2cap_alloc_cid(conn);
514 if (conn->hcon->type == ACL_LINK)
515 chan->omtu = L2CAP_DEFAULT_MTU;
518 case L2CAP_CHAN_CONN_LESS:
519 /* Connectionless socket */
520 chan->scid = L2CAP_CID_CONN_LESS;
521 chan->dcid = L2CAP_CID_CONN_LESS;
522 chan->omtu = L2CAP_DEFAULT_MTU;
525 case L2CAP_CHAN_FIXED:
526 /* Caller will set CID and CID specific MTU values */
530 /* Raw socket can send/recv signalling messages only */
531 chan->scid = L2CAP_CID_SIGNALING;
532 chan->dcid = L2CAP_CID_SIGNALING;
533 chan->omtu = L2CAP_DEFAULT_MTU;
536 chan->local_id = L2CAP_BESTEFFORT_ID;
537 chan->local_stype = L2CAP_SERV_BESTEFFORT;
538 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
539 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
540 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
541 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
543 l2cap_chan_hold(chan);
545 hci_conn_hold(conn->hcon);
547 list_add(&chan->list, &conn->chan_l);
550 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
552 mutex_lock(&conn->chan_lock);
553 __l2cap_chan_add(conn, chan);
554 mutex_unlock(&conn->chan_lock);
557 void l2cap_chan_del(struct l2cap_chan *chan, int err)
559 struct l2cap_conn *conn = chan->conn;
561 __clear_chan_timer(chan);
563 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
566 struct amp_mgr *mgr = conn->hcon->amp_mgr;
567 /* Delete from channel list */
568 list_del(&chan->list);
570 l2cap_chan_put(chan);
574 if (chan->scid != L2CAP_CID_A2MP)
575 hci_conn_drop(conn->hcon);
577 if (mgr && mgr->bredr_chan == chan)
578 mgr->bredr_chan = NULL;
581 if (chan->hs_hchan) {
582 struct hci_chan *hs_hchan = chan->hs_hchan;
584 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
585 amp_disconnect_logical_link(hs_hchan);
588 chan->ops->teardown(chan, err);
590 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 case L2CAP_MODE_BASIC:
597 case L2CAP_MODE_LE_FLOWCTL:
598 skb_queue_purge(&chan->tx_q);
601 case L2CAP_MODE_ERTM:
602 __clear_retrans_timer(chan);
603 __clear_monitor_timer(chan);
604 __clear_ack_timer(chan);
606 skb_queue_purge(&chan->srej_q);
608 l2cap_seq_list_free(&chan->srej_list);
609 l2cap_seq_list_free(&chan->retrans_list);
613 case L2CAP_MODE_STREAMING:
614 skb_queue_purge(&chan->tx_q);
620 EXPORT_SYMBOL_GPL(l2cap_chan_del);
622 void l2cap_conn_update_id_addr(struct hci_conn *hcon)
624 struct l2cap_conn *conn = hcon->l2cap_data;
625 struct l2cap_chan *chan;
627 mutex_lock(&conn->chan_lock);
629 list_for_each_entry(chan, &conn->chan_l, list) {
630 l2cap_chan_lock(chan);
631 bacpy(&chan->dst, &hcon->dst);
632 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
633 l2cap_chan_unlock(chan);
636 mutex_unlock(&conn->chan_lock);
639 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
641 struct l2cap_conn *conn = chan->conn;
642 struct l2cap_le_conn_rsp rsp;
645 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
646 result = L2CAP_CR_AUTHORIZATION;
648 result = L2CAP_CR_BAD_PSM;
650 l2cap_state_change(chan, BT_DISCONN);
652 rsp.dcid = cpu_to_le16(chan->scid);
653 rsp.mtu = cpu_to_le16(chan->imtu);
654 rsp.mps = cpu_to_le16(chan->mps);
655 rsp.credits = cpu_to_le16(chan->rx_credits);
656 rsp.result = cpu_to_le16(result);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
662 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
664 struct l2cap_conn *conn = chan->conn;
665 struct l2cap_conn_rsp rsp;
668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
669 result = L2CAP_CR_SEC_BLOCK;
671 result = L2CAP_CR_BAD_PSM;
673 l2cap_state_change(chan, BT_DISCONN);
675 rsp.scid = cpu_to_le16(chan->dcid);
676 rsp.dcid = cpu_to_le16(chan->scid);
677 rsp.result = cpu_to_le16(result);
678 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
680 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
683 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
685 struct l2cap_conn *conn = chan->conn;
687 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
689 switch (chan->state) {
691 chan->ops->teardown(chan, 0);
696 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
697 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
698 l2cap_send_disconn_req(chan, reason);
700 l2cap_chan_del(chan, reason);
704 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
705 if (conn->hcon->type == ACL_LINK)
706 l2cap_chan_connect_reject(chan);
707 else if (conn->hcon->type == LE_LINK)
708 l2cap_chan_le_connect_reject(chan);
711 l2cap_chan_del(chan, reason);
716 l2cap_chan_del(chan, reason);
720 chan->ops->teardown(chan, 0);
724 EXPORT_SYMBOL(l2cap_chan_close);
726 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
728 switch (chan->chan_type) {
730 switch (chan->sec_level) {
731 case BT_SECURITY_HIGH:
732 case BT_SECURITY_FIPS:
733 return HCI_AT_DEDICATED_BONDING_MITM;
734 case BT_SECURITY_MEDIUM:
735 return HCI_AT_DEDICATED_BONDING;
737 return HCI_AT_NO_BONDING;
740 case L2CAP_CHAN_CONN_LESS:
741 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
742 if (chan->sec_level == BT_SECURITY_LOW)
743 chan->sec_level = BT_SECURITY_SDP;
745 if (chan->sec_level == BT_SECURITY_HIGH ||
746 chan->sec_level == BT_SECURITY_FIPS)
747 return HCI_AT_NO_BONDING_MITM;
749 return HCI_AT_NO_BONDING;
751 case L2CAP_CHAN_CONN_ORIENTED:
752 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
753 if (chan->sec_level == BT_SECURITY_LOW)
754 chan->sec_level = BT_SECURITY_SDP;
756 if (chan->sec_level == BT_SECURITY_HIGH ||
757 chan->sec_level == BT_SECURITY_FIPS)
758 return HCI_AT_NO_BONDING_MITM;
760 return HCI_AT_NO_BONDING;
764 switch (chan->sec_level) {
765 case BT_SECURITY_HIGH:
766 case BT_SECURITY_FIPS:
767 return HCI_AT_GENERAL_BONDING_MITM;
768 case BT_SECURITY_MEDIUM:
769 return HCI_AT_GENERAL_BONDING;
771 return HCI_AT_NO_BONDING;
777 /* Service level security */
778 int l2cap_chan_check_security(struct l2cap_chan *chan)
780 struct l2cap_conn *conn = chan->conn;
783 if (conn->hcon->type == LE_LINK)
784 return smp_conn_security(conn->hcon, chan->sec_level);
786 auth_type = l2cap_get_auth_type(chan);
788 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
791 static u8 l2cap_get_ident(struct l2cap_conn *conn)
795 /* Get next available identificator.
796 * 1 - 128 are used by kernel.
797 * 129 - 199 are reserved.
798 * 200 - 254 are used by utilities like l2ping, etc.
801 spin_lock(&conn->lock);
803 if (++conn->tx_ident > 128)
808 spin_unlock(&conn->lock);
813 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
816 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
819 BT_DBG("code 0x%2.2x", code);
824 if (lmp_no_flush_capable(conn->hcon->hdev))
825 flags = ACL_START_NO_FLUSH;
829 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
830 skb->priority = HCI_PRIO_MAX;
832 hci_send_acl(conn->hchan, skb, flags);
835 static bool __chan_is_moving(struct l2cap_chan *chan)
837 return chan->move_state != L2CAP_MOVE_STABLE &&
838 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
841 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
843 struct hci_conn *hcon = chan->conn->hcon;
846 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
849 if (chan->hs_hcon && !__chan_is_moving(chan)) {
851 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
858 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
859 lmp_no_flush_capable(hcon->hdev))
860 flags = ACL_START_NO_FLUSH;
864 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
865 hci_send_acl(chan->conn->hchan, skb, flags);
868 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
870 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
871 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
873 if (enh & L2CAP_CTRL_FRAME_TYPE) {
876 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
877 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
884 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
885 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
892 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
894 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
895 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
897 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
900 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
901 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
908 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
909 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
916 static inline void __unpack_control(struct l2cap_chan *chan,
919 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
920 __unpack_extended_control(get_unaligned_le32(skb->data),
921 &bt_cb(skb)->control);
922 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
924 __unpack_enhanced_control(get_unaligned_le16(skb->data),
925 &bt_cb(skb)->control);
926 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
930 static u32 __pack_extended_control(struct l2cap_ctrl *control)
934 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
935 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
937 if (control->sframe) {
938 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
939 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
940 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
942 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
943 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
949 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
953 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
954 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
956 if (control->sframe) {
957 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
958 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
959 packed |= L2CAP_CTRL_FRAME_TYPE;
961 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
962 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
968 static inline void __pack_control(struct l2cap_chan *chan,
969 struct l2cap_ctrl *control,
972 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
973 put_unaligned_le32(__pack_extended_control(control),
974 skb->data + L2CAP_HDR_SIZE);
976 put_unaligned_le16(__pack_enhanced_control(control),
977 skb->data + L2CAP_HDR_SIZE);
981 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
983 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
984 return L2CAP_EXT_HDR_SIZE;
986 return L2CAP_ENH_HDR_SIZE;
989 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
993 struct l2cap_hdr *lh;
994 int hlen = __ertm_hdr_size(chan);
996 if (chan->fcs == L2CAP_FCS_CRC16)
997 hlen += L2CAP_FCS_SIZE;
999 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1002 return ERR_PTR(-ENOMEM);
1004 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1005 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1006 lh->cid = cpu_to_le16(chan->dcid);
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1009 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1011 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1013 if (chan->fcs == L2CAP_FCS_CRC16) {
1014 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1015 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1018 skb->priority = HCI_PRIO_MAX;
1022 static void l2cap_send_sframe(struct l2cap_chan *chan,
1023 struct l2cap_ctrl *control)
1025 struct sk_buff *skb;
1028 BT_DBG("chan %p, control %p", chan, control);
1030 if (!control->sframe)
1033 if (__chan_is_moving(chan))
1036 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1040 if (control->super == L2CAP_SUPER_RR)
1041 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1042 else if (control->super == L2CAP_SUPER_RNR)
1043 set_bit(CONN_RNR_SENT, &chan->conn_state);
1045 if (control->super != L2CAP_SUPER_SREJ) {
1046 chan->last_acked_seq = control->reqseq;
1047 __clear_ack_timer(chan);
1050 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1051 control->final, control->poll, control->super);
1053 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1054 control_field = __pack_extended_control(control);
1056 control_field = __pack_enhanced_control(control);
1058 skb = l2cap_create_sframe_pdu(chan, control_field);
1060 l2cap_do_send(chan, skb);
1063 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1065 struct l2cap_ctrl control;
1067 BT_DBG("chan %p, poll %d", chan, poll);
1069 memset(&control, 0, sizeof(control));
1071 control.poll = poll;
1073 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1074 control.super = L2CAP_SUPER_RNR;
1076 control.super = L2CAP_SUPER_RR;
1078 control.reqseq = chan->buffer_seq;
1079 l2cap_send_sframe(chan, &control);
1082 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1084 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1087 static bool __amp_capable(struct l2cap_chan *chan)
1089 struct l2cap_conn *conn = chan->conn;
1090 struct hci_dev *hdev;
1091 bool amp_available = false;
1093 if (!conn->hs_enabled)
1096 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1099 read_lock(&hci_dev_list_lock);
1100 list_for_each_entry(hdev, &hci_dev_list, list) {
1101 if (hdev->amp_type != AMP_TYPE_BREDR &&
1102 test_bit(HCI_UP, &hdev->flags)) {
1103 amp_available = true;
1107 read_unlock(&hci_dev_list_lock);
1109 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1110 return amp_available;
1115 static bool l2cap_check_efs(struct l2cap_chan *chan)
1117 /* Check EFS parameters */
1121 void l2cap_send_conn_req(struct l2cap_chan *chan)
1123 struct l2cap_conn *conn = chan->conn;
1124 struct l2cap_conn_req req;
1126 req.scid = cpu_to_le16(chan->scid);
1127 req.psm = chan->psm;
1129 chan->ident = l2cap_get_ident(conn);
1131 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1133 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1136 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1138 struct l2cap_create_chan_req req;
1139 req.scid = cpu_to_le16(chan->scid);
1140 req.psm = chan->psm;
1141 req.amp_id = amp_id;
1143 chan->ident = l2cap_get_ident(chan->conn);
1145 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1149 static void l2cap_move_setup(struct l2cap_chan *chan)
1151 struct sk_buff *skb;
1153 BT_DBG("chan %p", chan);
1155 if (chan->mode != L2CAP_MODE_ERTM)
1158 __clear_retrans_timer(chan);
1159 __clear_monitor_timer(chan);
1160 __clear_ack_timer(chan);
1162 chan->retry_count = 0;
1163 skb_queue_walk(&chan->tx_q, skb) {
1164 if (bt_cb(skb)->control.retries)
1165 bt_cb(skb)->control.retries = 1;
1170 chan->expected_tx_seq = chan->buffer_seq;
1172 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1173 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1174 l2cap_seq_list_clear(&chan->retrans_list);
1175 l2cap_seq_list_clear(&chan->srej_list);
1176 skb_queue_purge(&chan->srej_q);
1178 chan->tx_state = L2CAP_TX_STATE_XMIT;
1179 chan->rx_state = L2CAP_RX_STATE_MOVE;
1181 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1184 static void l2cap_move_done(struct l2cap_chan *chan)
1186 u8 move_role = chan->move_role;
1187 BT_DBG("chan %p", chan);
1189 chan->move_state = L2CAP_MOVE_STABLE;
1190 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1192 if (chan->mode != L2CAP_MODE_ERTM)
1195 switch (move_role) {
1196 case L2CAP_MOVE_ROLE_INITIATOR:
1197 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1198 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1200 case L2CAP_MOVE_ROLE_RESPONDER:
1201 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1206 static void l2cap_chan_ready(struct l2cap_chan *chan)
1208 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1209 chan->conf_state = 0;
1210 __clear_chan_timer(chan);
1212 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1213 chan->ops->suspend(chan);
1215 chan->state = BT_CONNECTED;
1217 chan->ops->ready(chan);
1220 static void l2cap_le_connect(struct l2cap_chan *chan)
1222 struct l2cap_conn *conn = chan->conn;
1223 struct l2cap_le_conn_req req;
1225 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1228 req.psm = chan->psm;
1229 req.scid = cpu_to_le16(chan->scid);
1230 req.mtu = cpu_to_le16(chan->imtu);
1231 req.mps = cpu_to_le16(chan->mps);
1232 req.credits = cpu_to_le16(chan->rx_credits);
1234 chan->ident = l2cap_get_ident(conn);
1236 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1240 static void l2cap_le_start(struct l2cap_chan *chan)
1242 struct l2cap_conn *conn = chan->conn;
1244 if (!smp_conn_security(conn->hcon, chan->sec_level))
1248 l2cap_chan_ready(chan);
1252 if (chan->state == BT_CONNECT)
1253 l2cap_le_connect(chan);
1256 static void l2cap_start_connection(struct l2cap_chan *chan)
1258 if (__amp_capable(chan)) {
1259 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1260 a2mp_discover_amp(chan);
1261 } else if (chan->conn->hcon->type == LE_LINK) {
1262 l2cap_le_start(chan);
1264 l2cap_send_conn_req(chan);
1268 static void l2cap_do_start(struct l2cap_chan *chan)
1270 struct l2cap_conn *conn = chan->conn;
1272 if (conn->hcon->type == LE_LINK) {
1273 l2cap_le_start(chan);
1277 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1278 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1281 if (l2cap_chan_check_security(chan) &&
1282 __l2cap_no_conn_pending(chan)) {
1283 l2cap_start_connection(chan);
1286 struct l2cap_info_req req;
1287 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1289 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1290 conn->info_ident = l2cap_get_ident(conn);
1292 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1294 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1299 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1301 u32 local_feat_mask = l2cap_feat_mask;
1303 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1306 case L2CAP_MODE_ERTM:
1307 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1308 case L2CAP_MODE_STREAMING:
1309 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1315 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1317 struct l2cap_conn *conn = chan->conn;
1318 struct l2cap_disconn_req req;
1323 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1324 __clear_retrans_timer(chan);
1325 __clear_monitor_timer(chan);
1326 __clear_ack_timer(chan);
1329 if (chan->scid == L2CAP_CID_A2MP) {
1330 l2cap_state_change(chan, BT_DISCONN);
1334 req.dcid = cpu_to_le16(chan->dcid);
1335 req.scid = cpu_to_le16(chan->scid);
1336 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1339 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1342 /* ---- L2CAP connections ---- */
1343 static void l2cap_conn_start(struct l2cap_conn *conn)
1345 struct l2cap_chan *chan, *tmp;
1347 BT_DBG("conn %p", conn);
1349 mutex_lock(&conn->chan_lock);
1351 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1352 l2cap_chan_lock(chan);
1354 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1355 l2cap_chan_unlock(chan);
1359 if (chan->state == BT_CONNECT) {
1360 if (!l2cap_chan_check_security(chan) ||
1361 !__l2cap_no_conn_pending(chan)) {
1362 l2cap_chan_unlock(chan);
1366 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1367 && test_bit(CONF_STATE2_DEVICE,
1368 &chan->conf_state)) {
1369 l2cap_chan_close(chan, ECONNRESET);
1370 l2cap_chan_unlock(chan);
1374 l2cap_start_connection(chan);
1376 } else if (chan->state == BT_CONNECT2) {
1377 struct l2cap_conn_rsp rsp;
1379 rsp.scid = cpu_to_le16(chan->dcid);
1380 rsp.dcid = cpu_to_le16(chan->scid);
1382 if (l2cap_chan_check_security(chan)) {
1383 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1386 chan->ops->defer(chan);
1389 l2cap_state_change(chan, BT_CONFIG);
1390 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1391 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1394 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1395 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1398 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1401 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1402 rsp.result != L2CAP_CR_SUCCESS) {
1403 l2cap_chan_unlock(chan);
1407 set_bit(CONF_REQ_SENT, &chan->conf_state);
1408 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1409 l2cap_build_conf_req(chan, buf), buf);
1410 chan->num_conf_req++;
1413 l2cap_chan_unlock(chan);
1416 mutex_unlock(&conn->chan_lock);
1419 /* Find socket with cid and source/destination bdaddr.
1420 * Returns closest match, locked.
/* Scan the global channel list for a channel in @state whose source CID
 * is @cid.  An exact src+dst address match returns immediately; failing
 * that, the best wildcard (BDADDR_ANY) match seen is kept in c1.
 * NOTE(review): the header comment says "locked", but chan_list_lock is
 * released on every return path here -- confirm whether callers expect a
 * locked result or the comment is stale.
 */
1422 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1426 struct l2cap_chan *c, *c1 = NULL;
1428 read_lock(&chan_list_lock);
1430 list_for_each_entry(c, &chan_list, global_l) {
/* state == 0 means "any state". */
1431 if (state && c->state != state)
1434 if (c->scid == cid) {
1435 int src_match, dst_match;
1436 int src_any, dst_any;
/* Exact match wins outright. */
1439 src_match = !bacmp(&c->src, src);
1440 dst_match = !bacmp(&c->dst, dst);
1441 if (src_match && dst_match) {
1442 read_unlock(&chan_list_lock);
/* Otherwise remember the closest wildcard match. */
1447 src_any = !bacmp(&c->src, BDADDR_ANY);
1448 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1449 if ((src_match && dst_any) || (src_any && dst_match) ||
1450 (src_any && dst_any))
1455 read_unlock(&chan_list_lock);
/* An LE link just came up: attach a child ATT channel to it if a listening
 * ATT socket exists, and request a connection parameter update when the
 * slave's current interval is outside the configured bounds.
 */
1460 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1462 struct hci_conn *hcon = conn->hcon;
1463 struct hci_dev *hdev = hcon->hdev;
1464 struct l2cap_chan *chan, *pchan;
1469 /* Check if we have socket listening on cid */
1470 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1471 &hcon->src, &hcon->dst);
1475 /* Client ATT sockets should override the server one */
1476 if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1479 dst_type = bdaddr_type(hcon, hcon->dst_type);
1481 /* If device is blocked, do not create a channel for it */
1482 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
1485 /* For LE slave connections, make sure the connection interval
1486 * is in the range of the minimum and maximum interval that has
1487 * been configured for this connection. If not, then trigger
1488 * the connection update procedure.
1490 if (!test_bit(HCI_CONN_MASTER, &hcon->flags) &&
1491 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1492 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1493 struct l2cap_conn_param_update_req req;
1495 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1496 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1497 req.latency = cpu_to_le16(hcon->le_conn_latency);
1498 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1500 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1501 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Spawn a child channel off the listening one and bind it to this
 * link's addresses.
 */
1504 l2cap_chan_lock(pchan);
1506 chan = pchan->ops->new_connection(pchan);
1510 bacpy(&chan->src, &hcon->src);
1511 bacpy(&chan->dst, &hcon->dst);
1512 chan->src_type = bdaddr_type(hcon, hcon->src_type);
1513 chan->dst_type = dst_type;
1515 __l2cap_chan_add(conn, chan);
1518 l2cap_chan_unlock(pchan);
/* The underlying HCI link is up: start security for outgoing LE pairing,
 * run the LE ready path, and kick every attached channel forward according
 * to its type and state.  Finally release any queued-up received frames.
 */
1521 static void l2cap_conn_ready(struct l2cap_conn *conn)
1523 struct l2cap_chan *chan;
1524 struct hci_conn *hcon = conn->hcon;
1526 BT_DBG("conn %p", conn);
1528 /* For outgoing pairing which doesn't necessarily have an
1529 * associated socket (e.g. mgmt_pair_device).
1531 if (hcon->out && hcon->type == LE_LINK)
1532 smp_conn_security(hcon, hcon->pending_sec_level);
1534 mutex_lock(&conn->chan_lock);
1536 if (hcon->type == LE_LINK)
1537 l2cap_le_conn_ready(conn);
1539 list_for_each_entry(chan, &conn->chan_l, list) {
1541 l2cap_chan_lock(chan);
/* A2MP channels have their own setup path; skip them here. */
1543 if (chan->scid == L2CAP_CID_A2MP) {
1544 l2cap_chan_unlock(chan);
1548 if (hcon->type == LE_LINK) {
1549 l2cap_le_start(chan);
1550 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless/fixed channels are ready as soon as the link is. */
1551 l2cap_chan_ready(chan);
1553 } else if (chan->state == BT_CONNECT) {
1554 l2cap_do_start(chan);
1557 l2cap_chan_unlock(chan);
1560 mutex_unlock(&conn->chan_lock);
/* Process frames that arrived before the connection was ready. */
1562 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1565 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that asked for forced
 * reliability (FLAG_FORCE_RELIABLE), so their owners learn the link can
 * no longer be trusted.
 */
1566 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1568 struct l2cap_chan *chan;
1570 BT_DBG("conn %p", conn);
1572 mutex_lock(&conn->chan_lock);
1574 list_for_each_entry(chan, &conn->chan_l, list) {
1575 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1576 l2cap_chan_set_err(chan, err);
1579 mutex_unlock(&conn->chan_lock);
/* Information Request timed out: give up waiting for the remote feature
 * mask, mark the exchange done, and proceed with connection setup using
 * whatever we already know.
 */
1582 static void l2cap_info_timeout(struct work_struct *work)
1584 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1587 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1588 conn->info_ident = 0;
1590 l2cap_conn_start(conn);
1595 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1596 * callback is called during registration. The ->remove callback is called
1597 * during unregistration.
1598 * An l2cap_user object can either be explicitly unregistered or when the
1599 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1600 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1601 * External modules must own a reference to the l2cap_conn object if they intend
1602 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1603 * any time if they don't.
/* Register @user on @conn under the hci_dev lock.  Fails if the user is
 * already linked (list pointers non-NULL) or if ->probe() rejects it.
 */
1606 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1608 struct hci_dev *hdev = conn->hcon->hdev;
1611 /* We need to check whether l2cap_conn is registered. If it is not, we
1612 * must not register the l2cap_user. l2cap_conn_del() unregisters
1613 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1614 * relies on the parent hci_conn object to be locked. This itself relies
1615 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-NULL list pointers mean this user is already registered somewhere. */
1620 if (user->list.next || user->list.prev) {
1625 /* conn->hchan is NULL after l2cap_conn_del() was called */
1631 ret = user->probe(conn, user);
1635 list_add(&user->list, &conn->users);
1639 hci_dev_unlock(hdev);
1642 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister @user from @conn: unlink it, reset its list
 * pointers to NULL (the "not registered" marker checked at registration),
 * and invoke its ->remove() callback.  Caller must hold a conn reference.
 */
1644 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1646 struct hci_dev *hdev = conn->hcon->hdev;
/* NULL list pointers mean the user was never (or already un-) registered. */
1650 if (!user->list.next || !user->list.prev)
1653 list_del(&user->list);
1654 user->list.next = NULL;
1655 user->list.prev = NULL;
1656 user->remove(conn, user);
1659 hci_dev_unlock(hdev);
1661 EXPORT_SYMBOL(l2cap_unregister_user);
/* Tear down every registered l2cap_user on @conn (used when the connection
 * itself is being deleted): unlink each, clear its list pointers, and call
 * its ->remove() callback.
 */
1663 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1665 struct l2cap_user *user;
1667 while (!list_empty(&conn->users)) {
1668 user = list_first_entry(&conn->users, struct l2cap_user, list);
1669 list_del(&user->list);
1670 user->list.next = NULL;
1671 user->list.prev = NULL;
1672 user->remove(conn, user);
/* Destroy the L2CAP connection attached to @hcon, delivering @err to all
 * channels.  Frees pending RX state, removes all users and channels, drops
 * the HCI channel, cancels outstanding timers/SMP state, and finally drops
 * the connection reference.  Relies on the caller's hci_conn locking (see
 * the comment in l2cap_register_user()).
 */
1676 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1678 struct l2cap_conn *conn = hcon->l2cap_data;
1679 struct l2cap_chan *chan, *l;
1684 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1686 kfree_skb(conn->rx_skb);
1688 skb_queue_purge(&conn->pending_rx);
1690 /* We can not call flush_work(&conn->pending_rx_work) here since we
1691 * might block if we are running on a worker from the same workqueue
1692 * pending_rx_work is waiting on.
1694 if (work_pending(&conn->pending_rx_work))
1695 cancel_work_sync(&conn->pending_rx_work);
1697 l2cap_unregister_all_users(conn);
1699 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it survives list removal. */
1702 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1703 l2cap_chan_hold(chan);
1704 l2cap_chan_lock(chan);
1706 l2cap_chan_del(chan, err);
1708 l2cap_chan_unlock(chan);
/* ->close() is called unlocked; it may sleep or take the lock itself. */
1710 chan->ops->close(chan);
1711 l2cap_chan_put(chan);
1714 mutex_unlock(&conn->chan_lock);
1716 hci_chan_del(conn->hchan);
1718 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1719 cancel_delayed_work_sync(&conn->info_timer);
1721 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1722 cancel_delayed_work_sync(&conn->security_timer);
1723 smp_chan_destroy(conn);
/* Detach from the hci_conn before dropping our reference. */
1726 hcon->l2cap_data = NULL;
1728 l2cap_conn_put(conn);
/* SMP security procedure timed out: if the pairing was still pending,
 * destroy the SMP context and tear down the whole connection with
 * ETIMEDOUT.
 */
1731 static void security_timeout(struct work_struct *work)
1733 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1734 security_timer.work);
1736 BT_DBG("conn %p", conn);
1738 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1739 smp_chan_destroy(conn);
1740 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* kref release callback: drop the hci_conn reference held by this
 * l2cap_conn and free it.  Invoked from l2cap_conn_put() when the last
 * reference goes away.
 */
1744 static void l2cap_conn_free(struct kref *ref)
1746 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1748 hci_conn_put(conn->hcon);
/* Take a reference on @conn.  Pair every call with l2cap_conn_put(). */
1752 void l2cap_conn_get(struct l2cap_conn *conn)
1754 kref_get(&conn->ref);
1756 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free(). */
1758 void l2cap_conn_put(struct l2cap_conn *conn)
1760 kref_put(&conn->ref, l2cap_conn_free);
1762 EXPORT_SYMBOL(l2cap_conn_put);
1764 /* ---- Socket interface ---- */
1766 /* Find socket with psm and source / destination bdaddr.
1767 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM, and additionally
 * filtered by link type: BR/EDR links only match BR/EDR source addresses,
 * LE links never match BR/EDR sources.  Exact address match wins; the
 * best wildcard (BDADDR_ANY) candidate is kept in c1 otherwise.
 */
1769 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1774 struct l2cap_chan *c, *c1 = NULL;
1776 read_lock(&chan_list_lock);
1778 list_for_each_entry(c, &chan_list, global_l) {
1779 if (state && c->state != state)
1782 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1785 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1788 if (c->psm == psm) {
1789 int src_match, dst_match;
1790 int src_any, dst_any;
/* Exact match takes priority over any wildcard candidate. */
1793 src_match = !bacmp(&c->src, src);
1794 dst_match = !bacmp(&c->dst, dst);
1795 if (src_match && dst_match) {
1796 read_unlock(&chan_list_lock);
1801 src_any = !bacmp(&c->src, BDADDR_ANY);
1802 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1803 if ((src_match && dst_any) || (src_any && dst_match) ||
1804 (src_any && dst_any))
1809 read_unlock(&chan_list_lock);
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine.  Drops the channel reference taken when the timer was armed;
 * the early unlock/put path (visible below) handles a channel that is no
 * longer in a state worth monitoring.
 */
1814 static void l2cap_monitor_timeout(struct work_struct *work)
1816 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1817 monitor_timer.work);
1819 BT_DBG("chan %p", chan);
1821 l2cap_chan_lock(chan);
1824 l2cap_chan_unlock(chan);
1825 l2cap_chan_put(chan);
1829 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1831 l2cap_chan_unlock(chan);
1832 l2cap_chan_put(chan);
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine.  Mirrors l2cap_monitor_timeout(), including the dropped
 * channel reference on both exit paths.
 */
1835 static void l2cap_retrans_timeout(struct work_struct *work)
1837 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1838 retrans_timer.work);
1840 BT_DBG("chan %p", chan);
1842 l2cap_chan_lock(chan);
1845 l2cap_chan_unlock(chan);
1846 l2cap_chan_put(chan);
1850 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1851 l2cap_chan_unlock(chan);
1852 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the channel TX queue and send
 * everything immediately.  Each I-frame gets its control field packed and,
 * if negotiated, a CRC16 FCS appended.  No retransmission state is kept --
 * streaming mode is fire-and-forget.
 */
1855 static void l2cap_streaming_send(struct l2cap_chan *chan,
1856 struct sk_buff_head *skbs)
1858 struct sk_buff *skb;
1859 struct l2cap_ctrl *control;
1861 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1863 if (__chan_is_moving(chan))
1866 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1868 while (!skb_queue_empty(&chan->tx_q)) {
1870 skb = skb_dequeue(&chan->tx_q);
1872 bt_cb(skb)->control.retries = 1;
1873 control = &bt_cb(skb)->control;
/* Streaming mode never acknowledges, so reqseq is always 0. */
1875 control->reqseq = 0;
1876 control->txseq = chan->next_tx_seq;
1878 __pack_control(chan, control, skb);
1880 if (chan->fcs == L2CAP_FCS_CRC16) {
1881 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1882 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1885 l2cap_do_send(chan, skb);
1887 BT_DBG("Sent txseq %u", control->txseq);
1889 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1890 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while the
 * remote TX window has room and the state machine is in XMIT.  Frames stay
 * on tx_q for possible retransmission; clones are handed to the HCI layer.
 * Returns the number of frames sent (via the sent counter declared in the
 * elided lines).
 */
1894 static int l2cap_ertm_send(struct l2cap_chan *chan)
1896 struct sk_buff *skb, *tx_skb;
1897 struct l2cap_ctrl *control;
1900 BT_DBG("chan %p", chan);
1902 if (chan->state != BT_CONNECTED)
1905 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1908 if (__chan_is_moving(chan))
/* Stop when the queue runs dry, the peer's window fills, or the TX
 * state machine leaves XMIT.
 */
1911 while (chan->tx_send_head &&
1912 chan->unacked_frames < chan->remote_tx_win &&
1913 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1915 skb = chan->tx_send_head;
1917 bt_cb(skb)->control.retries = 1;
1918 control = &bt_cb(skb)->control;
1920 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgement on every outgoing I-frame. */
1923 control->reqseq = chan->buffer_seq;
1924 chan->last_acked_seq = chan->buffer_seq;
1925 control->txseq = chan->next_tx_seq;
1927 __pack_control(chan, control, skb);
1929 if (chan->fcs == L2CAP_FCS_CRC16) {
1930 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1931 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1934 /* Clone after data has been modified. Data is assumed to be
1935 read-only (for locking purposes) on cloned sk_buffs.
1937 tx_skb = skb_clone(skb, GFP_KERNEL);
1942 __set_retrans_timer(chan);
1944 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1945 chan->unacked_frames++;
1946 chan->frames_sent++;
/* Advance tx_send_head; NULL means everything queued has been sent. */
1949 if (skb_queue_is_last(&chan->tx_q, skb))
1950 chan->tx_send_head = NULL;
1952 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1954 l2cap_do_send(chan, tx_skb);
1955 BT_DBG("Sent txseq %u", control->txseq);
1958 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1959 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.  Each
 * frame's retry count is bumped and checked against max_tx (0 = no limit);
 * exceeding it disconnects the channel.  Cloned skbs must be copied before
 * their control fields can be rewritten in place.
 */
1964 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1966 struct l2cap_ctrl control;
1967 struct sk_buff *skb;
1968 struct sk_buff *tx_skb;
1971 BT_DBG("chan %p", chan);
1973 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1976 if (__chan_is_moving(chan))
1979 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1980 seq = l2cap_seq_list_pop(&chan->retrans_list);
1982 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1984 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1989 bt_cb(skb)->control.retries++;
1990 control = bt_cb(skb)->control;
/* max_tx == 0 means retry forever; otherwise enforce the limit. */
1992 if (chan->max_tx != 0 &&
1993 bt_cb(skb)->control.retries > chan->max_tx) {
1994 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1995 l2cap_send_disconn_req(chan, ECONNRESET);
1996 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the piggybacked ack and F-bit for the resent frame. */
2000 control.reqseq = chan->buffer_seq;
2001 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2006 if (skb_cloned(skb)) {
2007 /* Cloned sk_buffs are read-only, so we need a
2010 tx_skb = skb_copy(skb, GFP_KERNEL);
2012 tx_skb = skb_clone(skb, GFP_KERNEL);
2016 l2cap_seq_list_clear(&chan->retrans_list);
2020 /* Update skb contents */
2021 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2022 put_unaligned_le32(__pack_extended_control(&control),
2023 tx_skb->data + L2CAP_HDR_SIZE);
2025 put_unaligned_le16(__pack_enhanced_control(&control),
2026 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the rewritten frame, if in use. */
2029 if (chan->fcs == L2CAP_FCS_CRC16) {
2030 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2031 put_unaligned_le16(fcs, skb_put(tx_skb,
2035 l2cap_do_send(chan, tx_skb);
2037 BT_DBG("Resent txseq %d", control.txseq);
2039 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame the peer asked for (control->reqseq),
 * by queueing it on retrans_list and running the resend machinery.
 */
2043 static void l2cap_retransmit(struct l2cap_chan *chan,
2044 struct l2cap_ctrl *control)
2046 BT_DBG("chan %p, control %p", chan, control);
2048 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2049 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame from control->reqseq up to (but not
 * including) tx_send_head: locate the starting frame in tx_q, append the
 * txseq of each following unacked frame to retrans_list, then resend.
 */
2052 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2053 struct l2cap_ctrl *control)
2055 struct sk_buff *skb;
2057 BT_DBG("chan %p, control %p", chan, control);
2060 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Rebuild the retransmission list from scratch. */
2062 l2cap_seq_list_clear(&chan->retrans_list);
2064 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2067 if (chan->unacked_frames) {
/* First walk: find the frame matching reqseq (or tx_send_head). */
2068 skb_queue_walk(&chan->tx_q, skb) {
2069 if (bt_cb(skb)->control.txseq == control->reqseq ||
2070 skb == chan->tx_send_head)
/* Second walk: queue everything up to tx_send_head for resend. */
2074 skb_queue_walk_from(&chan->tx_q, skb) {
2075 if (skb == chan->tx_send_head)
2078 l2cap_seq_list_append(&chan->retrans_list,
2079 bt_cb(skb)->control.txseq);
2082 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when locally busy, tries to
 * piggyback the ack on pending I-frames, sends an explicit RR once the
 * receive window is 3/4 full, and otherwise just (re)arms the ack timer.
 */
2086 static void l2cap_send_ack(struct l2cap_chan *chan)
2088 struct l2cap_ctrl control;
2089 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2090 chan->last_acked_seq);
2093 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2094 chan, chan->last_acked_seq, chan->buffer_seq);
2096 memset(&control, 0, sizeof(control));
/* Locally busy in RECV state: tell the peer to stop with an RNR. */
2099 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2100 chan->rx_state == L2CAP_RX_STATE_RECV) {
2101 __clear_ack_timer(chan);
2102 control.super = L2CAP_SUPER_RNR;
2103 control.reqseq = chan->buffer_seq;
2104 l2cap_send_sframe(chan, &control);
2106 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2107 l2cap_ertm_send(chan);
2108 /* If any i-frames were sent, they included an ack */
2109 if (chan->buffer_seq == chan->last_acked_seq)
2113 /* Ack now if the window is 3/4ths full.
2114 * Calculate without mul or div
/* threshold = ack_win * 3, later shifted (elided) to get 3/4. */
2116 threshold = chan->ack_win;
2117 threshold += threshold << 1;
2120 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2123 if (frames_to_ack >= threshold) {
2124 __clear_ack_timer(chan);
2125 control.super = L2CAP_SUPER_RR;
2126 control.reqseq = chan->buffer_seq;
2127 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack until the timer fires. */
2132 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes
 * go into the skb head, the remainder into a chain of fragment skbs of at
 * most conn->mtu bytes each, allocated via the channel's alloc_skb op.
 * Returns 0 on success or a negative error (elided error paths return on
 * copy or allocation failure).
 */
2136 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2137 struct msghdr *msg, int len,
2138 int count, struct sk_buff *skb)
2140 struct l2cap_conn *conn = chan->conn;
2141 struct sk_buff **frag;
2144 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2145 msg->msg_iov, count))
2151 /* Continuation fragments (no L2CAP header) */
2152 frag = &skb_shinfo(skb)->frag_list;
2154 struct sk_buff *tmp;
2156 count = min_t(unsigned int, conn->mtu, len);
2158 tmp = chan->ops->alloc_skb(chan, 0, count,
2159 msg->msg_flags & MSG_DONTWAIT);
2161 return PTR_ERR(tmp);
2165 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2166 msg->msg_iov, count))
/* Account the fragment's bytes against the head skb. */
2172 skb->len += (*frag)->len;
2173 skb->data_len += (*frag)->len;
2175 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from @msg: basic L2CAP header plus
 * the 2-byte PSM, then the payload copied in via l2cap_skbuff_fromiovec().
 * Returns the skb or an ERR_PTR on allocation/copy failure.
 */
2181 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2182 struct msghdr *msg, size_t len)
2184 struct l2cap_conn *conn = chan->conn;
2185 struct sk_buff *skb;
2186 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2187 struct l2cap_hdr *lh;
2189 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2190 __le16_to_cpu(chan->psm), len);
2192 count = min_t(unsigned int, (conn->mtu - hlen), len);
2194 skb = chan->ops->alloc_skb(chan, hlen, count,
2195 msg->msg_flags & MSG_DONTWAIT);
2199 /* Create L2CAP header */
2200 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2201 lh->cid = cpu_to_le16(chan->dcid);
/* Header length covers the payload plus the PSM field. */
2202 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2203 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2205 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2206 if (unlikely(err < 0)) {
2208 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU from @msg: plain L2CAP header followed
 * by the payload.  Returns the skb or an ERR_PTR on failure.
 */
2213 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2214 struct msghdr *msg, size_t len)
2216 struct l2cap_conn *conn = chan->conn;
2217 struct sk_buff *skb;
2219 struct l2cap_hdr *lh;
2221 BT_DBG("chan %p len %zu", chan, len);
2223 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2225 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2226 msg->msg_flags & MSG_DONTWAIT);
2230 /* Create L2CAP header */
2231 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2232 lh->cid = cpu_to_le16(chan->dcid);
2233 lh->len = cpu_to_le16(len);
2235 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2236 if (unlikely(err < 0)) {
2238 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for the
 * first segment of a segmented SDU, the payload, and room reserved for the
 * FCS.  Returns the skb or an ERR_PTR on failure.
 */
2243 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2244 struct msghdr *msg, size_t len,
2247 struct l2cap_conn *conn = chan->conn;
2248 struct sk_buff *skb;
2249 int err, count, hlen;
2250 struct l2cap_hdr *lh;
2252 BT_DBG("chan %p len %zu", chan, len);
2255 return ERR_PTR(-ENOTCONN);
/* Header size depends on extended vs enhanced control field. */
2257 hlen = __ertm_hdr_size(chan);
2260 hlen += L2CAP_SDULEN_SIZE;
2262 if (chan->fcs == L2CAP_FCS_CRC16)
2263 hlen += L2CAP_FCS_SIZE;
2265 count = min_t(unsigned int, (conn->mtu - hlen), len);
2267 skb = chan->ops->alloc_skb(chan, hlen, count,
2268 msg->msg_flags & MSG_DONTWAIT);
2272 /* Create L2CAP header */
2273 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2274 lh->cid = cpu_to_le16(chan->dcid);
2275 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2277 /* Control header is populated later */
2278 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2279 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2281 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* Non-zero sdulen marks this as the SAR start segment. */
2284 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2286 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2287 if (unlikely(err < 0)) {
2289 return ERR_PTR(err);
2292 bt_cb(skb)->control.fcs = chan->fcs;
2293 bt_cb(skb)->control.retries = 0;
/* Split an outgoing SDU into ERTM/streaming PDU-sized I-frames and queue
 * them on @seg_queue, tagging each with its SAR role (UNSEGMENTED, START,
 * CONTINUE, END).  PDU size is bounded by the HCI MTU, the BR/EDR maximum,
 * worst-case L2CAP overhead, and the remote's negotiated MPS.
 */
2297 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2298 struct sk_buff_head *seg_queue,
2299 struct msghdr *msg, size_t len)
2301 struct sk_buff *skb;
2306 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2308 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2309 * so fragmented skbs are not used. The HCI layer's handling
2310 * of fragmented skbs is not compatible with ERTM's queueing.
2313 /* PDU size is derived from the HCI MTU */
2314 pdu_len = chan->conn->mtu;
2316 /* Constrain PDU size for BR/EDR connections */
2318 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2320 /* Adjust for largest possible L2CAP overhead. */
2322 pdu_len -= L2CAP_FCS_SIZE;
2324 pdu_len -= __ertm_hdr_size(chan);
2326 /* Remote device may have requested smaller PDUs */
2327 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2329 if (len <= pdu_len) {
/* Whole SDU fits in one PDU: no segmentation needed. */
2330 sar = L2CAP_SAR_UNSEGMENTED;
2334 sar = L2CAP_SAR_START;
/* The START segment carries the 2-byte SDU length field. */
2336 pdu_len -= L2CAP_SDULEN_SIZE;
2340 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2343 __skb_queue_purge(seg_queue);
2344 return PTR_ERR(skb);
2347 bt_cb(skb)->control.sar = sar;
2348 __skb_queue_tail(seg_queue, skb);
/* After the START segment, later PDUs regain the SDU-length bytes. */
2353 pdu_len += L2CAP_SDULEN_SIZE;
2356 if (len <= pdu_len) {
2357 sar = L2CAP_SAR_END;
2360 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow control PDU: L2CAP header, an optional
 * SDU-length field (first segment only), then the payload.  Returns the
 * skb or an ERR_PTR on failure.
 */
2367 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2369 size_t len, u16 sdulen)
2371 struct l2cap_conn *conn = chan->conn;
2372 struct sk_buff *skb;
2373 int err, count, hlen;
2374 struct l2cap_hdr *lh;
2376 BT_DBG("chan %p len %zu", chan, len);
2379 return ERR_PTR(-ENOTCONN);
2381 hlen = L2CAP_HDR_SIZE;
2384 hlen += L2CAP_SDULEN_SIZE;
2386 count = min_t(unsigned int, (conn->mtu - hlen), len);
2388 skb = chan->ops->alloc_skb(chan, hlen, count,
2389 msg->msg_flags & MSG_DONTWAIT);
2393 /* Create L2CAP header */
2394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2395 lh->cid = cpu_to_le16(chan->dcid);
2396 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Only the first segment of an SDU carries the SDU length. */
2399 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2401 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2402 if (unlikely(err < 0)) {
2404 return ERR_PTR(err);
/* Split an outgoing SDU into LE flow-control PDUs queued on @seg_queue.
 * PDU size is conn->mtu minus the L2CAP header, capped by the remote MPS;
 * the first segment additionally carries (and pays for) the SDU length.
 */
2410 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2411 struct sk_buff_head *seg_queue,
2412 struct msghdr *msg, size_t len)
2414 struct sk_buff *skb;
2418 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2420 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2422 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* First segment loses 2 bytes to the SDU-length field. */
2425 pdu_len -= L2CAP_SDULEN_SIZE;
2431 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2433 __skb_queue_purge(seg_queue);
2434 return PTR_ERR(skb);
2437 __skb_queue_tail(seg_queue, skb);
/* Subsequent segments regain the SDU-length bytes. */
2443 pdu_len += L2CAP_SDULEN_SIZE;
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode: connectionless G-frames, LE credit-based flow control, basic-mode
 * B-frames, or ERTM/streaming segmentation.  Returns bytes sent or a
 * negative error.  The channel lock may be dropped around skb allocation,
 * hence the re-checks of chan->state after each allocation.
 */
2450 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2452 struct sk_buff *skb;
2454 struct sk_buff_head seg_queue;
2459 /* Connectionless channel */
2460 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2461 skb = l2cap_create_connless_pdu(chan, msg, len);
2463 return PTR_ERR(skb);
2465 /* Channel lock is released before requesting new skb and then
2466 * reacquired thus we need to recheck channel state.
2468 if (chan->state != BT_CONNECTED) {
2473 l2cap_do_send(chan, skb);
2477 switch (chan->mode) {
2478 case L2CAP_MODE_LE_FLOWCTL:
2479 /* Check outgoing MTU */
2480 if (len > chan->omtu)
/* No credits left: the peer must grant more before we may send. */
2483 if (!chan->tx_credits)
2486 __skb_queue_head_init(&seg_queue);
2488 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2490 if (chan->state != BT_CONNECTED) {
2491 __skb_queue_purge(&seg_queue);
2498 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Transmit as far as the current credit count allows. */
2500 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2501 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2505 if (!chan->tx_credits)
2506 chan->ops->suspend(chan);
2512 case L2CAP_MODE_BASIC:
2513 /* Check outgoing MTU */
2514 if (len > chan->omtu)
2517 /* Create a basic PDU */
2518 skb = l2cap_create_basic_pdu(chan, msg, len);
2520 return PTR_ERR(skb);
2522 /* Channel lock is released before requesting new skb and then
2523 * reacquired thus we need to recheck channel state.
2525 if (chan->state != BT_CONNECTED) {
2530 l2cap_do_send(chan, skb);
2534 case L2CAP_MODE_ERTM:
2535 case L2CAP_MODE_STREAMING:
2536 /* Check outgoing MTU */
2537 if (len > chan->omtu) {
2542 __skb_queue_head_init(&seg_queue);
2544 /* Do segmentation before calling in to the state machine,
2545 * since it's possible to block while waiting for memory
2548 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2550 /* The channel could have been closed while segmenting,
2551 * check that it is still connected.
2553 if (chan->state != BT_CONNECTED) {
2554 __skb_queue_purge(&seg_queue);
2561 if (chan->mode == L2CAP_MODE_ERTM)
2562 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2564 l2cap_streaming_send(chan, &seg_queue);
2568 /* If the skbs were not queued for sending, they'll still be in
2569 * seg_queue and need to be purged.
2571 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode --
 * consider "bad mode" for clarity.
 */
2575 BT_DBG("bad state %1.1x", chan->mode);
2581 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every sequence number between expected_tx_seq and
 * @txseq that is not already buffered in srej_q, recording each requested
 * seq on srej_list.  Afterwards expect the frame following @txseq.
 */
2583 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2585 struct l2cap_ctrl control;
2588 BT_DBG("chan %p, txseq %u", chan, txseq);
2590 memset(&control, 0, sizeof(control));
2592 control.super = L2CAP_SUPER_SREJ;
2594 for (seq = chan->expected_tx_seq; seq != txseq;
2595 seq = __next_seq(chan, seq)) {
/* Skip frames already received out of order into srej_q. */
2596 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2597 control.reqseq = seq;
2598 l2cap_send_sframe(chan, &control);
2599 l2cap_seq_list_append(&chan->srej_list, seq);
2603 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-request the most recently SREJ'd sequence number (the tail of
 * srej_list), if any outstanding SREJ exists.
 */
2606 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2608 struct l2cap_ctrl control;
2610 BT_DBG("chan %p", chan);
2612 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2615 memset(&control, 0, sizeof(control));
2617 control.super = L2CAP_SUPER_SREJ;
2618 control.reqseq = chan->srej_list.tail;
2619 l2cap_send_sframe(chan, &control);
/* Resend an SREJ for every outstanding sequence number on srej_list except
 * @txseq.  Each popped seq is re-appended, so the list rotates; capturing
 * the initial head bounds the loop to a single pass.
 */
2622 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2624 struct l2cap_ctrl control;
2628 BT_DBG("chan %p, txseq %u", chan, txseq);
2630 memset(&control, 0, sizeof(control));
2632 control.super = L2CAP_SUPER_SREJ;
2634 /* Capture initial list head to allow only one pass through the list. */
2635 initial_head = chan->srej_list.head;
2638 seq = l2cap_seq_list_pop(&chan->srej_list);
2639 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2642 control.reqseq = seq;
2643 l2cap_send_sframe(chan, &control);
2644 l2cap_seq_list_append(&chan->srej_list, seq);
2645 } while (chan->srej_list.head != initial_head);
/* Process the acknowledgement carried in a received frame: free every
 * frame on tx_q with a txseq from expected_ack_seq up to (excluding)
 * @reqseq, decrementing unacked_frames, and stop the retransmission timer
 * once nothing is outstanding.
 */
2648 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2650 struct sk_buff *acked_skb;
2653 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack repeats what we already know. */
2655 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2658 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2659 chan->expected_ack_seq, chan->unacked_frames);
2661 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2662 ackseq = __next_seq(chan, ackseq)) {
2664 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2666 skb_unlink(acked_skb, &chan->tx_q);
2667 kfree_skb(acked_skb);
2668 chan->unacked_frames--;
2672 chan->expected_ack_seq = reqseq;
2674 if (chan->unacked_frames == 0)
2675 __clear_retrans_timer(chan);
2677 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon SREJ-based recovery: reset expected_tx_seq to the last delivered
 * frame, discard the SREJ bookkeeping and out-of-order buffer, and drop
 * the RX state machine back to RECV.
 */
2680 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2682 BT_DBG("chan %p", chan);
2684 chan->expected_tx_seq = chan->buffer_seq;
2685 l2cap_seq_list_clear(&chan->srej_list);
2686 skb_queue_purge(&chan->srej_q);
2687 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, received acks, explicit polls, and retransmission timeouts.
 * Poll/timeout events arm the monitor timer and move the machine to
 * WAIT_F, where it waits for a frame with the F-bit set.
 */
2690 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2691 struct l2cap_ctrl *control,
2692 struct sk_buff_head *skbs, u8 event)
2694 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2698 case L2CAP_EV_DATA_REQUEST:
/* New data: queue it and transmit as the window allows. */
2699 if (chan->tx_send_head == NULL)
2700 chan->tx_send_head = skb_peek(skbs);
2702 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2703 l2cap_ertm_send(chan);
2705 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2706 BT_DBG("Enter LOCAL_BUSY");
2707 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2709 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2710 /* The SREJ_SENT state must be aborted if we are to
2711 * enter the LOCAL_BUSY state.
2713 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR while locally busy. */
2716 l2cap_send_ack(chan);
2719 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2720 BT_DBG("Exit LOCAL_BUSY");
2721 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2724 struct l2cap_ctrl local_control;
/* We previously sent an RNR: poll the peer with RR(P=1) and
 * wait in WAIT_F for the final response.
 */
2726 memset(&local_control, 0, sizeof(local_control));
2727 local_control.sframe = 1;
2728 local_control.super = L2CAP_SUPER_RR;
2729 local_control.poll = 1;
2730 local_control.reqseq = chan->buffer_seq;
2731 l2cap_send_sframe(chan, &local_control);
2733 chan->retry_count = 1;
2734 __set_monitor_timer(chan);
2735 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2738 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2739 l2cap_process_reqseq(chan, control->reqseq);
2741 case L2CAP_EV_EXPLICIT_POLL:
2742 l2cap_send_rr_or_rnr(chan, 1);
2743 chan->retry_count = 1;
2744 __set_monitor_timer(chan);
2745 __clear_ack_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2748 case L2CAP_EV_RETRANS_TO:
/* Retransmission timeout: poll with P=1 and enter WAIT_F. */
2749 l2cap_send_rr_or_rnr(chan, 1);
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2754 case L2CAP_EV_RECV_FBIT:
2755 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: we have polled the peer (P=1) and
 * are waiting for a response with the F-bit set.  Data is queued but not
 * sent; a received F-bit returns the machine to XMIT; monitor timeouts
 * re-poll up to max_tx times before disconnecting.
 */
2762 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2763 struct l2cap_ctrl *control,
2764 struct sk_buff_head *skbs, u8 event)
2766 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2770 case L2CAP_EV_DATA_REQUEST:
2771 if (chan->tx_send_head == NULL)
2772 chan->tx_send_head = skb_peek(skbs);
2773 /* Queue data, but don't send. */
2774 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2776 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2777 BT_DBG("Enter LOCAL_BUSY");
2778 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2780 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2781 /* The SREJ_SENT state must be aborted if we are to
2782 * enter the LOCAL_BUSY state.
2784 l2cap_abort_rx_srej_sent(chan);
2787 l2cap_send_ack(chan);
2790 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2791 BT_DBG("Exit LOCAL_BUSY");
2792 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2794 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2795 struct l2cap_ctrl local_control;
2796 memset(&local_control, 0, sizeof(local_control));
2797 local_control.sframe = 1;
2798 local_control.super = L2CAP_SUPER_RR;
2799 local_control.poll = 1;
2800 local_control.reqseq = chan->buffer_seq;
2801 l2cap_send_sframe(chan, &local_control);
2803 chan->retry_count = 1;
2804 __set_monitor_timer(chan);
2805 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2808 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2809 l2cap_process_reqseq(chan, control->reqseq);
2813 case L2CAP_EV_RECV_FBIT:
/* F-bit response to our poll: resume normal transmission. */
2814 if (control && control->final) {
2815 __clear_monitor_timer(chan);
2816 if (chan->unacked_frames > 0)
2817 __set_retrans_timer(chan);
2818 chan->retry_count = 0;
2819 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string "0x2.2%x" is malformed -- it prints
 * the literal text "0x2.2" then the value; should be "0x%2.2x".
 */
2820 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2823 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling; ignore. */
2826 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means poll forever; otherwise give up after max_tx. */
2827 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2828 l2cap_send_rr_or_rnr(chan, 1);
2829 __set_monitor_timer(chan);
2830 chan->retry_count++;
2832 l2cap_send_disconn_req(chan, ECONNABORTED);
/* ERTM TX state machine entry point: dispatch @event (with optional
 * control field and skb list) to the handler for the current tx_state.
 */
2840 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2841 struct sk_buff_head *skbs, u8 event)
2843 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2844 chan, control, skbs, event, chan->tx_state);
2846 switch (chan->tx_state) {
2847 case L2CAP_TX_STATE_XMIT:
2848 l2cap_tx_state_xmit(chan, control, skbs, event);
2850 case L2CAP_TX_STATE_WAIT_F:
2851 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq/F-bit acknowledgement into the TX
 * state machine.
 */
2859 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2860 struct l2cap_ctrl *control)
2862 BT_DBG("chan %p, control %p", chan, control);
2863 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame into the TX state machine. */
2866 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2867 struct l2cap_ctrl *control)
2869 BT_DBG("chan %p, control %p", chan, control);
2870 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2873 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every RAW-type channel on @conn except the
 * one the frame originated from (used for HCI monitoring/sniffing style
 * consumers).  Clone failures are silently skipped.
 */
2874 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2876 struct sk_buff *nskb;
2877 struct l2cap_chan *chan;
2879 BT_DBG("conn %p", conn);
2881 mutex_lock(&conn->chan_lock);
2883 list_for_each_entry(chan, &conn->chan_l, list) {
2884 if (chan->chan_type != L2CAP_CHAN_RAW)
2887 /* Don't send frame to the channel it came from */
2888 if (bt_cb(skb)->chan == chan)
2891 nskb = skb_clone(skb, GFP_KERNEL);
/* If the channel refuses the clone, free it (elided line). */
2894 if (chan->ops->recv(chan, nskb))
2898 mutex_unlock(&conn->chan_lock);
2901 /* ---- L2CAP signalling commands ---- */
/* Build an skb carrying one L2CAP signalling command:
 * [l2cap_hdr][l2cap_cmd_hdr][dlen bytes of @data], targeted at the
 * BR/EDR or LE signalling CID depending on link type.  If the command
 * exceeds conn->mtu the payload is split into continuation fragments
 * chained on frag_list (fragments carry no L2CAP header).
 * Returns the skb, or NULL on allocation failure / undersized MTU
 * (failure paths elided from this excerpt).
 */
2902 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2903 u8 ident, u16 dlen, void *data)
2905 struct sk_buff *skb, **frag;
2906 struct l2cap_cmd_hdr *cmd;
2907 struct l2cap_hdr *lh;
2910 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2911 conn, code, ident, dlen);
/* MTU must at least hold the two headers */
2913 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2916 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2917 count = min_t(unsigned int, conn->mtu, len);
2919 skb = bt_skb_alloc(count, GFP_KERNEL);
2923 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2924 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use a dedicated signalling channel ID */
2926 if (conn->hcon->type == LE_LINK)
2927 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2929 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2931 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2934 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever payload fits after the headers */
2937 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2938 memcpy(skb_put(skb, count), data, count);
2944 /* Continuation fragments (no L2CAP header) */
2945 frag = &skb_shinfo(skb)->frag_list;
2947 count = min_t(unsigned int, conn->mtu, len);
2949 *frag = bt_skb_alloc(count, GFP_KERNEL);
2953 memcpy(skb_put(*frag, count), data, count);
2958 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type/len and
 * its value widened into *val (1/2/4-byte options by value, larger
 * options as a pointer into the buffer).  Returns the total option
 * size consumed so callers can decrement their remaining length.
 * NOTE(review): opt->len comes from the remote peer and is not
 * validated here against the bytes actually remaining in the buffer;
 * callers bound the loop only by total length — confirm against
 * upstream hardening of this parser.
 */
2968 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2971 struct l2cap_conf_opt *opt = *ptr;
2974 len = L2CAP_CONF_OPT_SIZE + opt->len;
2982 *val = *((u8 *) opt->val);
2986 *val = get_unaligned_le16(opt->val);
2990 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer, not a copied value */
2994 *val = (unsigned long) opt->val;
2998 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and
 * advance *ptr past it.  For len 1/2/4 @val is the value itself;
 * otherwise @val is treated as a pointer to @len bytes to copy.
 * Caller must ensure the destination buffer has room.
 */
3002 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
3004 struct l2cap_conf_opt *opt = *ptr;
3006 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3013 *((u8 *) opt->val) = val;
3017 put_unaligned_le16(val, opt->val);
3021 put_unaligned_le32(val, opt->val);
/* Non-scalar option payloads are copied from the pointer in @val */
3025 memcpy(opt->val, (void *) val, len);
3029 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's local service parameters; fields depend on whether the
 * channel is in ERTM or Streaming mode.
 */
3032 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3034 struct l2cap_conf_efs efs;
3036 switch (chan->mode) {
3037 case L2CAP_MODE_ERTM:
3038 efs.id = chan->local_id;
3039 efs.stype = chan->local_stype;
3040 efs.msdu = cpu_to_le16(chan->local_msdu);
3041 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses the spec default access latency and flush timeout */
3042 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3043 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3046 case L2CAP_MODE_STREAMING:
/* Streaming mode advertises best-effort service */
3048 efs.stype = L2CAP_SERV_BESTEFFORT;
3049 efs.msdu = cpu_to_le16(chan->local_msdu);
3050 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3059 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3060 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if frames are pending
 * acknowledgement, send an RR/RNR S-frame.  Drops the channel
 * reference taken when the timer was armed.
 */
3063 static void l2cap_ack_timeout(struct work_struct *work)
3065 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3069 BT_DBG("chan %p", chan);
3071 l2cap_chan_lock(chan);
/* Number of received-but-unacked frames since the last ack sent */
3073 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3074 chan->last_acked_seq);
3077 l2cap_send_rr_or_rnr(chan, 0);
3079 l2cap_chan_unlock(chan);
3080 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: reset all sequence
 * counters, TX queue and AMP move state; for ERTM proper, also set up
 * the retransmission/monitor/ack work items, the SREJ queue and the
 * two sequence lists.  Returns 0 or a negative errno from sequence
 * list allocation (srej_list is freed if retrans_list setup fails).
 */
3083 int l2cap_ertm_init(struct l2cap_chan *chan)
3087 chan->next_tx_seq = 0;
3088 chan->expected_tx_seq = 0;
3089 chan->expected_ack_seq = 0;
3090 chan->unacked_frames = 0;
3091 chan->buffer_seq = 0;
3092 chan->frames_sent = 0;
3093 chan->last_acked_seq = 0;
3095 chan->sdu_last_frag = NULL;
3098 skb_queue_head_init(&chan->tx_q);
/* Start on the BR/EDR controller; AMP moves update these later */
3100 chan->local_amp_id = AMP_ID_BREDR;
3101 chan->move_id = AMP_ID_BREDR;
3102 chan->move_state = L2CAP_MOVE_STABLE;
3103 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* The remainder is ERTM-specific */
3105 if (chan->mode != L2CAP_MODE_ERTM)
3108 chan->rx_state = L2CAP_RX_STATE_RECV;
3109 chan->tx_state = L2CAP_TX_STATE_XMIT;
3111 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3112 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3113 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3115 skb_queue_head_init(&chan->srej_q);
3117 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3121 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed */
3123 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/Streaming only if the
 * remote's feature mask supports it, else fall back to Basic mode.
 */
3128 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3131 case L2CAP_MODE_STREAMING:
3132 case L2CAP_MODE_ERTM:
3133 if (l2cap_mode_supported(mode, remote_feat_mask))
3137 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) support
 * enabled and the remote advertising the feature. */
3141 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3143 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with high-speed (AMP) support
 * enabled and the remote advertising the feature. */
3146 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3148 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Fill in the RFC option's retransmission and monitor timeouts.  On
 * an AMP link they are derived from the controller's best-effort
 * flush timeout (clamped to 16 bits); on BR/EDR the spec defaults
 * are used.
 */
3151 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3152 struct l2cap_conf_rfc *rfc)
3154 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3155 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3157 /* Class 1 devices have must have ERTM timeouts
3158 * exceeding the Link Supervision Timeout. The
3159 * default Link Supervision Timeout for AMP
3160 * controllers is 10 seconds.
3162 * Class 1 devices use 0xffffffff for their
3163 * best-effort flush timeout, so the clamping logic
3164 * will result in a timeout that meets the above
3165 * requirement. ERTM timeouts are 16-bit values, so
3166 * the maximum timeout is 65.535 seconds.
3169 /* Convert timeout to milliseconds and round */
3170 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3172 /* This is the recommended formula for class 2 devices
3173 * that start ERTM timers when packets are sent to the
3176 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (clamp value elided in this excerpt) */
3178 if (ertm_to > 0xffff)
3181 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3182 rfc->monitor_timeout = rfc->retrans_timeout;
3184 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3185 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Configure the TX window: enable the extended control field when the
 * requested window exceeds the default and the link supports EWS,
 * otherwise clamp to the standard window.  ack_win mirrors tx_win.
 */
3189 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3191 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3192 __l2cap_ews_supported(chan->conn)) {
3193 /* use extended control field */
3194 set_bit(FLAG_EXT_CTRL, &chan->flags);
3195 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3197 chan->tx_win = min_t(u16, chan->tx_win,
3198 L2CAP_DEFAULT_TX_WINDOW);
3199 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3201 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data:
 * selects the channel mode (possibly downgrading via
 * l2cap_select_mode), then appends MTU, RFC, and — when negotiated —
 * EFS, EWS and FCS options appropriate for Basic/ERTM/Streaming mode.
 * Returns the total request length (return elided from this excerpt).
 */
3204 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3206 struct l2cap_conf_req *req = data;
3207 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3208 void *ptr = req->data;
3211 BT_DBG("chan %p", chan);
/* Only pick/downgrade the mode on the first config exchange */
3213 if (chan->num_conf_req || chan->num_conf_rsp)
3216 switch (chan->mode) {
3217 case L2CAP_MODE_STREAMING:
3218 case L2CAP_MODE_ERTM:
3219 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3222 if (__l2cap_efs_supported(chan->conn))
3223 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3227 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the default */
3232 if (chan->imtu != L2CAP_DEFAULT_MTU)
3233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3235 switch (chan->mode) {
3236 case L2CAP_MODE_BASIC:
/* RFC option is pointless if the peer supports neither mode */
3237 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3238 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3241 rfc.mode = L2CAP_MODE_BASIC;
3243 rfc.max_transmit = 0;
3244 rfc.retrans_timeout = 0;
3245 rfc.monitor_timeout = 0;
3246 rfc.max_pdu_size = 0;
3248 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3249 (unsigned long) &rfc);
3252 case L2CAP_MODE_ERTM:
3253 rfc.mode = L2CAP_MODE_ERTM;
3254 rfc.max_transmit = chan->max_tx;
3256 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU must fit in the link MTU after ERTM framing overhead */
3258 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3259 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3261 rfc.max_pdu_size = cpu_to_le16(size);
3263 l2cap_txwin_setup(chan);
3265 rfc.txwin_size = min_t(u16, chan->tx_win,
3266 L2CAP_DEFAULT_TX_WINDOW);
3268 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3269 (unsigned long) &rfc);
3271 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3272 l2cap_add_opt_efs(&ptr, chan);
3274 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3275 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Offer to disable FCS if we don't want it and the peer allows it */
3278 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3279 if (chan->fcs == L2CAP_FCS_NONE ||
3280 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3281 chan->fcs = L2CAP_FCS_NONE;
3282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3287 case L2CAP_MODE_STREAMING:
3288 l2cap_txwin_setup(chan);
3289 rfc.mode = L2CAP_MODE_STREAMING;
3291 rfc.max_transmit = 0;
3292 rfc.retrans_timeout = 0;
3293 rfc.monitor_timeout = 0;
3295 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3296 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3298 rfc.max_pdu_size = cpu_to_le16(size);
3300 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3301 (unsigned long) &rfc);
3303 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3304 l2cap_add_opt_efs(&ptr, chan);
3306 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3307 if (chan->fcs == L2CAP_FCS_NONE ||
3308 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3309 chan->fcs = L2CAP_FCS_NONE;
3310 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3316 req->dcid = cpu_to_le16(chan->dcid);
3317 req->flags = cpu_to_le16(0);
/* Parse the accumulated remote Configuration Request (chan->conf_req,
 * chan->conf_len) and build our response into @data.  First pass
 * walks all options recording MTU/flush/RFC/FCS/EFS/EWS values; then
 * the channel mode is reconciled with the remote's RFC, output
 * options are generated, and the result code (SUCCESS / UNACCEPT /
 * UNKNOWN / PENDING) is filled in.  Returns the response length or
 * -ECONNREFUSED when negotiation cannot proceed.
 */
3324 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3325 struct l2cap_conf_rsp *rsp = data;
3326 void *ptr = rsp->data;
3327 void *req = chan->conf_req;
3328 int len = chan->conf_len;
3330 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3331 struct l2cap_conf_efs efs;
3333 u16 mtu = L2CAP_DEFAULT_MTU;
3334 u16 result = L2CAP_CONF_SUCCESS;
3337 BT_DBG("chan %p", chan);
/* First pass: collect all options the remote sent */
3339 while (len >= L2CAP_CONF_OPT_SIZE) {
3340 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3342 hint = type & L2CAP_CONF_HINT;
3343 type &= L2CAP_CONF_MASK;
3346 case L2CAP_CONF_MTU:
3350 case L2CAP_CONF_FLUSH_TO:
3351 chan->flush_to = val;
3354 case L2CAP_CONF_QOS:
3357 case L2CAP_CONF_RFC:
/* Only copy when the length matches; bad lengths are ignored */
3358 if (olen == sizeof(rfc))
3359 memcpy(&rfc, (void *) val, olen);
3362 case L2CAP_CONF_FCS:
3363 if (val == L2CAP_FCS_NONE)
3364 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3367 case L2CAP_CONF_EFS:
3369 if (olen == sizeof(efs))
3370 memcpy(&efs, (void *) val, olen);
3373 case L2CAP_CONF_EWS:
/* EWS requires high-speed support on this connection */
3374 if (!chan->conn->hs_enabled)
3375 return -ECONNREFUSED;
3377 set_bit(FLAG_EXT_CTRL, &chan->flags);
3378 set_bit(CONF_EWS_RECV, &chan->conf_state);
3379 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3380 chan->remote_tx_win = val;
/* Unknown non-hint options are echoed back as UNKNOWN */
3387 result = L2CAP_CONF_UNKNOWN;
3388 *((u8 *) ptr++) = type;
/* Mode reconciliation happens only on the first exchange */
3393 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3396 switch (chan->mode) {
3397 case L2CAP_MODE_STREAMING:
3398 case L2CAP_MODE_ERTM:
3399 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3400 chan->mode = l2cap_select_mode(rfc.mode,
3401 chan->conn->feat_mask);
3406 if (__l2cap_efs_supported(chan->conn))
3407 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3409 return -ECONNREFUSED;
3412 if (chan->mode != rfc.mode)
3413 return -ECONNREFUSED;
/* Mode mismatch: propose our mode; give up after one retry */
3419 if (chan->mode != rfc.mode) {
3420 result = L2CAP_CONF_UNACCEPT;
3421 rfc.mode = chan->mode;
3423 if (chan->num_conf_rsp == 1)
3424 return -ECONNREFUSED;
3426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3427 (unsigned long) &rfc);
3430 if (result == L2CAP_CONF_SUCCESS) {
3431 /* Configure output options and let the other side know
3432 * which ones we don't like. */
3434 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3435 result = L2CAP_CONF_UNACCEPT;
3438 set_bit(CONF_MTU_DONE, &chan->conf_state);
3440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local type */
3443 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3444 efs.stype != L2CAP_SERV_NOTRAFIC &&
3445 efs.stype != chan->local_stype) {
3447 result = L2CAP_CONF_UNACCEPT;
3449 if (chan->num_conf_req >= 1)
3450 return -ECONNREFUSED;
3452 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3454 (unsigned long) &efs);
3456 /* Send PENDING Conf Rsp */
3457 result = L2CAP_CONF_PENDING;
3458 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3463 case L2CAP_MODE_BASIC:
3464 chan->fcs = L2CAP_FCS_NONE;
3465 set_bit(CONF_MODE_DONE, &chan->conf_state);
3468 case L2CAP_MODE_ERTM:
/* EWS (if received) already set remote_tx_win; else use RFC's */
3469 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3470 chan->remote_tx_win = rfc.txwin_size;
3472 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3474 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote PDU size to what fits our link MTU */
3476 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3477 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3478 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3479 rfc.max_pdu_size = cpu_to_le16(size);
3480 chan->remote_mps = size;
3482 __l2cap_set_ertm_timeouts(chan, &rfc);
3484 set_bit(CONF_MODE_DONE, &chan->conf_state);
3486 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3487 sizeof(rfc), (unsigned long) &rfc);
3489 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3490 chan->remote_id = efs.id;
3491 chan->remote_stype = efs.stype;
3492 chan->remote_msdu = le16_to_cpu(efs.msdu);
3493 chan->remote_flush_to =
3494 le32_to_cpu(efs.flush_to);
3495 chan->remote_acc_lat =
3496 le32_to_cpu(efs.acc_lat);
3497 chan->remote_sdu_itime =
3498 le32_to_cpu(efs.sdu_itime);
3499 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3501 (unsigned long) &efs);
3505 case L2CAP_MODE_STREAMING:
3506 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3507 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3508 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3509 rfc.max_pdu_size = cpu_to_le16(size);
3510 chan->remote_mps = size;
3512 set_bit(CONF_MODE_DONE, &chan->conf_state);
3514 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3515 (unsigned long) &rfc);
/* Any other mode is rejected with an empty RFC carrying our mode */
3520 result = L2CAP_CONF_UNACCEPT;
3522 memset(&rfc, 0, sizeof(rfc));
3523 rfc.mode = chan->mode;
3526 if (result == L2CAP_CONF_SUCCESS)
3527 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3529 rsp->scid = cpu_to_le16(chan->dcid);
3530 rsp->result = cpu_to_le16(result);
3531 rsp->flags = cpu_to_le16(0);
/* Parse a remote Configuration Response and build the follow-up
 * Configuration Request into @data, adjusting *result as needed.
 * Echoes back MTU/flush/RFC/EWS/EFS options, then applies accepted
 * ERTM/Streaming parameters to the channel.  Returns the new request
 * length or -ECONNREFUSED on incompatible mode / EFS service type.
 * NOTE(review): this function is the area hardened upstream against
 * response-buffer overflow (BlueBorne, CVE-2017-1000251) — the
 * output-space checks are not visible in this excerpt; confirm they
 * are present in the full file.
 */
3536 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3537 void *data, u16 *result)
3539 struct l2cap_conf_req *req = data;
3540 void *ptr = req->data;
3543 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3544 struct l2cap_conf_efs efs;
3546 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3548 while (len >= L2CAP_CONF_OPT_SIZE) {
3549 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3552 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: refuse, use minimum */
3553 if (val < L2CAP_DEFAULT_MIN_MTU) {
3554 *result = L2CAP_CONF_UNACCEPT;
3555 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3558 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3561 case L2CAP_CONF_FLUSH_TO:
3562 chan->flush_to = val;
3563 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3567 case L2CAP_CONF_RFC:
3568 if (olen == sizeof(rfc))
3569 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not switch modes mid-negotiation */
3571 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3572 rfc.mode != chan->mode)
3573 return -ECONNREFUSED;
3577 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3578 sizeof(rfc), (unsigned long) &rfc);
3581 case L2CAP_CONF_EWS:
3582 chan->ack_win = min_t(u16, val, chan->ack_win);
3583 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3587 case L2CAP_CONF_EFS:
3588 if (olen == sizeof(efs))
3589 memcpy(&efs, (void *)val, olen);
3591 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3592 efs.stype != L2CAP_SERV_NOTRAFIC &&
3593 efs.stype != chan->local_stype)
3594 return -ECONNREFUSED;
3596 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3597 (unsigned long) &efs);
3600 case L2CAP_CONF_FCS:
3601 if (*result == L2CAP_CONF_PENDING)
3602 if (val == L2CAP_FCS_NONE)
3603 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to another mode */
3609 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3610 return -ECONNREFUSED;
3612 chan->mode = rfc.mode;
/* Apply the (possibly pending) accepted parameters */
3614 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3616 case L2CAP_MODE_ERTM:
3617 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3618 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3619 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3620 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3621 chan->ack_win = min_t(u16, chan->ack_win,
3624 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3625 chan->local_msdu = le16_to_cpu(efs.msdu);
3626 chan->local_sdu_itime =
3627 le32_to_cpu(efs.sdu_itime);
3628 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3629 chan->local_flush_to =
3630 le32_to_cpu(efs.flush_to);
3634 case L2CAP_MODE_STREAMING:
3635 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3639 req->dcid = cpu_to_le16(chan->dcid);
3640 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response header (scid/result/flags)
 * into @data; returns the response length (return elided here).
 */
3645 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3646 u16 result, u16 flags)
3648 struct l2cap_conf_rsp *rsp = data;
3649 void *ptr = rsp->data;
3651 BT_DBG("chan %p", chan);
3653 rsp->scid = cpu_to_le16(chan->dcid);
3654 rsp->result = cpu_to_le16(result);
3655 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * that was accepted after DEFER_SETUP, using the ident saved when the
 * request arrived.
 */
3660 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3662 struct l2cap_le_conn_rsp rsp;
3663 struct l2cap_conn *conn = chan->conn;
3665 BT_DBG("chan %p", chan);
3667 rsp.dcid = cpu_to_le16(chan->scid);
3668 rsp.mtu = cpu_to_le16(chan->imtu);
3669 rsp.mps = cpu_to_le16(chan->mps);
3670 rsp.credits = cpu_to_le16(chan->rx_credits);
3671 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3673 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or AMP Create Channel)
 * Response for an accepted channel, then kick off configuration by
 * sending our Configuration Request if not already sent.
 */
3677 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3679 struct l2cap_conn_rsp rsp;
3680 struct l2cap_conn *conn = chan->conn;
3684 rsp.scid = cpu_to_le16(chan->dcid);
3685 rsp.dcid = cpu_to_le16(chan->scid);
3686 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3687 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP */
3690 rsp_code = L2CAP_CREATE_CHAN_RSP;
3692 rsp_code = L2CAP_CONN_RSP;
3694 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3696 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once */
3698 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3701 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3702 l2cap_build_conf_req(chan, buf), buf);
3703 chan->num_conf_req++;
/* Extract RFC (and EWS) parameters from a successful Configuration
 * Response and apply them to the channel.  Defaults are pre-seeded so
 * a peer that omits the options still yields sane ERTM settings.
 * No-op for Basic mode channels.
 */
3706 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3710 /* Use sane default values in case a misbehaving remote device
3711 * did not send an RFC or extended window size option.
3713 u16 txwin_ext = chan->ack_win;
3714 struct l2cap_conf_rfc rfc = {
3716 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3717 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3718 .max_pdu_size = cpu_to_le16(chan->imtu),
3719 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3722 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3724 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3727 while (len >= L2CAP_CONF_OPT_SIZE) {
3728 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3731 case L2CAP_CONF_RFC:
3732 if (olen == sizeof(rfc))
3733 memcpy(&rfc, (void *)val, olen);
3735 case L2CAP_CONF_EWS:
3742 case L2CAP_MODE_ERTM:
3743 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3744 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3745 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS window; otherwise the RFC's */
3746 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3747 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3749 chan->ack_win = min_t(u16, chan->ack_win,
3752 case L2CAP_MODE_STREAMING:
3753 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), cancel the info timer, mark
 * the feature-mask exchange done and resume connection setup.
 */
3757 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3758 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3761 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (short) commands */
3763 if (cmd_len < sizeof(*rej))
3766 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3769 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3770 cmd->ident == conn->info_ident) {
3771 cancel_delayed_work(&conn->info_timer);
3773 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3774 conn->info_ident = 0;
3776 l2cap_conn_start(conn);
3782 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3783 struct l2cap_cmd_hdr *cmd,
3784 u8 *data, u8 rsp_code, u8 amp_id)
3786 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3787 struct l2cap_conn_rsp rsp;
3788 struct l2cap_chan *chan = NULL, *pchan;
3789 int result, status = L2CAP_CS_NO_INFO;
3791 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3792 __le16 psm = req->psm;
3794 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3796 /* Check if we have socket listening on psm */
3797 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3798 &conn->hcon->dst, ACL_LINK);
3800 result = L2CAP_CR_BAD_PSM;
3804 mutex_lock(&conn->chan_lock);
3805 l2cap_chan_lock(pchan);
3807 /* Check if the ACL is secure enough (if not SDP) */
3808 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3809 !hci_conn_check_link_mode(conn->hcon)) {
3810 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3811 result = L2CAP_CR_SEC_BLOCK;
3815 result = L2CAP_CR_NO_MEM;
3817 /* Check if we already have channel with that dcid */
3818 if (__l2cap_get_chan_by_dcid(conn, scid))
3821 chan = pchan->ops->new_connection(pchan);
3825 /* For certain devices (ex: HID mouse), support for authentication,
3826 * pairing and bonding is optional. For such devices, inorder to avoid
3827 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3828 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3830 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3832 bacpy(&chan->src, &conn->hcon->src);
3833 bacpy(&chan->dst, &conn->hcon->dst);
3834 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3835 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3838 chan->local_amp_id = amp_id;
3840 __l2cap_chan_add(conn, chan);
3844 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3846 chan->ident = cmd->ident;
3848 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3849 if (l2cap_chan_check_security(chan)) {
3850 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3851 l2cap_state_change(chan, BT_CONNECT2);
3852 result = L2CAP_CR_PEND;
3853 status = L2CAP_CS_AUTHOR_PEND;
3854 chan->ops->defer(chan);
3856 /* Force pending result for AMP controllers.
3857 * The connection will succeed after the
3858 * physical link is up.
3860 if (amp_id == AMP_ID_BREDR) {
3861 l2cap_state_change(chan, BT_CONFIG);
3862 result = L2CAP_CR_SUCCESS;
3864 l2cap_state_change(chan, BT_CONNECT2);
3865 result = L2CAP_CR_PEND;
3867 status = L2CAP_CS_NO_INFO;
3870 l2cap_state_change(chan, BT_CONNECT2);
3871 result = L2CAP_CR_PEND;
3872 status = L2CAP_CS_AUTHEN_PEND;
3875 l2cap_state_change(chan, BT_CONNECT2);
3876 result = L2CAP_CR_PEND;
3877 status = L2CAP_CS_NO_INFO;
3881 l2cap_chan_unlock(pchan);
3882 mutex_unlock(&conn->chan_lock);
3885 rsp.scid = cpu_to_le16(scid);
3886 rsp.dcid = cpu_to_le16(dcid);
3887 rsp.result = cpu_to_le16(result);
3888 rsp.status = cpu_to_le16(status);
3889 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3891 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3892 struct l2cap_info_req info;
3893 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3895 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3896 conn->info_ident = l2cap_get_ident(conn);
3898 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3900 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3901 sizeof(info), &info);
3904 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3905 result == L2CAP_CR_SUCCESS) {
3907 set_bit(CONF_REQ_SENT, &chan->conf_state);
3908 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3909 l2cap_build_conf_req(chan, buf), buf);
3910 chan->num_conf_req++;
/* Signalling entry point for a Connection Request: notify the
 * management interface of the (first) connection and delegate to
 * l2cap_connect() with the BR/EDR response code.
 */
3916 static int l2cap_connect_req(struct l2cap_conn *conn,
3917 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3919 struct hci_dev *hdev = conn->hcon->hdev;
3920 struct hci_conn *hcon = conn->hcon;
/* Drop short/malformed requests */
3922 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device-connected to mgmt only once per ACL */
3926 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3927 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3928 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3929 hcon->dst_type, 0, NULL, 0,
3931 hci_dev_unlock(hdev);
3933 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection / Create Channel Response: locate the channel
 * by scid (or by ident while still unassigned), then on SUCCESS move
 * to BT_CONFIG and send our Configuration Request; on PEND just mark
 * the pending flag; otherwise tear the channel down.
 */
3937 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3938 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3941 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3942 u16 scid, dcid, result, status;
3943 struct l2cap_chan *chan;
3947 if (cmd_len < sizeof(*rsp))
3950 scid = __le16_to_cpu(rsp->scid);
3951 dcid = __le16_to_cpu(rsp->dcid);
3952 result = __le16_to_cpu(rsp->result);
3953 status = __le16_to_cpu(rsp->status);
3955 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3956 dcid, scid, result, status);
3958 mutex_lock(&conn->chan_lock);
/* scid of 0 means the response is matched by ident instead */
3961 chan = __l2cap_get_chan_by_scid(conn, scid);
3967 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3976 l2cap_chan_lock(chan);
3979 case L2CAP_CR_SUCCESS:
3980 l2cap_state_change(chan, BT_CONFIG);
3983 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate config request */
3985 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3988 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3989 l2cap_build_conf_req(chan, req), req);
3990 chan->num_conf_req++;
3994 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any error result: delete the channel */
3998 l2cap_chan_del(chan, ECONNREFUSED);
4002 l2cap_chan_unlock(chan);
4005 mutex_unlock(&conn->chan_lock);
4010 static inline void set_default_fcs(struct l2cap_chan *chan)
4012 /* FCS is enabled only in ERTM or streaming mode, if one or both
4015 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4016 chan->fcs = L2CAP_FCS_NONE;
/* Default to CRC16 unless the peer asked for no FCS */
4017 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4018 chan->fcs = L2CAP_FCS_CRC16;
/* Complete an EFS-pending configuration: clear the local-pending
 * flag, mark output done, and send a SUCCESS Configuration Response
 * with the given flags.
 */
4021 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4022 u8 ident, u16 flags)
4024 struct l2cap_conn *conn = chan->conn;
4026 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4029 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4030 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4032 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4033 l2cap_build_conf_rsp(chan, data,
4034 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * source/destination CIDs the peer used.
 */
4037 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4040 struct l2cap_cmd_rej_cid rej;
4042 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4043 rej.scid = __cpu_to_le16(scid);
4044 rej.dcid = __cpu_to_le16(dcid);
4046 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request.  Accumulates option data
 * across continuation packets in chan->conf_req (rejecting overflow),
 * and once complete runs l2cap_parse_conf_req to build and send the
 * response.  When both config directions are done, initialises ERTM
 * state and marks the channel ready.
 */
4049 static inline int l2cap_config_req(struct l2cap_conn *conn,
4050 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4053 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4056 struct l2cap_chan *chan;
4059 if (cmd_len < sizeof(*req))
4062 dcid = __le16_to_cpu(req->dcid);
4063 flags = __le16_to_cpu(req->flags);
4065 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4067 chan = l2cap_get_chan_by_scid(conn, dcid);
4069 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in CONFIG / CONNECT2 states */
4073 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4074 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4079 /* Reject if config buffer is too small. */
4080 len = cmd_len - sizeof(*req);
4081 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4082 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4083 l2cap_build_conf_rsp(chan, rsp,
4084 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel buffer */
4089 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4090 chan->conf_len += len;
4092 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4093 /* Incomplete config. Send empty response. */
4094 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4095 l2cap_build_conf_rsp(chan, rsp,
4096 L2CAP_CONF_SUCCESS, flags), rsp);
4100 /* Complete config. */
4101 len = l2cap_parse_conf_req(chan, rsp);
4103 l2cap_send_disconn_req(chan, ECONNRESET);
4107 chan->ident = cmd->ident;
4108 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4109 chan->num_conf_rsp++;
4111 /* Reset config buffer. */
4114 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4117 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4118 set_default_fcs(chan);
4120 if (chan->mode == L2CAP_MODE_ERTM ||
4121 chan->mode == L2CAP_MODE_STREAMING)
4122 err = l2cap_ertm_init(chan);
4125 l2cap_send_disconn_req(chan, -err);
4127 l2cap_chan_ready(chan);
/* Make sure our own config request has gone out */
4132 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4134 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4135 l2cap_build_conf_req(chan, buf), buf);
4136 chan->num_conf_req++;
4139 /* Got Conf Rsp PENDING from remote side and asume we sent
4140 Conf Rsp PENDING in the code above */
4141 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4142 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4144 /* check compatibility */
4146 /* Send rsp for BR/EDR channel */
4148 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4150 chan->ident = cmd->ident;
4154 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  On SUCCESS, apply RFC
 * parameters; on PENDING, complete the EFS handshake (possibly via an
 * AMP logical link); on UNACCEPT, retry with a corrected request up
 * to L2CAP_CONF_MAX_CONF_RSP times (note the visible output-size
 * guard before re-parsing); anything else disconnects.  When both
 * directions are done, initialise ERTM and mark the channel ready.
 */
4158 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4159 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4162 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4163 u16 scid, flags, result;
4164 struct l2cap_chan *chan;
4165 int len = cmd_len - sizeof(*rsp);
4168 if (cmd_len < sizeof(*rsp))
4171 scid = __le16_to_cpu(rsp->scid);
4172 flags = __le16_to_cpu(rsp->flags);
4173 result = __le16_to_cpu(rsp->result);
4175 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4178 chan = l2cap_get_chan_by_scid(conn, scid);
4183 case L2CAP_CONF_SUCCESS:
4184 l2cap_conf_rfc_get(chan, rsp->data, len);
4185 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4188 case L2CAP_CONF_PENDING:
4189 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4191 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4194 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4197 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR answers directly; AMP waits for the logical link */
4201 if (!chan->hs_hcon) {
4202 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4205 if (l2cap_check_efs(chan)) {
4206 amp_create_logical_link(chan);
4207 chan->ident = cmd->ident;
4213 case L2CAP_CONF_UNACCEPT:
4214 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard: response options must fit in the request buffer */
4217 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4218 l2cap_send_disconn_req(chan, ECONNRESET);
4222 /* throw out any old stored conf requests */
4223 result = L2CAP_CONF_SUCCESS;
4224 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4227 l2cap_send_disconn_req(chan, ECONNRESET);
4231 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4232 L2CAP_CONF_REQ, len, req);
4233 chan->num_conf_req++;
4234 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: fail the channel and disconnect */
4240 l2cap_chan_set_err(chan, ECONNRESET);
4242 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4243 l2cap_send_disconn_req(chan, ECONNRESET);
4247 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4250 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4252 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4253 set_default_fcs(chan);
4255 if (chan->mode == L2CAP_MODE_ERTM ||
4256 chan->mode == L2CAP_MODE_STREAMING)
4257 err = l2cap_ertm_init(chan);
4260 l2cap_send_disconn_req(chan, -err);
4262 l2cap_chan_ready(chan);
4266 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request: find our channel by the peer's
 * dcid, echo a Disconnection Response, then shut down and delete the
 * channel.  A reference is held across the unlock so ops->close can
 * run safely outside the channel lock.
 */
4270 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4271 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4274 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4275 struct l2cap_disconn_rsp rsp;
4277 struct l2cap_chan *chan;
4279 if (cmd_len != sizeof(*req))
4282 scid = __le16_to_cpu(req->scid);
4283 dcid = __le16_to_cpu(req->dcid);
4285 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4287 mutex_lock(&conn->chan_lock);
4289 chan = __l2cap_get_chan_by_scid(conn, dcid);
4291 mutex_unlock(&conn->chan_lock);
4292 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4296 l2cap_chan_lock(chan);
4298 rsp.dcid = cpu_to_le16(chan->scid);
4299 rsp.scid = cpu_to_le16(chan->dcid);
4300 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4302 chan->ops->set_shutdown(chan);
/* Hold a ref so the chan survives until ops->close below */
4304 l2cap_chan_hold(chan);
4305 l2cap_chan_del(chan, ECONNRESET);
4307 l2cap_chan_unlock(chan);
4309 chan->ops->close(chan);
4310 l2cap_chan_put(chan);
4312 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnect Response: the peer has acknowledged
 * our disconnect, so remove the channel (error 0) and close it.
 */
4317 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4318 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4321 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4323 struct l2cap_chan *chan;
/* Reject malformed responses with a wrong payload size. */
4325 if (cmd_len != sizeof(*rsp))
4328 scid = __le16_to_cpu(rsp->scid);
4329 dcid = __le16_to_cpu(rsp->dcid);
4331 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4333 mutex_lock(&conn->chan_lock);
4335 chan = __l2cap_get_chan_by_scid(conn, scid);
4337 mutex_unlock(&conn->chan_lock);
4341 l2cap_chan_lock(chan);
/* Hold a ref so the channel survives chan_del until ops->close runs. */
4343 l2cap_chan_hold(chan);
4344 l2cap_chan_del(chan, 0);
4346 l2cap_chan_unlock(chan);
4348 chan->ops->close(chan);
4349 l2cap_chan_put(chan);
4351 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Supported query types are
 * the feature mask and the fixed-channel map; anything else gets a
 * NOTSUPP result.
 */
4356 static inline int l2cap_information_req(struct l2cap_conn *conn,
4357 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4360 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4363 if (cmd_len != sizeof(*req))
4366 type = __le16_to_cpu(req->type);
4368 BT_DBG("type 0x%4.4x", type);
4370 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static feature mask and add runtime-dependent bits. */
4372 u32 feat_mask = l2cap_feat_mask;
4373 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4374 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4375 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4377 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised when high speed is on. */
4379 if (conn->hs_enabled)
4380 feat_mask |= L2CAP_FEAT_EXT_FLOW
4381 | L2CAP_FEAT_EXT_WINDOW;
4383 put_unaligned_le32(feat_mask, rsp->data);
4384 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4386 } else if (type == L2CAP_IT_FIXED_CHAN) {
4388 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is enabled. */
4390 if (conn->hs_enabled)
4391 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4393 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4395 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4396 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4397 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4398 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply NOTSUPP with an empty data field. */
4401 struct l2cap_info_rsp rsp;
4402 rsp.type = cpu_to_le16(type);
4403 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4404 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response during the connection
 * information exchange. On a feature-mask reply that advertises fixed
 * channels, chain a FIXED_CHAN query; otherwise mark the info exchange
 * done and kick off pending channel setup via l2cap_conn_start().
 */
4411 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4412 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4415 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
/* Variable-length payload: only require the fixed header to be present. */
4418 if (cmd_len < sizeof(*rsp))
4421 type = __le16_to_cpu(rsp->type);
4422 result = __le16_to_cpu(rsp->result);
4424 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4426 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop stale/duplicate responses: ident must match the outstanding
 * request and the exchange must not already be finished. */
4427 if (cmd->ident != conn->info_ident ||
4428 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4431 cancel_delayed_work(&conn->info_timer);
4433 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused the query: treat the exchange as complete anyway. */
4434 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4435 conn->info_ident = 0;
4437 l2cap_conn_start(conn);
4443 case L2CAP_IT_FEAT_MASK:
4444 conn->feat_mask = get_unaligned_le32(rsp->data);
4446 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask for its fixed-channel map next. */
4447 struct l2cap_info_req req;
4448 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4450 conn->info_ident = l2cap_get_ident(conn);
4452 l2cap_send_cmd(conn, conn->info_ident,
4453 L2CAP_INFO_REQ, sizeof(req), &req);
4455 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4456 conn->info_ident = 0;
4458 l2cap_conn_start(conn);
4462 case L2CAP_IT_FIXED_CHAN:
4463 conn->fixed_chan_mask = rsp->data[0];
4464 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4465 conn->info_ident = 0;
4467 l2cap_conn_start(conn);
/* Handle an incoming AMP Create Channel Request. amp_id 0 (AMP_ID_BREDR)
 * is handled as an ordinary BR/EDR connect; a non-zero amp_id must name a
 * powered-up local AMP controller, otherwise a BAD_AMP response is sent.
 */
4474 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4475 struct l2cap_cmd_hdr *cmd,
4476 u16 cmd_len, void *data)
4478 struct l2cap_create_chan_req *req = data;
4479 struct l2cap_create_chan_rsp rsp;
4480 struct l2cap_chan *chan;
4481 struct hci_dev *hdev;
4484 if (cmd_len != sizeof(*req))
/* High speed must be enabled to accept Create Channel at all. */
4487 if (!conn->hs_enabled)
4490 psm = le16_to_cpu(req->psm);
4491 scid = le16_to_cpu(req->scid);
4493 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4495 /* For controller id 0 make BR/EDR connection */
4496 if (req->amp_id == AMP_ID_BREDR) {
4497 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4502 /* Validate AMP controller id */
4503 hdev = hci_dev_get(req->amp_id);
/* The named controller must be an AMP device and currently up. */
4507 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4512 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4515 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4516 struct hci_conn *hs_hcon;
4518 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No AMP physical link to the peer: reject with invalid CID. */
4522 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4527 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4529 mgr->bredr_chan = chan;
4530 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP links; MTU follows the AMP block MTU. */
4531 chan->fcs = L2CAP_FCS_NONE;
4532 conn->mtu = hdev->block_mtu;
/* Error path: reply BAD_AMP for an unusable controller id. */
4541 rsp.scid = cpu_to_le16(scid);
4542 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4543 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4545 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for @chan toward controller @dest_amp_id,
 * remembering the signalling ident and arming the move timeout timer.
 */
4551 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4553 struct l2cap_move_chan_req req;
4556 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
/* Store the ident so the matching response can be paired with chan. */
4558 ident = l2cap_get_ident(chan->conn);
4559 chan->ident = ident;
4561 req.icid = cpu_to_le16(chan->scid);
4562 req.dest_amp_id = dest_amp_id;
4564 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4567 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with @result, reusing the ident saved from
 * the peer's Move Channel Request.
 */
4570 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4572 struct l2cap_move_chan_rsp rsp;
4574 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4576 rsp.icid = cpu_to_le16(chan->dcid);
4577 rsp.result = cpu_to_le16(result);
4579 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirm with @result under a fresh ident, and arm
 * the move timeout while waiting for the Confirm Response.
 */
4583 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4585 struct l2cap_move_chan_cfm cfm;
4587 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4589 chan->ident = l2cap_get_ident(chan->conn);
4591 cfm.icid = cpu_to_le16(chan->scid);
4592 cfm.result = cpu_to_le16(result);
4594 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4597 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid when no
 * channel object is available (e.g. the channel could not be located).
 */
4600 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4602 struct l2cap_move_chan_cfm cfm;
4604 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4606 cfm.icid = cpu_to_le16(icid);
4607 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4609 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirm Response echoing @icid under the ident of
 * the received Confirm.
 */
4613 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4616 struct l2cap_move_chan_cfm_rsp rsp;
4618 BT_DBG("icid 0x%4.4x", icid);
4620 rsp.icid = cpu_to_le16(icid);
4621 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed logical link; the actual link
 * release is still a placeholder.
 */
4624 static void __release_logical_link(struct l2cap_chan *chan)
4626 chan->hs_hchan = NULL;
4627 chan->hs_hcon = NULL;
4629 /* Placeholder - release the logical link */
/* Recover from a failed logical-link setup. For a channel still being
 * created, disconnect; for a channel mid-move, unwind according to our
 * move role.
 */
4632 static void l2cap_logical_fail(struct l2cap_chan *chan)
4634 /* Logical link setup failed */
4635 if (chan->state != BT_CONNECTED) {
4636 /* Create channel failure, disconnect */
4637 l2cap_send_disconn_req(chan, ECONNRESET);
4641 switch (chan->move_role) {
4642 case L2CAP_MOVE_ROLE_RESPONDER:
/* As responder, finish the move locally and tell the peer NOT_SUPP. */
4643 l2cap_move_done(chan);
4644 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4646 case L2CAP_MOVE_ROLE_INITIATOR:
4647 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4648 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4649 /* Remote has only sent pending or
4650 * success responses, clean up
4652 l2cap_move_done(chan);
4655 /* Other amp move states imply that the move
4656 * has already aborted
4658 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the logical link is up: attach the
 * hci_chan, send the deferred EFS config response, and if configuration
 * is already done on the input side, initialize ERTM and mark the
 * channel ready.
 */
4663 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4664 struct hci_chan *hchan)
4666 struct l2cap_conf_rsp rsp;
4668 chan->hs_hchan = hchan;
4669 chan->hs_hcon->l2cap_data = chan->conn;
/* Config response was deferred until the logical link came up. */
4671 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4673 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4676 set_default_fcs(chan);
4678 err = l2cap_ertm_init(chan);
/* ERTM init failure tears the channel down with the negated error. */
4680 l2cap_send_disconn_req(chan, -err);
4682 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the logical link for a
 * move has come up; the next step depends on move_state and move_role.
 */
4686 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4687 struct hci_chan *hchan)
4689 chan->hs_hcon = hchan->conn;
4690 chan->hs_hcon->l2cap_data = chan->conn;
4692 BT_DBG("move_state %d", chan->move_state);
4694 switch (chan->move_state) {
4695 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4696 /* Move confirm will be sent after a success
4697 * response is received
4699 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4701 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* If local rx is busy, park until the busy condition clears. */
4702 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4703 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4704 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4705 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4706 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4707 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4708 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4709 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4713 /* Move was not in expected state, free the channel */
4714 __release_logical_link(chan);
4716 chan->move_state = L2CAP_MOVE_STABLE;
4720 /* Call with chan locked */
/* Logical-link confirmation callback: on failure clean up via
 * l2cap_logical_fail and drop the link; on success finish either channel
 * creation (channel not yet connected) or a channel move.
 */
4721 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4724 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4727 l2cap_logical_fail(chan);
4728 __release_logical_link(chan);
4732 if (chan->state != BT_CONNECTED) {
4733 /* Ignore logical link if channel is on BR/EDR */
4734 if (chan->local_amp_id != AMP_ID_BREDR)
4735 l2cap_logical_finish_create(chan, hchan);
4737 l2cap_logical_finish_move(chan, hchan);
/* Start moving a channel as initiator. From BR/EDR, only channels whose
 * policy prefers AMP are moved (physical link setup is still a
 * placeholder); from AMP, the move back toward BR/EDR (amp id 0) starts
 * immediately with a Move Channel Request.
 */
4741 void l2cap_move_start(struct l2cap_chan *chan)
4743 BT_DBG("chan %p", chan);
4745 if (chan->local_amp_id == AMP_ID_BREDR) {
4746 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4748 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4749 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4750 /* Placeholder - start physical link setup */
4752 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4753 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4755 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: move back to BR/EDR. */
4756 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical-link setup. Outgoing channels
 * (state BT_CONNECT) either send a Create Channel Request on success or
 * fall back to a plain BR/EDR Connect Request; incoming channels send the
 * pending Create Channel Response and, on success, start configuration.
 */
4760 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4761 u8 local_amp_id, u8 remote_amp_id)
4763 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4764 local_amp_id, remote_amp_id);
/* FCS is not used on the AMP path. */
4766 chan->fcs = L2CAP_FCS_NONE;
4768 /* Outgoing channel on AMP */
4769 if (chan->state == BT_CONNECT) {
4770 if (result == L2CAP_CR_SUCCESS) {
4771 chan->local_amp_id = local_amp_id;
4772 l2cap_send_create_chan_req(chan, remote_amp_id);
4774 /* Revert to BR/EDR connect */
4775 l2cap_send_conn_req(chan);
4781 /* Incoming channel on AMP */
4782 if (__l2cap_no_conn_pending(chan)) {
4783 struct l2cap_conn_rsp rsp;
4785 rsp.scid = cpu_to_le16(chan->dcid);
4786 rsp.dcid = cpu_to_le16(chan->scid);
4788 if (result == L2CAP_CR_SUCCESS) {
4789 /* Send successful response */
4790 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4791 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4793 /* Send negative response */
4794 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4795 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4798 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4801 if (result == L2CAP_CR_SUCCESS) {
/* Accepted: move to configuration and send the first Config Request. */
4802 l2cap_state_change(chan, BT_CONFIG);
4803 set_bit(CONF_REQ_SENT, &chan->conf_state);
4804 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4806 l2cap_build_conf_req(chan, buf), buf);
4807 chan->num_conf_req++;
/* As move initiator, prepare the channel, record the target controller
 * id, and send the Move Channel Request.
 */
4812 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4815 l2cap_move_setup(chan);
4816 chan->move_id = local_amp_id;
4817 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4819 l2cap_send_move_chan_req(chan, remote_amp_id);
/* As move responder, answer based on logical-link readiness. The hchan
 * lookup is still a placeholder (hchan stays NULL here), so the
 * NOT_ALLOWED path is the one currently reachable — verify once the AMP
 * helpers land.
 */
4822 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4824 struct hci_chan *hchan = NULL;
4826 /* Placeholder - get hci_chan for logical link */
4829 if (hchan->state == BT_CONNECTED) {
4830 /* Logical link is ready to go */
4831 chan->hs_hcon = hchan->conn;
4832 chan->hs_hcon->l2cap_data = chan->conn;
4833 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4834 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4836 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4838 /* Wait for logical link to be ready */
4839 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4842 /* Logical link not available */
4843 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move: as responder send a failure response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED), reset role/state to
 * stable, and restart ERTM transmission.
 */
4847 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4849 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4851 if (result == -EINVAL)
4852 rsp_result = L2CAP_MR_BAD_ID;
4854 rsp_result = L2CAP_MR_NOT_ALLOWED;
4856 l2cap_send_move_chan_rsp(chan, rsp_result);
4859 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4860 chan->move_state = L2CAP_MOVE_STABLE;
4862 /* Restart data transmission */
4863 l2cap_ertm_send(chan);
4866 /* Invoke with locked chan */
/* Physical-link confirmation: dispatch to channel creation, move
 * cancellation, or the role-specific move continuation, depending on the
 * channel state, the result, and our move role.
 */
4867 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4869 u8 local_amp_id = chan->local_amp_id;
4870 u8 remote_amp_id = chan->remote_amp_id;
4872 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4873 chan, result, local_amp_id, remote_amp_id);
/* Channel already going down: nothing to do. */
4875 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4876 l2cap_chan_unlock(chan);
4880 if (chan->state != BT_CONNECTED) {
4881 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4882 } else if (result != L2CAP_MR_SUCCESS) {
4883 l2cap_do_move_cancel(chan, result);
4885 switch (chan->move_role) {
4886 case L2CAP_MOVE_ROLE_INITIATOR:
4887 l2cap_do_move_initiate(chan, local_amp_id,
4890 case L2CAP_MOVE_ROLE_RESPONDER:
4891 l2cap_do_move_respond(chan, result);
/* No recognized role: treat as a cancelled move. */
4894 l2cap_do_move_cancel(chan, result);
/* Handle an incoming Move Channel Request. Validates the channel
 * (dynamic CID, policy, ERTM/streaming mode), the destination controller,
 * and move collisions, then either responds directly (moves to BR/EDR)
 * or answers PEND while AMP preparation proceeds.
 */
4900 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4901 struct l2cap_cmd_hdr *cmd,
4902 u16 cmd_len, void *data)
4904 struct l2cap_move_chan_req *req = data;
4905 struct l2cap_move_chan_rsp rsp;
4906 struct l2cap_chan *chan;
4908 u16 result = L2CAP_MR_NOT_ALLOWED;
4910 if (cmd_len != sizeof(*req))
4913 icid = le16_to_cpu(req->icid);
4915 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4917 if (!conn->hs_enabled)
4920 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: reply NOT_ALLOWED without a channel. */
4922 rsp.icid = cpu_to_le16(icid);
4923 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4924 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Save ident so the later response reuses the request's ident. */
4929 chan->ident = cmd->ident;
/* Only dynamic, non-BR/EDR-only channels in ERTM or streaming mode
 * are movable. */
4931 if (chan->scid < L2CAP_CID_DYN_START ||
4932 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4933 (chan->mode != L2CAP_MODE_ERTM &&
4934 chan->mode != L2CAP_MODE_STREAMING)) {
4935 result = L2CAP_MR_NOT_ALLOWED;
4936 goto send_move_response;
/* Moving to the controller we are already on is a no-op error. */
4939 if (chan->local_amp_id == req->dest_amp_id) {
4940 result = L2CAP_MR_SAME_ID;
4941 goto send_move_response;
4944 if (req->dest_amp_id != AMP_ID_BREDR) {
4945 struct hci_dev *hdev;
4946 hdev = hci_dev_get(req->dest_amp_id);
4947 if (!hdev || hdev->dev_type != HCI_AMP ||
4948 !test_bit(HCI_UP, &hdev->flags)) {
4952 result = L2CAP_MR_BAD_ID;
4953 goto send_move_response;
4958 /* Detect a move collision. Only send a collision response
4959 * if this side has "lost", otherwise proceed with the move.
4960 * The winner has the larger bd_addr.
4962 if ((__chan_is_moving(chan) ||
4963 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4964 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4965 result = L2CAP_MR_COLLISION;
4966 goto send_move_response;
4969 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4970 l2cap_move_setup(chan);
4971 chan->move_id = req->dest_amp_id;
4974 if (req->dest_amp_id == AMP_ID_BREDR) {
4975 /* Moving to BR/EDR */
4976 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4977 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4978 result = L2CAP_MR_PEND;
4980 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4981 result = L2CAP_MR_SUCCESS;
/* Moving to an AMP: answer PEND while physical prep happens. */
4984 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4985 /* Placeholder - uncomment when amp functions are available */
4986 /*amp_accept_physical(chan, req->dest_amp_id);*/
4987 result = L2CAP_MR_PEND;
4991 l2cap_send_move_chan_rsp(chan, result);
4993 l2cap_chan_unlock(chan);
/* Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response, advancing the move state machine based on the current state
 * and logical-link readiness.
 */
4998 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5000 struct l2cap_chan *chan;
5001 struct hci_chan *hchan = NULL;
5003 chan = l2cap_get_chan_by_scid(conn, icid);
/* Channel gone: send an unconfirmed Confirm for the bare icid. */
5005 l2cap_send_move_chan_cfm_icid(conn, icid);
5009 __clear_chan_timer(chan);
/* A PEND result re-arms the extended move response timeout. */
5010 if (result == L2CAP_MR_PEND)
5011 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5013 switch (chan->move_state) {
5014 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5015 /* Move confirm will be sent when logical link
5018 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5020 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5021 if (result == L2CAP_MR_PEND) {
5023 } else if (test_bit(CONN_LOCAL_BUSY,
5024 &chan->conn_state)) {
5025 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5027 /* Logical link is up or moving to BR/EDR,
5030 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5031 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5034 case L2CAP_MOVE_WAIT_RSP:
5036 if (result == L2CAP_MR_SUCCESS) {
5037 /* Remote is ready, send confirm immediately
5038 * after logical link is ready
5040 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5042 /* Both logical link and move success
5043 * are required to confirm
5045 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5048 /* Placeholder - get hci_chan for logical link */
5050 /* Logical link not available */
5051 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5055 /* If the logical link is not yet connected, do not
5056 * send confirmation.
5058 if (hchan->state != BT_CONNECTED)
5061 /* Logical link is already ready to go */
5063 chan->hs_hcon = hchan->conn;
5064 chan->hs_hcon->l2cap_data = chan->conn;
5066 if (result == L2CAP_MR_SUCCESS) {
5067 /* Can confirm now */
5068 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5070 /* Now only need move success
5073 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5076 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5079 /* Any other amp move state means the move failed. */
5080 chan->move_id = chan->local_amp_id;
5081 l2cap_move_done(chan);
5082 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5085 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response. On a collision, the initiator
 * switches to the responder role; otherwise the move is cancelled and an
 * unconfirmed Confirm is sent.
 */
5088 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5091 struct l2cap_chan *chan;
5093 chan = l2cap_get_chan_by_ident(conn, ident);
5095 /* Could not locate channel, icid is best guess */
5096 l2cap_send_move_chan_cfm_icid(conn, icid);
5100 __clear_chan_timer(chan);
5102 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5103 if (result == L2CAP_MR_COLLISION) {
5104 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5106 /* Cleanup - cancel move */
5107 chan->move_id = chan->local_amp_id;
5108 l2cap_move_done(chan);
5112 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5114 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Response: SUCCESS/PEND continues the
 * move, any other result routes to the failure path.
 */
5117 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5118 struct l2cap_cmd_hdr *cmd,
5119 u16 cmd_len, void *data)
5121 struct l2cap_move_chan_rsp *rsp = data;
5124 if (cmd_len != sizeof(*rsp))
5127 icid = le16_to_cpu(rsp->icid);
5128 result = le16_to_cpu(rsp->result);
5130 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5132 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5133 l2cap_move_continue(conn, icid, result);
5135 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming Move Channel Confirm: commit or roll back the move
 * on the responder side and always answer with a Confirm Response (the
 * spec requires a response even for an unknown icid).
 */
5140 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5141 struct l2cap_cmd_hdr *cmd,
5142 u16 cmd_len, void *data)
5144 struct l2cap_move_chan_cfm *cfm = data;
5145 struct l2cap_chan *chan;
5148 if (cmd_len != sizeof(*cfm))
5151 icid = le16_to_cpu(cfm->icid);
5152 result = le16_to_cpu(cfm->result);
5154 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5156 chan = l2cap_get_chan_by_dcid(conn, icid);
5158 /* Spec requires a response even if the icid was not found */
5159 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5163 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5164 if (result == L2CAP_MC_CONFIRMED) {
/* Confirmed: adopt the new controller; if it is BR/EDR the
 * high-speed logical link is no longer needed. */
5165 chan->local_amp_id = chan->move_id;
5166 if (chan->local_amp_id == AMP_ID_BREDR)
5167 __release_logical_link(chan);
/* Unconfirmed: revert move_id to the current controller. */
5169 chan->move_id = chan->local_amp_id;
5172 l2cap_move_done(chan);
5175 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5177 l2cap_chan_unlock(chan);
/* Handle an incoming Move Channel Confirm Response: the final handshake
 * step on the initiator side. Commit the new controller id and finish
 * the move.
 */
5182 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5183 struct l2cap_cmd_hdr *cmd,
5184 u16 cmd_len, void *data)
5186 struct l2cap_move_chan_cfm_rsp *rsp = data;
5187 struct l2cap_chan *chan;
5190 if (cmd_len != sizeof(*rsp))
5193 icid = le16_to_cpu(rsp->icid);
5195 BT_DBG("icid 0x%4.4x", icid);
5197 chan = l2cap_get_chan_by_scid(conn, icid);
5201 __clear_chan_timer(chan);
5203 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5204 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a leftover logical link: release it. */
5206 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5207 __release_logical_link(chan);
5209 l2cap_move_done(chan);
5212 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request (master side only):
 * validate the proposed parameters, answer accepted/rejected, and when
 * accepted push the update to the controller and notify mgmt.
 */
5217 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5218 struct l2cap_cmd_hdr *cmd,
5219 u16 cmd_len, u8 *data)
5221 struct hci_conn *hcon = conn->hcon;
5222 struct l2cap_conn_param_update_req *req;
5223 struct l2cap_conn_param_update_rsp rsp;
5224 u16 min, max, latency, to_multiplier;
/* Only the master may process this request. */
5227 if (!test_bit(HCI_CONN_MASTER, &hcon->flags))
5230 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5233 req = (struct l2cap_conn_param_update_req *) data;
5234 min = __le16_to_cpu(req->min);
5235 max = __le16_to_cpu(req->max);
5236 latency = __le16_to_cpu(req->latency);
5237 to_multiplier = __le16_to_cpu(req->to_multiplier);
5239 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5240 min, max, latency, to_multiplier);
5242 memset(&rsp, 0, sizeof(rsp));
/* hci_check_conn_params decides whether the proposal is acceptable. */
5244 err = hci_check_conn_params(min, max, latency, to_multiplier);
5246 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5248 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5250 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply the update and report the new params to mgmt. */
5256 store_hint = hci_le_conn_update(hcon, min, max, latency,
5258 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5259 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response: on success adopt the
 * peer's dcid/MTU/MPS/initial credits and mark the channel ready; on any
 * failure result delete the channel with ECONNREFUSED.
 */
5267 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5268 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5271 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5272 u16 dcid, mtu, mps, credits, result;
5273 struct l2cap_chan *chan;
5276 if (cmd_len < sizeof(*rsp))
5279 dcid = __le16_to_cpu(rsp->dcid);
5280 mtu = __le16_to_cpu(rsp->mtu);
5281 mps = __le16_to_cpu(rsp->mps);
5282 credits = __le16_to_cpu(rsp->credits);
5283 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS for a successful response. */
5285 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5288 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5289 dcid, mtu, mps, credits, result);
5291 mutex_lock(&conn->chan_lock);
/* Responses are matched to the channel via the request ident. */
5293 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5301 l2cap_chan_lock(chan);
5304 case L2CAP_CR_SUCCESS:
5308 chan->remote_mps = mps;
5309 chan->tx_credits = credits;
5310 l2cap_chan_ready(chan);
/* Any non-success result refuses the connection. */
5314 l2cap_chan_del(chan, ECONNREFUSED);
5318 l2cap_chan_unlock(chan);
5321 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged so the
 * caller can send a Command Reject.
 */
5326 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5327 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5332 switch (cmd->code) {
5333 case L2CAP_COMMAND_REJ:
5334 l2cap_command_rej(conn, cmd, cmd_len, data);
5337 case L2CAP_CONN_REQ:
5338 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5341 case L2CAP_CONN_RSP:
5342 case L2CAP_CREATE_CHAN_RSP:
/* Connect and Create Channel responses share one handler. */
5343 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5346 case L2CAP_CONF_REQ:
5347 err = l2cap_config_req(conn, cmd, cmd_len, data);
5350 case L2CAP_CONF_RSP:
5351 l2cap_config_rsp(conn, cmd, cmd_len, data);
5354 case L2CAP_DISCONN_REQ:
5355 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5358 case L2CAP_DISCONN_RSP:
5359 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5362 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back. */
5363 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5366 case L2CAP_ECHO_RSP:
5369 case L2CAP_INFO_REQ:
5370 err = l2cap_information_req(conn, cmd, cmd_len, data);
5373 case L2CAP_INFO_RSP:
5374 l2cap_information_rsp(conn, cmd, cmd_len, data);
5377 case L2CAP_CREATE_CHAN_REQ:
5378 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5381 case L2CAP_MOVE_CHAN_REQ:
5382 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5385 case L2CAP_MOVE_CHAN_RSP:
5386 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5389 case L2CAP_MOVE_CHAN_CFM:
5390 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5393 case L2CAP_MOVE_CHAN_CFM_RSP:
5394 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5398 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request: find a listening channel
 * for the psm, check security and dcid uniqueness, create and initialize
 * the new flow-control channel, then send the connection response
 * (deferred to userspace when FLAG_DEFER_SETUP is set).
 */
5406 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5407 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5410 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5411 struct l2cap_le_conn_rsp rsp;
5412 struct l2cap_chan *chan, *pchan;
5413 u16 dcid, scid, credits, mtu, mps;
5417 if (cmd_len != sizeof(*req))
5420 scid = __le16_to_cpu(req->scid);
5421 mtu = __le16_to_cpu(req->mtu);
5422 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS. */
5427 if (mtu < 23 || mps < 23)
5430 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5433 /* Check if we have socket listening on psm */
5434 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5435 &conn->hcon->dst, LE_LINK);
5437 result = L2CAP_CR_BAD_PSM;
5442 mutex_lock(&conn->chan_lock);
5443 l2cap_chan_lock(pchan);
/* The link's SMP security must satisfy the listener's sec_level. */
5445 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5446 result = L2CAP_CR_AUTHENTICATION;
5448 goto response_unlock;
5451 /* Check if we already have channel with that dcid */
5452 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5453 result = L2CAP_CR_NO_MEM;
5455 goto response_unlock;
5458 chan = pchan->ops->new_connection(pchan);
5460 result = L2CAP_CR_NO_MEM;
5461 goto response_unlock;
5464 l2cap_le_flowctl_init(chan);
5466 bacpy(&chan->src, &conn->hcon->src);
5467 bacpy(&chan->dst, &conn->hcon->dst);
5468 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5469 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5473 chan->remote_mps = mps;
/* Initial tx credits come straight from the request. */
5474 chan->tx_credits = __le16_to_cpu(req->credits);
5476 __l2cap_chan_add(conn, chan);
5478 credits = chan->rx_credits;
5480 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5482 chan->ident = cmd->ident;
5484 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Defer the accept decision to the channel owner (userspace). */
5485 l2cap_state_change(chan, BT_CONNECT2);
5486 result = L2CAP_CR_PEND;
5487 chan->ops->defer(chan);
5489 l2cap_chan_ready(chan);
5490 result = L2CAP_CR_SUCCESS;
5494 l2cap_chan_unlock(pchan);
5495 mutex_unlock(&conn->chan_lock);
/* Pending: response is sent later when the owner accepts/rejects. */
5497 if (result == L2CAP_CR_PEND)
5502 rsp.mtu = cpu_to_le16(chan->imtu);
5503 rsp.mps = cpu_to_le16(chan->mps);
5509 rsp.dcid = cpu_to_le16(dcid);
5510 rsp.credits = cpu_to_le16(credits);
5511 rsp.result = cpu_to_le16(result);
5513 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to the
 * channel (disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS) and
 * flush as much of the queued tx data as the new credit total allows.
 */
5518 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5519 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5522 struct l2cap_le_credits *pkt;
5523 struct l2cap_chan *chan;
5524 u16 cid, credits, max_credits;
5526 if (cmd_len != sizeof(*pkt))
5529 pkt = (struct l2cap_le_credits *) data;
5530 cid = __le16_to_cpu(pkt->cid);
5531 credits = __le16_to_cpu(pkt->credits);
5533 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5535 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Credits may never push the total past LE_FLOWCTL_MAX_CREDITS. */
5539 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5540 if (credits > max_credits) {
5541 BT_ERR("LE credits overflow");
5542 l2cap_send_disconn_req(chan, ECONNRESET);
5544 /* Return 0 so that we don't trigger an unnecessary
5545 * command reject packet.
5550 chan->tx_credits += credits;
/* Drain the tx queue while credits remain. */
5552 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5553 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Credits left over: let the owner resume writing. */
5557 if (chan->tx_credits)
5558 chan->ops->resume(chan);
5560 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: the peer rejected our request, so delete
 * the channel matched by the request ident with ECONNREFUSED.
 */
5565 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5566 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5569 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5570 struct l2cap_chan *chan;
5572 if (cmd_len < sizeof(*rej))
5575 mutex_lock(&conn->chan_lock);
5577 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5581 l2cap_chan_lock(chan);
5582 l2cap_chan_del(chan, ECONNREFUSED);
5583 l2cap_chan_unlock(chan);
5586 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signalling command to its handler by opcode; unknown
 * opcodes are logged so the caller can send a Command Reject.
 */
5590 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5591 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5596 switch (cmd->code) {
5597 case L2CAP_COMMAND_REJ:
5598 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5601 case L2CAP_CONN_PARAM_UPDATE_REQ:
5602 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5605 case L2CAP_CONN_PARAM_UPDATE_RSP:
5608 case L2CAP_LE_CONN_RSP:
5609 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5612 case L2CAP_LE_CONN_REQ:
5613 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5616 case L2CAP_LE_CREDITS:
5617 err = l2cap_le_credits(conn, cmd, cmd_len, data);
/* Disconnect req/rsp handling is shared with the BR/EDR path. */
5620 case L2CAP_DISCONN_REQ:
5621 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5624 case L2CAP_DISCONN_RSP:
5625 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5629 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one inbound LE signalling PDU: validate the link type, command
 * header, length, and ident; dispatch to l2cap_le_sig_cmd; on error send
 * a NOT_UNDERSTOOD Command Reject. Note LE carries a single command per
 * PDU — the length must match the remaining skb exactly.
 */
5637 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5638 struct sk_buff *skb)
5640 struct hci_conn *hcon = conn->hcon;
5641 struct l2cap_cmd_hdr *cmd;
/* LE signalling only arrives on LE links. */
5645 if (hcon->type != LE_LINK)
5648 if (skb->len < L2CAP_CMD_HDR_SIZE)
5651 cmd = (void *) skb->data;
5652 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5654 len = le16_to_cpu(cmd->len);
5656 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* ident 0 is reserved and a length mismatch means corruption. */
5658 if (len != skb->len || !cmd->ident) {
5659 BT_DBG("corrupted command");
5663 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5665 struct l2cap_cmd_rej_unk rej;
5667 BT_ERR("Wrong link type (%d)", err);
5669 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5670 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an inbound BR/EDR signalling PDU, which may carry several
 * commands back to back: loop over each command header, validate its
 * length and ident, dispatch to l2cap_bredr_sig_cmd, and send a
 * NOT_UNDERSTOOD Command Reject on error.
 */
5678 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5679 struct sk_buff *skb)
5681 struct hci_conn *hcon = conn->hcon;
5682 u8 *data = skb->data;
5684 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signalling traffic first. */
5687 l2cap_raw_recv(conn, skb);
5689 if (hcon->type != ACL_LINK)
5692 while (len >= L2CAP_CMD_HDR_SIZE) {
/* Copy the header out: data may be unaligned within the skb. */
5694 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5695 data += L2CAP_CMD_HDR_SIZE;
5696 len -= L2CAP_CMD_HDR_SIZE;
5698 cmd_len = le16_to_cpu(cmd.len);
5700 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* ident 0 is reserved; a command longer than the remainder
 * means the PDU is corrupted. */
5703 if (cmd_len > len || !cmd.ident) {
5704 BT_DBG("corrupted command");
5708 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5710 struct l2cap_cmd_rej_unk rej;
5712 BT_ERR("Wrong link type (%d)", err);
5714 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5715 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame when
 * the channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header
 * (enhanced or extended size) plus the payload; the FCS bytes are
 * trimmed from the skb before comparing.
 */
5727 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5729 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use. */
5732 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5733 hdr_size = L2CAP_EXT_HDR_SIZE;
5735 hdr_size = L2CAP_ENH_HDR_SIZE;
5737 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the payload, then read it from just past the
 * new end of data. */
5738 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5739 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5740 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5742 if (our_fcs != rcv_fcs)
/* Send the final-bit acknowledgement the peer is waiting for: RNR when we
 * are locally busy, otherwise pending I-frames, and an RR if the F-bit
 * still has not gone out in any frame.
 */
5748 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5750 struct l2cap_ctrl control;
5752 BT_DBG("chan %p", chan);
5754 memset(&control, 0, sizeof(control));
5757 control.reqseq = chan->buffer_seq;
/* Request that the next outgoing frame carry the F-bit. */
5758 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5760 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5761 control.super = L2CAP_SUPER_RNR;
5762 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy state: restart retransmission. */
5765 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5766 chan->unacked_frames > 0)
5767 __set_retrans_timer(chan);
5769 /* Send pending iframes */
5770 l2cap_ertm_send(chan);
5772 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5773 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5774 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5777 control.super = L2CAP_SUPER_RR;
5778 l2cap_send_sframe(chan, &control);
/* Chain new_frag onto skb's frag_list and update the aggregate byte
 * accounting; *last_frag tracks the current list tail so repeated
 * appends stay O(1).
 * NOTE(review): the if/else structure here is partially elided by this
 * listing (lines 5790/5792 missing) -- confirm branch bodies against
 * the full source before editing.
 */
5782 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5783 struct sk_buff **last_frag)
5785 /* skb->len reflects data in skb as well as all fragments
5786 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list */
5788 if (!skb_has_frag_list(skb))
5789 skb_shinfo(skb)->frag_list = new_frag;
5791 new_frag->next = NULL;
5793 (*last_frag)->next = new_frag;
5794 *last_frag = new_frag;
/* Keep the head skb's length fields consistent with the new fragment */
5796 skb->len += new_frag->len;
5797 skb->data_len += new_frag->len;
5798 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits: unsegmented frames go straight up via chan->ops->recv(); a START
 * frame stores the announced SDU length and becomes chan->sdu;
 * CONTINUE/END frames are appended, and the completed SDU is delivered
 * once the accumulated length matches chan->sdu_len.
 * NOTE(review): case labels, break/goto lines, and parts of the error
 * path are not visible in this listing.
 */
5801 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5802 struct l2cap_ctrl *control)
5806 switch (control->sar) {
5807 case L2CAP_SAR_UNSEGMENTED:
5811 err = chan->ops->recv(chan, skb);
5814 case L2CAP_SAR_START:
/* First two payload bytes of a START frame carry the total SDU length */
5818 chan->sdu_len = get_unaligned_le16(skb->data);
5819 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: reject (cleanup path not shown) */
5821 if (chan->sdu_len > chan->imtu) {
/* A START frame must not already contain the whole SDU */
5826 if (skb->len >= chan->sdu_len)
5830 chan->sdu_last_frag = skb;
5836 case L2CAP_SAR_CONTINUE:
5840 append_skb_frag(chan->sdu, skb,
5841 &chan->sdu_last_frag);
/* A CONTINUE fragment must leave the SDU still incomplete */
5844 if (chan->sdu->len >= chan->sdu_len)
/* SAR END: append the final fragment... */
5854 append_skb_frag(chan->sdu, skb,
5855 &chan->sdu_last_frag);
/* ...after which the accumulated length must match exactly */
5858 if (chan->sdu->len != chan->sdu_len)
5861 err = chan->ops->recv(chan, chan->sdu);
5864 /* Reassembly complete */
5866 chan->sdu_last_frag = NULL;
/* Shared error path: drop the partial SDU and reset reassembly state */
5874 kfree_skb(chan->sdu);
5876 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after an MTU change (used when a
 * channel move completes). Function body is not visible in this
 * listing -- TODO confirm against the full source.
 */
5883 static int l2cap_resegment(struct l2cap_chan *chan)
/* Propagate a local-busy state change (receive buffers full / freed)
 * into the ERTM tx state machine; a no-op for non-ERTM channels.
 */
5889 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5893 if (chan->mode != L2CAP_MODE_ERTM)
5896 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5897 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ holding queue: deliver consecutively-sequenced frames
 * to reassembly until a sequence gap (or local busy) stops us; once the
 * queue is empty, return to RECV state and acknowledge the peer.
 */
5900 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5903 /* Pass sequential frames to l2cap_reassemble_sdu()
5904 * until a gap is encountered.
5907 BT_DBG("chan %p", chan);
5909 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5910 struct sk_buff *skb;
5911 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5912 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* NULL result means a gap: the loop exit is in an elided line */
5914 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5919 skb_unlink(skb, &chan->srej_q);
5920 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5921 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All out-of-order frames consumed: resume normal reception */
5926 if (skb_queue_empty(&chan->srej_q)) {
5927 chan->rx_state = L2CAP_RX_STATE_RECV;
5928 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, retransmit the single
 * requested I-frame, and maintain poll/final (P/F bit) bookkeeping via
 * CONN_SREJ_ACT and srej_save_reqseq. Disconnects on an invalid reqseq
 * or when the frame's retry budget is exhausted.
 */
5934 static void l2cap_handle_srej(struct l2cap_chan *chan,
5935 struct l2cap_ctrl *control)
5937 struct sk_buff *skb;
5939 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for a frame we have not sent yet is a protocol violation */
5941 if (control->reqseq == chan->next_tx_seq) {
5942 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5943 l2cap_send_disconn_req(chan, ECONNRESET);
5947 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5950 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means "no retry limit" */
5955 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5956 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5957 l2cap_send_disconn_req(chan, ECONNRESET);
5961 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5963 if (control->poll) {
5964 l2cap_pass_to_tx(chan, control);
/* P=1: the retransmission must carry the F-bit in reply */
5966 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5967 l2cap_retransmit(chan, control);
5968 l2cap_ertm_send(chan);
5970 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5971 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5972 chan->srej_save_reqseq = control->reqseq;
5975 l2cap_pass_to_tx_fbit(chan, control);
5977 if (control->final) {
/* F=1: retransmit only if this SREJ was not already serviced
 * while waiting for the F-bit (same saved reqseq).
 */
5978 if (chan->srej_save_reqseq != control->reqseq ||
5979 !test_and_clear_bit(CONN_SREJ_ACT,
5981 l2cap_retransmit(chan, control);
5983 l2cap_retransmit(chan, control);
5984 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5985 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5986 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, then retransmit all
 * unacked frames from reqseq onward, subject to the retry limit and the
 * CONN_REJ_ACT flag used for F-bit coordination.
 */
5992 static void l2cap_handle_rej(struct l2cap_chan *chan,
5993 struct l2cap_ctrl *control)
5995 struct sk_buff *skb;
5997 BT_DBG("chan %p, control %p", chan, control);
/* A REJ of a frame we never sent is a protocol violation */
5999 if (control->reqseq == chan->next_tx_seq) {
6000 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6001 l2cap_send_disconn_req(chan, ECONNRESET);
6005 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Give up if the rejected frame has already hit its retry budget */
6007 if (chan->max_tx && skb &&
6008 bt_cb(skb)->control.retries >= chan->max_tx) {
6009 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6010 l2cap_send_disconn_req(chan, ECONNRESET);
6014 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6016 l2cap_pass_to_tx(chan, control);
6018 if (control->final) {
/* F=1: only retransmit if this REJ wasn't already acted upon */
6019 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6020 l2cap_retransmit_all(chan, control);
6022 l2cap_retransmit_all(chan, control);
6023 l2cap_ertm_send(chan);
/* Remember we serviced this REJ in case its F-bit reply is pending */
6024 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6025 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * EXPECTED, DUPLICATE, UNEXPECTED (gap -> SREJ needed), or INVALID
 * (with an IGNORE variant when a small tx window makes the frame safely
 * droppable), plus SREJ-specific sub-cases while in SREJ_SENT state.
 * The returned L2CAP_TXSEQ_* code drives the rx state handlers below.
 */
6029 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6031 BT_DBG("chan %p, txseq %d", chan, txseq)
6033 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6034 chan->expected_tx_seq);
/* Extra classifications apply while SREJs are outstanding */
6036 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6037 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6039 /* See notes below regarding "double poll" and
6042 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6043 BT_DBG("Invalid/Ignore - after SREJ");
6044 return L2CAP_TXSEQ_INVALID_IGNORE;
6046 BT_DBG("Invalid - in window after SREJ sent");
6047 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: the retransmission we were waiting for */
6051 if (chan->srej_list.head == txseq) {
6052 BT_DBG("Expected SREJ");
6053 return L2CAP_TXSEQ_EXPECTED_SREJ;
6056 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6057 BT_DBG("Duplicate SREJ - txseq already stored");
6058 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6061 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6062 BT_DBG("Unexpected SREJ - not requested");
6063 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6067 if (chan->expected_tx_seq == txseq) {
6068 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6070 BT_DBG("Invalid - txseq outside tx window");
6071 return L2CAP_TXSEQ_INVALID;
6074 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq: frame was already received */
6078 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6079 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6080 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6081 return L2CAP_TXSEQ_DUPLICATE;
6084 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6085 /* A source of invalid packets is a "double poll" condition,
6086 * where delays cause us to send multiple poll packets. If
6087 * the remote stack receives and processes both polls,
6088 * sequence numbers can wrap around in such a way that a
6089 * resent frame has a sequence number that looks like new data
6090 * with a sequence gap. This would trigger an erroneous SREJ
6093 * Fortunately, this is impossible with a tx window that's
6094 * less than half of the maximum sequence number, which allows
6095 * invalid frames to be safely ignored.
6097 * With tx window sizes greater than half of the tx window
6098 * maximum, the frame is invalid and cannot be ignored. This
6099 * causes a disconnect.
6102 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6103 BT_DBG("Invalid/Ignore - txseq outside tx window");
6104 return L2CAP_TXSEQ_INVALID_IGNORE;
6106 BT_DBG("Invalid - txseq outside tx window");
6107 return L2CAP_TXSEQ_INVALID;
/* In-window but past expected: a gap, i.e. missing frame(s) */
6110 BT_DBG("Unexpected - txseq indicates missing frames");
6111 return L2CAP_TXSEQ_UNEXPECTED;
/* RECV-state handler of the ERTM rx state machine: processes I-frames
 * (in-sequence delivery, gap detection -> enter SREJ_SENT) and
 * RR/RNR/REJ/SREJ S-frames; frees the skb at the end unless a branch
 * queued or consumed it (skb_in_use).
 * NOTE(review): break/goto/default lines and some assignments are
 * missing from this listing (dropped source lines).
 */
6115 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6116 struct l2cap_ctrl *control,
6117 struct sk_buff *skb, u8 event)
6120 bool skb_in_use = false;
6122 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6126 case L2CAP_EV_RECV_IFRAME:
6127 switch (l2cap_classify_txseq(chan, control->txseq)) {
6128 case L2CAP_TXSEQ_EXPECTED:
6129 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; peer will retransmit later */
6131 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6132 BT_DBG("Busy, discarding expected seq %d",
6137 chan->expected_tx_seq = __next_seq(chan,
6140 chan->buffer_seq = chan->expected_tx_seq;
6143 err = l2cap_reassemble_sdu(chan, skb, control);
/* F=1 answers our poll: resume transmission if no REJ was
 * already acted upon.
 */
6147 if (control->final) {
6148 if (!test_and_clear_bit(CONN_REJ_ACT,
6149 &chan->conn_state)) {
6151 l2cap_retransmit_all(chan, control);
6152 l2cap_ertm_send(chan);
6156 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6157 l2cap_send_ack(chan);
6159 case L2CAP_TXSEQ_UNEXPECTED:
6160 l2cap_pass_to_tx(chan, control);
6162 /* Can't issue SREJ frames in the local busy state.
6163 * Drop this frame, it will be seen as missing
6164 * when local busy is exited.
6166 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6167 BT_DBG("Busy, discarding unexpected seq %d",
6172 /* There was a gap in the sequence, so an SREJ
6173 * must be sent for each missing frame. The
6174 * current frame is stored for later use.
6176 skb_queue_tail(&chan->srej_q, skb);
6178 BT_DBG("Queued %p (queue len %d)", skb,
6179 skb_queue_len(&chan->srej_q));
6181 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6182 l2cap_seq_list_clear(&chan->srej_list);
6183 l2cap_send_srej(chan, control->txseq);
6185 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
/* Duplicate frames still carry a usable reqseq for the tx side */
6187 case L2CAP_TXSEQ_DUPLICATE:
6188 l2cap_pass_to_tx(chan, control);
6190 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unrecoverable sequence error: tear the channel down */
6192 case L2CAP_TXSEQ_INVALID:
6194 l2cap_send_disconn_req(chan, ECONNRESET);
6198 case L2CAP_EV_RECV_RR:
6199 l2cap_pass_to_tx(chan, control);
6200 if (control->final) {
6201 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6203 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6204 !__chan_is_moving(chan)) {
6206 l2cap_retransmit_all(chan, control);
6209 l2cap_ertm_send(chan);
/* P=1 without F: answer the poll with an F-bit frame */
6210 } else if (control->poll) {
6211 l2cap_send_i_or_rr_or_rnr(chan);
6213 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6214 &chan->conn_state) &&
6215 chan->unacked_frames)
6216 __set_retrans_timer(chan);
6218 l2cap_ertm_send(chan);
6221 case L2CAP_EV_RECV_RNR:
6222 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6223 l2cap_pass_to_tx(chan, control);
6224 if (control && control->poll) {
6225 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6226 l2cap_send_rr_or_rnr(chan, 0);
/* Peer is busy: stop retransmitting until it recovers */
6228 __clear_retrans_timer(chan);
6229 l2cap_seq_list_clear(&chan->retrans_list);
6231 case L2CAP_EV_RECV_REJ:
6232 l2cap_handle_rej(chan, control);
6234 case L2CAP_EV_RECV_SREJ:
6235 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed by a handler above */
6241 if (skb && !skb_in_use) {
6242 BT_DBG("Freeing %p", skb);
/* SREJ_SENT-state handler: while selective-reject recovery is in
 * progress, queue every received I-frame in srej_q (delivery happens in
 * txseq order via l2cap_rx_queued_iframes()), issue further SREJs for
 * newly-detected gaps, and process S-frames with the same P/F-bit rules
 * as RECV state.
 * NOTE(review): break/goto lines and some assignments are missing from
 * this listing (dropped source lines).
 */
6249 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6250 struct l2cap_ctrl *control,
6251 struct sk_buff *skb, u8 event)
6254 u16 txseq = control->txseq;
6255 bool skb_in_use = false;
6257 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6261 case L2CAP_EV_RECV_IFRAME:
6262 switch (l2cap_classify_txseq(chan, txseq)) {
6263 case L2CAP_TXSEQ_EXPECTED:
6264 /* Keep frame for reassembly later */
6265 l2cap_pass_to_tx(chan, control);
6266 skb_queue_tail(&chan->srej_q, skb);
6268 BT_DBG("Queued %p (queue len %d)", skb,
6269 skb_queue_len(&chan->srej_q));
6271 chan->expected_tx_seq = __next_seq(chan, txseq);
/* A retransmission we asked for has arrived: pop it off the
 * SREJ list and try to deliver any now-sequential frames.
 */
6273 case L2CAP_TXSEQ_EXPECTED_SREJ:
6274 l2cap_seq_list_pop(&chan->srej_list);
6276 l2cap_pass_to_tx(chan, control);
6277 skb_queue_tail(&chan->srej_q, skb);
6279 BT_DBG("Queued %p (queue len %d)", skb,
6280 skb_queue_len(&chan->srej_q));
6282 err = l2cap_rx_queued_iframes(chan);
6287 case L2CAP_TXSEQ_UNEXPECTED:
6288 /* Got a frame that can't be reassembled yet.
6289 * Save it for later, and send SREJs to cover
6290 * the missing frames.
6292 skb_queue_tail(&chan->srej_q, skb);
6294 BT_DBG("Queued %p (queue len %d)", skb,
6295 skb_queue_len(&chan->srej_q));
6297 l2cap_pass_to_tx(chan, control);
6298 l2cap_send_srej(chan, control->txseq);
6300 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6301 /* This frame was requested with an SREJ, but
6302 * some expected retransmitted frames are
6303 * missing. Request retransmission of missing
6306 skb_queue_tail(&chan->srej_q, skb);
6308 BT_DBG("Queued %p (queue len %d)", skb,
6309 skb_queue_len(&chan->srej_q));
6311 l2cap_pass_to_tx(chan, control);
6312 l2cap_send_srej_list(chan, control->txseq);
6314 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6315 /* We've already queued this frame. Drop this copy. */
6316 l2cap_pass_to_tx(chan, control);
6318 case L2CAP_TXSEQ_DUPLICATE:
6319 /* Expecting a later sequence number, so this frame
6320 * was already received. Ignore it completely.
6323 case L2CAP_TXSEQ_INVALID_IGNORE:
/* Unrecoverable sequence error: tear the channel down */
6325 case L2CAP_TXSEQ_INVALID:
6327 l2cap_send_disconn_req(chan, ECONNRESET);
6331 case L2CAP_EV_RECV_RR:
6332 l2cap_pass_to_tx(chan, control);
6333 if (control->final) {
6334 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6336 if (!test_and_clear_bit(CONN_REJ_ACT,
6337 &chan->conn_state)) {
6339 l2cap_retransmit_all(chan, control);
6342 l2cap_ertm_send(chan);
/* P=1: re-request the tail of the SREJ list with the F-bit */
6343 } else if (control->poll) {
6344 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6345 &chan->conn_state) &&
6346 chan->unacked_frames) {
6347 __set_retrans_timer(chan);
6350 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6351 l2cap_send_srej_tail(chan);
6353 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6354 &chan->conn_state) &&
6355 chan->unacked_frames)
6356 __set_retrans_timer(chan);
6358 l2cap_send_ack(chan);
6361 case L2CAP_EV_RECV_RNR:
6362 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6363 l2cap_pass_to_tx(chan, control);
6364 if (control->poll) {
6365 l2cap_send_srej_tail(chan);
/* No poll: acknowledge the RNR with a plain RR S-frame */
6367 struct l2cap_ctrl rr_control;
6368 memset(&rr_control, 0, sizeof(rr_control));
6369 rr_control.sframe = 1;
6370 rr_control.super = L2CAP_SUPER_RR;
6371 rr_control.reqseq = chan->buffer_seq;
6372 l2cap_send_sframe(chan, &rr_control);
6376 case L2CAP_EV_RECV_REJ:
6377 l2cap_handle_rej(chan, control);
6379 case L2CAP_EV_RECV_SREJ:
6380 l2cap_handle_srej(chan, control);
/* Frame was neither queued nor consumed by a handler above */
6384 if (skb && !skb_in_use) {
6385 BT_DBG("Freeing %p", skb);
/* Finish a channel move: return the rx side to RECV state, adopt the
 * new HCI link's MTU (block_mtu when moved to an AMP link, acl_mtu
 * otherwise -- the branch condition line is elided in this listing),
 * and re-segment pending outbound data for the new MTU.
 */
6392 static int l2cap_finish_move(struct l2cap_chan *chan)
6394 BT_DBG("chan %p", chan);
6396 chan->rx_state = L2CAP_RX_STATE_RECV;
6399 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6401 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6403 return l2cap_resegment(chan);
/* WAIT_P-state handler (channel move): on receiving the peer's poll
 * (P=1), resynchronize the tx side to the peer's reqseq, complete the
 * move, and answer with an F-bit frame; other events are forwarded to
 * the common RECV handler without the skb (I-frame payload is not
 * deliverable in this state).
 */
6406 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6407 struct l2cap_ctrl *control,
6408 struct sk_buff *skb, u8 event)
6412 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Ack processing up to the peer's reqseq */
6418 l2cap_process_reqseq(chan, control->reqseq);
6420 if (!skb_queue_empty(&chan->tx_q))
6421 chan->tx_send_head = skb_peek(&chan->tx_q);
6423 chan->tx_send_head = NULL;
6425 /* Rewind next_tx_seq to the point expected
6428 chan->next_tx_seq = control->reqseq;
6429 chan->unacked_frames = 0;
6431 err = l2cap_finish_move(chan);
6435 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6436 l2cap_send_i_or_rr_or_rnr(chan);
6438 if (event == L2CAP_EV_RECV_IFRAME)
6441 return l2cap_rx_state_recv(chan, control, NULL, event);
/* WAIT_F-state handler (channel move): wait for the peer's final (F=1)
 * frame, then resynchronize the tx side to its reqseq, adopt the new
 * link's MTU, re-segment pending data, and process the frame itself in
 * RECV state.
 */
6444 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6445 struct l2cap_ctrl *control,
6446 struct sk_buff *skb, u8 event)
/* Only the F-bit frame advances this state */
6450 if (!control->final)
6453 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6455 chan->rx_state = L2CAP_RX_STATE_RECV;
6456 l2cap_process_reqseq(chan, control->reqseq);
6458 if (!skb_queue_empty(&chan->tx_q))
6459 chan->tx_send_head = skb_peek(&chan->tx_q);
6461 chan->tx_send_head = NULL;
6463 /* Rewind next_tx_seq to the point expected
6466 chan->next_tx_seq = control->reqseq;
6467 chan->unacked_frames = 0;
/* Adopt the moved link's MTU (AMP block_mtu vs BR/EDR acl_mtu;
 * the branch condition line is elided in this listing).
 */
6470 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6472 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6474 err = l2cap_resegment(chan);
6477 err = l2cap_rx_state_recv(chan, control, skb, event);
6482 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6484 /* Make sure reqseq is for a packet that has been sent but not acked */
6487 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6488 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* ERTM rx entry point: verify the frame's reqseq acknowledges only
 * frames actually outstanding, then dispatch to the handler for the
 * current rx state; an invalid reqseq tears the channel down.
 */
6491 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6492 struct sk_buff *skb, u8 event)
6496 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6497 control, skb, event, chan->rx_state);
6499 if (__valid_reqseq(chan, control->reqseq)) {
6500 switch (chan->rx_state) {
6501 case L2CAP_RX_STATE_RECV:
6502 err = l2cap_rx_state_recv(chan, control, skb, event);
6504 case L2CAP_RX_STATE_SREJ_SENT:
6505 err = l2cap_rx_state_srej_sent(chan, control, skb,
6508 case L2CAP_RX_STATE_WAIT_P:
6509 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6511 case L2CAP_RX_STATE_WAIT_F:
6512 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the sent-but-unacked range: protocol error */
6519 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6520 control->reqseq, chan->next_tx_seq,
6521 chan->expected_ack_seq);
6522 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode rx: only the expected txseq is reassembled; any other
 * classification drops in-progress reassembly state and frees the skb
 * (streaming mode never retransmits). The rx window then advances
 * unconditionally to track the received txseq.
 */
6528 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6529 struct sk_buff *skb)
6533 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6536 if (l2cap_classify_txseq(chan, control->txseq) ==
6537 L2CAP_TXSEQ_EXPECTED) {
6538 l2cap_pass_to_tx(chan, control);
6540 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6541 __next_seq(chan, chan->buffer_seq));
6543 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6545 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partial SDU... */
6548 kfree_skb(chan->sdu);
6551 chan->sdu_last_frag = NULL;
/* ...and discard the frame itself */
6555 BT_DBG("Freeing %p", skb);
/* Always track the peer's sequence numbers, even after a drop */
6560 chan->last_acked_seq = control->txseq;
6561 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Common ERTM/streaming PDU entry: unpack the control field, verify the
 * FCS, bound-check the payload against the negotiated MPS, validate
 * F/P bit usage for the current tx state, then route I-frames and
 * S-frames into l2cap_rx()/l2cap_stream_rx().
 * NOTE(review): the 'len' initialization and several goto/drop lines
 * are not visible in this listing.
 */
6566 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6568 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6572 __unpack_control(chan, skb);
6577 * We can just drop the corrupted I-frame here.
6578 * Receiver will miss it and start proper recovery
6579 * procedures and ask for retransmission.
6581 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length prefix and FCS trailer from the
 * payload length compared against MPS.
 */
6584 if (!control->sframe && control->sar == L2CAP_SAR_START)
6585 len -= L2CAP_SDULEN_SIZE;
6587 if (chan->fcs == L2CAP_FCS_CRC16)
6588 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS: protocol violation */
6590 if (len > chan->mps) {
6591 l2cap_send_disconn_req(chan, ECONNRESET);
6595 if (!control->sframe) {
6598 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6599 control->sar, control->reqseq, control->final,
6602 /* Validate F-bit - F=0 always valid, F=1 only
6603 * valid in TX WAIT_F
6605 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6608 if (chan->mode != L2CAP_MODE_STREAMING) {
6609 event = L2CAP_EV_RECV_IFRAME;
6610 err = l2cap_rx(chan, control, skb, event);
6612 err = l2cap_stream_rx(chan, control, skb);
6616 l2cap_send_disconn_req(chan, ECONNRESET);
/* S-frame path: map the 2-bit super field to an rx event */
6618 const u8 rx_func_to_event[4] = {
6619 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6620 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6623 /* Only I-frames are expected in streaming mode */
6624 if (chan->mode == L2CAP_MODE_STREAMING)
6627 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6628 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a violation */
6632 BT_ERR("Trailing bytes: %d in sframe", len);
6633 l2cap_send_disconn_req(chan, ECONNRESET);
6637 /* Validate F and P bits */
6638 if (control->final && (control->poll ||
6639 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6642 event = rx_func_to_event[control->super];
6643 if (l2cap_rx(chan, control, skb, event))
6644 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's LE flow-control credits: once our rx credit
 * count drops below half of le_max_credits, send an LE Flow Control
 * Credit PDU restoring the count to the full initial amount.
 */
6654 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6656 struct l2cap_conn *conn = chan->conn;
6657 struct l2cap_le_credits pkt;
6660 /* We return more credits to the sender only after the amount of
6661 * credits falls below half of the initial amount.
6663 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6666 return_credits = le_max_credits - chan->rx_credits;
6668 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
/* Account for the credits locally before telling the peer */
6670 chan->rx_credits += return_credits;
6672 pkt.cid = cpu_to_le16(chan->scid);
6673 pkt.credits = cpu_to_le16(return_credits);
6675 chan->ident = l2cap_get_ident(conn);
6677 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* LE connection-oriented channel data rx: enforce credit availability
 * and SDU/MTU limits, then either deliver a complete SDU directly or
 * accumulate K-frames (the first frame carries the SDU length prefix)
 * until chan->sdu_len bytes have arrived. Always returns 0 once skb
 * ownership has been handled internally (see trailing comment).
 * NOTE(review): the credit decrement, the sdu branch condition, and
 * several error/cleanup lines are not visible in this listing.
 */
6680 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent data without credit: protocol violation */
6684 if (!chan->rx_credits) {
6685 BT_ERR("No credits to receive LE L2CAP data");
6686 l2cap_send_disconn_req(chan, ECONNRESET);
6690 if (chan->imtu < skb->len) {
6691 BT_ERR("Too big LE L2CAP PDU");
6696 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6698 l2cap_chan_le_send_credits(chan);
/* No SDU in progress: this frame starts one */
6705 sdu_len = get_unaligned_le16(skb->data);
6706 skb_pull(skb, L2CAP_SDULEN_SIZE);
6708 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6709 sdu_len, skb->len, chan->imtu);
6711 if (sdu_len > chan->imtu) {
6712 BT_ERR("Too big LE L2CAP SDU length received");
6717 if (skb->len > sdu_len) {
6718 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one frame: deliver without buffering */
6723 if (skb->len == sdu_len)
6724 return chan->ops->recv(chan, skb);
6727 chan->sdu_len = sdu_len;
6728 chan->sdu_last_frag = skb;
/* Continuation frame for an SDU already in progress */
6733 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6734 chan->sdu->len, skb->len, chan->sdu_len);
6736 if (chan->sdu->len + skb->len > chan->sdu_len) {
6737 BT_ERR("Too much LE L2CAP data received");
6742 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6745 if (chan->sdu->len == chan->sdu_len) {
6746 err = chan->ops->recv(chan, chan->sdu);
6749 chan->sdu_last_frag = NULL;
/* Error path: drop partial SDU and reset reassembly state */
6757 kfree_skb(chan->sdu);
6759 chan->sdu_last_frag = NULL;
6763 /* We can't return an error here since we took care of the skb
6764 * freeing internally. An error return would cause the caller to
6765 * do a double-free of the skb.
/* Route a data PDU to the channel identified by its source CID; A2MP
 * channels may be created on demand. Dispatches by channel mode
 * (LE flow control / basic / ERTM / streaming), dropping frames for
 * unknown CIDs, wrong channel state, or basic-mode MTU overflow.
 * NOTE(review): several drop/goto/unlock lines are elided in this
 * listing.
 */
6770 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6771 struct sk_buff *skb)
6773 struct l2cap_chan *chan;
6775 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: A2MP CID may create one on first use */
6777 if (cid == L2CAP_CID_A2MP) {
6778 chan = a2mp_channel_create(conn, skb);
6784 l2cap_chan_lock(chan);
6786 BT_DBG("unknown cid 0x%4.4x", cid);
6787 /* Drop packet and return */
6793 BT_DBG("chan %p, len %d", chan, skb->len);
6795 if (chan->state != BT_CONNECTED)
6798 switch (chan->mode) {
6799 case L2CAP_MODE_LE_FLOWCTL:
6800 if (l2cap_le_data_rcv(chan, skb) < 0)
6805 case L2CAP_MODE_BASIC:
6806 /* If socket recv buffers overflows we drop data here
6807 * which is *bad* because L2CAP has to be reliable.
6808 * But we don't have any other choice. L2CAP doesn't
6809 * provide flow control mechanism. */
6811 if (chan->imtu < skb->len) {
6812 BT_ERR("Dropping L2CAP data: receive buffer overflow");
/* recv() returning 0 means the channel consumed the skb */
6816 if (!chan->ops->recv(chan, skb))
6820 case L2CAP_MODE_ERTM:
6821 case L2CAP_MODE_STREAMING:
6822 l2cap_data_rcv(chan, skb);
6826 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6834 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) PDU to the listening channel for
 * its PSM on a BR/EDR link, stamping the remote address and PSM into
 * the skb control block for msg_name before handing it to recv().
 */
6837 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6838 struct sk_buff *skb)
6840 struct hci_conn *hcon = conn->hcon;
6841 struct l2cap_chan *chan;
/* Connectionless traffic is defined for BR/EDR only */
6843 if (hcon->type != ACL_LINK)
6846 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6851 BT_DBG("chan %p, len %d", chan, skb->len);
6853 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6856 if (chan->imtu < skb->len)
6859 /* Store remote BD_ADDR and PSM for msg_name */
6860 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6861 bt_cb(skb)->psm = psm;
/* recv() returning 0 means the channel consumed the skb */
6863 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT fixed-channel PDU (LE links only) to the connected
 * ATT channel matching this address pair, enforcing the channel MTU.
 */
6870 static void l2cap_att_channel(struct l2cap_conn *conn,
6871 struct sk_buff *skb)
6873 struct hci_conn *hcon = conn->hcon;
6874 struct l2cap_chan *chan;
/* The ATT fixed channel only exists on LE links */
6876 if (hcon->type != LE_LINK)
6879 chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6880 &hcon->src, &hcon->dst);
6884 BT_DBG("chan %p, len %d", chan, skb->len);
6886 if (chan->imtu < skb->len)
/* recv() returning 0 means the channel consumed the skb */
6889 if (!chan->ops->recv(chan, skb))
/* Top-level L2CAP frame demultiplexer: queue frames that arrive before
 * the HCI link is fully connected, validate the basic header length,
 * ignore traffic from blacklisted LE peers, then dispatch by CID
 * (signaling, connectionless, ATT, LE signaling, SMP, or data channel).
 * NOTE(review): some drop/kfree_skb and case-label lines are elided in
 * this listing.
 */
6896 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6898 struct l2cap_hdr *lh = (void *) skb->data;
6899 struct hci_conn *hcon = conn->hcon;
/* Link still coming up: hold the frame for process_pending_rx() */
6903 if (hcon->state != BT_CONNECTED) {
6904 BT_DBG("queueing pending rx skb");
6905 skb_queue_tail(&conn->pending_rx, skb);
6909 skb_pull(skb, L2CAP_HDR_SIZE);
6910 cid = __le16_to_cpu(lh->cid);
6911 len = __le16_to_cpu(lh->len);
/* Header length must describe exactly the remaining payload */
6913 if (len != skb->len) {
6918 /* Since we can't actively block incoming LE connections we must
6919 * at least ensure that we ignore incoming data from them.
6921 if (hcon->type == LE_LINK &&
6922 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6923 bdaddr_type(hcon, hcon->dst_type))) {
6928 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6931 case L2CAP_CID_SIGNALING:
6932 l2cap_sig_channel(conn, skb);
6935 case L2CAP_CID_CONN_LESS:
/* G-frames carry the PSM in the first two payload bytes */
6936 psm = get_unaligned((__le16 *) skb->data);
6937 skb_pull(skb, L2CAP_PSMLEN_SIZE);
6938 l2cap_conless_channel(conn, psm, skb);
6942 l2cap_att_channel(conn, skb);
6945 case L2CAP_CID_LE_SIGNALING:
6946 l2cap_le_sig_channel(conn, skb);
/* SMP failure drops the whole connection */
6950 if (smp_sig_channel(conn, skb))
6951 l2cap_conn_del(conn->hcon, EACCES);
/* Anything else is a dynamically-allocated data channel */
6955 l2cap_data_channel(conn, cid, skb);
/* Work item: replay frames that l2cap_recv_frame() queued while the
 * HCI connection was still being established.
 */
6960 static void process_pending_rx(struct work_struct *work)
6962 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6964 struct sk_buff *skb;
6968 while ((skb = skb_dequeue(&conn->pending_rx)))
6969 l2cap_recv_frame(conn, skb);
/* Create and initialize the l2cap_conn for an HCI connection: allocate
 * the HCI channel, choose the MTU by link type (LE vs ACL), set up
 * locks and the channel list, arm the appropriate delayed work
 * (security timer for LE, info timer for BR/EDR), and initialize the
 * pending-rx machinery. Returns the new conn, or presumably NULL on
 * failure (early-return lines are elided in this listing).
 */
6972 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6974 struct l2cap_conn *conn = hcon->l2cap_data;
6975 struct hci_chan *hchan;
6980 hchan = hci_chan_create(hcon);
6984 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: release the HCI channel we just created */
6986 hci_chan_del(hchan);
6990 kref_init(&conn->ref);
6991 hcon->l2cap_data = conn;
/* conn holds a reference on the underlying HCI connection */
6993 hci_conn_get(conn->hcon);
6994 conn->hchan = hchan;
6996 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6998 switch (hcon->type) {
/* LE link: prefer the controller's dedicated LE MTU if set */
7000 if (hcon->hdev->le_mtu) {
7001 conn->mtu = hcon->hdev->le_mtu;
7006 conn->mtu = hcon->hdev->acl_mtu;
7010 conn->feat_mask = 0;
7012 if (hcon->type == ACL_LINK)
7013 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
7014 &hcon->hdev->dev_flags);
7016 spin_lock_init(&conn->lock);
7017 mutex_init(&conn->chan_lock);
7019 INIT_LIST_HEAD(&conn->chan_l);
7020 INIT_LIST_HEAD(&conn->users);
/* LE uses a security timeout; BR/EDR an info-request timeout */
7022 if (hcon->type == LE_LINK)
7023 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
7025 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7027 skb_queue_head_init(&conn->pending_rx);
7028 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7030 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7035 static bool is_valid_psm(u16 psm, u8 dst_type) {
7039 if (bdaddr_type_is_le(dst_type))
7040 return (psm <= 0x00ff);
7042 /* PSM must be odd and lsb of upper byte must be 0 */
7043 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing channel connection to dst: validate the PSM/CID
 * and mode against the channel type, create or reuse the HCI connection
 * (LE or BR/EDR ACL), attach the channel to the l2cap_conn, and kick
 * off the connect state machine (immediately if the link is already
 * up). Returns 0 on success or a negative errno.
 * NOTE(review): several error-path assignments (err = ...; goto done;)
 * and state-switch lines are elided in this listing.
 */
7046 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7047 bdaddr_t *dst, u8 dst_type)
7049 struct l2cap_conn *conn;
7050 struct hci_conn *hcon;
7051 struct hci_dev *hdev;
7054 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7055 dst_type, __le16_to_cpu(psm));
7057 hdev = hci_get_route(dst, &chan->src);
7059 return -EHOSTUNREACH;
7063 l2cap_chan_lock(chan);
/* Raw channels are exempt from PSM validity rules */
7065 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7066 chan->chan_type != L2CAP_CHAN_RAW) {
7071 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7076 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7081 switch (chan->mode) {
7082 case L2CAP_MODE_BASIC:
7084 case L2CAP_MODE_LE_FLOWCTL:
7085 l2cap_le_flowctl_init(chan);
7087 case L2CAP_MODE_ERTM:
7088 case L2CAP_MODE_STREAMING:
7097 switch (chan->state) {
7101 /* Already connecting */
7106 /* Already connected */
7120 /* Set destination address and psm */
7121 bacpy(&chan->dst, dst);
7122 chan->dst_type = dst_type;
7127 if (bdaddr_type_is_le(dst_type)) {
7130 /* Convert from L2CAP channel address type to HCI address type
7132 if (dst_type == BDADDR_LE_PUBLIC)
7133 dst_type = ADDR_LE_DEV_PUBLIC;
7135 dst_type = ADDR_LE_DEV_RANDOM;
/* Take the master role unless we are currently advertising */
7137 master = !test_bit(HCI_ADVERTISING, &hdev->dev_flags);
7139 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7140 HCI_LE_CONN_TIMEOUT, master);
7142 u8 auth_type = l2cap_get_auth_type(chan);
7143 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7147 err = PTR_ERR(hcon);
7151 conn = l2cap_conn_add(hcon);
7153 hci_conn_drop(hcon);
/* Refuse a duplicate fixed-channel connection on this link */
7158 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7159 hci_conn_drop(hcon);
7164 /* Update source addr of the socket */
7165 bacpy(&chan->src, &hcon->src);
7166 chan->src_type = bdaddr_type(hcon, hcon->src_type);
/* chan_add takes conn->chan_lock; drop the chan lock around it to
 * respect lock ordering.
 */
7168 l2cap_chan_unlock(chan);
7169 l2cap_chan_add(conn, chan);
7170 l2cap_chan_lock(chan);
7172 /* l2cap_chan_add takes its own ref so we can drop this one */
7173 hci_conn_drop(hcon);
7175 l2cap_state_change(chan, BT_CONNECT);
7176 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7178 /* Release chan->sport so that it can be reused by other
7179 * sockets (as it's only used for listening sockets).
7181 write_lock(&chan_list_lock);
7183 write_unlock(&chan_list_lock);
/* Link already up: start channel establishment right away */
7185 if (hcon->state == BT_CONNECTED) {
7186 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7187 __clear_chan_timer(chan);
7188 if (l2cap_chan_check_security(chan))
7189 l2cap_state_change(chan, BT_CONNECTED);
7191 l2cap_do_start(chan);
7197 l2cap_chan_unlock(chan);
7198 hci_dev_unlock(hdev);
7202 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7204 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening channels. An exact local-address match (lm1) takes
 * precedence over BDADDR_ANY listeners (lm2).
 * NOTE(review): the line that sets 'exact' is not visible in this
 * listing -- presumably set on the exact-address match; confirm against
 * the full source.
 */
7206 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7208 int exact = 0, lm1 = 0, lm2 = 0;
7209 struct l2cap_chan *c;
7211 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7213 /* Find listening sockets and check their link_mode */
7214 read_lock(&chan_list_lock);
7215 list_for_each_entry(c, &chan_list, global_l) {
7216 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address */
7219 if (!bacmp(&c->src, &hdev->bdaddr)) {
7220 lm1 |= HCI_LM_ACCEPT;
7221 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7222 lm1 |= HCI_LM_MASTER;
/* Wildcard listener */
7224 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7225 lm2 |= HCI_LM_ACCEPT;
7226 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7227 lm2 |= HCI_LM_MASTER;
7230 read_unlock(&chan_list_lock);
7232 return exact ? lm1 : lm2;
/* HCI callback: link establishment completed. On success, create the
 * l2cap_conn and mark it ready; on failure, tear down any L2CAP state
 * with the mapped errno.
 */
7235 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7237 struct l2cap_conn *conn;
7239 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7242 conn = l2cap_conn_add(hcon);
7244 l2cap_conn_ready(conn);
7246 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use for a disconnect;
 * falls back to "remote user terminated" when no l2cap_conn exists.
 */
7250 int l2cap_disconn_ind(struct hci_conn *hcon)
7252 struct l2cap_conn *conn = hcon->l2cap_data;
7254 BT_DBG("hcon %p", hcon);
7257 return HCI_ERROR_REMOTE_USER_TERM;
7258 return conn->disc_reason;
/* HCI callback: the link went down -- tear down all L2CAP state for it,
 * translating the HCI reason into an errno.
 */
7261 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7263 BT_DBG("hcon %p reason %d", hcon, reason);
7265 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a link-encryption change on a connection-oriented channel:
 * losing encryption gives MEDIUM security a grace timer, closes
 * HIGH/FIPS channels outright; regaining it clears the grace timer.
 */
7268 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7270 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7273 if (encrypt == 0x00) {
/* MEDIUM: allow a short window for re-encryption before closing */
7274 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7275 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7276 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7277 chan->sec_level == BT_SECURITY_FIPS)
7278 l2cap_chan_close(chan, ECONNREFUSED);
7280 if (chan->sec_level == BT_SECURITY_MEDIUM)
7281 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished. For LE
 * links, continue SMP key distribution. For each channel on the
 * connection, resume or advance its connection state machine according
 * to the result -- sending Connection Requests for channels that were
 * waiting on security, or Connection Responses for incoming channels
 * held in CONNECT2.
 * NOTE(review): several break/continue/goto lines and the buf
 * declaration are elided in this listing.
 */
7285 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7287 struct l2cap_conn *conn = hcon->l2cap_data;
7288 struct l2cap_chan *chan;
7293 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7295 if (hcon->type == LE_LINK) {
7296 if (!status && encrypt)
7297 smp_distribute_keys(conn);
7298 cancel_delayed_work(&conn->security_timer);
7301 mutex_lock(&conn->chan_lock);
7303 list_for_each_entry(chan, &conn->chan_l, list) {
7304 l2cap_chan_lock(chan);
7306 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7307 state_to_string(chan->state));
/* A2MP channels are not subject to link security handling */
7309 if (chan->scid == L2CAP_CID_A2MP) {
7310 l2cap_chan_unlock(chan);
/* ATT fixed channel becomes ready as soon as encryption is up */
7314 if (chan->scid == L2CAP_CID_ATT) {
7315 if (!status && encrypt) {
7316 chan->sec_level = hcon->sec_level;
7317 l2cap_chan_ready(chan);
7320 l2cap_chan_unlock(chan);
/* Skip channels not waiting on a security procedure */
7324 if (!__l2cap_no_conn_pending(chan)) {
7325 l2cap_chan_unlock(chan);
7329 if (!status && (chan->state == BT_CONNECTED ||
7330 chan->state == BT_CONFIG)) {
7331 chan->ops->resume(chan);
7332 l2cap_check_encryption(chan, encrypt);
7333 l2cap_chan_unlock(chan);
/* Outgoing channel was blocked on security: proceed or abort */
7337 if (chan->state == BT_CONNECT) {
7339 l2cap_start_connection(chan);
7341 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming channel held pending security: send the response now */
7342 } else if (chan->state == BT_CONNECT2) {
7343 struct l2cap_conn_rsp rsp;
7347 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7348 res = L2CAP_CR_PEND;
7349 stat = L2CAP_CS_AUTHOR_PEND;
7350 chan->ops->defer(chan);
7352 l2cap_state_change(chan, BT_CONFIG);
7353 res = L2CAP_CR_SUCCESS;
7354 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the pending connection */
7357 l2cap_state_change(chan, BT_DISCONN);
7358 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7359 res = L2CAP_CR_SEC_BLOCK;
7360 stat = L2CAP_CS_NO_INFO;
7363 rsp.scid = cpu_to_le16(chan->dcid);
7364 rsp.dcid = cpu_to_le16(chan->scid);
7365 rsp.result = cpu_to_le16(res);
7366 rsp.status = cpu_to_le16(stat);
7367 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On acceptance, immediately start configuration */
7370 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7371 res == L2CAP_CR_SUCCESS) {
7373 set_bit(CONF_REQ_SENT, &chan->conf_state);
7374 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7376 l2cap_build_conf_req(chan, buf),
7378 chan->num_conf_req++;
7382 l2cap_chan_unlock(chan);
7385 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data arriving from the HCI layer.
 *
 * Reassembles L2CAP PDUs from ACL fragments: a start fragment carries
 * the basic L2CAP header whose length field tells us the total PDU
 * size; continuation fragments are appended into conn->rx_skb until
 * rx_len reaches zero, at which point the complete frame is handed to
 * l2cap_recv_frame().  Malformed sequences (short header, oversized
 * frame/fragment, unexpected start or continuation) discard the
 * in-progress buffer and mark the connection unreliable with ECOMM.
 *
 * NOTE(review): the `switch (flags)` statement, the ACL_CONT case
 * label, the drop/return paths and several closing braces are elided
 * in this excerpt; the visible lines are the bodies of those arms.
 */
7390 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7392 struct l2cap_conn *conn = hcon->l2cap_data;
7393 struct l2cap_hdr *hdr;
7396 /* For AMP controller do not create l2cap conn */
7397 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7401 conn = l2cap_conn_add(hcon);
7406 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* --- start-of-PDU fragment --- */
7410 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous PDU was truncated: drop it. */
7413 BT_ERR("Unexpected start frame (len %d)", skb->len);
7414 kfree_skb(conn->rx_skb);
7415 conn->rx_skb = NULL;
7417 l2cap_conn_unreliable(conn, ECOMM);
7420 /* Start fragment always begin with Basic L2CAP header */
7421 if (skb->len < L2CAP_HDR_SIZE) {
7422 BT_ERR("Frame is too short (len %d)", skb->len);
7423 l2cap_conn_unreliable(conn, ECOMM);
7427 hdr = (struct l2cap_hdr *) skb->data;
/* Total PDU size = payload length from the header + the header. */
7428 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7430 if (len == skb->len) {
7431 /* Complete frame received */
7432 l2cap_recv_frame(conn, skb);
7436 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7438 if (skb->len > len) {
7439 BT_ERR("Frame is too long (len %d, expected len %d)",
7441 l2cap_conn_unreliable(conn, ECOMM);
7445 /* Allocate skb for the complete frame (with header) */
7446 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7450 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes of the PDU are still outstanding. */
7452 conn->rx_len = len - skb->len;
/* --- continuation fragment --- */
7456 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7458 if (!conn->rx_len) {
7459 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7460 l2cap_conn_unreliable(conn, ECOMM);
7464 if (skb->len > conn->rx_len) {
7465 BT_ERR("Fragment is too long (len %d, expected %d)",
7466 skb->len, conn->rx_len);
7467 kfree_skb(conn->rx_skb);
7468 conn->rx_skb = NULL;
7470 l2cap_conn_unreliable(conn, ECOMM);
7474 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7476 conn->rx_len -= skb->len;
7478 if (!conn->rx_len) {
7479 /* Complete frame received. l2cap_recv_frame
7480 * takes ownership of the skb so set the global
7481 * rx_skb pointer to NULL first.
7483 struct sk_buff *rx_skb = conn->rx_skb;
7484 conn->rx_skb = NULL;
7485 l2cap_recv_frame(conn, rx_skb);
/* seq_file show callback for the "l2cap" debugfs entry: dump one line
 * per registered channel (addresses, state, PSM, CIDs, MTUs, security
 * level and mode) while holding chan_list_lock for reading.
 * NOTE(review): the `return 0;` and closing brace are elided in this
 * excerpt.
 */
7495 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7497 struct l2cap_chan *c;
7499 read_lock(&chan_list_lock);
7501 list_for_each_entry(c, &chan_list, global_l) {
7502 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7504 c->state, __le16_to_cpu(c->psm),
7505 c->scid, c->dcid, c->imtu, c->omtu,
7506 c->sec_level, c->mode);
7509 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-shot show routine. */
7514 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7516 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry, using the standard
 * seq_file single_open/single_release pattern.
 * NOTE(review): the `.read = seq_read,` line appears to be elided in
 * this excerpt -- confirm against upstream. */
7519 static const struct file_operations l2cap_debugfs_fops = {
7520 .open = l2cap_debugfs_open,
7522 .llseek = seq_lseek,
7523 .release = single_release,
/* Dentry of the "l2cap" debugfs file, kept so l2cap_exit() can remove it. */
7526 static struct dentry *l2cap_debugfs;
/* Module initialization: register the L2CAP socket layer, then (when
 * the Bluetooth debugfs root exists) create the "l2cap" dump file and
 * the tunable LE credit/MPS knobs.
 * NOTE(review): the error-return after l2cap_init_sockets(), the final
 * `return 0;` and the u16 variable arguments to debugfs_create_u16()
 * (&le_max_credits / &le_default_mps) are elided in this excerpt.
 */
7528 int __init l2cap_init(void)
7532 err = l2cap_init_sockets();
/* bt_debugfs may be unavailable (debugfs disabled); skip file creation. */
7536 if (IS_ERR_OR_NULL(bt_debugfs))
7539 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7540 NULL, &l2cap_debugfs_fops);
7542 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7544 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module teardown: remove the debugfs entry and unregister the L2CAP
 * socket layer -- the reverse of l2cap_init(). */
7550 void l2cap_exit(void)
7552 debugfs_remove(l2cap_debugfs);
7553 l2cap_cleanup_sockets();
/* Runtime-writable module parameter to disable ERTM (enhanced
 * retransmission mode); 0644 exposes it under /sys/module. */
7556 module_param(disable_ertm, bool, 0644);
7557 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");