2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66 static void l2cap_retrans_timeout(struct work_struct *work);
67 static void l2cap_monitor_timeout(struct work_struct *work);
68 static void l2cap_ack_timeout(struct work_struct *work);
70 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
72 if (link_type == LE_LINK) {
73 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
74 return BDADDR_LE_PUBLIC;
76 return BDADDR_LE_RANDOM;
82 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->src_type);
87 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
89 return bdaddr_type(hcon->type, hcon->dst_type);
92 /* ---- L2CAP channels ---- */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 list_for_each_entry(c, &conn->chan_l, list) {
118 /* Find channel with given SCID.
119 * Returns a reference locked channel.
121 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
124 struct l2cap_chan *c;
126 mutex_lock(&conn->chan_lock);
127 c = __l2cap_get_chan_by_scid(conn, cid);
129 /* Only lock if chan reference is not 0 */
130 c = l2cap_chan_hold_unless_zero(c);
134 mutex_unlock(&conn->chan_lock);
139 /* Find channel with given DCID.
140 * Returns a reference locked channel.
142 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
145 struct l2cap_chan *c;
147 mutex_lock(&conn->chan_lock);
148 c = __l2cap_get_chan_by_dcid(conn, cid);
150 /* Only lock if chan reference is not 0 */
151 c = l2cap_chan_hold_unless_zero(c);
155 mutex_unlock(&conn->chan_lock);
160 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 list_for_each_entry(c, &conn->chan_l, list) {
166 if (c->ident == ident)
172 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
175 struct l2cap_chan *c;
177 mutex_lock(&conn->chan_lock);
178 c = __l2cap_get_chan_by_ident(conn, ident);
180 /* Only lock if chan reference is not 0 */
181 c = l2cap_chan_hold_unless_zero(c);
185 mutex_unlock(&conn->chan_lock);
190 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
192 struct l2cap_chan *c;
194 list_for_each_entry(c, &chan_list, global_l) {
195 if (c->sport == psm && !bacmp(&c->src, src))
201 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
205 write_lock(&chan_list_lock);
207 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
217 u16 p, start, end, incr;
219 if (chan->src_type == BDADDR_BREDR) {
220 start = L2CAP_PSM_DYN_START;
221 end = L2CAP_PSM_AUTO_END;
224 start = L2CAP_PSM_LE_DYN_START;
225 end = L2CAP_PSM_LE_DYN_END;
230 for (p = start; p <= end; p += incr)
231 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
232 chan->psm = cpu_to_le16(p);
233 chan->sport = cpu_to_le16(p);
240 write_unlock(&chan_list_lock);
243 EXPORT_SYMBOL_GPL(l2cap_add_psm);
245 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
247 write_lock(&chan_list_lock);
249 /* Override the defaults (which are for conn-oriented) */
250 chan->omtu = L2CAP_DEFAULT_MTU;
251 chan->chan_type = L2CAP_CHAN_FIXED;
255 write_unlock(&chan_list_lock);
260 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 if (conn->hcon->type == LE_LINK)
265 dyn_end = L2CAP_CID_LE_DYN_END;
267 dyn_end = L2CAP_CID_DYN_END;
269 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
270 if (!__l2cap_get_chan_by_scid(conn, cid))
277 static void l2cap_state_change(struct l2cap_chan *chan, int state)
279 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
280 state_to_string(state));
283 chan->ops->state_change(chan, state, 0);
286 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 chan->ops->state_change(chan, chan->state, err);
293 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
295 chan->ops->state_change(chan, chan->state, err);
298 static void __set_retrans_timer(struct l2cap_chan *chan)
300 if (!delayed_work_pending(&chan->monitor_timer) &&
301 chan->retrans_timeout) {
302 l2cap_set_timer(chan, &chan->retrans_timer,
303 msecs_to_jiffies(chan->retrans_timeout));
307 static void __set_monitor_timer(struct l2cap_chan *chan)
309 __clear_retrans_timer(chan);
310 if (chan->monitor_timeout) {
311 l2cap_set_timer(chan, &chan->monitor_timer,
312 msecs_to_jiffies(chan->monitor_timeout));
316 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
321 skb_queue_walk(head, skb) {
322 if (bt_cb(skb)->l2cap.txseq == seq)
329 /* ---- L2CAP sequence number lists ---- */
331 /* For ERTM, ordered lists of sequence numbers must be tracked for
332 * SREJ requests that are received and for frames that are to be
333 * retransmitted. These seq_list functions implement a singly-linked
334 * list in an array, where membership in the list can also be checked
335 * in constant time. Items can also be added to the tail of the list
336 and removed from the head in constant time, without further memory allocations.
340 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
342 size_t alloc_size, i;
344 /* Allocated size is a power of 2 to map sequence numbers
345 * (which may be up to 14 bits) in to a smaller array that is
346 * sized for the negotiated ERTM transmit windows.
348 alloc_size = roundup_pow_of_two(size);
350 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
354 seq_list->mask = alloc_size - 1;
355 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
356 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
357 for (i = 0; i < alloc_size; i++)
358 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
363 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
365 kfree(seq_list->list);
368 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
371 /* Constant-time check for list membership */
372 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
377 u16 seq = seq_list->head;
378 u16 mask = seq_list->mask;
380 seq_list->head = seq_list->list[seq & mask];
381 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
383 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
384 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
391 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
398 for (i = 0; i <= seq_list->mask; i++)
399 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
401 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
402 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
405 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
407 u16 mask = seq_list->mask;
409 /* All appends happen in constant time */
411 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
414 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
415 seq_list->head = seq;
417 seq_list->list[seq_list->tail & mask] = seq;
419 seq_list->tail = seq;
420 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with a state-dependent
 * reason and drop the reference the timer was holding.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
456 struct l2cap_chan *l2cap_chan_create(void)
458 struct l2cap_chan *chan;
460 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
464 skb_queue_head_init(&chan->tx_q);
465 skb_queue_head_init(&chan->srej_q);
466 mutex_init(&chan->lock);
468 /* Set default lock nesting level */
469 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
471 write_lock(&chan_list_lock);
472 list_add(&chan->global_l, &chan_list);
473 write_unlock(&chan_list_lock);
475 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
476 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
477 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
478 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
480 chan->state = BT_OPEN;
482 kref_init(&chan->kref);
484 /* This flag is cleared in l2cap_chan_ready() */
485 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
487 BT_DBG("chan %p", chan);
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
493 static void l2cap_chan_destroy(struct kref *kref)
495 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
497 BT_DBG("chan %p", chan);
499 write_lock(&chan_list_lock);
500 list_del(&chan->global_l);
501 write_unlock(&chan_list_lock);
506 void l2cap_chan_hold(struct l2cap_chan *c)
508 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
515 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
517 if (!kref_get_unless_zero(&c->kref))
523 void l2cap_chan_put(struct l2cap_chan *c)
525 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
527 kref_put(&c->kref, l2cap_chan_destroy);
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
533 chan->fcs = L2CAP_FCS_CRC16;
534 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 chan->remote_max_tx = chan->max_tx;
538 chan->remote_tx_win = chan->tx_win;
539 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 chan->sec_level = BT_SECURITY_LOW;
541 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
545 chan->conf_state = 0;
546 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
548 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
552 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
555 chan->sdu_last_frag = NULL;
557 chan->tx_credits = 0;
558 chan->rx_credits = le_max_credits;
559 chan->mps = min_t(u16, chan->imtu, le_default_mps);
561 skb_queue_head_init(&chan->tx_q);
564 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
566 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
567 __le16_to_cpu(chan->psm), chan->dcid);
569 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
573 switch (chan->chan_type) {
574 case L2CAP_CHAN_CONN_ORIENTED:
575 /* Alloc CID for connection-oriented socket */
576 chan->scid = l2cap_alloc_cid(conn);
577 if (conn->hcon->type == ACL_LINK)
578 chan->omtu = L2CAP_DEFAULT_MTU;
581 case L2CAP_CHAN_CONN_LESS:
582 /* Connectionless socket */
583 chan->scid = L2CAP_CID_CONN_LESS;
584 chan->dcid = L2CAP_CID_CONN_LESS;
585 chan->omtu = L2CAP_DEFAULT_MTU;
588 case L2CAP_CHAN_FIXED:
589 /* Caller will set CID and CID specific MTU values */
593 /* Raw socket can send/recv signalling messages only */
594 chan->scid = L2CAP_CID_SIGNALING;
595 chan->dcid = L2CAP_CID_SIGNALING;
596 chan->omtu = L2CAP_DEFAULT_MTU;
599 chan->local_id = L2CAP_BESTEFFORT_ID;
600 chan->local_stype = L2CAP_SERV_BESTEFFORT;
601 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
602 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
603 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
604 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
606 l2cap_chan_hold(chan);
608 /* Only keep a reference for fixed channels if they requested it */
609 if (chan->chan_type != L2CAP_CHAN_FIXED ||
610 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
611 hci_conn_hold(conn->hcon);
613 list_add(&chan->list, &conn->chan_l);
616 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
618 mutex_lock(&conn->chan_lock);
619 __l2cap_chan_add(conn, chan);
620 mutex_unlock(&conn->chan_lock);
623 void l2cap_chan_del(struct l2cap_chan *chan, int err)
625 struct l2cap_conn *conn = chan->conn;
627 __clear_chan_timer(chan);
629 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
630 state_to_string(chan->state));
632 chan->ops->teardown(chan, err);
635 struct amp_mgr *mgr = conn->hcon->amp_mgr;
636 /* Delete from channel list */
637 list_del(&chan->list);
639 l2cap_chan_put(chan);
643 /* Reference was only held for non-fixed channels or
644 * fixed channels that explicitly requested it using the
645 * FLAG_HOLD_HCI_CONN flag.
647 if (chan->chan_type != L2CAP_CHAN_FIXED ||
648 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
649 hci_conn_drop(conn->hcon);
651 if (mgr && mgr->bredr_chan == chan)
652 mgr->bredr_chan = NULL;
655 if (chan->hs_hchan) {
656 struct hci_chan *hs_hchan = chan->hs_hchan;
658 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
659 amp_disconnect_logical_link(hs_hchan);
662 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
666 case L2CAP_MODE_BASIC:
669 case L2CAP_MODE_LE_FLOWCTL:
670 skb_queue_purge(&chan->tx_q);
673 case L2CAP_MODE_ERTM:
674 __clear_retrans_timer(chan);
675 __clear_monitor_timer(chan);
676 __clear_ack_timer(chan);
678 skb_queue_purge(&chan->srej_q);
680 l2cap_seq_list_free(&chan->srej_list);
681 l2cap_seq_list_free(&chan->retrans_list);
685 case L2CAP_MODE_STREAMING:
686 skb_queue_purge(&chan->tx_q);
692 EXPORT_SYMBOL_GPL(l2cap_chan_del);
694 static void l2cap_conn_update_id_addr(struct work_struct *work)
696 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
697 id_addr_update_work);
698 struct hci_conn *hcon = conn->hcon;
699 struct l2cap_chan *chan;
701 mutex_lock(&conn->chan_lock);
703 list_for_each_entry(chan, &conn->chan_l, list) {
704 l2cap_chan_lock(chan);
705 bacpy(&chan->dst, &hcon->dst);
706 chan->dst_type = bdaddr_dst_type(hcon);
707 l2cap_chan_unlock(chan);
710 mutex_unlock(&conn->chan_lock);
713 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
715 struct l2cap_conn *conn = chan->conn;
716 struct l2cap_le_conn_rsp rsp;
719 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
720 result = L2CAP_CR_AUTHORIZATION;
722 result = L2CAP_CR_BAD_PSM;
724 l2cap_state_change(chan, BT_DISCONN);
726 rsp.dcid = cpu_to_le16(chan->scid);
727 rsp.mtu = cpu_to_le16(chan->imtu);
728 rsp.mps = cpu_to_le16(chan->mps);
729 rsp.credits = cpu_to_le16(chan->rx_credits);
730 rsp.result = cpu_to_le16(result);
732 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
736 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
738 struct l2cap_conn *conn = chan->conn;
739 struct l2cap_conn_rsp rsp;
742 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
743 result = L2CAP_CR_SEC_BLOCK;
745 result = L2CAP_CR_BAD_PSM;
747 l2cap_state_change(chan, BT_DISCONN);
749 rsp.scid = cpu_to_le16(chan->dcid);
750 rsp.dcid = cpu_to_le16(chan->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
754 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
757 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
759 struct l2cap_conn *conn = chan->conn;
761 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
763 switch (chan->state) {
765 chan->ops->teardown(chan, 0);
770 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
771 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
772 l2cap_send_disconn_req(chan, reason);
774 l2cap_chan_del(chan, reason);
778 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
779 if (conn->hcon->type == ACL_LINK)
780 l2cap_chan_connect_reject(chan);
781 else if (conn->hcon->type == LE_LINK)
782 l2cap_chan_le_connect_reject(chan);
785 l2cap_chan_del(chan, reason);
790 l2cap_chan_del(chan, reason);
794 chan->ops->teardown(chan, 0);
798 EXPORT_SYMBOL(l2cap_chan_close);
800 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
802 switch (chan->chan_type) {
804 switch (chan->sec_level) {
805 case BT_SECURITY_HIGH:
806 case BT_SECURITY_FIPS:
807 return HCI_AT_DEDICATED_BONDING_MITM;
808 case BT_SECURITY_MEDIUM:
809 return HCI_AT_DEDICATED_BONDING;
811 return HCI_AT_NO_BONDING;
814 case L2CAP_CHAN_CONN_LESS:
815 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
816 if (chan->sec_level == BT_SECURITY_LOW)
817 chan->sec_level = BT_SECURITY_SDP;
819 if (chan->sec_level == BT_SECURITY_HIGH ||
820 chan->sec_level == BT_SECURITY_FIPS)
821 return HCI_AT_NO_BONDING_MITM;
823 return HCI_AT_NO_BONDING;
825 case L2CAP_CHAN_CONN_ORIENTED:
826 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
827 if (chan->sec_level == BT_SECURITY_LOW)
828 chan->sec_level = BT_SECURITY_SDP;
830 if (chan->sec_level == BT_SECURITY_HIGH ||
831 chan->sec_level == BT_SECURITY_FIPS)
832 return HCI_AT_NO_BONDING_MITM;
834 return HCI_AT_NO_BONDING;
838 switch (chan->sec_level) {
839 case BT_SECURITY_HIGH:
840 case BT_SECURITY_FIPS:
841 return HCI_AT_GENERAL_BONDING_MITM;
842 case BT_SECURITY_MEDIUM:
843 return HCI_AT_GENERAL_BONDING;
845 return HCI_AT_NO_BONDING;
851 /* Service level security */
852 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
854 struct l2cap_conn *conn = chan->conn;
857 if (conn->hcon->type == LE_LINK)
858 return smp_conn_security(conn->hcon, chan->sec_level);
860 auth_type = l2cap_get_auth_type(chan);
862 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
866 static u8 l2cap_get_ident(struct l2cap_conn *conn)
870 /* Get next available identificator.
871 * 1 - 128 are used by kernel.
872 * 129 - 199 are reserved.
873 * 200 - 254 are used by utilities like l2ping, etc.
876 mutex_lock(&conn->ident_lock);
878 if (++conn->tx_ident > 128)
883 mutex_unlock(&conn->ident_lock);
888 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
891 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
894 BT_DBG("code 0x%2.2x", code);
899 /* Use NO_FLUSH if supported or we have an LE link (which does
900 * not support auto-flushing packets) */
901 if (lmp_no_flush_capable(conn->hcon->hdev) ||
902 conn->hcon->type == LE_LINK)
903 flags = ACL_START_NO_FLUSH;
907 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
908 skb->priority = HCI_PRIO_MAX;
910 hci_send_acl(conn->hchan, skb, flags);
913 static bool __chan_is_moving(struct l2cap_chan *chan)
915 return chan->move_state != L2CAP_MOVE_STABLE &&
916 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
919 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
921 struct hci_conn *hcon = chan->conn->hcon;
924 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
927 if (chan->hs_hcon && !__chan_is_moving(chan)) {
929 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
936 /* Use NO_FLUSH for LE links (where this is the only option) or
937 * if the BR/EDR link supports it and flushing has not been
938 * explicitly requested (through FLAG_FLUSHABLE).
940 if (hcon->type == LE_LINK ||
941 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
942 lmp_no_flush_capable(hcon->hdev)))
943 flags = ACL_START_NO_FLUSH;
947 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
948 hci_send_acl(chan->conn->hchan, skb, flags);
951 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
953 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
954 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
956 if (enh & L2CAP_CTRL_FRAME_TYPE) {
959 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
960 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
967 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
968 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
975 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
977 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
978 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
980 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
983 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
984 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
991 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
992 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
999 static inline void __unpack_control(struct l2cap_chan *chan,
1000 struct sk_buff *skb)
1002 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1003 __unpack_extended_control(get_unaligned_le32(skb->data),
1004 &bt_cb(skb)->l2cap);
1005 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1007 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1008 &bt_cb(skb)->l2cap);
1009 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1013 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1017 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1018 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1020 if (control->sframe) {
1021 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1022 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1023 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1025 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1026 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1032 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1036 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1037 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1039 if (control->sframe) {
1040 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1041 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1042 packed |= L2CAP_CTRL_FRAME_TYPE;
1044 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1045 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1051 static inline void __pack_control(struct l2cap_chan *chan,
1052 struct l2cap_ctrl *control,
1053 struct sk_buff *skb)
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1056 put_unaligned_le32(__pack_extended_control(control),
1057 skb->data + L2CAP_HDR_SIZE);
1059 put_unaligned_le16(__pack_enhanced_control(control),
1060 skb->data + L2CAP_HDR_SIZE);
1064 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1066 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1067 return L2CAP_EXT_HDR_SIZE;
1069 return L2CAP_ENH_HDR_SIZE;
1072 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1075 struct sk_buff *skb;
1076 struct l2cap_hdr *lh;
1077 int hlen = __ertm_hdr_size(chan);
1079 if (chan->fcs == L2CAP_FCS_CRC16)
1080 hlen += L2CAP_FCS_SIZE;
1082 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1085 return ERR_PTR(-ENOMEM);
1087 lh = skb_put(skb, L2CAP_HDR_SIZE);
1088 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1089 lh->cid = cpu_to_le16(chan->dcid);
1091 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1092 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1094 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1096 if (chan->fcs == L2CAP_FCS_CRC16) {
1097 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1098 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1101 skb->priority = HCI_PRIO_MAX;
1105 static void l2cap_send_sframe(struct l2cap_chan *chan,
1106 struct l2cap_ctrl *control)
1108 struct sk_buff *skb;
1111 BT_DBG("chan %p, control %p", chan, control);
1113 if (!control->sframe)
1116 if (__chan_is_moving(chan))
1119 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1123 if (control->super == L2CAP_SUPER_RR)
1124 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1125 else if (control->super == L2CAP_SUPER_RNR)
1126 set_bit(CONN_RNR_SENT, &chan->conn_state);
1128 if (control->super != L2CAP_SUPER_SREJ) {
1129 chan->last_acked_seq = control->reqseq;
1130 __clear_ack_timer(chan);
1133 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1134 control->final, control->poll, control->super);
1136 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 control_field = __pack_extended_control(control);
1139 control_field = __pack_enhanced_control(control);
1141 skb = l2cap_create_sframe_pdu(chan, control_field);
1143 l2cap_do_send(chan, skb);
1146 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1148 struct l2cap_ctrl control;
1150 BT_DBG("chan %p, poll %d", chan, poll);
1152 memset(&control, 0, sizeof(control));
1154 control.poll = poll;
1156 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1157 control.super = L2CAP_SUPER_RNR;
1159 control.super = L2CAP_SUPER_RR;
1161 control.reqseq = chan->buffer_seq;
1162 l2cap_send_sframe(chan, &control);
1165 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1167 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1170 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1173 static bool __amp_capable(struct l2cap_chan *chan)
1175 struct l2cap_conn *conn = chan->conn;
1176 struct hci_dev *hdev;
1177 bool amp_available = false;
1179 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1182 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1185 read_lock(&hci_dev_list_lock);
1186 list_for_each_entry(hdev, &hci_dev_list, list) {
1187 if (hdev->amp_type != AMP_TYPE_BREDR &&
1188 test_bit(HCI_UP, &hdev->flags)) {
1189 amp_available = true;
1193 read_unlock(&hci_dev_list_lock);
1195 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1196 return amp_available;
1201 static bool l2cap_check_efs(struct l2cap_chan *chan)
1203 /* Check EFS parameters */
1207 void l2cap_send_conn_req(struct l2cap_chan *chan)
1209 struct l2cap_conn *conn = chan->conn;
1210 struct l2cap_conn_req req;
1212 req.scid = cpu_to_le16(chan->scid);
1213 req.psm = chan->psm;
1215 chan->ident = l2cap_get_ident(conn);
1217 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1219 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1222 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1224 struct l2cap_create_chan_req req;
1225 req.scid = cpu_to_le16(chan->scid);
1226 req.psm = chan->psm;
1227 req.amp_id = amp_id;
1229 chan->ident = l2cap_get_ident(chan->conn);
1231 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1235 static void l2cap_move_setup(struct l2cap_chan *chan)
1237 struct sk_buff *skb;
1239 BT_DBG("chan %p", chan);
1241 if (chan->mode != L2CAP_MODE_ERTM)
1244 __clear_retrans_timer(chan);
1245 __clear_monitor_timer(chan);
1246 __clear_ack_timer(chan);
1248 chan->retry_count = 0;
1249 skb_queue_walk(&chan->tx_q, skb) {
1250 if (bt_cb(skb)->l2cap.retries)
1251 bt_cb(skb)->l2cap.retries = 1;
1256 chan->expected_tx_seq = chan->buffer_seq;
1258 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1259 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1260 l2cap_seq_list_clear(&chan->retrans_list);
1261 l2cap_seq_list_clear(&chan->srej_list);
1262 skb_queue_purge(&chan->srej_q);
1264 chan->tx_state = L2CAP_TX_STATE_XMIT;
1265 chan->rx_state = L2CAP_RX_STATE_MOVE;
1267 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1270 static void l2cap_move_done(struct l2cap_chan *chan)
1272 u8 move_role = chan->move_role;
1273 BT_DBG("chan %p", chan);
1275 chan->move_state = L2CAP_MOVE_STABLE;
1276 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1278 if (chan->mode != L2CAP_MODE_ERTM)
1281 switch (move_role) {
1282 case L2CAP_MOVE_ROLE_INITIATOR:
1283 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1284 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1286 case L2CAP_MOVE_ROLE_RESPONDER:
1287 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1292 static void l2cap_chan_ready(struct l2cap_chan *chan)
1294 /* The channel may have already been flagged as connected in
1295 * case of receiving data before the L2CAP info req/rsp
1296 * procedure is complete.
1298 if (chan->state == BT_CONNECTED)
1301 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1302 chan->conf_state = 0;
1303 __clear_chan_timer(chan);
1305 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1306 chan->ops->suspend(chan);
1308 chan->state = BT_CONNECTED;
1310 chan->ops->ready(chan);
1313 static void l2cap_le_connect(struct l2cap_chan *chan)
1315 struct l2cap_conn *conn = chan->conn;
1316 struct l2cap_le_conn_req req;
1318 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1321 req.psm = chan->psm;
1322 req.scid = cpu_to_le16(chan->scid);
1323 req.mtu = cpu_to_le16(chan->imtu);
1324 req.mps = cpu_to_le16(chan->mps);
1325 req.credits = cpu_to_le16(chan->rx_credits);
1327 chan->ident = l2cap_get_ident(conn);
1329 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1333 static void l2cap_le_start(struct l2cap_chan *chan)
1335 struct l2cap_conn *conn = chan->conn;
1337 if (!smp_conn_security(conn->hcon, chan->sec_level))
1341 l2cap_chan_ready(chan);
1345 if (chan->state == BT_CONNECT)
1346 l2cap_le_connect(chan);
1349 static void l2cap_start_connection(struct l2cap_chan *chan)
1351 if (__amp_capable(chan)) {
1352 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1353 a2mp_discover_amp(chan);
1354 } else if (chan->conn->hcon->type == LE_LINK) {
1355 l2cap_le_start(chan);
1357 l2cap_send_conn_req(chan);
1361 static void l2cap_request_info(struct l2cap_conn *conn)
1363 struct l2cap_info_req req;
1365 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1368 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1370 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1371 conn->info_ident = l2cap_get_ident(conn);
1373 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1375 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link either is not encrypted (no key size to
 * enforce) or is encrypted with a key of at least HCI_MIN_ENC_KEY_SIZE
 * bytes. Used to refuse L2CAP channel setup over weakly-encrypted
 * links (KNOB-style downgrade mitigation).
 */
1379 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1381 /* The minimum encryption key size needs to be enforced by the
1382 * host stack before establishing any L2CAP connections. The
1383 * specification in theory allows a minimum of 1, but to align
1384 * BR/EDR and LE transports, a minimum of 7 is chosen.
1386 * This check might also be called for unencrypted connections
1387 * that have no key size requirements. Ensure that the link is
1388 * actually encrypted before enforcing a key size.
1390 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1391 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Begin connection setup for a channel on an established link.
 * LE links are delegated to l2cap_le_start(). On BR/EDR the remote
 * feature mask must be known first (request it if not yet sent, bail
 * until REQ_DONE), then security and pending-state checks gate the
 * actual connection attempt. If the encryption key size check fails,
 * the channel is put on the disconnect timer instead of connecting.
 */
1394 static void l2cap_do_start(struct l2cap_chan *chan)
1396 struct l2cap_conn *conn = chan->conn;
1398 if (conn->hcon->type == LE_LINK) {
1399 l2cap_le_start(chan);
1403 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1404 l2cap_request_info(conn);
1408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1411 if (!l2cap_chan_check_security(chan, true) ||
1412 !__l2cap_no_conn_pending(chan))
1415 if (l2cap_check_enc_key_size(conn->hcon))
1416 l2cap_start_connection(chan);
1418 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether @mode is usable given the remote's feature mask
 * (@feat_mask) and our local capabilities. ERTM and streaming are
 * treated as locally available (OR-ed into local_feat_mask) and the
 * result is the three-way AND of mode bit, remote mask and local mask.
 */
1421 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1423 u32 local_feat_mask = l2cap_feat_mask;
1425 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1428 case L2CAP_MODE_ERTM:
1429 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1430 case L2CAP_MODE_STREAMING:
1431 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate channel disconnection: stop ERTM timers for a connected
 * ERTM channel, short-circuit A2MP (fixed CID) channels to BT_DISCONN
 * without a wire request, otherwise send an L2CAP Disconnection
 * Request and move the channel to BT_DISCONN recording @err as the
 * reason reported to the socket layer.
 */
1437 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1439 struct l2cap_conn *conn = chan->conn;
1440 struct l2cap_disconn_req req;
1445 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1446 __clear_retrans_timer(chan);
1447 __clear_monitor_timer(chan);
1448 __clear_ack_timer(chan);
/* A2MP uses a fixed channel; no DISCONN_REQ goes on the air for it */
1451 if (chan->scid == L2CAP_CID_A2MP) {
1452 l2cap_state_change(chan, BT_DISCONN);
1456 req.dcid = cpu_to_le16(chan->dcid);
1457 req.scid = cpu_to_le16(chan->scid);
1458 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1461 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1464 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine now that
 * the feature-mask exchange has completed:
 *  - non-connection-oriented channels are marked ready immediately;
 *  - BT_CONNECT channels re-check security, close if the negotiated
 *    mode is unsupported on a CONF_STATE2_DEVICE channel, and otherwise
 *    start connecting (subject to the encryption key size check);
 *  - BT_CONNECT2 channels (incoming, awaiting accept) get a Connection
 *    Response — success, authorization-pending (deferred setup) or
 *    authentication-pending — followed by our Configure Request when
 *    appropriate.
 * Runs under conn->chan_lock; each channel is individually locked.
 */
1465 static void l2cap_conn_start(struct l2cap_conn *conn)
1467 struct l2cap_chan *chan, *tmp;
1469 BT_DBG("conn %p", conn);
1471 mutex_lock(&conn->chan_lock);
1473 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1474 l2cap_chan_lock(chan);
1476 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1477 l2cap_chan_ready(chan);
1478 l2cap_chan_unlock(chan);
1482 if (chan->state == BT_CONNECT) {
1483 if (!l2cap_chan_check_security(chan, true) ||
1484 !__l2cap_no_conn_pending(chan)) {
1485 l2cap_chan_unlock(chan);
/* Remote lacks the features this mode needs and we cannot
 * fall back (CONF_STATE2_DEVICE): give up on the channel.
 */
1489 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1490 && test_bit(CONF_STATE2_DEVICE,
1491 &chan->conf_state)) {
1492 l2cap_chan_close(chan, ECONNRESET);
1493 l2cap_chan_unlock(chan);
1497 if (l2cap_check_enc_key_size(conn->hcon))
1498 l2cap_start_connection(chan);
1500 l2cap_chan_close(chan, ECONNREFUSED);
1502 } else if (chan->state == BT_CONNECT2) {
1503 struct l2cap_conn_rsp rsp;
/* In the response our scid/dcid are swapped relative to the
 * channel's view: rsp.scid is the remote's cid (our dcid).
 */
1505 rsp.scid = cpu_to_le16(chan->dcid);
1506 rsp.dcid = cpu_to_le16(chan->scid);
1508 if (l2cap_chan_check_security(chan, false)) {
1509 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1510 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1511 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1512 chan->ops->defer(chan);
1515 l2cap_state_change(chan, BT_CONFIG);
1516 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1517 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1520 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1521 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1524 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send our Configure Request once, and only on success */
1527 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1528 rsp.result != L2CAP_CR_SUCCESS) {
1529 l2cap_chan_unlock(chan);
1533 set_bit(CONF_REQ_SENT, &chan->conf_state);
1534 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1535 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1536 chan->num_conf_req++;
1539 l2cap_chan_unlock(chan);
1542 mutex_unlock(&conn->chan_lock);
/* LE-link-specific post-connect work: elevate security for outgoing
 * pairing requests that have no socket, and — when we are the slave
 * and the controller chose a connection interval outside our
 * configured min/max window — send a Connection Parameter Update
 * Request to bring it back into range.
 */
1545 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1547 struct hci_conn *hcon = conn->hcon;
1548 struct hci_dev *hdev = hcon->hdev;
1550 BT_DBG("%s conn %p", hdev->name, conn);
1552 /* For outgoing pairing which doesn't necessarily have an
1553 * associated socket (e.g. mgmt_pair_device).
1556 smp_conn_security(hcon, hcon->pending_sec_level);
1558 /* For LE slave connections, make sure the connection interval
1559 * is in the range of the minimum and maximum interval that has
1560 * been configured for this connection. If not, then trigger
1561 * the connection update procedure.
1563 if (hcon->role == HCI_ROLE_SLAVE &&
1564 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1565 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1566 struct l2cap_conn_param_update_req req;
1568 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1569 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1570 req.latency = cpu_to_le16(hcon->le_conn_latency);
1571 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1573 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1574 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying HCI link is up. Requests the remote
 * feature mask on ACL links, then walks all channels: A2MP channels
 * are skipped, LE channels go through l2cap_le_start(), non-CO
 * channels become ready once the feature exchange is done, and
 * BT_CONNECT channels proceed via l2cap_do_start(). LE links also get
 * their connection parameters checked, and deferred RX processing is
 * queued last.
 */
1578 static void l2cap_conn_ready(struct l2cap_conn *conn)
1580 struct l2cap_chan *chan;
1581 struct hci_conn *hcon = conn->hcon;
1583 BT_DBG("conn %p", conn);
1585 if (hcon->type == ACL_LINK)
1586 l2cap_request_info(conn);
1588 mutex_lock(&conn->chan_lock);
1590 list_for_each_entry(chan, &conn->chan_l, list) {
1592 l2cap_chan_lock(chan);
1594 if (chan->scid == L2CAP_CID_A2MP) {
1595 l2cap_chan_unlock(chan);
1599 if (hcon->type == LE_LINK) {
1600 l2cap_le_start(chan);
1601 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1602 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1603 l2cap_chan_ready(chan);
1604 } else if (chan->state == BT_CONNECT) {
1605 l2cap_do_start(chan);
1608 l2cap_chan_unlock(chan);
1611 mutex_unlock(&conn->chan_lock);
1613 if (hcon->type == LE_LINK)
1614 l2cap_le_conn_ready(conn);
1616 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1619 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded reliable
 * delivery (FLAG_FORCE_RELIABLE); other channels are left alone.
 */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1622 struct l2cap_chan *chan;
1624 BT_DBG("conn %p", conn);
1626 mutex_lock(&conn->chan_lock);
1628 list_for_each_entry(chan, &conn->chan_l, list) {
1629 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 l2cap_chan_set_err(chan, err);
1633 mutex_unlock(&conn->chan_lock);
/* info_timer expiry: the remote never answered our Information
 * Request. Mark the feature exchange as done anyway (with whatever we
 * have) and let channel setup proceed via l2cap_conn_start().
 */
1636 static void l2cap_info_timeout(struct work_struct *work)
1638 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1641 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1642 conn->info_ident = 0;
1644 l2cap_conn_start(conn);
1649 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650 * callback is called during registration. The ->remove callback is called
1651 * during unregistration.
1652 * An l2cap_user object can either be explicitly unregistered or when the
1653 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655 * External modules must own a reference to the l2cap_conn object if they intend
1656 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657 * any time if they don't.
/* Register an external l2cap_user on @conn. Serialised by the hci_dev
 * lock (see comment below). Fails if @user is already on a list or the
 * connection has been torn down (conn->hchan == NULL). On success the
 * user's ->probe() has run and the user is linked into conn->users.
 */
1660 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1662 struct hci_dev *hdev = conn->hcon->hdev;
1665 /* We need to check whether l2cap_conn is registered. If it is not, we
1666 * must not register the l2cap_user. l2cap_conn_del() unregisters
1667 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1668 * relies on the parent hci_conn object to be locked. This itself relies
1669 * on the hci_dev object to be locked. So we must lock the hci device
1674 if (!list_empty(&user->list)) {
1679 /* conn->hchan is NULL after l2cap_conn_del() was called */
1685 ret = user->probe(conn, user);
1689 list_add(&user->list, &conn->users);
1693 hci_dev_unlock(hdev);
1696 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user under the hci_dev
 * lock. list_del_init() makes the operation idempotent (an empty
 * user->list means it was already removed) and ->remove() is invoked
 * exactly once.
 */
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1700 struct hci_dev *hdev = conn->hcon->hdev;
1704 if (list_empty(&user->list))
1707 list_del_init(&user->list);
1708 user->remove(conn, user);
1711 hci_dev_unlock(hdev);
1713 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach and notify every registered l2cap_user; called from
 * l2cap_conn_del() during connection teardown. Caller provides the
 * required locking (hci_dev lock via the hci_conn parent).
 */
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1717 struct l2cap_user *user;
1719 while (!list_empty(&conn->users)) {
1720 user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 list_del_init(&user->list);
1722 user->remove(conn, user);
/* Tear down the l2cap_conn attached to @hcon, delivering @err to every
 * channel. Pending RX skbs and work items are discarded first (the
 * work items are cancelled, not flushed, because we may be running on
 * the same workqueue — see comment below), users are unregistered,
 * then each channel is deleted and closed under its own lock with a
 * hold/put pair keeping it alive across the callbacks. Finally the
 * hci_chan is removed, the info timer stopped and the conn reference
 * dropped.
 */
1726 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1728 struct l2cap_conn *conn = hcon->l2cap_data;
1729 struct l2cap_chan *chan, *l;
1734 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1736 kfree_skb(conn->rx_skb);
1738 skb_queue_purge(&conn->pending_rx);
1740 /* We can not call flush_work(&conn->pending_rx_work) here since we
1741 * might block if we are running on a worker from the same workqueue
1742 * pending_rx_work is waiting on.
1744 if (work_pending(&conn->pending_rx_work))
1745 cancel_work_sync(&conn->pending_rx_work);
1747 if (work_pending(&conn->id_addr_update_work))
1748 cancel_work_sync(&conn->id_addr_update_work);
1750 l2cap_unregister_all_users(conn);
1752 /* Force the connection to be immediately dropped */
1753 hcon->disc_timeout = 0;
1755 mutex_lock(&conn->chan_lock);
1758 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1759 l2cap_chan_hold(chan);
1760 l2cap_chan_lock(chan);
1762 l2cap_chan_del(chan, err);
1764 chan->ops->close(chan);
1766 l2cap_chan_unlock(chan);
1767 l2cap_chan_put(chan);
1770 mutex_unlock(&conn->chan_lock);
1772 hci_chan_del(conn->hchan);
1774 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1775 cancel_delayed_work_sync(&conn->info_timer);
1777 hcon->l2cap_data = NULL;
1779 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (and free the conn — tail of function not shown here).
 */
1782 static void l2cap_conn_free(struct kref *ref)
1784 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1786 hci_conn_put(conn->hcon);
/* Take a reference on @conn; paired with l2cap_conn_put(). */
1790 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1792 kref_get(&conn->ref);
1795 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
1797 void l2cap_conn_put(struct l2cap_conn *conn)
1799 kref_put(&conn->ref, l2cap_conn_free);
1801 EXPORT_SYMBOL(l2cap_conn_put);
1803 /* ---- Socket interface ---- */
1805 /* Find socket with psm and source / destination bdaddr.
1806 * Returns closest match.
/* Exact src+dst matches win immediately; otherwise the best wildcard
 * (BDADDR_ANY) candidate seen is remembered in c1 and returned at the
 * end. Either way the returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero() under chan_list_lock, so it stays
 * valid after the lock is dropped; NULL means no usable match.
 */
1808 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1813 struct l2cap_chan *c, *tmp, *c1 = NULL;
1815 read_lock(&chan_list_lock);
1817 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1818 if (state && c->state != state)
/* Channel's source address type must match the link transport */
1821 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1824 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1827 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1828 int src_match, dst_match;
1829 int src_any, dst_any;
1832 src_match = !bacmp(&c->src, src);
1833 dst_match = !bacmp(&c->dst, dst);
1834 if (src_match && dst_match) {
1835 if (!l2cap_chan_hold_unless_zero(c))
1838 read_unlock(&chan_list_lock);
1843 src_any = !bacmp(&c->src, BDADDR_ANY);
1844 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1845 if ((src_match && dst_any) || (src_any && dst_match) ||
1846 (src_any && dst_any))
1852 c1 = l2cap_chan_hold_unless_zero(c1);
1854 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry (delayed work): feed L2CAP_EV_MONITOR_TO
 * into the TX state machine under the channel lock. The early
 * unlock/put pair handles the bail-out path (condition line not
 * visible in this excerpt); the trailing pair drops the reference the
 * timer held on the channel.
 */
1859 static void l2cap_monitor_timeout(struct work_struct *work)
1861 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1862 monitor_timer.work);
1864 BT_DBG("chan %p", chan);
1866 l2cap_chan_lock(chan);
1869 l2cap_chan_unlock(chan);
1870 l2cap_chan_put(chan);
1874 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1876 l2cap_chan_unlock(chan);
1877 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry (delayed work): feed
 * L2CAP_EV_RETRANS_TO into the TX state machine under the channel
 * lock, mirroring l2cap_monitor_timeout() including the reference the
 * timer held on the channel being dropped on both paths.
 */
1880 static void l2cap_retrans_timeout(struct work_struct *work)
1882 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1883 retrans_timer.work);
1885 BT_DBG("chan %p", chan);
1887 l2cap_chan_lock(chan);
1890 l2cap_chan_unlock(chan);
1891 l2cap_chan_put(chan);
1895 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1896 l2cap_chan_unlock(chan);
1897 l2cap_chan_put(chan);
/* Streaming-mode transmit: append @skbs to tx_q and drain the whole
 * queue. Each frame gets reqseq 0, the next txseq, a packed control
 * field and (when negotiated) a trailing CRC16 FCS. No retransmission
 * state is kept — streaming mode sends and forgets. Bails early if the
 * channel is mid-move between controllers.
 */
1900 static void l2cap_streaming_send(struct l2cap_chan *chan,
1901 struct sk_buff_head *skbs)
1903 struct sk_buff *skb;
1904 struct l2cap_ctrl *control;
1906 BT_DBG("chan %p, skbs %p", chan, skbs);
1908 if (__chan_is_moving(chan))
1911 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1913 while (!skb_queue_empty(&chan->tx_q)) {
1915 skb = skb_dequeue(&chan->tx_q);
1917 bt_cb(skb)->l2cap.retries = 1;
1918 control = &bt_cb(skb)->l2cap;
1920 control->reqseq = 0;
1921 control->txseq = chan->next_tx_seq;
1923 __pack_control(chan, control, skb);
1925 if (chan->fcs == L2CAP_FCS_CRC16) {
1926 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1927 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1930 l2cap_do_send(chan, skb);
1932 BT_DBG("Sent txseq %u", control->txseq);
1934 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1935 chan->frames_sent++;
/* ERTM transmit path: send queued I-frames starting at tx_send_head
 * while the remote TX window has room and we are in the XMIT state.
 * For each frame the control field (reqseq piggy-backs an ack of
 * buffer_seq, txseq advances) and optional FCS are written into the
 * queued skb, then a clone is handed to the HCI layer — the original
 * stays on tx_q for possible retransmission. Returns early (without
 * sending) when not connected, when the remote is busy, or during a
 * channel move.
 */
1939 static int l2cap_ertm_send(struct l2cap_chan *chan)
1941 struct sk_buff *skb, *tx_skb;
1942 struct l2cap_ctrl *control;
1945 BT_DBG("chan %p", chan);
1947 if (chan->state != BT_CONNECTED)
1950 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1953 if (__chan_is_moving(chan))
1956 while (chan->tx_send_head &&
1957 chan->unacked_frames < chan->remote_tx_win &&
1958 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1960 skb = chan->tx_send_head;
1962 bt_cb(skb)->l2cap.retries = 1;
1963 control = &bt_cb(skb)->l2cap;
1965 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame acknowledges everything up to buffer_seq */
1968 control->reqseq = chan->buffer_seq;
1969 chan->last_acked_seq = chan->buffer_seq;
1970 control->txseq = chan->next_tx_seq;
1972 __pack_control(chan, control, skb);
1974 if (chan->fcs == L2CAP_FCS_CRC16) {
1975 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1976 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1979 /* Clone after data has been modified. Data is assumed to be
1980 read-only (for locking purposes) on cloned sk_buffs.
1982 tx_skb = skb_clone(skb, GFP_KERNEL);
1987 __set_retrans_timer(chan);
1989 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1990 chan->unacked_frames++;
1991 chan->frames_sent++;
1994 if (skb_queue_is_last(&chan->tx_q, skb))
1995 chan->tx_send_head = NULL;
1997 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1999 l2cap_do_send(chan, tx_skb);
2000 BT_DBG("Sent txseq %u", control->txseq);
2003 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2004 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. For each
 * seq the original skb is located in tx_q, its retry count bumped
 * (disconnecting the channel if max_tx is exceeded), the control field
 * updated with the current reqseq/F-bit, and a fresh copy or clone is
 * sent: skb_copy() when the skb is already cloned (clones are
 * read-only and we must rewrite the header), skb_clone() otherwise.
 * The FCS is recomputed in place over the rewritten header. Skipped
 * entirely while the remote is busy or the channel is moving.
 */
2009 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2011 struct l2cap_ctrl control;
2012 struct sk_buff *skb;
2013 struct sk_buff *tx_skb;
2016 BT_DBG("chan %p", chan);
2018 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2021 if (__chan_is_moving(chan))
2024 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2025 seq = l2cap_seq_list_pop(&chan->retrans_list);
2027 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2029 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2034 bt_cb(skb)->l2cap.retries++;
2035 control = bt_cb(skb)->l2cap;
2037 if (chan->max_tx != 0 &&
2038 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2039 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2040 l2cap_send_disconn_req(chan, ECONNRESET);
2041 l2cap_seq_list_clear(&chan->retrans_list);
2045 control.reqseq = chan->buffer_seq;
2046 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2051 if (skb_cloned(skb)) {
2052 /* Cloned sk_buffs are read-only, so we need a
2055 tx_skb = skb_copy(skb, GFP_KERNEL);
2057 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission round */
2061 l2cap_seq_list_clear(&chan->retrans_list);
2065 /* Update skb contents */
2066 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2067 put_unaligned_le32(__pack_extended_control(&control),
2068 tx_skb->data + L2CAP_HDR_SIZE);
2070 put_unaligned_le16(__pack_enhanced_control(&control),
2071 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the rewritten frame, excluding the FCS field */
2075 if (chan->fcs == L2CAP_FCS_CRC16) {
2076 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2077 tx_skb->len - L2CAP_FCS_SIZE);
2078 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2082 l2cap_do_send(chan, tx_skb);
2084 BT_DBG("Resent txseq %d", control.txseq);
2086 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq: queue it
 * on retrans_list and run the resend machinery.
 */
2090 static void l2cap_retransmit(struct l2cap_chan *chan,
2091 struct l2cap_ctrl *control)
2093 BT_DBG("chan %p, control %p", chan, control);
2095 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2096 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames from control->reqseq onward (REJ
 * handling). The first walk locates the skb carrying reqseq (or
 * tx_send_head, whichever comes first); the second walk queues every
 * frame from there up to tx_send_head on retrans_list, then resends.
 * Does nothing while the remote is busy.
 */
2099 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2100 struct l2cap_ctrl *control)
2102 struct sk_buff *skb;
2104 BT_DBG("chan %p, control %p", chan, control);
2107 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2109 l2cap_seq_list_clear(&chan->retrans_list);
2111 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2114 if (chan->unacked_frames) {
2115 skb_queue_walk(&chan->tx_q, skb) {
2116 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2117 skb == chan->tx_send_head)
2121 skb_queue_walk_from(&chan->tx_q, skb) {
2122 if (skb == chan->tx_send_head)
2125 l2cap_seq_list_append(&chan->retrans_list,
2126 bt_cb(skb)->l2cap.txseq);
2129 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. If we are locally busy, send an RNR
 * instead. Otherwise first try to piggy-back the ack on pending
 * I-frames via l2cap_ertm_send(); if nothing went out, send an
 * explicit RR once the number of unacked received frames reaches 3/4
 * of the ack window (threshold computed as win + win*2, i.e. 3*win,
 * compared against frames_to_ack — shift arithmetic avoids mul/div).
 * Below threshold the ack timer is (re)armed instead.
 */
2133 static void l2cap_send_ack(struct l2cap_chan *chan)
2135 struct l2cap_ctrl control;
2136 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2137 chan->last_acked_seq);
2140 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2141 chan, chan->last_acked_seq, chan->buffer_seq);
2143 memset(&control, 0, sizeof(control));
2146 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2147 chan->rx_state == L2CAP_RX_STATE_RECV) {
2148 __clear_ack_timer(chan);
2149 control.super = L2CAP_SUPER_RNR;
2150 control.reqseq = chan->buffer_seq;
2151 l2cap_send_sframe(chan, &control);
2153 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2154 l2cap_ertm_send(chan);
2155 /* If any i-frames were sent, they included an ack */
2156 if (chan->buffer_seq == chan->last_acked_seq)
2160 /* Ack now if the window is 3/4ths full.
2161 * Calculate without mul or div
2163 threshold = chan->ack_win;
2164 threshold += threshold << 1;
2167 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2170 if (frames_to_ack >= threshold) {
2171 __clear_ack_timer(chan);
2172 control.super = L2CAP_SUPER_RR;
2173 control.reqseq = chan->buffer_seq;
2174 l2cap_send_sframe(chan, &control);
2179 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb. The first @count
 * bytes land in the skb's linear area; the remainder is split into
 * continuation fragments of at most conn->mtu bytes each, allocated
 * via chan->ops->alloc_skb() and chained on skb's frag_list, with
 * skb->len / skb->data_len updated to account for them. Returns 0 on
 * success or a negative errno (allocation/copy failure).
 */
2183 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2184 struct msghdr *msg, int len,
2185 int count, struct sk_buff *skb)
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff **frag;
2191 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2197 /* Continuation fragments (no L2CAP header) */
2198 frag = &skb_shinfo(skb)->frag_list;
2200 struct sk_buff *tmp;
2202 count = min_t(unsigned int, conn->mtu, len);
2204 tmp = chan->ops->alloc_skb(chan, 0, count,
2205 msg->msg_flags & MSG_DONTWAIT);
2207 return PTR_ERR(tmp);
2211 if (!copy_from_iter_full(skb_put(*frag, count), count,
2218 skb->len += (*frag)->len;
2219 skb->data_len += (*frag)->len;
2221 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header with len covering
 * payload + 2-byte PSM, followed by the PSM and the user data copied
 * in via l2cap_skbuff_fromiovec(). Returns the skb or ERR_PTR on
 * allocation/copy failure.
 */
2227 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2228 struct msghdr *msg, size_t len)
2230 struct l2cap_conn *conn = chan->conn;
2231 struct sk_buff *skb;
2232 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2233 struct l2cap_hdr *lh;
2235 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2236 __le16_to_cpu(chan->psm), len);
2238 count = min_t(unsigned int, (conn->mtu - hlen), len);
2240 skb = chan->ops->alloc_skb(chan, hlen, count,
2241 msg->msg_flags & MSG_DONTWAIT);
2245 /* Create L2CAP header */
2246 lh = skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
2248 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2249 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2251 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2252 if (unlikely(err < 0)) {
2254 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header (dcid + len)
 * followed by the user data. Returns the skb or ERR_PTR on failure.
 */
2259 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2260 struct msghdr *msg, size_t len)
2262 struct l2cap_conn *conn = chan->conn;
2263 struct sk_buff *skb;
2265 struct l2cap_hdr *lh;
2267 BT_DBG("chan %p len %zu", chan, len);
2269 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2271 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2272 msg->msg_flags & MSG_DONTWAIT);
2276 /* Create L2CAP header */
2277 lh = skb_put(skb, L2CAP_HDR_SIZE);
2278 lh->cid = cpu_to_le16(chan->dcid);
2279 lh->len = cpu_to_le16(len);
2281 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2282 if (unlikely(err < 0)) {
2284 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU. The header size accounts for
 * the enhanced or extended control field (__ertm_hdr_size), an
 * optional SDU-length field (only on the first segment of a segmented
 * SDU, signalled by a non-zero sdulen — truncated condition line not
 * visible here) and an optional CRC16 FCS. The control field is
 * zero-filled now and populated later by __pack_control() at send
 * time. Returns the skb or ERR_PTR on failure.
 */
2289 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2290 struct msghdr *msg, size_t len,
2293 struct l2cap_conn *conn = chan->conn;
2294 struct sk_buff *skb;
2295 int err, count, hlen;
2296 struct l2cap_hdr *lh;
2298 BT_DBG("chan %p len %zu", chan, len);
2301 return ERR_PTR(-ENOTCONN);
2303 hlen = __ertm_hdr_size(chan);
2306 hlen += L2CAP_SDULEN_SIZE;
2308 if (chan->fcs == L2CAP_FCS_CRC16)
2309 hlen += L2CAP_FCS_SIZE;
2311 count = min_t(unsigned int, (conn->mtu - hlen), len);
2313 skb = chan->ops->alloc_skb(chan, hlen, count,
2314 msg->msg_flags & MSG_DONTWAIT);
2318 /* Create L2CAP header */
2319 lh = skb_put(skb, L2CAP_HDR_SIZE);
2320 lh->cid = cpu_to_le16(chan->dcid);
2321 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2323 /* Control header is populated later */
2324 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2325 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2327 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2330 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2332 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2333 if (unlikely(err < 0)) {
2335 return ERR_PTR(err);
2338 bt_cb(skb)->l2cap.fcs = chan->fcs;
2339 bt_cb(skb)->l2cap.retries = 0;
/* Segment one SDU from @msg into ERTM/streaming I-frame PDUs queued on
 * @seg_queue. The PDU payload size is derived from the HCI MTU
 * (capped for BR/EDR, minus FCS and ERTM header overhead, and capped
 * again by the remote's MPS) so each PDU fits a single HCI fragment —
 * required because HCI fragmentation is incompatible with ERTM
 * queueing. SAR markers run UNSEGMENTED for a single-PDU SDU,
 * otherwise START / CONTINUE... / END. On PDU creation failure the
 * queue is purged and the error returned.
 */
2343 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2344 struct sk_buff_head *seg_queue,
2345 struct msghdr *msg, size_t len)
2347 struct sk_buff *skb;
2352 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2354 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2355 * so fragmented skbs are not used. The HCI layer's handling
2356 * of fragmented skbs is not compatible with ERTM's queueing.
2359 /* PDU size is derived from the HCI MTU */
2360 pdu_len = chan->conn->mtu;
2362 /* Constrain PDU size for BR/EDR connections */
2364 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2366 /* Adjust for largest possible L2CAP overhead. */
2368 pdu_len -= L2CAP_FCS_SIZE;
2370 pdu_len -= __ertm_hdr_size(chan);
2372 /* Remote device may have requested smaller PDUs */
2373 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2375 if (len <= pdu_len) {
2376 sar = L2CAP_SAR_UNSEGMENTED;
2380 sar = L2CAP_SAR_START;
2385 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2388 __skb_queue_purge(seg_queue);
2389 return PTR_ERR(skb);
2392 bt_cb(skb)->l2cap.sar = sar;
2393 __skb_queue_tail(seg_queue, skb);
2399 if (len <= pdu_len) {
2400 sar = L2CAP_SAR_END;
2403 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow control PDU: L2CAP header plus an
 * optional 2-byte SDU-length field (first PDU of an SDU only —
 * condition line not visible in this excerpt) and the user data.
 * Returns the skb or ERR_PTR on failure.
 */
2410 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2412 size_t len, u16 sdulen)
2414 struct l2cap_conn *conn = chan->conn;
2415 struct sk_buff *skb;
2416 int err, count, hlen;
2417 struct l2cap_hdr *lh;
2419 BT_DBG("chan %p len %zu", chan, len);
2422 return ERR_PTR(-ENOTCONN);
2424 hlen = L2CAP_HDR_SIZE;
2427 hlen += L2CAP_SDULEN_SIZE;
2429 count = min_t(unsigned int, (conn->mtu - hlen), len);
2431 skb = chan->ops->alloc_skb(chan, hlen, count,
2432 msg->msg_flags & MSG_DONTWAIT);
2436 /* Create L2CAP header */
2437 lh = skb_put(skb, L2CAP_HDR_SIZE);
2438 lh->cid = cpu_to_le16(chan->dcid);
2439 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2442 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2444 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2445 if (unlikely(err < 0)) {
2447 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs queued on @seg_queue. The
 * first PDU reserves L2CAP_SDULEN_SIZE of the remote MPS for the
 * SDU-length field; subsequent PDUs reclaim those bytes (pdu_len is
 * bumped back up after the first iteration). Purges the queue and
 * returns the error if any PDU creation fails.
 */
2453 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2454 struct sk_buff_head *seg_queue,
2455 struct msghdr *msg, size_t len)
2457 struct sk_buff *skb;
2461 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2464 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2470 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2472 __skb_queue_purge(seg_queue);
2473 return PTR_ERR(skb);
2476 __skb_queue_tail(seg_queue, skb);
2482 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain tx_q for an LE flow-control channel, one queued PDU per
 * available credit; stops when credits or the queue run out.
 */
2489 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2493 BT_DBG("chan %p", chan);
2495 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2496 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2501 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2502 skb_queue_len(&chan->tx_q));
/* Top-level send entry point, dispatching on channel type/mode:
 *  - connectionless channels build and send a single G-frame;
 *  - LE_FLOWCTL segments the SDU, splices it onto tx_q, sends as many
 *    PDUs as credits allow and suspends the socket when credits hit 0;
 *  - BASIC builds and sends one B-frame;
 *  - ERTM/STREAMING segments first (may block on memory), then feeds
 *    the segments to the TX state machine or the streaming sender, and
 *    purges whatever was not queued.
 * All modes enforce the outgoing MTU. Returns @len on success or a
 * negative errno.
 */
2505 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2507 struct sk_buff *skb;
2509 struct sk_buff_head seg_queue;
2514 /* Connectionless channel */
2515 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2516 skb = l2cap_create_connless_pdu(chan, msg, len);
2518 return PTR_ERR(skb);
2520 l2cap_do_send(chan, skb);
2524 switch (chan->mode) {
2525 case L2CAP_MODE_LE_FLOWCTL:
2526 /* Check outgoing MTU */
2527 if (len > chan->omtu)
2530 __skb_queue_head_init(&seg_queue);
2532 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have dropped out of BT_CONNECTED while segmenting */
2534 if (chan->state != BT_CONNECTED) {
2535 __skb_queue_purge(&seg_queue);
2542 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2544 l2cap_le_flowctl_send(chan);
2546 if (!chan->tx_credits)
2547 chan->ops->suspend(chan);
2553 case L2CAP_MODE_BASIC:
2554 /* Check outgoing MTU */
2555 if (len > chan->omtu)
2558 /* Create a basic PDU */
2559 skb = l2cap_create_basic_pdu(chan, msg, len);
2561 return PTR_ERR(skb);
2563 l2cap_do_send(chan, skb);
2567 case L2CAP_MODE_ERTM:
2568 case L2CAP_MODE_STREAMING:
2569 /* Check outgoing MTU */
2570 if (len > chan->omtu) {
2575 __skb_queue_head_init(&seg_queue);
2577 /* Do segmentation before calling in to the state machine,
2578 * since it's possible to block while waiting for memory
2581 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2586 if (chan->mode == L2CAP_MODE_ERTM)
2587 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2589 l2cap_streaming_send(chan, &seg_queue);
2593 /* If the skbs were not queued for sending, they'll still be in
2594 * seg_queue and need to be purged.
2596 __skb_queue_purge(&seg_queue);
/* NOTE(review): the "bad state" message prints chan->mode, not a
 * state — wording is misleading but the string is kept as-is.
 */
2600 BT_DBG("bad state %1.1x", chan->mode);
2606 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each on srej_list; then advance expected_tx_seq past
 * @txseq. Used when an out-of-sequence I-frame reveals gaps.
 */
2608 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2610 struct l2cap_ctrl control;
2613 BT_DBG("chan %p, txseq %u", chan, txseq);
2615 memset(&control, 0, sizeof(control));
2617 control.super = L2CAP_SUPER_SREJ;
2619 for (seq = chan->expected_tx_seq; seq != txseq;
2620 seq = __next_seq(chan, seq)) {
2621 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2622 control.reqseq = seq;
2623 l2cap_send_sframe(chan, &control);
2624 l2cap_seq_list_append(&chan->srej_list, seq);
2628 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent (tail) entry of srej_list; no-op
 * when the list is empty (L2CAP_SEQ_LIST_CLEAR).
 */
2631 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2633 struct l2cap_ctrl control;
2635 BT_DBG("chan %p", chan);
2637 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2640 memset(&control, 0, sizeof(control));
2642 control.super = L2CAP_SUPER_SREJ;
2643 control.reqseq = chan->srej_list.tail;
2644 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except
 * @txseq (which has just arrived). Entries are popped and re-appended,
 * so the list rotates in place; the saved initial head bounds the loop
 * to a single pass.
 */
2647 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2649 struct l2cap_ctrl control;
2653 BT_DBG("chan %p, txseq %u", chan, txseq);
2655 memset(&control, 0, sizeof(control));
2657 control.super = L2CAP_SUPER_SREJ;
2659 /* Capture initial list head to allow only one pass through the list. */
2660 initial_head = chan->srej_list.head;
2663 seq = l2cap_seq_list_pop(&chan->srej_list);
2664 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2667 control.reqseq = seq;
2668 l2cap_send_sframe(chan, &control);
2669 l2cap_seq_list_append(&chan->srej_list, seq);
2670 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: free every skb on tx_q whose
 * txseq lies in [expected_ack_seq, @reqseq), decrementing
 * unacked_frames for each, then record @reqseq as the new
 * expected_ack_seq and stop the retransmission timer once nothing is
 * outstanding. Skips entirely when nothing is unacked or the ack is a
 * duplicate.
 */
2673 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2675 struct sk_buff *acked_skb;
2678 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2680 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2683 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2684 chan->expected_ack_seq, chan->unacked_frames);
2686 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2687 ackseq = __next_seq(chan, ackseq)) {
2689 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2691 skb_unlink(acked_skb, &chan->tx_q);
2692 kfree_skb(acked_skb);
2693 chan->unacked_frames--;
2697 chan->expected_ack_seq = reqseq;
2699 if (chan->unacked_frames == 0)
2700 __clear_retrans_timer(chan);
2702 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq to
 * buffer_seq, drop all SREJ bookkeeping and buffered out-of-order
 * frames, and return the RX state machine to RECV.
 */
2705 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2707 BT_DBG("chan %p", chan);
2709 chan->expected_tx_seq = chan->buffer_seq;
2710 l2cap_seq_list_clear(&chan->srej_list);
2711 skb_queue_purge(&chan->srej_q);
2712 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state. Events:
 *  - DATA_REQUEST: queue new segments (setting tx_send_head if idle)
 *    and transmit;
 *  - LOCAL_BUSY_DETECTED/CLEAR: toggle CONN_LOCAL_BUSY, aborting a
 *    pending SREJ_SENT RX state on entry, sending an RR-with-poll and
 *    moving to WAIT_F on exit (when an RNR was sent);
 *  - RECV_REQSEQ_AND_FBIT: process the piggy-backed ack;
 *  - EXPLICIT_POLL / RETRANS_TO: send RR/RNR with the poll bit, start
 *    the monitor timer and transition to WAIT_F;
 *  - RECV_FBIT: nothing to do in this state.
 */
2715 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2716 struct l2cap_ctrl *control,
2717 struct sk_buff_head *skbs, u8 event)
2719 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2723 case L2CAP_EV_DATA_REQUEST:
2724 if (chan->tx_send_head == NULL)
2725 chan->tx_send_head = skb_peek(skbs);
2727 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2728 l2cap_ertm_send(chan);
2730 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2731 BT_DBG("Enter LOCAL_BUSY");
2732 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2735 /* The SREJ_SENT state must be aborted if we are to
2736 * enter the LOCAL_BUSY state.
2738 l2cap_abort_rx_srej_sent(chan);
2741 l2cap_send_ack(chan);
2744 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2745 BT_DBG("Exit LOCAL_BUSY");
2746 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2749 struct l2cap_ctrl local_control;
2751 memset(&local_control, 0, sizeof(local_control));
2752 local_control.sframe = 1;
2753 local_control.super = L2CAP_SUPER_RR;
2754 local_control.poll = 1;
2755 local_control.reqseq = chan->buffer_seq;
2756 l2cap_send_sframe(chan, &local_control);
2758 chan->retry_count = 1;
2759 __set_monitor_timer(chan);
2760 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2763 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2764 l2cap_process_reqseq(chan, control->reqseq);
2766 case L2CAP_EV_EXPLICIT_POLL:
2767 l2cap_send_rr_or_rnr(chan, 1);
2768 chan->retry_count = 1;
2769 __set_monitor_timer(chan);
2770 __clear_ack_timer(chan);
2771 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 case L2CAP_EV_RETRANS_TO:
2774 l2cap_send_rr_or_rnr(chan, 1);
2775 chan->retry_count = 1;
2776 __set_monitor_timer(chan);
2777 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2779 case L2CAP_EV_RECV_FBIT:
2780 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state (waiting for a final-bit
 * response to our poll). DATA_REQUEST only queues — nothing is sent
 * until the F-bit arrives. RECV_FBIT with control->final clears the
 * monitor timer, restarts the retransmission timer if frames remain
 * unacked, and returns to XMIT. MONITOR_TO re-polls until
 * retry_count reaches max_tx, after which the channel is
 * disconnected with ECONNABORTED. LOCAL_BUSY handling mirrors the
 * XMIT state.
 */
2787 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2788 struct l2cap_ctrl *control,
2789 struct sk_buff_head *skbs, u8 event)
2791 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2795 case L2CAP_EV_DATA_REQUEST:
2796 if (chan->tx_send_head == NULL)
2797 chan->tx_send_head = skb_peek(skbs);
2798 /* Queue data, but don't send. */
2799 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2801 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2802 BT_DBG("Enter LOCAL_BUSY");
2803 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2805 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2806 /* The SREJ_SENT state must be aborted if we are to
2807 * enter the LOCAL_BUSY state.
2809 l2cap_abort_rx_srej_sent(chan);
2812 l2cap_send_ack(chan);
2815 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2816 BT_DBG("Exit LOCAL_BUSY");
2817 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2819 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2820 struct l2cap_ctrl local_control;
2821 memset(&local_control, 0, sizeof(local_control));
2822 local_control.sframe = 1;
2823 local_control.super = L2CAP_SUPER_RR;
2824 local_control.poll = 1;
2825 local_control.reqseq = chan->buffer_seq;
2826 l2cap_send_sframe(chan, &local_control);
2828 chan->retry_count = 1;
2829 __set_monitor_timer(chan);
2830 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2833 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2834 l2cap_process_reqseq(chan, control->reqseq);
2838 case L2CAP_EV_RECV_FBIT:
2839 if (control && control->final) {
2840 __clear_monitor_timer(chan);
2841 if (chan->unacked_frames > 0)
2842 __set_retrans_timer(chan);
2843 chan->retry_count = 0;
2844 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format string below looks malformed — "0x2.2%x"
 * was presumably meant to be "0x%2.2x". Debug-only; left as-is.
 */
2845 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2848 case L2CAP_EV_EXPLICIT_POLL:
2851 case L2CAP_EV_MONITOR_TO:
2852 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2853 l2cap_send_rr_or_rnr(chan, 1);
2854 __set_monitor_timer(chan);
2855 chan->retry_count++;
2857 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Entry point of the ERTM TX state machine: dispatch @event (with the
 * optional control header and frame list) to the handler for the
 * channel's current tx_state.
 */
2865 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2866 struct sk_buff_head *skbs, u8 event)
2868 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2869 chan, control, skbs, event, chan->tx_state);
2871 switch (chan->tx_state) {
2872 case L2CAP_TX_STATE_XMIT:
2873 l2cap_tx_state_xmit(chan, control, skbs, event);
2875 case L2CAP_TX_STATE_WAIT_F:
2876 l2cap_tx_state_wait_f(chan, control, skbs, event);
2884 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2885 struct l2cap_ctrl *control)
2887 BT_DBG("chan %p, control %p", chan, control);
2888 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2891 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2892 struct l2cap_ctrl *control)
2894 BT_DBG("chan %p, control %p", chan, control);
2895 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2898 /* Copy frame to all raw sockets on that connection */
/* Clone @skb to every L2CAP_CHAN_RAW channel on @conn except the one
 * the frame arrived on.  Each raw channel gets its own clone via its
 * ops->recv() callback; iteration is protected by conn->chan_lock.
 */
2899 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2901 struct sk_buff *nskb;
2902 struct l2cap_chan *chan;
2904 BT_DBG("conn %p", conn);
2906 mutex_lock(&conn->chan_lock);
2908 list_for_each_entry(chan, &conn->chan_l, list) {
2909 if (chan->chan_type != L2CAP_CHAN_RAW)
2912 /* Don't send frame to the channel it came from */
2913 if (bt_cb(skb)->l2cap.chan == chan)
/* Clone rather than share: each receiver may consume/free it. */
2916 nskb = skb_clone(skb, GFP_KERNEL);
/* NOTE(review): recv() failure path (freeing nskb) appears to be
 * handled on the lines following this check — confirm in full source.
 */
2919 if (chan->ops->recv(chan, nskb))
2923 mutex_unlock(&conn->chan_lock);
2926 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * @dlen bytes of @data.  If the payload exceeds the connection MTU,
 * the remainder is attached as frag_list continuation skbs (which
 * carry no L2CAP header).  Returns the skb, or NULL on failure
 * (MTU too small for the headers, or allocation failure).
 */
2927 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2928 u8 ident, u16 dlen, void *data)
2930 struct sk_buff *skb, **frag;
2931 struct l2cap_cmd_hdr *cmd;
2932 struct l2cap_hdr *lh;
2935 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2936 conn, code, ident, dlen);
/* The first fragment must at least hold both headers. */
2938 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2941 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2942 count = min_t(unsigned int, conn->mtu, len);
2944 skb = bt_skb_alloc(count, GFP_KERNEL);
2948 lh = skb_put(skb, L2CAP_HDR_SIZE);
2949 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2951 if (conn->hcon->type == LE_LINK)
2952 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2954 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2956 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2959 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits into the first fragment. */
2962 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2963 skb_put_data(skb, data, count);
2969 /* Continuation fragments (no L2CAP header) */
2970 frag = &skb_shinfo(skb)->frag_list;
2972 count = min_t(unsigned int, conn->mtu, len);
2974 *frag = bt_skb_alloc(count, GFP_KERNEL);
2978 skb_put_data(*frag, data, count);
2983 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type/length via
 * @type/@olen and the value via *val.  1/2/4-byte values are read as
 * integers (unaligned-safe); larger values are returned as a pointer
 * into the buffer cast to unsigned long.  Advances *ptr past the
 * option and returns the number of bytes consumed.
 */
2993 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2996 struct l2cap_conf_opt *opt = *ptr;
2999 len = L2CAP_CONF_OPT_SIZE + opt->len;
3007 *val = *((u8 *) opt->val);
3011 *val = get_unaligned_le16(opt->val);
3015 *val = get_unaligned_le32(opt->val);
/* Other lengths: hand back a pointer to the raw option value. */
3019 *val = (unsigned long) opt->val;
3023 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and
 * advance the pointer.  @size is the space remaining in the output
 * buffer; the option is silently skipped if it would not fit
 * (overflow guard added for CVE-2017-1000251-class issues).
 * 1/2/4-byte values are stored little-endian; larger values are
 * copied from the pointer encoded in @val.
 */
3027 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3029 struct l2cap_conf_opt *opt = *ptr;
3031 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3033 if (size < L2CAP_CONF_OPT_SIZE + len)
3041 *((u8 *) opt->val) = val;
3045 put_unaligned_le16(val, opt->val);
3049 put_unaligned_le32(val, opt->val);
3053 memcpy(opt->val, (void *) val, len);
3057 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * QoS parameters and append it via l2cap_add_conf_opt().  ERTM uses
 * the channel's service type with default access latency / flush
 * timeout; streaming mode advertises best-effort.
 */
3060 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3062 struct l2cap_conf_efs efs;
3064 switch (chan->mode) {
3065 case L2CAP_MODE_ERTM:
3066 efs.id = chan->local_id;
3067 efs.stype = chan->local_stype;
3068 efs.msdu = cpu_to_le16(chan->local_msdu);
3069 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3070 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3071 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3074 case L2CAP_MODE_STREAMING:
3076 efs.stype = L2CAP_SERV_BESTEFFORT;
3077 efs.msdu = cpu_to_le16(chan->local_msdu);
3078 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3087 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3088 (unsigned long) &efs, size);
/* Deferred-ack work item: when the ack timer fires, send an RR/RNR for
 * any frames received since the last acknowledgment.  Drops the
 * channel reference taken when the work was scheduled.
 */
3091 static void l2cap_ack_timeout(struct work_struct *work)
3093 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3097 BT_DBG("chan %p", chan);
3099 l2cap_chan_lock(chan);
/* Number of in-sequence frames not yet acked to the peer. */
3101 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3102 chan->last_acked_seq);
3105 l2cap_send_rr_or_rnr(chan, 0);
3107 l2cap_chan_unlock(chan);
3108 l2cap_chan_put(chan);
/* Reset per-channel ERTM/streaming state before the channel becomes
 * ready: zero all sequence counters, reset AMP move state, and (for
 * ERTM only) initialise the SREJ queue and the srej/retrans sequence
 * lists.  Returns 0 on success or the error from the list allocation,
 * freeing srej_list if retrans_list allocation fails.
 */
3111 int l2cap_ertm_init(struct l2cap_chan *chan)
3115 chan->next_tx_seq = 0;
3116 chan->expected_tx_seq = 0;
3117 chan->expected_ack_seq = 0;
3118 chan->unacked_frames = 0;
3119 chan->buffer_seq = 0;
3120 chan->frames_sent = 0;
3121 chan->last_acked_seq = 0;
3123 chan->sdu_last_frag = NULL;
3126 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller, no AMP move in progress. */
3128 chan->local_amp_id = AMP_ID_BREDR;
3129 chan->move_id = AMP_ID_BREDR;
3130 chan->move_state = L2CAP_MOVE_STABLE;
3131 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the retransmission machinery below. */
3133 if (chan->mode != L2CAP_MODE_ERTM)
3136 chan->rx_state = L2CAP_RX_STATE_RECV;
3137 chan->tx_state = L2CAP_TX_STATE_XMIT;
3139 skb_queue_head_init(&chan->srej_q);
3141 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3145 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3147 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic
 * mode.
 */
3152 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3155 case L2CAP_MODE_STREAMING:
3156 case L2CAP_MODE_ERTM:
3157 if (l2cap_mode_supported(mode, remote_feat_mask))
3161 return L2CAP_MODE_BASIC;
3165 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3167 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3168 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3171 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3173 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3174 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP controller the timeouts are derived from the controller's
 * best-effort flush timeout (clamped to 16 bits); on BR/EDR the spec
 * defaults are used.
 */
3177 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3178 struct l2cap_conf_rfc *rfc)
3180 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3181 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3183 /* Class 1 devices have must have ERTM timeouts
3184 * exceeding the Link Supervision Timeout. The
3185 * default Link Supervision Timeout for AMP
3186 * controllers is 10 seconds.
3188 * Class 1 devices use 0xffffffff for their
3189 * best-effort flush timeout, so the clamping logic
3190 * will result in a timeout that meets the above
3191 * requirement. ERTM timeouts are 16-bit values, so
3192 * the maximum timeout is 65.535 seconds.
3195 /* Convert timeout to milliseconds and round */
3196 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3198 /* This is the recommended formula for class 2 devices
3199 * that start ERTM timers when packets are sent to the
3202 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (handled on the elided line below). */
3204 if (ertm_to > 0xffff)
3207 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3208 rfc->monitor_timeout = rfc->retrans_timeout;
3210 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3211 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Decide the TX window to advertise: if the requested window exceeds
 * the default and the connection supports Extended Window Size, switch
 * to the extended control field; otherwise clamp the window to the
 * classic maximum.  ack_win starts equal to the chosen tx_win.
 */
3215 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3217 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3218 __l2cap_ews_supported(chan->conn)) {
3219 /* use extended control field */
3220 set_bit(FLAG_EXT_CTRL, &chan->flags);
3221 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3223 chan->tx_win = min_t(u16, chan->tx_win,
3224 L2CAP_DEFAULT_TX_WINDOW);
3225 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3227 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for @chan into @data
 * (capacity @data_size; endptr bounds every option append).  Adds MTU,
 * RFC (mode-specific), and optionally EFS/EWS/FCS options depending on
 * the negotiated mode and connection features.  Returns the number of
 * bytes written (via the elided return at the end).
 */
3230 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3232 struct l2cap_conf_req *req = data;
3233 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3234 void *ptr = req->data;
3235 void *endptr = data + data_size;
3238 BT_DBG("chan %p", chan);
/* Only downgrade the mode on the very first request. */
3240 if (chan->num_conf_req || chan->num_conf_rsp)
3243 switch (chan->mode) {
3244 case L2CAP_MODE_STREAMING:
3245 case L2CAP_MODE_ERTM:
3246 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3249 if (__l2cap_efs_supported(chan->conn))
3250 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3254 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option is only needed when it differs from the default. */
3259 if (chan->imtu != L2CAP_DEFAULT_MTU)
3260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3262 switch (chan->mode) {
3263 case L2CAP_MODE_BASIC:
/* In basic mode an RFC option is only sent if the peer knows
 * about ERTM/streaming at all.
 */
3267 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3268 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3271 rfc.mode = L2CAP_MODE_BASIC;
3273 rfc.max_transmit = 0;
3274 rfc.retrans_timeout = 0;
3275 rfc.monitor_timeout = 0;
3276 rfc.max_pdu_size = 0;
3278 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3279 (unsigned long) &rfc, endptr - ptr);
3282 case L2CAP_MODE_ERTM:
3283 rfc.mode = L2CAP_MODE_ERTM;
3284 rfc.max_transmit = chan->max_tx;
3286 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size must fit the link MTU minus ERTM overhead. */
3288 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3289 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3291 rfc.max_pdu_size = cpu_to_le16(size);
3293 l2cap_txwin_setup(chan);
3295 rfc.txwin_size = min_t(u16, chan->tx_win,
3296 L2CAP_DEFAULT_TX_WINDOW);
3298 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3299 (unsigned long) &rfc, endptr - ptr);
3301 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3302 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3304 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3305 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3306 chan->tx_win, endptr - ptr);
/* Offer to disable FCS when allowed and either side prefers it. */
3308 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3309 if (chan->fcs == L2CAP_FCS_NONE ||
3310 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3311 chan->fcs = L2CAP_FCS_NONE;
3312 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3313 chan->fcs, endptr - ptr);
3317 case L2CAP_MODE_STREAMING:
3318 l2cap_txwin_setup(chan);
3319 rfc.mode = L2CAP_MODE_STREAMING;
3321 rfc.max_transmit = 0;
3322 rfc.retrans_timeout = 0;
3323 rfc.monitor_timeout = 0;
3325 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3326 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3328 rfc.max_pdu_size = cpu_to_le16(size);
3330 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3331 (unsigned long) &rfc, endptr - ptr);
3333 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3334 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3336 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3337 if (chan->fcs == L2CAP_FCS_NONE ||
3338 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3339 chan->fcs = L2CAP_FCS_NONE;
3340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3341 chan->fcs, endptr - ptr);
3346 req->dcid = cpu_to_le16(chan->dcid);
3347 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the response into @data (bounded by endptr).  Validates each
 * option (olen checks guard against malformed/short options), selects
 * or rejects the requested mode, and fills in our accepted values.
 * Returns the response length (elided return), or -ECONNREFUSED when
 * the request is unacceptable.
 */
3352 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3354 struct l2cap_conf_rsp *rsp = data;
3355 void *ptr = rsp->data;
3356 void *endptr = data + data_size;
3357 void *req = chan->conf_req;
3358 int len = chan->conf_len;
3359 int type, hint, olen;
3361 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3362 struct l2cap_conf_efs efs;
3364 u16 mtu = L2CAP_DEFAULT_MTU;
3365 u16 result = L2CAP_CONF_SUCCESS;
3368 BT_DBG("chan %p", chan);
/* First pass: walk every option in the request. */
3370 while (len >= L2CAP_CONF_OPT_SIZE) {
3371 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
3375 hint = type & L2CAP_CONF_HINT;
3376 type &= L2CAP_CONF_MASK;
3379 case L2CAP_CONF_MTU:
3385 case L2CAP_CONF_FLUSH_TO:
3388 chan->flush_to = val;
3391 case L2CAP_CONF_QOS:
3394 case L2CAP_CONF_RFC:
3395 if (olen != sizeof(rfc))
3397 memcpy(&rfc, (void *) val, olen);
3400 case L2CAP_CONF_FCS:
3403 if (val == L2CAP_FCS_NONE)
3404 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3407 case L2CAP_CONF_EFS:
3408 if (olen != sizeof(efs))
3411 memcpy(&efs, (void *) val, olen);
/* EWS is only valid when we can do A2MP; otherwise refuse. */
3414 case L2CAP_CONF_EWS:
3417 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3418 return -ECONNREFUSED;
3419 set_bit(FLAG_EXT_CTRL, &chan->flags);
3420 set_bit(CONF_EWS_RECV, &chan->conf_state);
3421 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3422 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3428 result = L2CAP_CONF_UNKNOWN;
3429 *((u8 *) ptr++) = type;
3434 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Mode negotiation: reconcile our mode with the requested RFC mode. */
3437 switch (chan->mode) {
3438 case L2CAP_MODE_STREAMING:
3439 case L2CAP_MODE_ERTM:
3440 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3441 chan->mode = l2cap_select_mode(rfc.mode,
3442 chan->conn->feat_mask);
3447 if (__l2cap_efs_supported(chan->conn))
3448 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3450 return -ECONNREFUSED;
3453 if (chan->mode != rfc.mode)
3454 return -ECONNREFUSED;
3460 if (chan->mode != rfc.mode) {
3461 result = L2CAP_CONF_UNACCEPT;
3462 rfc.mode = chan->mode;
/* Second disagreement on mode ends the negotiation. */
3464 if (chan->num_conf_rsp == 1)
3465 return -ECONNREFUSED;
3467 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3468 (unsigned long) &rfc, endptr - ptr);
3471 if (result == L2CAP_CONF_SUCCESS) {
3472 /* Configure output options and let the other side know
3473 * which ones we don't like. */
3475 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3476 result = L2CAP_CONF_UNACCEPT;
3479 set_bit(CONF_MTU_DONE, &chan->conf_state);
3481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must match ours unless either side is NOTRAFIC. */
3484 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3485 efs.stype != L2CAP_SERV_NOTRAFIC &&
3486 efs.stype != chan->local_stype) {
3488 result = L2CAP_CONF_UNACCEPT;
3490 if (chan->num_conf_req >= 1)
3491 return -ECONNREFUSED;
3493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3495 (unsigned long) &efs, endptr - ptr);
3497 /* Send PENDING Conf Rsp */
3498 result = L2CAP_CONF_PENDING;
3499 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3504 case L2CAP_MODE_BASIC:
3505 chan->fcs = L2CAP_FCS_NONE;
3506 set_bit(CONF_MODE_DONE, &chan->conf_state);
3509 case L2CAP_MODE_ERTM:
3510 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3511 chan->remote_tx_win = rfc.txwin_size;
3513 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3515 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what the link MTU can carry. */
3517 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3518 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3519 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3520 rfc.max_pdu_size = cpu_to_le16(size);
3521 chan->remote_mps = size;
3523 __l2cap_set_ertm_timeouts(chan, &rfc);
3525 set_bit(CONF_MODE_DONE, &chan->conf_state);
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3528 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3531 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3532 chan->remote_id = efs.id;
3533 chan->remote_stype = efs.stype;
3534 chan->remote_msdu = le16_to_cpu(efs.msdu);
3535 chan->remote_flush_to =
3536 le32_to_cpu(efs.flush_to);
3537 chan->remote_acc_lat =
3538 le32_to_cpu(efs.acc_lat);
3539 chan->remote_sdu_itime =
3540 le32_to_cpu(efs.sdu_itime);
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3543 (unsigned long) &efs, endptr - ptr);
3547 case L2CAP_MODE_STREAMING:
3548 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3549 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3550 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3551 rfc.max_pdu_size = cpu_to_le16(size);
3552 chan->remote_mps = size;
3554 set_bit(CONF_MODE_DONE, &chan->conf_state);
3556 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3557 (unsigned long) &rfc, endptr - ptr);
/* Any other mode: reject, reporting our mode back. */
3562 result = L2CAP_CONF_UNACCEPT;
3564 memset(&rfc, 0, sizeof(rfc));
3565 rfc.mode = chan->mode;
3568 if (result == L2CAP_CONF_SUCCESS)
3569 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3571 rsp->scid = cpu_to_le16(chan->dcid);
3572 rsp->result = cpu_to_le16(result);
3573 rsp->flags = cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build a follow-up
 * Configuration Request into @data (bounded by endptr).  Adopts the
 * values the peer proposed where acceptable, updates *result, and
 * applies the negotiated ERTM/streaming parameters on success or
 * pending.  Returns the request length (elided return) or
 * -ECONNREFUSED on an unacceptable response.
 */
3578 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3579 void *data, size_t size, u16 *result)
3581 struct l2cap_conf_req *req = data;
3582 void *ptr = req->data;
3583 void *endptr = data + size;
3586 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3587 struct l2cap_conf_efs efs;
3589 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3591 while (len >= L2CAP_CONF_OPT_SIZE) {
3592 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3597 case L2CAP_CONF_MTU:
/* Never accept an MTU below the spec minimum. */
3600 if (val < L2CAP_DEFAULT_MIN_MTU) {
3601 *result = L2CAP_CONF_UNACCEPT;
3602 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3605 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3609 case L2CAP_CONF_FLUSH_TO:
3612 chan->flush_to = val;
3613 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3614 chan->flush_to, endptr - ptr);
3617 case L2CAP_CONF_RFC:
3618 if (olen != sizeof(rfc))
3620 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation. */
3621 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3622 rfc.mode != chan->mode)
3623 return -ECONNREFUSED;
3625 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3626 (unsigned long) &rfc, endptr - ptr);
3629 case L2CAP_CONF_EWS:
3632 chan->ack_win = min_t(u16, val, chan->ack_win);
3633 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3634 chan->tx_win, endptr - ptr);
3637 case L2CAP_CONF_EFS:
3638 if (olen != sizeof(efs))
3640 memcpy(&efs, (void *)val, olen);
3641 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3642 efs.stype != L2CAP_SERV_NOTRAFIC &&
3643 efs.stype != chan->local_stype)
3644 return -ECONNREFUSED;
3645 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3646 (unsigned long) &efs, endptr - ptr);
3649 case L2CAP_CONF_FCS:
3652 if (*result == L2CAP_CONF_PENDING)
3653 if (val == L2CAP_FCS_NONE)
3654 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode here. */
3660 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3661 return -ECONNREFUSED;
3663 chan->mode = rfc.mode;
3665 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3667 case L2CAP_MODE_ERTM:
3668 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3669 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3670 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3671 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3672 chan->ack_win = min_t(u16, chan->ack_win,
3675 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3676 chan->local_msdu = le16_to_cpu(efs.msdu);
3677 chan->local_sdu_itime =
3678 le32_to_cpu(efs.sdu_itime);
3679 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3680 chan->local_flush_to =
3681 le32_to_cpu(efs.flush_to);
3685 case L2CAP_MODE_STREAMING:
3686 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3690 req->dcid = cpu_to_le16(chan->dcid);
3691 req->flags = cpu_to_le16(0);
/* Build a bare Configuration Response header (scid/result/flags, no
 * options) into @data and return its length (elided return).
 */
3696 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3697 u16 result, u16 flags)
3699 struct l2cap_conf_rsp *rsp = data;
3700 void *ptr = rsp->data;
3702 BT_DBG("chan %p", chan);
3704 rsp->scid = cpu_to_le16(chan->dcid);
3705 rsp->result = cpu_to_le16(result);
3706 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. waiting for user authorization),
 * reporting our scid/MTU/MPS/initial credits with a success result.
 */
3711 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3713 struct l2cap_le_conn_rsp rsp;
3714 struct l2cap_conn *conn = chan->conn;
3716 BT_DBG("chan %p", chan);
3718 rsp.dcid = cpu_to_le16(chan->scid);
3719 rsp.mtu = cpu_to_le16(chan->imtu);
3720 rsp.mps = cpu_to_le16(chan->mps);
3721 rsp.credits = cpu_to_le16(chan->rx_credits);
3722 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3724 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or AMP Create Channel) Response
 * for a channel whose acceptance was postponed, then kick off
 * configuration by sending our first Configuration Request if one has
 * not been sent yet.
 */
3728 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3730 struct l2cap_conn_rsp rsp;
3731 struct l2cap_conn *conn = chan->conn;
3735 rsp.scid = cpu_to_le16(chan->dcid);
3736 rsp.dcid = cpu_to_le16(chan->scid);
3737 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3738 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP. */
3741 rsp_code = L2CAP_CREATE_CHAN_RSP;
3743 rsp_code = L2CAP_CONN_RSP;
3745 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3747 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3749 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3752 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3753 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3754 chan->num_conf_req++;
/* Extract the RFC (and extended window) parameters from a successful
 * Configuration Response and apply them to the channel.  Defaults are
 * pre-loaded so a misbehaving remote that omits the options still
 * leaves the channel with sane values.
 */
3757 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3761 /* Use sane default values in case a misbehaving remote device
3762 * did not send an RFC or extended window size option.
3764 u16 txwin_ext = chan->ack_win;
3765 struct l2cap_conf_rfc rfc = {
3767 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3768 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3769 .max_pdu_size = cpu_to_le16(chan->imtu),
3770 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3773 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
3775 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3778 while (len >= L2CAP_CONF_OPT_SIZE) {
3779 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3784 case L2CAP_CONF_RFC:
3785 if (olen != sizeof(rfc))
3787 memcpy(&rfc, (void *)val, olen);
3789 case L2CAP_CONF_EWS:
3798 case L2CAP_MODE_ERTM:
3799 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3800 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3801 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* ack_win is bounded by the peer's window: extended (EWS) or RFC. */
3802 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3803 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3805 chan->ack_win = min_t(u16, chan->ack_win,
3808 case L2CAP_MODE_STREAMING:
3809 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and proceed with starting channels.
 */
3813 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3814 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3817 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject malformed (short) packets before touching fields. */
3819 if (cmd_len < sizeof(*rej))
3822 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3825 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3826 cmd->ident == conn->info_ident) {
3827 cancel_delayed_work(&conn->info_timer);
3829 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3830 conn->info_ident = 0;
3832 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (BR/EDR or AMP Create Channel).
 * Looks up a listening channel for the PSM, enforces link security for
 * non-SDP PSMs, creates the new channel, and replies with @rsp_code
 * carrying success/pending/refusal.  May also trigger the initial
 * Information Request and the first Configuration Request.  Returns the
 * new channel (or NULL via the elided paths on failure).
 */
3838 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3839 struct l2cap_cmd_hdr *cmd,
3840 u8 *data, u8 rsp_code, u8 amp_id)
3842 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3843 struct l2cap_conn_rsp rsp;
3844 struct l2cap_chan *chan = NULL, *pchan;
3845 int result, status = L2CAP_CS_NO_INFO;
3847 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3848 __le16 psm = req->psm;
3850 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3852 /* Check if we have socket listening on psm */
3853 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3854 &conn->hcon->dst, ACL_LINK);
3856 result = L2CAP_CR_BAD_PSM;
3860 mutex_lock(&conn->chan_lock);
3861 l2cap_chan_lock(pchan);
3863 /* Check if the ACL is secure enough (if not SDP) */
3864 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3865 !hci_conn_check_link_mode(conn->hcon)) {
3866 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3867 result = L2CAP_CR_SEC_BLOCK;
3871 result = L2CAP_CR_NO_MEM;
3873 /* Check if we already have channel with that dcid */
3874 if (__l2cap_get_chan_by_dcid(conn, scid))
3877 chan = pchan->ops->new_connection(pchan);
3881 /* For certain devices (ex: HID mouse), support for authentication,
3882 * pairing and bonding is optional. For such devices, inorder to avoid
3883 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3884 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3886 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3888 bacpy(&chan->src, &conn->hcon->src);
3889 bacpy(&chan->dst, &conn->hcon->dst);
3890 chan->src_type = bdaddr_src_type(conn->hcon);
3891 chan->dst_type = bdaddr_dst_type(conn->hcon);
3894 chan->local_amp_id = amp_id;
3896 __l2cap_chan_add(conn, chan);
3900 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3902 chan->ident = cmd->ident;
/* With the feature exchange done we can decide immediately;
 * otherwise answer PENDING and ask for the feature mask below.
 */
3904 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3905 if (l2cap_chan_check_security(chan, false)) {
3906 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3907 l2cap_state_change(chan, BT_CONNECT2);
3908 result = L2CAP_CR_PEND;
3909 status = L2CAP_CS_AUTHOR_PEND;
3910 chan->ops->defer(chan);
3912 /* Force pending result for AMP controllers.
3913 * The connection will succeed after the
3914 * physical link is up.
3916 if (amp_id == AMP_ID_BREDR) {
3917 l2cap_state_change(chan, BT_CONFIG);
3918 result = L2CAP_CR_SUCCESS;
3920 l2cap_state_change(chan, BT_CONNECT2);
3921 result = L2CAP_CR_PEND;
3923 status = L2CAP_CS_NO_INFO;
3926 l2cap_state_change(chan, BT_CONNECT2);
3927 result = L2CAP_CR_PEND;
3928 status = L2CAP_CS_AUTHEN_PEND;
3931 l2cap_state_change(chan, BT_CONNECT2);
3932 result = L2CAP_CR_PEND;
3933 status = L2CAP_CS_NO_INFO;
3937 l2cap_chan_unlock(pchan);
3938 mutex_unlock(&conn->chan_lock);
3939 l2cap_chan_put(pchan);
3942 rsp.scid = cpu_to_le16(scid);
3943 rsp.dcid = cpu_to_le16(dcid);
3944 rsp.result = cpu_to_le16(result);
3945 rsp.status = cpu_to_le16(status);
3946 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info: start the feature-mask exchange now. */
3948 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3949 struct l2cap_info_req info;
3950 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3952 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3953 conn->info_ident = l2cap_get_ident(conn);
3955 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3957 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3958 sizeof(info), &info);
/* Immediate success: begin configuration right away. */
3961 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3962 result == L2CAP_CR_SUCCESS) {
3964 set_bit(CONF_REQ_SENT, &chan->conf_state);
3965 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3966 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3967 chan->num_conf_req++;
/* Signalling handler for a Connection Request PDU: validate the length,
 * notify the management interface of the (first) connection, and hand
 * off to l2cap_connect() with a plain CONN_RSP (no AMP).
 */
3973 static int l2cap_connect_req(struct l2cap_conn *conn,
3974 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3976 struct hci_dev *hdev = conn->hcon->hdev;
3977 struct hci_conn *hcon = conn->hcon;
/* Reject malformed (short) request packets. */
3979 if (cmd_len < sizeof(struct l2cap_conn_req))
3983 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3984 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3985 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3986 hci_dev_unlock(hdev);
3988 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or AMP Create Channel Response).
 * Validates the returned dcid range, finds our channel by scid or by
 * the pending command ident, and on success moves it to BT_CONFIG and
 * sends the first Configuration Request; on refusal the channel is
 * deleted with ECONNREFUSED.
 */
3992 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3993 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3996 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3997 u16 scid, dcid, result, status;
3998 struct l2cap_chan *chan;
4002 if (cmd_len < sizeof(*rsp))
4005 scid = __le16_to_cpu(rsp->scid);
4006 dcid = __le16_to_cpu(rsp->dcid);
4007 result = __le16_to_cpu(rsp->result);
4008 status = __le16_to_cpu(rsp->status);
/* A successful response must carry a dcid in the dynamic range. */
4010 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4011 dcid > L2CAP_CID_DYN_END))
4014 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4015 dcid, scid, result, status);
4017 mutex_lock(&conn->chan_lock);
4020 chan = __l2cap_get_chan_by_scid(conn, scid);
4026 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already dying. */
4033 chan = l2cap_chan_hold_unless_zero(chan);
4041 l2cap_chan_lock(chan);
4044 case L2CAP_CR_SUCCESS:
/* Guard against a duplicate dcid on this connection. */
4045 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4050 l2cap_state_change(chan, BT_CONFIG);
4053 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4055 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4058 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4059 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4060 chan->num_conf_req++;
4064 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4068 l2cap_chan_del(chan, ECONNREFUSED);
4072 l2cap_chan_unlock(chan);
4073 l2cap_chan_put(chan);
4076 mutex_unlock(&conn->chan_lock);
4081 static inline void set_default_fcs(struct l2cap_chan *chan)
4083 /* FCS is enabled only in ERTM or streaming mode, if one or both
4086 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4087 chan->fcs = L2CAP_FCS_NONE;
4088 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4089 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configuration Response once a pending EFS
 * negotiation resolves: clears the local-pending flag, marks our
 * output configuration done, and transmits the response built by
 * l2cap_build_conf_rsp().
 */
4092 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4093 u8 ident, u16 flags)
4095 struct l2cap_conn *conn = chan->conn;
4097 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4100 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4101 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4103 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4104 l2cap_build_conf_rsp(chan, data,
4105 L2CAP_CONF_SUCCESS, flags), data);
4108 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4111 struct l2cap_cmd_rej_cid rej;
4113 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4114 rej.scid = __cpu_to_le16(scid);
4115 rej.dcid = __cpu_to_le16(dcid);
4117 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request.  Accumulates (possibly
 * continued) option data into chan->conf_req, then parses the complete
 * request, sends the response, and — when both directions are
 * configured — initialises ERTM and marks the channel ready.
 */
4120 static inline int l2cap_config_req(struct l2cap_conn *conn,
4121 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4124 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4127 struct l2cap_chan *chan;
4130 if (cmd_len < sizeof(*req))
4133 dcid = __le16_to_cpu(req->dcid);
4134 flags = __le16_to_cpu(req->flags);
4136 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4138 chan = l2cap_get_chan_by_scid(conn, dcid);
4140 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in these states; reject otherwise. */
4144 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4145 chan->state != BT_CONNECTED) {
4146 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4151 /* Reject if config buffer is too small. */
4152 len = cmd_len - sizeof(*req);
4153 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4154 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4155 l2cap_build_conf_rsp(chan, rsp,
4156 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the configuration options. */
4161 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4162 chan->conf_len += len;
4164 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4165 /* Incomplete config. Send empty response. */
4166 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4167 l2cap_build_conf_rsp(chan, rsp,
4168 L2CAP_CONF_SUCCESS, flags), rsp);
4172 /* Complete config. */
4173 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4175 l2cap_send_disconn_req(chan, ECONNRESET);
4179 chan->ident = cmd->ident;
4180 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4181 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4182 chan->num_conf_rsp++;
4184 /* Reset config buffer. */
4187 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finish channel setup. */
4190 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4191 set_default_fcs(chan);
4193 if (chan->mode == L2CAP_MODE_ERTM ||
4194 chan->mode == L2CAP_MODE_STREAMING)
4195 err = l2cap_ertm_init(chan);
4198 l2cap_send_disconn_req(chan, -err);
4200 l2cap_chan_ready(chan);
4205 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4207 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4208 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4209 chan->num_conf_req++;
4212 /* Got Conf Rsp PENDING from remote side and assume we sent
4213 Conf Rsp PENDING in the code above */
4214 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4215 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4217 /* check compatibility */
4219 /* Send rsp for BR/EDR channel */
4221 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4223 chan->ident = cmd->ident;
4227 l2cap_chan_unlock(chan);
4228 l2cap_chan_put(chan);
/* Handle an incoming Configuration Response.  On success apply the
 * negotiated RFC values; on PENDING continue the EFS dance; on
 * UNACCEPT re-issue a corrected request (bounded by the retry limit);
 * anything else tears the channel down.  When both directions are
 * configured, initialise ERTM and mark the channel ready.
 */
4232 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4233 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4236 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4237 u16 scid, flags, result;
4238 struct l2cap_chan *chan;
4239 int len = cmd_len - sizeof(*rsp);
4242 if (cmd_len < sizeof(*rsp))
4245 scid = __le16_to_cpu(rsp->scid);
4246 flags = __le16_to_cpu(rsp->flags);
4247 result = __le16_to_cpu(rsp->result);
4249 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4252 chan = l2cap_get_chan_by_scid(conn, scid);
4257 case L2CAP_CONF_SUCCESS:
4258 l2cap_conf_rfc_get(chan, rsp->data, len);
4259 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4262 case L2CAP_CONF_PENDING:
4263 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4265 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4268 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4269 buf, sizeof(buf), &result);
4271 l2cap_send_disconn_req(chan, ECONNRESET);
/* Without a high-speed link, answer right away; with one,
 * create the logical link first and defer the response.
 */
4275 if (!chan->hs_hcon) {
4276 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4279 if (l2cap_check_efs(chan)) {
4280 amp_create_logical_link(chan);
4281 chan->ident = cmd->ident;
4287 case L2CAP_CONF_UNACCEPT:
4288 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit our request buffer. */
4291 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4292 l2cap_send_disconn_req(chan, ECONNRESET);
4296 /* throw out any old stored conf requests */
4297 result = L2CAP_CONF_SUCCESS;
4298 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4299 req, sizeof(req), &result);
4301 l2cap_send_disconn_req(chan, ECONNRESET);
4305 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4306 L2CAP_CONF_REQ, len, req);
4307 chan->num_conf_req++;
4308 if (result != L2CAP_CONF_SUCCESS)
/* Any other result code: give up on the channel. */
4314 l2cap_chan_set_err(chan, ECONNRESET);
4316 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4317 l2cap_send_disconn_req(chan, ECONNRESET);
4321 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4324 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4326 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4327 set_default_fcs(chan);
4329 if (chan->mode == L2CAP_MODE_ERTM ||
4330 chan->mode == L2CAP_MODE_STREAMING)
4331 err = l2cap_ertm_init(chan);
4334 l2cap_send_disconn_req(chan, -err);
4336 l2cap_chan_ready(chan);
4340 l2cap_chan_unlock(chan);
4341 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Looks up the channel by the request's destination CID (our scid on the
 * wire), echoes a Disconnection Response, shuts the channel down and
 * removes it from the connection, then invokes the ops->close callback.
 * An unknown CID is answered with a Command Reject (invalid CID).
 */
4345 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4346 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4349 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4350 struct l2cap_disconn_rsp rsp;
4352 struct l2cap_chan *chan;
4354 if (cmd_len != sizeof(*req))
4357 scid = __le16_to_cpu(req->scid);
4358 dcid = __le16_to_cpu(req->dcid);
4360 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4362 chan = l2cap_get_chan_by_scid(conn, dcid);
4364 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4368 rsp.dcid = cpu_to_le16(chan->scid);
4369 rsp.scid = cpu_to_le16(chan->dcid);
4370 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4372 chan->ops->set_shutdown(chan);
/* Drop the chan lock before taking conn->chan_lock to preserve the
 * established lock ordering (chan_lock before individual channel locks),
 * then re-acquire the channel lock to delete it from the connection.
 */
4374 l2cap_chan_unlock(chan);
4375 mutex_lock(&conn->chan_lock);
4376 l2cap_chan_lock(chan);
4377 l2cap_chan_del(chan, ECONNRESET);
4378 mutex_unlock(&conn->chan_lock);
4380 chan->ops->close(chan);
4382 l2cap_chan_unlock(chan);
4383 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel (looked up by scid)
 * must already be in BT_DISCONN, otherwise the response is ignored.
 * On success the channel is removed from the connection (err 0, i.e. a
 * clean close) and ops->close is invoked.
 */
4388 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4389 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4392 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4394 struct l2cap_chan *chan;
4396 if (cmd_len != sizeof(*rsp))
4399 scid = __le16_to_cpu(rsp->scid);
4400 dcid = __le16_to_cpu(rsp->dcid);
4402 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4404 chan = l2cap_get_chan_by_scid(conn, scid);
/* A response for a channel we did not put into BT_DISCONN is stale. */
4409 if (chan->state != BT_DISCONN) {
4410 l2cap_chan_unlock(chan);
4411 l2cap_chan_put(chan);
/* Re-lock in conn->chan_lock-first order before deleting the channel
 * (same ordering dance as l2cap_disconnect_req).
 */
4415 l2cap_chan_unlock(chan);
4416 mutex_lock(&conn->chan_lock);
4417 l2cap_chan_lock(chan);
4418 l2cap_chan_del(chan, 0);
4419 mutex_unlock(&conn->chan_lock);
4421 chan->ops->close(chan);
4423 l2cap_chan_unlock(chan);
4424 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Information Request.
 *
 * Answers three cases:
 *  - L2CAP_IT_FEAT_MASK:   report the local feature mask (base mask plus
 *    ERTM/streaming, and extended flow/window when A2MP is enabled).
 *  - L2CAP_IT_FIXED_CHAN:  report the local fixed-channel bitmap (first
 *    octet), remaining seven octets zeroed.
 *  - anything else:        L2CAP_IR_NOTSUPP.
 */
4429 static inline int l2cap_information_req(struct l2cap_conn *conn,
4430 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4433 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4436 if (cmd_len != sizeof(*req))
4439 type = __le16_to_cpu(req->type);
4441 BT_DBG("type 0x%4.4x", type);
4443 if (type == L2CAP_IT_FEAT_MASK) {
4445 u32 feat_mask = l2cap_feat_mask;
4446 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4447 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4448 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4450 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4452 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4453 feat_mask |= L2CAP_FEAT_EXT_FLOW
4454 | L2CAP_FEAT_EXT_WINDOW;
/* Feature mask is serialized little-endian into the response payload. */
4456 put_unaligned_le32(feat_mask, rsp->data);
4457 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4459 } else if (type == L2CAP_IT_FIXED_CHAN) {
4461 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4463 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4464 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4465 rsp->data[0] = conn->local_fixed_chan;
4466 memset(rsp->data + 1, 0, 7);
4467 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4470 struct l2cap_info_rsp rsp;
4471 rsp.type = cpu_to_le16(type);
4472 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4473 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.
 *
 * Drives our two-step discovery of the peer's capabilities: after the
 * feature-mask answer, a fixed-channel query is issued if the peer
 * advertises fixed channels; otherwise (or after the fixed-channel
 * answer, or on failure) discovery is marked done and pending channels
 * are started via l2cap_conn_start().
 */
4480 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4481 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4484 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4487 if (cmd_len < sizeof(*rsp))
4490 type = __le16_to_cpu(rsp->type);
4491 result = __le16_to_cpu(rsp->result);
4493 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4495 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4496 if (cmd->ident != conn->info_ident ||
4497 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4500 cancel_delayed_work(&conn->info_timer);
4502 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused the query: give up on discovery and proceed anyway. */
4503 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4504 conn->info_ident = 0;
4506 l2cap_conn_start(conn);
4512 case L2CAP_IT_FEAT_MASK:
4513 conn->feat_mask = get_unaligned_le32(rsp->data);
4515 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Chain the second query: ask for the fixed-channel bitmap. */
4516 struct l2cap_info_req req;
4517 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4519 conn->info_ident = l2cap_get_ident(conn);
4521 l2cap_send_cmd(conn, conn->info_ident,
4522 L2CAP_INFO_REQ, sizeof(req), &req);
4524 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4525 conn->info_ident = 0;
4527 l2cap_conn_start(conn);
4531 case L2CAP_IT_FIXED_CHAN:
4532 conn->remote_fixed_chan = rsp->data[0];
4533 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4534 conn->info_ident = 0;
4536 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * For controller id 0 (AMP_ID_BREDR) this degenerates into a normal
 * BR/EDR connect. Otherwise the requested AMP controller id is
 * validated (must exist, be an AMP controller, and be up); on success
 * the resulting BR/EDR channel is linked to the AMP manager and its
 * high-speed hci_conn, with FCS disabled and the MTU taken from the
 * AMP controller's block MTU. Invalid AMP ids are answered with
 * L2CAP_CR_BAD_AMP.
 */
4543 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4544 struct l2cap_cmd_hdr *cmd,
4545 u16 cmd_len, void *data)
4547 struct l2cap_create_chan_req *req = data;
4548 struct l2cap_create_chan_rsp rsp;
4549 struct l2cap_chan *chan;
4550 struct hci_dev *hdev;
4553 if (cmd_len != sizeof(*req))
/* Reject if A2MP is not enabled locally. */
4556 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4559 psm = le16_to_cpu(req->psm);
4560 scid = le16_to_cpu(req->scid);
4562 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4564 /* For controller id 0 make BR/EDR connection */
4565 if (req->amp_id == AMP_ID_BREDR) {
4566 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4571 /* Validate AMP controller id */
4572 hdev = hci_dev_get(req->amp_id);
4576 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4581 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4584 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4585 struct hci_conn *hs_hcon;
4587 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link for this peer: reject with invalid CID. */
4591 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4596 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4598 mgr->bredr_chan = chan;
4599 chan->hs_hcon = hs_hcon;
/* AMP data path does not use the L2CAP FCS. */
4600 chan->fcs = L2CAP_FCS_NONE;
4601 conn->mtu = hdev->block_mtu;
4610 rsp.scid = cpu_to_le16(scid);
4611 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4612 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4614 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request for @chan toward controller
 * @dest_amp_id. Allocates a fresh signaling ident (stored in chan->ident
 * so the response can be matched) and arms the move timeout.
 */
4620 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4622 struct l2cap_move_chan_req req;
4625 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4627 ident = l2cap_get_ident(chan->conn);
4628 chan->ident = ident;
/* The initiator's CID identifies the channel being moved (ICID). */
4630 req.icid = cpu_to_le16(chan->scid);
4631 req.dest_amp_id = dest_amp_id;
4633 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4636 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with @result, reusing the ident of
 * the request we are answering (chan->ident). The ICID reported is the
 * peer's CID for this channel (dcid).
 */
4639 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4641 struct l2cap_move_chan_rsp rsp;
4643 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4645 rsp.icid = cpu_to_le16(chan->dcid);
4646 rsp.result = cpu_to_le16(result);
4648 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirmation with @result for @chan.
 * A new signaling ident is allocated (the confirmation starts its own
 * request/response exchange) and the move timeout is re-armed while we
 * wait for the confirmation response.
 */
4652 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4654 struct l2cap_move_chan_cfm cfm;
4656 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4658 chan->ident = l2cap_get_ident(chan->conn);
4660 cfm.icid = cpu_to_le16(chan->scid);
4661 cfm.result = cpu_to_le16(result);
4663 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4666 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an "unconfirmed" Move Channel Confirmation for a bare ICID when
 * no matching channel could be found — the spec still requires the
 * confirmation exchange to complete.
 */
4669 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4671 struct l2cap_move_chan_cfm cfm;
4673 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4675 cfm.icid = cpu_to_le16(icid);
4676 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4678 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send a Move Channel Confirmation Response for @icid, echoing the
 * @ident of the confirmation being acknowledged.
 */
4682 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4685 struct l2cap_move_chan_cfm_rsp rsp;
4687 BT_DBG("icid 0x%4.4x", icid);
4689 rsp.icid = cpu_to_le16(icid);
4690 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed (AMP) logical link by clearing
 * the cached hci_chan/hci_conn pointers. Actual link teardown is still a
 * placeholder (see comment below).
 */
4693 static void __release_logical_link(struct l2cap_chan *chan)
4695 chan->hs_hchan = NULL;
4696 chan->hs_hcon = NULL;
4698 /* Placeholder - release the logical link */
/* Recover from a failed AMP logical-link setup.
 *
 * If the channel never reached BT_CONNECTED the whole create-channel
 * attempt failed, so the channel is disconnected. Otherwise a channel
 * move was in progress: the responder rejects the move; the initiator
 * cleans up and, in either role, sends an unconfirmed Move Confirmation.
 */
4701 static void l2cap_logical_fail(struct l2cap_chan *chan)
4703 /* Logical link setup failed */
4704 if (chan->state != BT_CONNECTED) {
4705 /* Create channel failure, disconnect */
4706 l2cap_send_disconn_req(chan, ECONNRESET);
4710 switch (chan->move_role) {
4711 case L2CAP_MOVE_ROLE_RESPONDER:
4712 l2cap_move_done(chan);
4713 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4715 case L2CAP_MOVE_ROLE_INITIATOR:
4716 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4717 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4718 /* Remote has only sent pending or
4719 * success responses, clean up
4721 l2cap_move_done(chan);
4724 /* Other amp move states imply that the move
4725 * has already aborted
4727 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link @hchan is up:
 * attach the link, send the deferred EFS Configure Response, and if the
 * inbound configuration already finished, finalize FCS, initialize ERTM
 * and mark the channel ready (or disconnect on ERTM init failure).
 */
4732 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4733 struct hci_chan *hchan)
4735 struct l2cap_conf_rsp rsp;
4737 chan->hs_hchan = hchan;
4738 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the ident of the pending Configure Request. */
4740 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4742 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4745 set_default_fcs(chan);
4747 err = l2cap_ertm_init(chan);
4749 l2cap_send_disconn_req(chan, -err);
4751 l2cap_chan_ready(chan);
/* Advance a channel move now that the AMP logical link @hchan is up.
 *
 * Depending on the current move state we either keep waiting for the
 * peer's success response, defer on local busy, or complete the move:
 * the initiator sends a confirmed Move Confirmation, the responder sends
 * a successful Move Response. Any unexpected state releases the logical
 * link and forces the move state back to stable.
 */
4755 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4756 struct hci_chan *hchan)
4758 chan->hs_hcon = hchan->conn;
4759 chan->hs_hcon->l2cap_data = chan->conn;
4761 BT_DBG("move_state %d", chan->move_state);
4763 switch (chan->move_state) {
4764 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4765 /* Move confirm will be sent after a success
4766 * response is received
4768 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4770 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4771 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4772 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4773 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4774 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4775 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4776 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4777 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4778 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4782 /* Move was not in expected state, free the channel */
4783 __release_logical_link(chan);
4785 chan->move_state = L2CAP_MOVE_STABLE;
/* Call with chan locked */
/* Logical-link completion callback for AMP.
 *
 * On failure, run the failure recovery path and drop the link pointers.
 * On success, a channel that is not yet connected finishes channel
 * creation (unless it is still on BR/EDR, in which case the link is
 * ignored), while a connected channel continues its move sequence.
 */
4790 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4793 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4796 l2cap_logical_fail(chan);
4797 __release_logical_link(chan);
4801 if (chan->state != BT_CONNECTED) {
4802 /* Ignore logical link if channel is on BR/EDR */
4803 if (chan->local_amp_id != AMP_ID_BREDR)
4804 l2cap_logical_finish_create(chan, hchan);
4806 l2cap_logical_finish_move(chan, hchan);
/* Begin moving @chan to the other controller type as move initiator.
 *
 * From BR/EDR (and only when the channel policy prefers AMP) we enter
 * WAIT_PREPARE and the physical link setup is started (placeholder).
 * From an AMP controller we move back toward BR/EDR: set up the move
 * state and send a Move Channel Request with destination id 0 (BR/EDR).
 */
4810 void l2cap_move_start(struct l2cap_chan *chan)
4812 BT_DBG("chan %p", chan);
4814 if (chan->local_amp_id == AMP_ID_BREDR) {
4815 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4817 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4818 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4819 /* Placeholder - start physical link setup */
4821 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4822 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4824 l2cap_move_setup(chan);
/* Destination amp_id 0 == AMP_ID_BREDR. */
4825 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after the physical link attempt resolved.
 *
 * Outgoing channel (BT_CONNECT): on success proceed with a Create
 * Channel Request on the AMP controller; otherwise fall back to a plain
 * BR/EDR Connection Request.
 * Incoming channel: answer the pending Create Channel Request with
 * success or no-resources, and on success move to BT_CONFIG and send the
 * initial Configure Request. FCS is disabled in all AMP cases.
 */
4829 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4830 u8 local_amp_id, u8 remote_amp_id)
4832 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4833 local_amp_id, remote_amp_id);
4835 chan->fcs = L2CAP_FCS_NONE;
4837 /* Outgoing channel on AMP */
4838 if (chan->state == BT_CONNECT) {
4839 if (result == L2CAP_CR_SUCCESS) {
4840 chan->local_amp_id = local_amp_id;
4841 l2cap_send_create_chan_req(chan, remote_amp_id);
4843 /* Revert to BR/EDR connect */
4844 l2cap_send_conn_req(chan);
4850 /* Incoming channel on AMP */
4851 if (__l2cap_no_conn_pending(chan)) {
4852 struct l2cap_conn_rsp rsp;
4854 rsp.scid = cpu_to_le16(chan->dcid);
4855 rsp.dcid = cpu_to_le16(chan->scid);
4857 if (result == L2CAP_CR_SUCCESS) {
4858 /* Send successful response */
4859 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4860 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4862 /* Send negative response */
4863 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4864 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4867 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4870 if (result == L2CAP_CR_SUCCESS) {
4871 l2cap_state_change(chan, BT_CONFIG);
4872 set_bit(CONF_REQ_SENT, &chan->conf_state);
4873 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4875 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4876 chan->num_conf_req++;
/* Initiator side: the physical link is ready, so prepare the channel for
 * moving, remember the target controller id and send the Move Channel
 * Request toward @remote_amp_id, then wait for the response.
 */
4881 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4884 l2cap_move_setup(chan);
4885 chan->move_id = local_amp_id;
4886 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4888 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once the physical link attempt resolved.
 *
 * If the logical hci_chan is already connected, attach it and respond
 * with success (completing setup via l2cap_logical_cfm); if it is still
 * coming up, wait for the logical-link confirmation; if unavailable,
 * respond with "not allowed".
 * NOTE(review): hchan is currently always NULL — obtaining it is still a
 * placeholder, so only the failure branch is reachable as written.
 */
4891 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4893 struct hci_chan *hchan = NULL;
4895 /* Placeholder - get hci_chan for logical link */
4898 if (hchan->state == BT_CONNECTED) {
4899 /* Logical link is ready to go */
4900 chan->hs_hcon = hchan->conn;
4901 chan->hs_hcon->l2cap_data = chan->conn;
4902 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4903 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4905 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4907 /* Wait for logical link to be ready */
4908 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4911 /* Logical link not available */
4912 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 *
 * A responder still owes the peer a Move Response (bad-id for -EINVAL,
 * otherwise not-allowed). The move role/state are reset to stable and
 * ERTM transmission is restarted.
 */
4916 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4918 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4920 if (result == -EINVAL)
4921 rsp_result = L2CAP_MR_BAD_ID;
4923 rsp_result = L2CAP_MR_NOT_ALLOWED;
4925 l2cap_send_move_chan_rsp(chan, rsp_result);
4928 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4929 chan->move_state = L2CAP_MOVE_STABLE;
4931 /* Restart data transmission */
4932 l2cap_ertm_send(chan);
/* Invoke with locked chan */
/* Physical-link completion callback for AMP.
 *
 * Ignored for channels already disconnecting/closed. A channel that is
 * not yet connected continues creation; a connected channel continues
 * (or cancels, on failure) its move according to its role.
 */
4936 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4938 u8 local_amp_id = chan->local_amp_id;
4939 u8 remote_amp_id = chan->remote_amp_id;
4941 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4942 chan, result, local_amp_id, remote_amp_id);
4944 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4947 if (chan->state != BT_CONNECTED) {
4948 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4949 } else if (result != L2CAP_MR_SUCCESS) {
4950 l2cap_do_move_cancel(chan, result);
4952 switch (chan->move_role) {
4953 case L2CAP_MOVE_ROLE_INITIATOR:
4954 l2cap_do_move_initiate(chan, local_amp_id,
4957 case L2CAP_MOVE_ROLE_RESPONDER:
4958 l2cap_do_move_respond(chan, result);
/* No role: nothing to continue, cancel the move. */
4961 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request (AMP responder path).
 *
 * Rejects the move when: the channel cannot be found (respond with
 * not-allowed on the bare ICID), the channel is not a dynamic
 * ERTM/streaming channel or its policy forbids AMP, the destination
 * controller equals the current one (same-id), the destination AMP id is
 * invalid (bad-id), or a move collision is detected and this side loses
 * the bd_addr comparison (collision). Otherwise the channel becomes the
 * move responder: moving to BR/EDR answers success (or pending while
 * locally busy); moving to an AMP controller answers pending while the
 * physical link is accepted (placeholder).
 */
4967 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4968 struct l2cap_cmd_hdr *cmd,
4969 u16 cmd_len, void *data)
4971 struct l2cap_move_chan_req *req = data;
4972 struct l2cap_move_chan_rsp rsp;
4973 struct l2cap_chan *chan;
4975 u16 result = L2CAP_MR_NOT_ALLOWED;
4977 if (cmd_len != sizeof(*req))
4980 icid = le16_to_cpu(req->icid);
4982 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4984 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
/* The ICID is the initiator's CID, i.e. our dcid. */
4987 chan = l2cap_get_chan_by_dcid(conn, icid);
4989 rsp.icid = cpu_to_le16(icid);
4990 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4991 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Save the ident so the Move Response can reuse it. */
4996 chan->ident = cmd->ident;
4998 if (chan->scid < L2CAP_CID_DYN_START ||
4999 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5000 (chan->mode != L2CAP_MODE_ERTM &&
5001 chan->mode != L2CAP_MODE_STREAMING)) {
5002 result = L2CAP_MR_NOT_ALLOWED;
5003 goto send_move_response;
5006 if (chan->local_amp_id == req->dest_amp_id) {
5007 result = L2CAP_MR_SAME_ID;
5008 goto send_move_response;
5011 if (req->dest_amp_id != AMP_ID_BREDR) {
5012 struct hci_dev *hdev;
5013 hdev = hci_dev_get(req->dest_amp_id);
5014 if (!hdev || hdev->dev_type != HCI_AMP ||
5015 !test_bit(HCI_UP, &hdev->flags)) {
5019 result = L2CAP_MR_BAD_ID;
5020 goto send_move_response;
5025 /* Detect a move collision. Only send a collision response
5026 * if this side has "lost", otherwise proceed with the move.
5027 * The winner has the larger bd_addr.
5029 if ((__chan_is_moving(chan) ||
5030 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5031 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5032 result = L2CAP_MR_COLLISION;
5033 goto send_move_response;
5036 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5037 l2cap_move_setup(chan);
5038 chan->move_id = req->dest_amp_id;
5041 if (req->dest_amp_id == AMP_ID_BREDR) {
5042 /* Moving to BR/EDR */
5043 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5044 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5045 result = L2CAP_MR_PEND;
5047 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5048 result = L2CAP_MR_SUCCESS;
5051 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5052 /* Placeholder - uncomment when amp functions are available */
5053 /*amp_accept_physical(chan, req->dest_amp_id);*/
5054 result = L2CAP_MR_PEND;
5058 l2cap_send_move_chan_rsp(chan, result);
5060 l2cap_chan_unlock(chan);
5061 l2cap_chan_put(chan);
/* Continue an initiator-side move after a success/pending Move Response.
 *
 * If the channel for @icid is gone, the spec-required confirmation
 * exchange is finished with an unconfirmed confirmation. Otherwise the
 * channel timer is refreshed (extended timeout for pending results) and
 * the move state machine advances: waiting states transition toward
 * sending the Move Confirmation once both the logical link and a
 * success response are in hand; any unexpected state fails the move back
 * to the current controller.
 * NOTE(review): in the WAIT_RSP branch, hchan acquisition is still a
 * placeholder, so that path currently takes the "link not available"
 * failure leg.
 */
5066 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5068 struct l2cap_chan *chan;
5069 struct hci_chan *hchan = NULL;
5071 chan = l2cap_get_chan_by_scid(conn, icid);
5073 l2cap_send_move_chan_cfm_icid(conn, icid);
5077 __clear_chan_timer(chan);
5078 if (result == L2CAP_MR_PEND)
5079 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5081 switch (chan->move_state) {
5082 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5083 /* Move confirm will be sent when logical link
5086 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5088 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5089 if (result == L2CAP_MR_PEND) {
5091 } else if (test_bit(CONN_LOCAL_BUSY,
5092 &chan->conn_state)) {
5093 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5095 /* Logical link is up or moving to BR/EDR,
5098 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5099 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5102 case L2CAP_MOVE_WAIT_RSP:
5104 if (result == L2CAP_MR_SUCCESS) {
5105 /* Remote is ready, send confirm immediately
5106 * after logical link is ready
5108 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5110 /* Both logical link and move success
5111 * are required to confirm
5113 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5116 /* Placeholder - get hci_chan for logical link */
5118 /* Logical link not available */
5119 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5123 /* If the logical link is not yet connected, do not
5124 * send confirmation.
5126 if (hchan->state != BT_CONNECTED)
5129 /* Logical link is already ready to go */
5131 chan->hs_hcon = hchan->conn;
5132 chan->hs_hcon->l2cap_data = chan->conn;
5134 if (result == L2CAP_MR_SUCCESS) {
5135 /* Can confirm now */
5136 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5138 /* Now only need move success
5141 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5144 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5147 /* Any other amp move state means the move failed. */
5148 chan->move_id = chan->local_amp_id;
5149 l2cap_move_done(chan);
5150 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5153 l2cap_chan_unlock(chan);
5154 l2cap_chan_put(chan);
/* Handle a failed Move Response (any result other than success/pending).
 *
 * The channel is located by the command ident; if it cannot be found,
 * the confirmation exchange is completed against the bare ICID. An
 * initiator that lost a collision switches to the responder role;
 * otherwise the move is cancelled and the channel stays on its current
 * controller. An unconfirmed Move Confirmation is always sent.
 */
5157 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5160 struct l2cap_chan *chan;
5162 chan = l2cap_get_chan_by_ident(conn, ident);
5164 /* Could not locate channel, icid is best guess */
5165 l2cap_send_move_chan_cfm_icid(conn, icid);
5169 __clear_chan_timer(chan);
5171 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5172 if (result == L2CAP_MR_COLLISION) {
5173 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5175 /* Cleanup - cancel move */
5176 chan->move_id = chan->local_amp_id;
5177 l2cap_move_done(chan);
5181 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5183 l2cap_chan_unlock(chan);
5184 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Response: success and pending
 * results continue the move, all other results fail it.
 */
5187 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5188 struct l2cap_cmd_hdr *cmd,
5189 u16 cmd_len, void *data)
5191 struct l2cap_move_chan_rsp *rsp = data;
5194 if (cmd_len != sizeof(*rsp))
5197 icid = le16_to_cpu(rsp->icid);
5198 result = le16_to_cpu(rsp->result);
5200 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5202 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5203 l2cap_move_continue(conn, icid, result);
5205 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirmation (responder side).
 *
 * A confirmation response is always sent, even when no channel matches
 * the ICID (spec requirement). When the channel is waiting for
 * confirmation: a confirmed result commits the move (adopting move_id as
 * the local controller and releasing the logical link if we ended up on
 * BR/EDR), an unconfirmed result reverts to the current controller;
 * either way the move is finished.
 */
5210 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5211 struct l2cap_cmd_hdr *cmd,
5212 u16 cmd_len, void *data)
5214 struct l2cap_move_chan_cfm *cfm = data;
5215 struct l2cap_chan *chan;
5218 if (cmd_len != sizeof(*cfm))
5221 icid = le16_to_cpu(cfm->icid);
5222 result = le16_to_cpu(cfm->result);
5224 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5226 chan = l2cap_get_chan_by_dcid(conn, icid);
5228 /* Spec requires a response even if the icid was not found */
5229 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5233 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5234 if (result == L2CAP_MC_CONFIRMED) {
5235 chan->local_amp_id = chan->move_id;
5236 if (chan->local_amp_id == AMP_ID_BREDR)
5237 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller. */
5239 chan->move_id = chan->local_amp_id;
5242 l2cap_move_done(chan);
5245 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5247 l2cap_chan_unlock(chan);
5248 l2cap_chan_put(chan);
/* Handle an incoming L2CAP Move Channel Confirmation Response
 * (initiator side). Completes the move for the channel identified by the
 * ICID: adopt the moved-to controller id, release the logical link if we
 * returned to BR/EDR, and finish the move. The move timer is cleared in
 * all cases.
 */
5253 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5254 struct l2cap_cmd_hdr *cmd,
5255 u16 cmd_len, void *data)
5257 struct l2cap_move_chan_cfm_rsp *rsp = data;
5258 struct l2cap_chan *chan;
5261 if (cmd_len != sizeof(*rsp))
5264 icid = le16_to_cpu(rsp->icid);
5266 BT_DBG("icid 0x%4.4x", icid);
5268 chan = l2cap_get_chan_by_scid(conn, icid);
5272 __clear_chan_timer(chan);
5274 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5275 chan->local_amp_id = chan->move_id;
5277 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5278 __release_logical_link(chan);
5280 l2cap_move_done(chan);
5283 l2cap_chan_unlock(chan);
5284 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the LE master. The requested interval/latency/
 * timeout are validated via hci_check_conn_params(); a response with
 * accepted/rejected is sent, and on acceptance the controller is asked
 * to apply the new parameters and the management layer is notified of
 * the new connection parameters (with the store hint).
 */
5289 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5290 struct l2cap_cmd_hdr *cmd,
5291 u16 cmd_len, u8 *data)
5293 struct hci_conn *hcon = conn->hcon;
5294 struct l2cap_conn_param_update_req *req;
5295 struct l2cap_conn_param_update_rsp rsp;
5296 u16 min, max, latency, to_multiplier;
/* Peripherals may not receive this request; only the master answers. */
5299 if (hcon->role != HCI_ROLE_MASTER)
5302 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5305 req = (struct l2cap_conn_param_update_req *) data;
5306 min = __le16_to_cpu(req->min);
5307 max = __le16_to_cpu(req->max);
5308 latency = __le16_to_cpu(req->latency);
5309 to_multiplier = __le16_to_cpu(req->to_multiplier);
5311 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5312 min, max, latency, to_multiplier);
5314 memset(&rsp, 0, sizeof(rsp));
5316 err = hci_check_conn_params(min, max, latency, to_multiplier);
5318 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5320 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5322 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5328 store_hint = hci_le_conn_update(hcon, min, max, latency,
5330 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5331 store_hint, min, max, latency,
/* Handle an incoming LE Credit Based Connection Response.
 *
 * On success, validates the returned dcid (dynamic LE range, not already
 * in use), stores the remote MTU/MPS/initial credits and marks the
 * channel ready. Authentication/encryption failures trigger a security
 * upgrade and a retried connect (unless we already have MITM-level
 * security, in which case retrying cannot help and the channel is
 * dropped). Any other result deletes the channel with ECONNREFUSED.
 */
5339 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5340 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5343 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5344 struct hci_conn *hcon = conn->hcon;
5345 u16 dcid, mtu, mps, credits, result;
5346 struct l2cap_chan *chan;
5349 if (cmd_len < sizeof(*rsp))
5352 dcid = __le16_to_cpu(rsp->dcid);
5353 mtu = __le16_to_cpu(rsp->mtu);
5354 mps = __le16_to_cpu(rsp->mps);
5355 credits = __le16_to_cpu(rsp->credits);
5356 result = __le16_to_cpu(rsp->result);
/* Spec minimums: MTU and MPS must each be at least 23 octets. */
5358 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5359 dcid < L2CAP_CID_DYN_START ||
5360 dcid > L2CAP_CID_LE_DYN_END))
5363 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5364 dcid, mtu, mps, credits, result);
5366 mutex_lock(&conn->chan_lock);
/* The pending channel is matched by the ident of our request. */
5368 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5376 l2cap_chan_lock(chan);
5379 case L2CAP_CR_SUCCESS:
5380 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5388 chan->remote_mps = mps;
5389 chan->tx_credits = credits;
5390 l2cap_chan_ready(chan);
5393 case L2CAP_CR_AUTHENTICATION:
5394 case L2CAP_CR_ENCRYPTION:
5395 /* If we already have MITM protection we can't do
5398 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5399 l2cap_chan_del(chan, ECONNREFUSED);
5403 sec_level = hcon->sec_level + 1;
5404 if (chan->sec_level < sec_level)
5405 chan->sec_level = sec_level;
5407 /* We'll need to send a new Connect Request */
5408 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5410 smp_conn_security(hcon, chan->sec_level);
5414 l2cap_chan_del(chan, ECONNREFUSED);
5418 l2cap_chan_unlock(chan);
5421 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by reflecting the payload; unknown
 * opcodes are logged as errors (the caller generates the Command Reject
 * based on the returned error).
 */
5426 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5427 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5432 switch (cmd->code) {
5433 case L2CAP_COMMAND_REJ:
5434 l2cap_command_rej(conn, cmd, cmd_len, data);
5437 case L2CAP_CONN_REQ:
5438 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5441 case L2CAP_CONN_RSP:
5442 case L2CAP_CREATE_CHAN_RSP:
5443 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5446 case L2CAP_CONF_REQ:
5447 err = l2cap_config_req(conn, cmd, cmd_len, data);
5450 case L2CAP_CONF_RSP:
5451 l2cap_config_rsp(conn, cmd, cmd_len, data);
5454 case L2CAP_DISCONN_REQ:
5455 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5458 case L2CAP_DISCONN_RSP:
5459 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5462 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload unchanged. */
5463 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5466 case L2CAP_ECHO_RSP:
5469 case L2CAP_INFO_REQ:
5470 err = l2cap_information_req(conn, cmd, cmd_len, data);
5473 case L2CAP_INFO_RSP:
5474 l2cap_information_rsp(conn, cmd, cmd_len, data);
5477 case L2CAP_CREATE_CHAN_REQ:
5478 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5481 case L2CAP_MOVE_CHAN_REQ:
5482 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5485 case L2CAP_MOVE_CHAN_RSP:
5486 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5489 case L2CAP_MOVE_CHAN_CFM:
5490 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5493 case L2CAP_MOVE_CHAN_CFM_RSP:
5494 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5498 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the request (length, MTU/MPS >= 23, SPSM within the legal
 * 0x0001-0x00ff range, a listener on the PSM, sufficient SMP security,
 * scid in the dynamic LE range and not already connected), then creates
 * a child channel from the listening socket, initializes LE flow control
 * and attaches it to the connection. With deferred setup the result
 * L2CAP_CR_PEND is used internally to skip sending a response here (the
 * response is sent later when user space accepts); otherwise the channel
 * becomes ready and a success/error response is sent.
 */
5506 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5507 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5510 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5511 struct l2cap_le_conn_rsp rsp;
5512 struct l2cap_chan *chan, *pchan;
5513 u16 dcid, scid, credits, mtu, mps;
5517 if (cmd_len != sizeof(*req))
5520 scid = __le16_to_cpu(req->scid);
5521 mtu = __le16_to_cpu(req->mtu);
5522 mps = __le16_to_cpu(req->mps);
/* Spec minimums for LE CoC: both MTU and MPS are at least 23. */
5527 if (mtu < 23 || mps < 23)
5530 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5533 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5536 * Valid range: 0x0001-0x00ff
5538 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5540 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5541 result = L2CAP_CR_BAD_PSM;
5546 /* Check if we have socket listening on psm */
5547 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5548 &conn->hcon->dst, LE_LINK);
5550 result = L2CAP_CR_BAD_PSM;
5555 mutex_lock(&conn->chan_lock);
5556 l2cap_chan_lock(pchan);
5558 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5560 result = L2CAP_CR_AUTHENTICATION;
5562 goto response_unlock;
5565 /* Check for valid dynamic CID range */
5566 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5567 result = L2CAP_CR_INVALID_SCID;
5569 goto response_unlock;
5572 /* Check if we already have channel with that dcid */
5573 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5574 result = L2CAP_CR_SCID_IN_USE;
5576 goto response_unlock;
5579 chan = pchan->ops->new_connection(pchan);
5581 result = L2CAP_CR_NO_MEM;
5582 goto response_unlock;
5585 l2cap_le_flowctl_init(chan);
5587 bacpy(&chan->src, &conn->hcon->src);
5588 bacpy(&chan->dst, &conn->hcon->dst);
5589 chan->src_type = bdaddr_src_type(conn->hcon);
5590 chan->dst_type = bdaddr_dst_type(conn->hcon);
5594 chan->remote_mps = mps;
5595 chan->tx_credits = __le16_to_cpu(req->credits);
5597 __l2cap_chan_add(conn, chan);
5599 credits = chan->rx_credits;
5601 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5603 chan->ident = cmd->ident;
5605 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5606 l2cap_state_change(chan, BT_CONNECT2);
5607 /* The following result value is actually not defined
5608 * for LE CoC but we use it to let the function know
5609 * that it should bail out after doing its cleanup
5610 * instead of sending a response.
5612 result = L2CAP_CR_PEND;
5613 chan->ops->defer(chan);
5615 l2cap_chan_ready(chan);
5616 result = L2CAP_CR_SUCCESS;
5620 l2cap_chan_unlock(pchan);
5621 mutex_unlock(&conn->chan_lock);
5622 l2cap_chan_put(pchan);
/* Deferred setup: response is sent later from the accept path. */
5624 if (result == L2CAP_CR_PEND)
5629 rsp.mtu = cpu_to_le16(chan->imtu);
5630 rsp.mps = cpu_to_le16(chan->mps);
5636 rsp.dcid = cpu_to_le16(dcid);
5637 rsp.credits = cpu_to_le16(credits);
5638 rsp.result = cpu_to_le16(result);
5640 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an incoming LE Flow Control Credit packet.
 *
 * Adds the peer-granted credits to the channel's tx_credits; if the
 * grant would push the total past LE_FLOWCTL_MAX_CREDITS (65535) the
 * peer is misbehaving and the channel is disconnected (still returning
 * 0 to avoid an extra Command Reject). With credits available, queued
 * SDUs are flushed and the socket is resumed.
 */
5645 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5646 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5649 struct l2cap_le_credits *pkt;
5650 struct l2cap_chan *chan;
5651 u16 cid, credits, max_credits;
5653 if (cmd_len != sizeof(*pkt))
5656 pkt = (struct l2cap_le_credits *) data;
5657 cid = __le16_to_cpu(pkt->cid);
5658 credits = __le16_to_cpu(pkt->credits);
5660 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5662 chan = l2cap_get_chan_by_dcid(conn, cid);
5666 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5667 if (credits > max_credits) {
5668 BT_ERR("LE credits overflow");
5669 l2cap_send_disconn_req(chan, ECONNRESET);
5671 /* Return 0 so that we don't trigger an unnecessary
5672 * command reject packet.
5677 chan->tx_credits += credits;
5679 /* Resume sending */
5680 l2cap_le_flowctl_send(chan);
5682 if (chan->tx_credits)
5683 chan->ops->resume(chan);
5686 l2cap_chan_unlock(chan);
5687 l2cap_chan_put(chan);
/* Handle a Command Reject received on the LE signalling channel.
 * The channel whose outstanding request matches cmd->ident (if any) is
 * torn down with ECONNREFUSED, since the peer refused that request.
 */
5692 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5693 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5696 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5697 struct l2cap_chan *chan;
/* Ignore rejects too short to carry even the minimal reject payload */
5699 if (cmd_len < sizeof(*rej))
5702 mutex_lock(&conn->chan_lock);
/* Find the channel waiting on the identifier the peer rejected */
5704 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5708 l2cap_chan_lock(chan);
5709 l2cap_chan_del(chan, ECONNREFUSED);
5710 l2cap_chan_unlock(chan);
5713 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signalling command to its handler based on the
 * command code. Unknown codes are logged; handlers that can fail
 * propagate their error through err so the caller can send a
 * Command Reject.
 */
5717 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5718 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5723 switch (cmd->code) {
5724 case L2CAP_COMMAND_REJ:
5725 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5728 case L2CAP_CONN_PARAM_UPDATE_REQ:
5729 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
/* Update responses carry no actionable payload for us; ignored */
5732 case L2CAP_CONN_PARAM_UPDATE_RSP:
5735 case L2CAP_LE_CONN_RSP:
5736 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5739 case L2CAP_LE_CONN_REQ:
5740 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5743 case L2CAP_LE_CREDITS:
5744 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5747 case L2CAP_DISCONN_REQ:
5748 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5751 case L2CAP_DISCONN_RSP:
5752 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5756 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signalling channel (CID 0x0005).
 * Unlike BR/EDR, an LE signalling PDU carries exactly one command, so
 * the header is parsed once, validated against the skb length, and the
 * command dispatched. A handler failure results in a Command Reject
 * (reason: not understood) being sent back to the peer.
 */
5764 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5765 struct sk_buff *skb)
5767 struct hci_conn *hcon = conn->hcon;
5768 struct l2cap_cmd_hdr *cmd;
/* LE signalling is only valid on an LE link */
5772 if (hcon->type != LE_LINK)
5775 if (skb->len < L2CAP_CMD_HDR_SIZE)
5778 cmd = (void *) skb->data;
5779 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5781 len = le16_to_cpu(cmd->len);
5783 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match the remaining payload exactly and
 * a zero identifier is illegal per the L2CAP spec.
 */
5785 if (len != skb->len || !cmd->ident) {
5786 BT_DBG("corrupted command");
5790 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5792 struct l2cap_cmd_rej_unk rej;
5794 BT_ERR("Wrong link type (%d)", err);
5796 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5797 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signalling channel (CID 0x0001).
 * A single BR/EDR signalling PDU may contain several commands back to
 * back, so this walks the payload, copying out each command header and
 * dispatching it. Malformed commands stop the walk; a handler failure
 * triggers a Command Reject response.
 */
5805 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5806 struct sk_buff *skb)
5808 struct hci_conn *hcon = conn->hcon;
5809 u8 *data = skb->data;
5811 struct l2cap_cmd_hdr cmd;
/* Give raw (promiscuous) sockets a copy of the signalling traffic */
5814 l2cap_raw_recv(conn, skb);
5816 if (hcon->type != ACL_LINK)
/* Iterate over every command packed into this PDU */
5819 while (len >= L2CAP_CMD_HDR_SIZE) {
5821 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5822 data += L2CAP_CMD_HDR_SIZE;
5823 len -= L2CAP_CMD_HDR_SIZE;
5825 cmd_len = le16_to_cpu(cmd.len);
5827 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Truncated command or illegal zero identifier: stop parsing */
5830 if (cmd_len > len || !cmd.ident) {
5831 BT_DBG("corrupted command");
5835 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5837 struct l2cap_cmd_rej_unk rej;
5839 BT_ERR("Wrong link type (%d)", err);
5841 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5842 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the Frame Check Sequence on a received ERTM/streaming frame.
 * When CRC16 FCS is in use, the trailing two bytes are stripped from the
 * skb and compared against a CRC computed over the L2CAP header plus
 * payload. Returns non-zero on mismatch (frame should be dropped).
 */
5854 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5856 u16 our_fcs, rcv_fcs;
/* Extended control fields use a larger header, which is included in
 * the CRC computation below.
 */
5859 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5860 hdr_size = L2CAP_EXT_HDR_SIZE;
5862 hdr_size = L2CAP_ENH_HDR_SIZE;
5864 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the skb; it then sits just past skb->len */
5865 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5866 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5867 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5869 if (our_fcs != rcv_fcs)
/* Acknowledge the peer with the F-bit set, choosing the best frame type:
 * an RNR if we are locally busy, otherwise pending I-frames (which carry
 * the ack implicitly), otherwise a plain RR. Used when responding to a
 * poll from the remote side.
 */
5875 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5877 struct l2cap_ctrl control;
5879 BT_DBG("chan %p", chan);
5881 memset(&control, 0, sizeof(control));
5884 control.reqseq = chan->buffer_seq;
/* Request that the next outgoing frame carry the F-bit */
5885 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5887 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5888 control.super = L2CAP_SUPER_RNR;
5889 l2cap_send_sframe(chan, &control);
/* Remote just cleared its busy state; restart the retransmission
 * timer if we still have unacked frames outstanding.
 */
5892 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5893 chan->unacked_frames > 0)
5894 __set_retrans_timer(chan);
5896 /* Send pending iframes */
5897 l2cap_ertm_send(chan);
5899 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5900 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5901 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5904 control.super = L2CAP_SUPER_RR;
5905 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list and update the head skb's length
 * accounting. *last_frag tracks the current list tail so appends are
 * O(1) instead of walking the list each time.
 */
5909 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5910 struct sk_buff **last_frag)
5912 /* skb->len reflects data in skb as well as all fragments
5913 * skb->data_len reflects only data in fragments
5915 if (!skb_has_frag_list(skb))
5916 skb_shinfo(skb)->frag_list = new_frag;
5918 new_frag->next = NULL;
5920 (*last_frag)->next = new_frag;
5921 *last_frag = new_frag;
/* Keep the head skb's byte accounting consistent with the new tail */
5923 skb->len += new_frag->len;
5924 skb->data_len += new_frag->len;
5925 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from an ERTM/streaming I-frame
 * and deliver it to the channel owner via chan->ops->recv. Handles the
 * four SAR cases: unsegmented frames go straight up; START frames open
 * a new partial SDU (validating the announced length against the MTU);
 * CONTINUE/END frames are appended, and the completed SDU is delivered
 * once its length matches. On error the partial SDU is freed.
 * NOTE(review): listing is elided — error paths and the END case label
 * are not fully visible here.
 */
5928 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5929 struct l2cap_ctrl *control)
5933 switch (control->sar) {
5934 case L2CAP_SAR_UNSEGMENTED:
5938 err = chan->ops->recv(chan, skb);
5941 case L2CAP_SAR_START:
/* The first fragment begins with a 2-byte SDU length field */
5945 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5948 chan->sdu_len = get_unaligned_le16(skb->data);
5949 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU larger than our MTU is a protocol error */
5951 if (chan->sdu_len > chan->imtu) {
5956 if (skb->len >= chan->sdu_len)
5960 chan->sdu_last_frag = skb;
5966 case L2CAP_SAR_CONTINUE:
5970 append_skb_frag(chan->sdu, skb,
5971 &chan->sdu_last_frag);
/* A continuation may not reach (or exceed) the full SDU length */
5974 if (chan->sdu->len >= chan->sdu_len)
5984 append_skb_frag(chan->sdu, skb,
5985 &chan->sdu_last_frag);
/* END fragment: total must now match the announced SDU length */
5988 if (chan->sdu->len != chan->sdu_len)
5991 err = chan->ops->recv(chan, chan->sdu);
5994 /* Reassembly complete */
5996 chan->sdu_last_frag = NULL;
/* Error path: drop any partially assembled SDU */
6004 kfree_skb(chan->sdu);
6006 chan->sdu_last_frag = NULL;
/* Re-segment outgoing data after an MTU change (e.g. channel move).
 * NOTE(review): the body is elided in this listing — presumably a
 * placeholder; verify against the full source.
 */
6013 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM state machine of a local busy condition change.
 * Translates the boolean into the corresponding local-busy event and
 * feeds it to l2cap_tx. A no-op for non-ERTM channels, since only ERTM
 * has receiver flow control.
 */
6019 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6023 if (chan->mode != L2CAP_MODE_ERTM)
6026 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6027 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of in-order frames after retransmissions arrive.
 * Frames are pulled from srej_q in buffer_seq order and passed to
 * l2cap_reassemble_sdu until a sequence gap (or local busy) stops the
 * drain. When the queue empties, the channel returns to the normal RECV
 * state and an ack is sent.
 */
6030 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6033 /* Pass sequential frames to l2cap_reassemble_sdu()
6034 * until a gap is encountered.
6037 BT_DBG("chan %p", chan);
6039 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6040 struct sk_buff *skb;
6041 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6042 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6044 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6049 skb_unlink(skb, &chan->srej_q);
6050 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6051 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* All gaps filled: leave SREJ_SENT state and acknowledge */
6056 if (skb_queue_empty(&chan->srej_q)) {
6057 chan->rx_state = L2CAP_RX_STATE_RECV;
6058 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: the peer is asking for selective
 * retransmission of the frame numbered control->reqseq. Validates that
 * the requested sequence was actually sent and is still queued, enforces
 * the max_tx retry limit, and retransmits — tracking F-bit bookkeeping
 * when the SREJ carried a poll or final bit.
 */
6064 static void l2cap_handle_srej(struct l2cap_chan *chan,
6065 struct l2cap_ctrl *control)
6067 struct sk_buff *skb;
6069 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq means the peer is rejecting a frame we
 * never sent — protocol error, disconnect.
 */
6071 if (control->reqseq == chan->next_tx_seq) {
6072 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6073 l2cap_send_disconn_req(chan, ECONNRESET);
6077 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6080 BT_DBG("Seq %d not available for retransmission",
/* Give up on the link once the frame has hit its retry limit */
6085 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6086 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6087 l2cap_send_disconn_req(chan, ECONNRESET);
6091 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* P-bit set: retransmit with an F-bit response */
6093 if (control->poll) {
6094 l2cap_pass_to_tx(chan, control);
6096 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6097 l2cap_retransmit(chan, control);
6098 l2cap_ertm_send(chan);
6100 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6101 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6102 chan->srej_save_reqseq = control->reqseq;
6105 l2cap_pass_to_tx_fbit(chan, control);
6107 if (control->final) {
/* Only retransmit if this F-bit doesn't match an SREJ we
 * already acted on (avoids duplicate retransmission).
 */
6108 if (chan->srej_save_reqseq != control->reqseq ||
6109 !test_and_clear_bit(CONN_SREJ_ACT,
6111 l2cap_retransmit(chan, control);
6113 l2cap_retransmit(chan, control);
6114 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6115 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6116 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: the peer requests retransmission of
 * everything from control->reqseq onward (go-back-N). Validates the
 * sequence number and the retry limit, then retransmits all outstanding
 * frames, with CONN_REJ_ACT preventing a duplicate retransmission when
 * the matching F-bit arrives later.
 */
6122 static void l2cap_handle_rej(struct l2cap_chan *chan,
6123 struct l2cap_ctrl *control)
6125 struct sk_buff *skb;
6127 BT_DBG("chan %p, control %p", chan, control);
/* Rejecting a frame we never sent is a protocol error */
6129 if (control->reqseq == chan->next_tx_seq) {
6130 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6131 l2cap_send_disconn_req(chan, ECONNRESET);
6135 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6137 if (chan->max_tx && skb &&
6138 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6139 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6140 l2cap_send_disconn_req(chan, ECONNRESET);
6144 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6146 l2cap_pass_to_tx(chan, control);
6148 if (control->final) {
/* Skip the retransmit if this F-bit answers a REJ we already
 * handled.
 */
6149 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6150 l2cap_retransmit_all(chan, control);
6152 l2cap_retransmit_all(chan, control);
6153 l2cap_ertm_send(chan);
6154 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6155 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the channel's
 * receive window and SREJ state. The result (EXPECTED, UNEXPECTED,
 * DUPLICATE, EXPECTED_SREJ, DUPLICATE_SREJ, UNEXPECTED_SREJ, INVALID,
 * INVALID_IGNORE) drives the receive state machines. All comparisons
 * use modular sequence arithmetic via __seq_offset.
 */
6159 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6161 BT_DBG("chan %p, txseq %d", chan, txseq);
6163 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6164 chan->expected_tx_seq);
/* While SREJs are outstanding, frames are matched against the SREJ
 * bookkeeping before the normal window checks.
 */
6166 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6167 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6169 /* See notes below regarding "double poll" and
6172 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6173 BT_DBG("Invalid/Ignore - after SREJ");
6174 return L2CAP_TXSEQ_INVALID_IGNORE;
6176 BT_DBG("Invalid - in window after SREJ sent");
6177 return L2CAP_TXSEQ_INVALID;
/* The head of srej_list is the retransmission we want next */
6181 if (chan->srej_list.head == txseq) {
6182 BT_DBG("Expected SREJ");
6183 return L2CAP_TXSEQ_EXPECTED_SREJ;
6186 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6187 BT_DBG("Duplicate SREJ - txseq already stored");
6188 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6191 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6192 BT_DBG("Unexpected SREJ - not requested");
6193 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6197 if (chan->expected_tx_seq == txseq) {
/* Even the expected sequence is invalid if it falls outside
 * the negotiated tx window.
 */
6198 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6200 BT_DBG("Invalid - txseq outside tx window");
6201 return L2CAP_TXSEQ_INVALID;
6204 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) means a duplicate */
6208 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6209 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6210 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6211 return L2CAP_TXSEQ_DUPLICATE;
6214 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6215 /* A source of invalid packets is a "double poll" condition,
6216 * where delays cause us to send multiple poll packets. If
6217 * the remote stack receives and processes both polls,
6218 * sequence numbers can wrap around in such a way that a
6219 * resent frame has a sequence number that looks like new data
6220 * with a sequence gap. This would trigger an erroneous SREJ
6223 * Fortunately, this is impossible with a tx window that's
6224 * less than half of the maximum sequence number, which allows
6225 * invalid frames to be safely ignored.
6227 * With tx window sizes greater than half of the tx window
6228 * maximum, the frame is invalid and cannot be ignored. This
6229 * causes a disconnect.
6232 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6233 BT_DBG("Invalid/Ignore - txseq outside tx window");
6234 return L2CAP_TXSEQ_INVALID_IGNORE;
6236 BT_DBG("Invalid - txseq outside tx window");
6237 return L2CAP_TXSEQ_INVALID;
/* In-window but ahead of expected: frames were lost in between */
6240 BT_DBG("Unexpected - txseq indicates missing frames");
6241 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: normal RECV state handler.
 * Processes incoming I-frames (in order, out of order, duplicate or
 * invalid per l2cap_classify_txseq) and S-frames (RR/RNR/REJ/SREJ).
 * Out-of-order frames move the channel to SREJ_SENT; invalid ones
 * disconnect. skb_in_use tracks whether the skb was consumed or queued
 * so the fallthrough at the end can free unconsumed frames.
 */
6245 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6246 struct l2cap_ctrl *control,
6247 struct sk_buff *skb, u8 event)
6249 struct l2cap_ctrl local_control;
6251 bool skb_in_use = false;
6253 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6257 case L2CAP_EV_RECV_IFRAME:
6258 switch (l2cap_classify_txseq(chan, control->txseq)) {
6259 case L2CAP_TXSEQ_EXPECTED:
6260 l2cap_pass_to_tx(chan, control);
/* While locally busy we can't accept I-frames; the drop will
 * be recovered via SREJ when local busy clears.
 */
6262 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6263 BT_DBG("Busy, discarding expected seq %d",
6268 chan->expected_tx_seq = __next_seq(chan,
6271 chan->buffer_seq = chan->expected_tx_seq;
6274 /* l2cap_reassemble_sdu may free skb, hence invalidate
6275 * control, so make a copy in advance to use it after
6276 * l2cap_reassemble_sdu returns and to avoid the race
6277 * condition, for example:
6279 * The current thread calls:
6280 * l2cap_reassemble_sdu
6281 * chan->ops->recv == l2cap_sock_recv_cb
6282 * __sock_queue_rcv_skb
6283 * Another thread calls:
6287 * Then the current thread tries to access control, but
6288 * it was freed by skb_free_datagram.
6290 local_control = *control;
6291 err = l2cap_reassemble_sdu(chan, skb, control);
6295 if (local_control.final) {
6296 if (!test_and_clear_bit(CONN_REJ_ACT,
6297 &chan->conn_state)) {
6298 local_control.final = 0;
6299 l2cap_retransmit_all(chan, &local_control);
6300 l2cap_ertm_send(chan);
6304 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6305 l2cap_send_ack(chan);
6307 case L2CAP_TXSEQ_UNEXPECTED:
6308 l2cap_pass_to_tx(chan, control);
6310 /* Can't issue SREJ frames in the local busy state.
6311 * Drop this frame, it will be seen as missing
6312 * when local busy is exited.
6314 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6315 BT_DBG("Busy, discarding unexpected seq %d",
6320 /* There was a gap in the sequence, so an SREJ
6321 * must be sent for each missing frame. The
6322 * current frame is stored for later use.
6324 skb_queue_tail(&chan->srej_q, skb);
6326 BT_DBG("Queued %p (queue len %d)", skb,
6327 skb_queue_len(&chan->srej_q));
6329 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6330 l2cap_seq_list_clear(&chan->srej_list);
6331 l2cap_send_srej(chan, control->txseq);
/* Out-of-order frame seen: switch to SREJ recovery */
6333 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6335 case L2CAP_TXSEQ_DUPLICATE:
6336 l2cap_pass_to_tx(chan, control);
6338 case L2CAP_TXSEQ_INVALID_IGNORE:
6340 case L2CAP_TXSEQ_INVALID:
6342 l2cap_send_disconn_req(chan, ECONNRESET);
6346 case L2CAP_EV_RECV_RR:
6347 l2cap_pass_to_tx(chan, control);
6348 if (control->final) {
6349 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6351 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6352 !__chan_is_moving(chan)) {
6354 l2cap_retransmit_all(chan, control);
6357 l2cap_ertm_send(chan);
6358 } else if (control->poll) {
6359 l2cap_send_i_or_rr_or_rnr(chan);
6361 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6362 &chan->conn_state) &&
6363 chan->unacked_frames)
6364 __set_retrans_timer(chan);
6366 l2cap_ertm_send(chan);
6369 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers */
6370 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6371 l2cap_pass_to_tx(chan, control);
6372 if (control && control->poll) {
6373 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6374 l2cap_send_rr_or_rnr(chan, 0);
6376 __clear_retrans_timer(chan);
6377 l2cap_seq_list_clear(&chan->retrans_list);
6379 case L2CAP_EV_RECV_REJ:
6380 l2cap_handle_rej(chan, control);
6382 case L2CAP_EV_RECV_SREJ:
6383 l2cap_handle_srej(chan, control);
/* Frames neither delivered nor queued are freed here */
6389 if (skb && !skb_in_use) {
6390 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT state handler.
 * While selective-reject recovery is in progress, incoming I-frames are
 * queued in srej_q rather than delivered directly; retransmitted frames
 * that fill the head of the SREJ list trigger a drain via
 * l2cap_rx_queued_iframes. S-frames are handled much like in RECV, with
 * extra care to re-send the outstanding SREJ on polls.
 */
6397 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6398 struct l2cap_ctrl *control,
6399 struct sk_buff *skb, u8 event)
6402 u16 txseq = control->txseq;
6403 bool skb_in_use = false;
6405 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6409 case L2CAP_EV_RECV_IFRAME:
6410 switch (l2cap_classify_txseq(chan, txseq)) {
6411 case L2CAP_TXSEQ_EXPECTED:
6412 /* Keep frame for reassembly later */
6413 l2cap_pass_to_tx(chan, control);
6414 skb_queue_tail(&chan->srej_q, skb);
6416 BT_DBG("Queued %p (queue len %d)", skb,
6417 skb_queue_len(&chan->srej_q));
6419 chan->expected_tx_seq = __next_seq(chan, txseq);
6421 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission at the head of the SREJ list;
 * queue it and try to drain in-order frames to the app.
 */
6422 l2cap_seq_list_pop(&chan->srej_list);
6424 l2cap_pass_to_tx(chan, control);
6425 skb_queue_tail(&chan->srej_q, skb);
6427 BT_DBG("Queued %p (queue len %d)", skb,
6428 skb_queue_len(&chan->srej_q));
6430 err = l2cap_rx_queued_iframes(chan);
6435 case L2CAP_TXSEQ_UNEXPECTED:
6436 /* Got a frame that can't be reassembled yet.
6437 * Save it for later, and send SREJs to cover
6438 * the missing frames.
6440 skb_queue_tail(&chan->srej_q, skb);
6442 BT_DBG("Queued %p (queue len %d)", skb,
6443 skb_queue_len(&chan->srej_q));
6445 l2cap_pass_to_tx(chan, control);
6446 l2cap_send_srej(chan, control->txseq);
6448 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6449 /* This frame was requested with an SREJ, but
6450 * some expected retransmitted frames are
6451 * missing. Request retransmission of missing
6454 skb_queue_tail(&chan->srej_q, skb);
6456 BT_DBG("Queued %p (queue len %d)", skb,
6457 skb_queue_len(&chan->srej_q));
6459 l2cap_pass_to_tx(chan, control);
6460 l2cap_send_srej_list(chan, control->txseq);
6462 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6463 /* We've already queued this frame. Drop this copy. */
6464 l2cap_pass_to_tx(chan, control);
6466 case L2CAP_TXSEQ_DUPLICATE:
6467 /* Expecting a later sequence number, so this frame
6468 * was already received. Ignore it completely.
6471 case L2CAP_TXSEQ_INVALID_IGNORE:
6473 case L2CAP_TXSEQ_INVALID:
6475 l2cap_send_disconn_req(chan, ECONNRESET);
6479 case L2CAP_EV_RECV_RR:
6480 l2cap_pass_to_tx(chan, control);
6481 if (control->final) {
6482 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6484 if (!test_and_clear_bit(CONN_REJ_ACT,
6485 &chan->conn_state)) {
6487 l2cap_retransmit_all(chan, control);
6490 l2cap_ertm_send(chan);
6491 } else if (control->poll) {
6492 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6493 &chan->conn_state) &&
6494 chan->unacked_frames) {
6495 __set_retrans_timer(chan);
/* Answer the poll by repeating the tail SREJ with F-bit */
6498 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6499 l2cap_send_srej_tail(chan);
6501 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6502 &chan->conn_state) &&
6503 chan->unacked_frames)
6504 __set_retrans_timer(chan);
6506 l2cap_send_ack(chan);
6509 case L2CAP_EV_RECV_RNR:
6510 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6511 l2cap_pass_to_tx(chan, control);
6512 if (control->poll) {
6513 l2cap_send_srej_tail(chan);
6515 struct l2cap_ctrl rr_control;
6516 memset(&rr_control, 0, sizeof(rr_control));
6517 rr_control.sframe = 1;
6518 rr_control.super = L2CAP_SUPER_RR;
6519 rr_control.reqseq = chan->buffer_seq;
6520 l2cap_send_sframe(chan, &rr_control);
6524 case L2CAP_EV_RECV_REJ:
6525 l2cap_handle_rej(chan, control);
6527 case L2CAP_EV_RECV_SREJ:
6528 l2cap_handle_srej(chan, control);
/* Free any frame that wasn't queued for reassembly */
6532 if (skb && !skb_in_use) {
6533 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the receiver to the RECV state,
 * adopt the MTU of the link the channel now lives on (AMP block MTU if
 * an hs_hcon is present, otherwise the ACL MTU), and re-segment any
 * pending outgoing data for the new MTU.
 */
6540 static int l2cap_finish_move(struct l2cap_chan *chan)
6542 BT_DBG("chan %p", chan);
6544 chan->rx_state = L2CAP_RX_STATE_RECV;
6547 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6549 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6551 return l2cap_resegment(chan);
/* ERTM receive state machine: WAIT_P handler (used during channel move,
 * waiting for a frame with the P-bit). On the poll, acked frames are
 * processed, the transmit queue is rewound to reqseq, the move is
 * finished, and the poll is answered with the F-bit set. Other events
 * are delegated to the normal RECV handler (without the skb, which was
 * consumed here for I-frames).
 */
6554 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6555 struct l2cap_ctrl *control,
6556 struct sk_buff *skb, u8 event)
6560 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6566 l2cap_process_reqseq(chan, control->reqseq);
6568 if (!skb_queue_empty(&chan->tx_q))
6569 chan->tx_send_head = skb_peek(&chan->tx_q);
6571 chan->tx_send_head = NULL;
6573 /* Rewind next_tx_seq to the point expected
6576 chan->next_tx_seq = control->reqseq;
6577 chan->unacked_frames = 0;
6579 err = l2cap_finish_move(chan);
/* Respond to the poll with the F-bit */
6583 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6584 l2cap_send_i_or_rr_or_rnr(chan);
6586 if (event == L2CAP_EV_RECV_IFRAME)
6589 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine: WAIT_F handler (after a channel move,
 * waiting for the frame carrying the F-bit). Once the final bit is
 * seen, the receiver returns to RECV, the transmit queue is rewound to
 * reqseq, the MTU is updated for the new link, outgoing data is
 * re-segmented, and the triggering frame is then processed normally.
 */
6592 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6593 struct l2cap_ctrl *control,
6594 struct sk_buff *skb, u8 event)
/* Keep waiting until the F-bit actually arrives */
6598 if (!control->final)
6601 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6603 chan->rx_state = L2CAP_RX_STATE_RECV;
6604 l2cap_process_reqseq(chan, control->reqseq);
6606 if (!skb_queue_empty(&chan->tx_q))
6607 chan->tx_send_head = skb_peek(&chan->tx_q);
6609 chan->tx_send_head = NULL;
6611 /* Rewind next_tx_seq to the point expected
6614 chan->next_tx_seq = control->reqseq;
6615 chan->unacked_frames = 0;
/* Adopt the MTU of whichever link the channel now uses */
6618 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6620 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6622 err = l2cap_resegment(chan);
6625 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame we have sent but not yet
 * had acknowledged, using modular sequence-offset arithmetic. Protects
 * the state machines from bogus acknowledgment numbers.
 */
6630 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6632 /* Make sure reqseq is for a packet that has been sent but not acked */
6635 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6636 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry point: validate the frame's reqseq and
 * route the event to the handler for the channel's current rx_state
 * (RECV, SREJ_SENT, WAIT_P or WAIT_F). An invalid reqseq is a protocol
 * error and disconnects the channel.
 */
6639 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6640 struct sk_buff *skb, u8 event)
6644 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6645 control, skb, event, chan->rx_state);
6647 if (__valid_reqseq(chan, control->reqseq)) {
6648 switch (chan->rx_state) {
6649 case L2CAP_RX_STATE_RECV:
6650 err = l2cap_rx_state_recv(chan, control, skb, event);
6652 case L2CAP_RX_STATE_SREJ_SENT:
6653 err = l2cap_rx_state_srej_sent(chan, control, skb,
6656 case L2CAP_RX_STATE_WAIT_P:
6657 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6659 case L2CAP_RX_STATE_WAIT_F:
6660 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the sent-but-unacked range: disconnect */
6667 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6668 control->reqseq, chan->next_tx_seq,
6669 chan->expected_ack_seq);
6670 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive path: deliver only in-sequence I-frames, drop
 * everything else (streaming mode has no retransmission). The txseq is
 * copied out before reassembly because the skb — and with it the
 * control block — may be freed by the receive callback.
 */
6676 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6677 struct sk_buff *skb)
6679 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
6680 * the txseq field in advance to use it after l2cap_reassemble_sdu
6681 * returns and to avoid the race condition, for example:
6683 * The current thread calls:
6684 * l2cap_reassemble_sdu
6685 * chan->ops->recv == l2cap_sock_recv_cb
6686 * __sock_queue_rcv_skb
6687 * Another thread calls:
6691 * Then the current thread tries to access control, but it was freed by
6692 * skb_free_datagram.
6694 u16 txseq = control->txseq;
6696 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6699 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
6700 l2cap_pass_to_tx(chan, control);
6702 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6703 __next_seq(chan, chan->buffer_seq));
6705 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6707 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: discard any partial SDU and the frame */
6710 kfree_skb(chan->sdu);
6713 chan->sdu_last_frag = NULL;
6717 BT_DBG("Freeing %p", skb);
/* Resynchronise expectations to the frame just seen */
6722 chan->last_acked_seq = txseq;
6723 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Receive entry for ERTM and streaming channels: unpack the control
 * field, verify the FCS, validate the payload length against the MPS,
 * then route I-frames to l2cap_rx/l2cap_stream_rx and S-frames to the
 * state machine event mapped from their supervisory function.
 */
6728 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6730 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6734 __unpack_control(chan, skb);
6739 * We can just drop the corrupted I-frame here.
6740 * Receiver will miss it and start proper recovery
6741 * procedures and ask for retransmission.
6743 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length and FCS fields from the MPS check */
6746 if (!control->sframe && control->sar == L2CAP_SAR_START)
6747 len -= L2CAP_SDULEN_SIZE;
6749 if (chan->fcs == L2CAP_FCS_CRC16)
6750 len -= L2CAP_FCS_SIZE;
6752 if (len > chan->mps) {
6753 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/reject the frame */
6757 if (chan->ops->filter) {
6758 if (chan->ops->filter(chan, skb))
6762 if (!control->sframe) {
6765 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6766 control->sar, control->reqseq, control->final,
6769 /* Validate F-bit - F=0 always valid, F=1 only
6770 * valid in TX WAIT_F
6772 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6775 if (chan->mode != L2CAP_MODE_STREAMING) {
6776 event = L2CAP_EV_RECV_IFRAME;
6777 err = l2cap_rx(chan, control, skb, event);
6779 err = l2cap_stream_rx(chan, control, skb);
6783 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map supervisory function (RR/REJ/RNR/SREJ) to rx event */
6785 const u8 rx_func_to_event[4] = {
6786 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6787 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6790 /* Only I-frames are expected in streaming mode */
6791 if (chan->mode == L2CAP_MODE_STREAMING)
6794 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6795 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are a protocol error */
6799 BT_ERR("Trailing bytes: %d in sframe", len);
6800 l2cap_send_disconn_req(chan, ECONNRESET);
6804 /* Validate F and P bits */
6805 if (control->final && (control->poll ||
6806 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6809 event = rx_func_to_event[control->super];
6810 if (l2cap_rx(chan, control, skb, event))
6811 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's LE flow-control credits for this channel.
 * Credits are only returned once our rx_credits drop below half the
 * configured maximum, at which point enough credits are granted to
 * restore the full allowance, and an LE Flow Control Credit packet is
 * sent to the peer.
 */
6821 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6823 struct l2cap_conn *conn = chan->conn;
6824 struct l2cap_le_credits pkt;
6827 /* We return more credits to the sender only after the amount of
6828 * credits falls below half of the initial amount.
6830 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6833 return_credits = le_max_credits - chan->rx_credits;
6835 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6837 chan->rx_credits += return_credits;
6839 pkt.cid = cpu_to_le16(chan->scid);
6840 pkt.credits = cpu_to_le16(return_credits);
6842 chan->ident = l2cap_get_ident(conn);
6844 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow control channels.
 * Consumes one rx credit per PDU (disconnecting if the peer sends with
 * no credits), replenishes credits as needed, and reassembles SDUs:
 * the first PDU of an SDU carries a 2-byte SDU length, subsequent PDUs
 * are appended until the announced length is reached. Oversized SDUs
 * or PDUs abort reassembly. Returns 0 even on internal errors because
 * the skb is always consumed here (a non-zero return would make the
 * caller double-free it).
 */
6847 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6851 if (!chan->rx_credits) {
6852 BT_ERR("No credits to receive LE L2CAP data");
6853 l2cap_send_disconn_req(chan, ECONNRESET);
/* A single PDU can never exceed the negotiated MTU */
6857 if (chan->imtu < skb->len) {
6858 BT_ERR("Too big LE L2CAP PDU");
6863 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6865 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: read the announced SDU length */
6872 sdu_len = get_unaligned_le16(skb->data);
6873 skb_pull(skb, L2CAP_SDULEN_SIZE);
6875 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6876 sdu_len, skb->len, chan->imtu);
6878 if (sdu_len > chan->imtu) {
6879 BT_ERR("Too big LE L2CAP SDU length received");
6884 if (skb->len > sdu_len) {
6885 BT_ERR("Too much LE L2CAP data received");
/* Entire SDU fits in one PDU: deliver directly */
6890 if (skb->len == sdu_len)
6891 return chan->ops->recv(chan, skb);
6894 chan->sdu_len = sdu_len;
6895 chan->sdu_last_frag = skb;
6897 /* Detect if remote is not able to use the selected MPS */
6898 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6899 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6901 /* Adjust the number of credits */
6902 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6903 chan->mps = mps_len;
6904 l2cap_chan_le_send_credits(chan);
6910 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6911 chan->sdu->len, skb->len, chan->sdu_len);
6913 if (chan->sdu->len + skb->len > chan->sdu_len) {
6914 BT_ERR("Too much LE L2CAP data received");
6919 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
/* Announced length reached: hand the assembled SDU up */
6922 if (chan->sdu->len == chan->sdu_len) {
6923 err = chan->ops->recv(chan, chan->sdu);
6926 chan->sdu_last_frag = NULL;
/* Error path: discard any partial SDU */
6934 kfree_skb(chan->sdu);
6936 chan->sdu_last_frag = NULL;
6940 /* We can't return an error here since we took care of the skb
6941 * freeing internally. An error return would cause the caller to
6942 * do a double-free of the skb.
/* Route a data frame to the channel identified by its source CID and
 * hand it to the mode-specific receive path (LE flow control, basic,
 * ERTM/streaming). CID L2CAP_CID_A2MP may lazily create the A2MP
 * channel. Frames for unknown CIDs or channels not in BT_CONNECTED are
 * dropped.
 */
6947 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6948 struct sk_buff *skb)
6950 struct l2cap_chan *chan;
6952 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channel is created on first use */
6954 if (cid == L2CAP_CID_A2MP) {
6955 chan = a2mp_channel_create(conn, skb);
6961 l2cap_chan_hold(chan);
6962 l2cap_chan_lock(chan);
6964 BT_DBG("unknown cid 0x%4.4x", cid);
6965 /* Drop packet and return */
6971 BT_DBG("chan %p, len %d", chan, skb->len);
6973 /* If we receive data on a fixed channel before the info req/rsp
6974 * procdure is done simply assume that the channel is supported
6975 * and mark it as ready.
6977 if (chan->chan_type == L2CAP_CHAN_FIXED)
6978 l2cap_chan_ready(chan);
6980 if (chan->state != BT_CONNECTED)
6983 switch (chan->mode) {
6984 case L2CAP_MODE_LE_FLOWCTL:
6985 if (l2cap_le_data_rcv(chan, skb) < 0)
6990 case L2CAP_MODE_BASIC:
6991 /* If socket recv buffers overflows we drop data here
6992 * which is *bad* because L2CAP has to be reliable.
6993 * But we don't have any other choice. L2CAP doesn't
6994 * provide flow control mechanism. */
6996 if (chan->imtu < skb->len) {
6997 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7001 if (!chan->ops->recv(chan, skb))
7005 case L2CAP_MODE_ERTM:
7006 case L2CAP_MODE_STREAMING:
7007 l2cap_data_rcv(chan, skb);
7011 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Release the per-channel lock and reference taken above */
7019 l2cap_chan_unlock(chan);
7020 l2cap_chan_put(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel bound to the
 * given PSM, if one exists and accepts it. Only valid on ACL links.
 * The peer address and PSM are stashed in the skb control block so the
 * socket layer can report them as msg_name.
 */
7023 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7024 struct sk_buff *skb)
7026 struct hci_conn *hcon = conn->hcon;
7027 struct l2cap_chan *chan;
7029 if (hcon->type != ACL_LINK)
7032 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7037 BT_DBG("chan %p, len %d", chan, skb->len);
7039 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
/* Drop frames larger than the channel's MTU */
7042 if (chan->imtu < skb->len)
7045 /* Store remote BD_ADDR and PSM for msg_name */
7046 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7047 bt_cb(skb)->l2cap.psm = psm;
7049 if (!chan->ops->recv(chan, skb)) {
7050 l2cap_chan_put(chan);
7055 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame by CID: signalling channels,
 * the connectionless channel, or a data channel. Frames arriving before
 * the HCI connection is fully up are queued on pending_rx and replayed
 * later; frames with a bad length, or data from blacklisted LE peers,
 * are dropped.
 */
7060 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7062 struct l2cap_hdr *lh = (void *) skb->data;
7063 struct hci_conn *hcon = conn->hcon;
7067 if (hcon->state != BT_CONNECTED) {
7068 BT_DBG("queueing pending rx skb");
7069 skb_queue_tail(&conn->pending_rx, skb);
7073 skb_pull(skb, L2CAP_HDR_SIZE);
7074 cid = __le16_to_cpu(lh->cid);
7075 len = __le16_to_cpu(lh->len);
/* The header's declared length must match the payload exactly */
7077 if (len != skb->len) {
7082 /* Since we can't actively block incoming LE connections we must
7083 * at least ensure that we ignore incoming data from them.
7085 if (hcon->type == LE_LINK &&
7086 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7087 bdaddr_dst_type(hcon))) {
7092 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7095 case L2CAP_CID_SIGNALING:
7096 l2cap_sig_channel(conn, skb);
7099 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload */
7100 psm = get_unaligned((__le16 *) skb->data);
7101 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7102 l2cap_conless_channel(conn, psm, skb);
7105 case L2CAP_CID_LE_SIGNALING:
7106 l2cap_le_sig_channel(conn, skb);
7110 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that were queued on pending_rx while
 * the HCI connection was still coming up, feeding each one back through
 * l2cap_recv_frame.
 */
7115 static void process_pending_rx(struct work_struct *work)
7117 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7119 struct sk_buff *skb;
7123 while ((skb = skb_dequeue(&conn->pending_rx)))
7124 l2cap_recv_frame(conn, skb);
/* Allocate and initialise the l2cap_conn for an HCI connection (or
 * return the existing one). Creates the HCI channel, sets the MTU from
 * the link type (LE vs ACL), advertises local fixed channels (A2MP when
 * HS is enabled, BR/EDR SMP when supported), and initialises locks,
 * lists, timers and work items used by the connection.
 */
7127 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7129 struct l2cap_conn *conn = hcon->l2cap_data;
7130 struct hci_chan *hchan;
7135 hchan = hci_chan_create(hcon);
7139 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created */
7141 hci_chan_del(hchan);
7145 kref_init(&conn->ref);
7146 hcon->l2cap_data = conn;
7147 conn->hcon = hci_conn_get(hcon);
7148 conn->hchan = hchan;
7150 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Pick the MTU appropriate to the link type */
7152 switch (hcon->type) {
7154 if (hcon->hdev->le_mtu) {
7155 conn->mtu = hcon->hdev->le_mtu;
7160 conn->mtu = hcon->hdev->acl_mtu;
7164 conn->feat_mask = 0;
7166 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7168 if (hcon->type == ACL_LINK &&
7169 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7170 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7172 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7173 (bredr_sc_enabled(hcon->hdev) ||
7174 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7175 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7177 mutex_init(&conn->ident_lock);
7178 mutex_init(&conn->chan_lock);
7180 INIT_LIST_HEAD(&conn->chan_l);
7181 INIT_LIST_HEAD(&conn->users);
7183 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7185 skb_queue_head_init(&conn->pending_rx);
7186 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7187 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7189 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * byte (<= 0x00ff); BR/EDR PSMs must have an odd least significant
 * octet and an even most significant octet per the L2CAP spec.
 */
7194 static bool is_valid_psm(u16 psm, u8 dst_type) {
7198 if (bdaddr_type_is_le(dst_type))
7199 return (psm <= 0x00ff);
7201 /* PSM must be odd and lsb of upper byte must be 0 */
7202 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates psm/cid against the channel type and mode, creates or
 * reuses the underlying HCI link (LE or ACL), attaches the channel
 * to the l2cap_conn and kicks off the L2CAP-level connect.  Returns
 * 0 on success or a negative errno.
 *
 * NOTE(review): this listing is an elided excerpt; several error
 * returns, goto labels and unlock paths are not visible here —
 * confirm against the full source.
 */
7205 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7206 bdaddr_t *dst, u8 dst_type)
7208 struct l2cap_conn *conn;
7209 struct hci_conn *hcon;
7210 struct hci_dev *hdev;
7213 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7214 dst_type, __le16_to_cpu(psm));
/* Route via the adapter that owns the source address */
7216 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7218 return -EHOSTUNREACH;
/* Reject invalid psm/cid combinations for this channel type */
7222 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7223 chan->chan_type != L2CAP_CHAN_RAW) {
7228 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7233 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Only modes meaningful for an outgoing connect are accepted */
7238 switch (chan->mode) {
7239 case L2CAP_MODE_BASIC:
7241 case L2CAP_MODE_LE_FLOWCTL:
7242 l2cap_le_flowctl_init(chan);
7244 case L2CAP_MODE_ERTM:
7245 case L2CAP_MODE_STREAMING:
7254 switch (chan->state) {
7258 /* Already connecting */
7263 /* Already connected */
7277 /* Set destination address and psm */
7278 bacpy(&chan->dst, dst);
7279 chan->dst_type = dst_type;
7284 if (bdaddr_type_is_le(dst_type)) {
7285 /* Convert from L2CAP channel address type to HCI address type
7287 if (dst_type == BDADDR_LE_PUBLIC)
7288 dst_type = ADDR_LE_DEV_PUBLIC;
7290 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect as slave directly; otherwise go
 * through the connect-by-scanning path.
 */
7292 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7293 hcon = hci_connect_le(hdev, dst, dst_type,
7295 HCI_LE_CONN_TIMEOUT,
7296 HCI_ROLE_SLAVE, NULL);
7298 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7300 HCI_LE_CONN_TIMEOUT);
7303 u8 auth_type = l2cap_get_auth_type(chan);
7304 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7308 err = PTR_ERR(hcon);
7312 conn = l2cap_conn_add(hcon);
7314 hci_conn_drop(hcon);
7319 mutex_lock(&conn->chan_lock);
7320 l2cap_chan_lock(chan);
/* Refuse a duplicate fixed-channel CID on this connection */
7322 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7323 hci_conn_drop(hcon);
7328 /* Update source addr of the socket */
7329 bacpy(&chan->src, &hcon->src);
7330 chan->src_type = bdaddr_src_type(hcon);
7332 __l2cap_chan_add(conn, chan);
7334 /* l2cap_chan_add takes its own ref so we can drop this one */
7335 hci_conn_drop(hcon);
7337 l2cap_state_change(chan, BT_CONNECT);
7338 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7340 /* Release chan->sport so that it can be reused by other
7341 * sockets (as it's only used for listening sockets).
7343 write_lock(&chan_list_lock);
7345 write_unlock(&chan_list_lock);
/* If the ACL link is already up, start L2CAP right away instead of
 * waiting for the connect-complete callback.
 */
7347 if (hcon->state == BT_CONNECTED) {
7348 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7349 __clear_chan_timer(chan);
7350 if (l2cap_chan_check_security(chan, true))
7351 l2cap_state_change(chan, BT_CONNECTED);
7353 l2cap_do_start(chan);
7359 l2cap_chan_unlock(chan);
7360 mutex_unlock(&conn->chan_lock);
7362 hci_dev_unlock(hdev);
7366 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7368 /* ---- L2CAP interface with lower layer (HCI) ---- */
7370 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7372 int exact = 0, lm1 = 0, lm2 = 0;
7373 struct l2cap_chan *c;
7375 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7377 /* Find listening sockets and check their link_mode */
7378 read_lock(&chan_list_lock);
7379 list_for_each_entry(c, &chan_list, global_l) {
7380 if (c->state != BT_LISTEN)
7383 if (!bacmp(&c->src, &hdev->bdaddr)) {
7384 lm1 |= HCI_LM_ACCEPT;
7385 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7386 lm1 |= HCI_LM_MASTER;
7388 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7389 lm2 |= HCI_LM_ACCEPT;
7390 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7391 lm2 |= HCI_LM_MASTER;
7394 read_unlock(&chan_list_lock);
7396 return exact ? lm1 : lm2;
7399 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7400 * from an existing channel in the list or from the beginning of the
7401 * global list (by passing NULL as first parameter).
7403 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7404 struct hci_conn *hcon)
7406 u8 src_type = bdaddr_src_type(hcon);
7408 read_lock(&chan_list_lock);
7411 c = list_next_entry(c, global_l);
7413 c = list_entry(chan_list.next, typeof(*c), global_l);
7415 list_for_each_entry_from(c, &chan_list, global_l) {
7416 if (c->chan_type != L2CAP_CHAN_FIXED)
7418 if (c->state != BT_LISTEN)
7420 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7422 if (src_type != c->src_type)
7425 c = l2cap_chan_hold_unless_zero(c);
7426 read_unlock(&chan_list_lock);
7430 read_unlock(&chan_list_lock);
/* HCI callback: an ACL/LE link finished connecting (status == 0 on
 * success).  Sets up the l2cap_conn and offers the new link to every
 * listening fixed channel, then signals connection readiness.
 *
 * NOTE(review): this listing is an elided excerpt; some early
 * returns and the loop tail (advancing pchan to next) are not
 * visible here — confirm against the full source.
 */
7435 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7437 struct hci_dev *hdev = hcon->hdev;
7438 struct l2cap_conn *conn;
7439 struct l2cap_chan *pchan;
/* L2CAP only runs on ACL and LE links */
7442 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7445 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Connect failed: tear down any existing conn state and stop */
7448 l2cap_conn_del(hcon, bt_to_errno(status));
7452 conn = l2cap_conn_add(hcon);
7456 dst_type = bdaddr_dst_type(hcon);
7458 /* If device is blocked, do not create channels for it */
7459 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7462 /* Find fixed channels and notify them of the new connection. We
7463 * use multiple individual lookups, continuing each time where
7464 * we left off, because the list lock would prevent calling the
7465 * potentially sleeping l2cap_chan_lock() function.
7467 pchan = l2cap_global_fixed_chan(NULL, hcon);
7469 struct l2cap_chan *chan, *next;
7471 /* Client fixed channels should override server ones */
7472 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7475 l2cap_chan_lock(pchan);
/* Ask the owner (e.g. socket layer) to spawn a channel instance */
7476 chan = pchan->ops->new_connection(pchan);
7478 bacpy(&chan->src, &hcon->src);
7479 bacpy(&chan->dst, &hcon->dst);
7480 chan->src_type = bdaddr_src_type(hcon);
7481 chan->dst_type = dst_type;
7483 __l2cap_chan_add(conn, chan);
7486 l2cap_chan_unlock(pchan);
/* Advance the iteration before dropping our reference on pchan */
7488 next = l2cap_global_fixed_chan(pchan, hcon);
7489 l2cap_chan_put(pchan);
7493 l2cap_conn_ready(conn);
7496 int l2cap_disconn_ind(struct hci_conn *hcon)
7498 struct l2cap_conn *conn = hcon->l2cap_data;
7500 BT_DBG("hcon %p", hcon);
7503 return HCI_ERROR_REMOTE_USER_TERM;
7504 return conn->disc_reason;
7507 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7509 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7512 BT_DBG("hcon %p reason %d", hcon, reason);
7514 l2cap_conn_del(hcon, bt_to_errno(reason));
7517 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7519 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7522 if (encrypt == 0x00) {
7523 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7524 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7525 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7526 chan->sec_level == BT_SECURITY_FIPS)
7527 l2cap_chan_close(chan, ECONNREFUSED);
7529 if (chan->sec_level == BT_SECURITY_MEDIUM)
7530 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption state changed on the link.
 * Walks every channel on the connection and advances or tears down
 * its state machine according to the new security status.
 *
 * NOTE(review): this listing is an elided excerpt; the early
 * "no conn" guard, several continue statements and closing braces
 * are not visible here — confirm against the full source.
 */
7534 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7536 struct l2cap_conn *conn = hcon->l2cap_data;
7537 struct l2cap_chan *chan;
7542 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7544 mutex_lock(&conn->chan_lock);
7546 list_for_each_entry(chan, &conn->chan_l, list) {
7547 l2cap_chan_lock(chan);
7549 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7550 state_to_string(chan->state));
/* A2MP channels manage their own security; skip them here */
7552 if (chan->scid == L2CAP_CID_A2MP) {
7553 l2cap_chan_unlock(chan);
/* Successful encryption raises the channel's effective level */
7557 if (!status && encrypt)
7558 chan->sec_level = hcon->sec_level;
7560 if (!__l2cap_no_conn_pending(chan)) {
7561 l2cap_chan_unlock(chan);
/* Established channels: resume traffic and re-check timers */
7565 if (!status && (chan->state == BT_CONNECTED ||
7566 chan->state == BT_CONFIG)) {
7567 chan->ops->resume(chan);
7568 l2cap_check_encryption(chan, encrypt);
7569 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed only when the
 * encryption key size is acceptable, else schedule disconnect.
 */
7573 if (chan->state == BT_CONNECT) {
7574 if (!status && l2cap_check_enc_key_size(hcon))
7575 l2cap_start_connection(chan);
7577 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7578 } else if (chan->state == BT_CONNECT2 &&
7579 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7580 struct l2cap_conn_rsp rsp;
7583 if (!status && l2cap_check_enc_key_size(hcon)) {
7584 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Hand the decision to userspace for authorisation */
7585 res = L2CAP_CR_PEND;
7586 stat = L2CAP_CS_AUTHOR_PEND;
7587 chan->ops->defer(chan);
7589 l2cap_state_change(chan, BT_CONFIG);
7590 res = L2CAP_CR_SUCCESS;
7591 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the pending connect request */
7594 l2cap_state_change(chan, BT_DISCONN);
7595 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7596 res = L2CAP_CR_SEC_BLOCK;
7597 stat = L2CAP_CS_NO_INFO;
/* Note the scid/dcid swap: respond from the remote's viewpoint */
7600 rsp.scid = cpu_to_le16(chan->dcid);
7601 rsp.dcid = cpu_to_le16(chan->scid);
7602 rsp.result = cpu_to_le16(res);
7603 rsp.status = cpu_to_le16(stat);
7604 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration once the connect response succeeded */
7607 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7608 res == L2CAP_CR_SUCCESS) {
7610 set_bit(CONF_REQ_SENT, &chan->conf_state);
7611 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7613 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7615 chan->num_conf_req++;
7619 l2cap_chan_unlock(chan);
7622 mutex_unlock(&conn->chan_lock);
/* HCI entry point for inbound ACL data.  Reassembles fragmented ACL
 * packets into complete L2CAP frames using conn->rx_skb/rx_len and
 * hands every finished frame to l2cap_recv_frame(), which takes
 * ownership of the skb.
 *
 * NOTE(review): this listing is an elided excerpt; the switch labels
 * (ACL_START/ACL_CONT), several drop/goto paths and the trailing
 * kfree_skb() are not visible here — confirm against the full
 * source.
 */
7625 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7627 struct l2cap_conn *conn = hcon->l2cap_data;
7628 struct l2cap_hdr *hdr;
7631 /* For AMP controller do not create l2cap conn */
7632 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7636 conn = l2cap_conn_add(hcon);
7641 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7645 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it and flag the link.
 */
7648 BT_ERR("Unexpected start frame (len %d)", skb->len);
7649 kfree_skb(conn->rx_skb);
7650 conn->rx_skb = NULL;
7652 l2cap_conn_unreliable(conn, ECOMM);
7655 /* Start fragment always begin with Basic L2CAP header */
7656 if (skb->len < L2CAP_HDR_SIZE) {
7657 BT_ERR("Frame is too short (len %d)", skb->len);
7658 l2cap_conn_unreliable(conn, ECOMM);
7662 hdr = (struct l2cap_hdr *) skb->data;
7663 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7665 if (len == skb->len) {
7666 /* Complete frame received */
7667 l2cap_recv_frame(conn, skb);
7671 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7673 if (skb->len > len) {
7674 BT_ERR("Frame is too long (len %d, expected len %d)",
7676 l2cap_conn_unreliable(conn, ECOMM);
7680 /* Allocate skb for the complete frame (with header) */
7681 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7685 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still outstanding */
7687 conn->rx_len = len - skb->len;
7691 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation without a pending reassembly is a protocol error */
7693 if (!conn->rx_len) {
7694 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7695 l2cap_conn_unreliable(conn, ECOMM);
7699 if (skb->len > conn->rx_len) {
7700 BT_ERR("Fragment is too long (len %d, expected %d)",
7701 skb->len, conn->rx_len);
7702 kfree_skb(conn->rx_skb);
7703 conn->rx_skb = NULL;
7705 l2cap_conn_unreliable(conn, ECOMM);
7709 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7711 conn->rx_len -= skb->len;
7713 if (!conn->rx_len) {
7714 /* Complete frame received. l2cap_recv_frame
7715 * takes ownership of the skb so set the global
7716 * rx_skb pointer to NULL first.
7718 struct sk_buff *rx_skb = conn->rx_skb;
7719 conn->rx_skb = NULL;
7720 l2cap_recv_frame(conn, rx_skb);
/* HCI core callback table wiring L2CAP into connection and security
 * events.  NOTE(review): the .name member line is elided in this
 * excerpt.
 */
7729 static struct hci_cb l2cap_cb = {
7731 .connect_cfm = l2cap_connect_cfm,
7732 .disconn_cfm = l2cap_disconn_cfm,
7733 .security_cfm = l2cap_security_cfm,
7736 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7738 struct l2cap_chan *c;
7740 read_lock(&chan_list_lock);
7742 list_for_each_entry(c, &chan_list, global_l) {
7743 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7744 &c->src, c->src_type, &c->dst, c->dst_type,
7745 c->state, __le16_to_cpu(c->psm),
7746 c->scid, c->dcid, c->imtu, c->omtu,
7747 c->sec_level, c->mode);
7750 read_unlock(&chan_list_lock);
7755 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7757 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap, using the
 * seq_file single_open pattern.  NOTE(review): the ".read = seq_read"
 * member line is elided in this excerpt.
 */
7760 static const struct file_operations l2cap_debugfs_fops = {
7761 .open = l2cap_debugfs_open,
7763 .llseek = seq_lseek,
7764 .release = single_release,
/* Dentry for the debugfs file, created in l2cap_init() */
7767 static struct dentry *l2cap_debugfs;
7769 int __init l2cap_init(void)
7773 err = l2cap_init_sockets();
7777 hci_register_cb(&l2cap_cb);
7779 if (IS_ERR_OR_NULL(bt_debugfs))
7782 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7783 NULL, &l2cap_debugfs_fops);
7785 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7787 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7793 void l2cap_exit(void)
7795 debugfs_remove(l2cap_debugfs);
7796 hci_unregister_cb(&l2cap_cb);
7797 l2cap_cleanup_sockets();
/* Writable (0644) module parameter switching off L2CAP enhanced
 * retransmission mode; the backing variable is declared earlier in
 * the file.
 */
7800 module_param(disable_ertm, bool, 0644);
7801 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");