/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 struct sk_buff_head *skbs, u8 event);
66 static void l2cap_retrans_timeout(struct work_struct *work);
67 static void l2cap_monitor_timeout(struct work_struct *work);
68 static void l2cap_ack_timeout(struct work_struct *work);
70 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
72 if (link_type == LE_LINK) {
73 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
74 return BDADDR_LE_PUBLIC;
76 return BDADDR_LE_RANDOM;
82 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->src_type);
87 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
89 return bdaddr_type(hcon->type, hcon->dst_type);
92 /* ---- L2CAP channels ---- */
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
99 list_for_each_entry(c, &conn->chan_l, list) {
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
109 struct l2cap_chan *c;
111 list_for_each_entry(c, &conn->chan_l, list) {
118 /* Find channel with given SCID.
119 * Returns a reference locked channel.
121 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
124 struct l2cap_chan *c;
126 mutex_lock(&conn->chan_lock);
127 c = __l2cap_get_chan_by_scid(conn, cid);
129 /* Only lock if chan reference is not 0 */
130 c = l2cap_chan_hold_unless_zero(c);
134 mutex_unlock(&conn->chan_lock);
139 /* Find channel with given DCID.
140 * Returns a reference locked channel.
142 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
145 struct l2cap_chan *c;
147 mutex_lock(&conn->chan_lock);
148 c = __l2cap_get_chan_by_dcid(conn, cid);
150 /* Only lock if chan reference is not 0 */
151 c = l2cap_chan_hold_unless_zero(c);
155 mutex_unlock(&conn->chan_lock);
160 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
163 struct l2cap_chan *c;
165 list_for_each_entry(c, &conn->chan_l, list) {
166 if (c->ident == ident)
172 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
175 struct l2cap_chan *c;
177 mutex_lock(&conn->chan_lock);
178 c = __l2cap_get_chan_by_ident(conn, ident);
180 /* Only lock if chan reference is not 0 */
181 c = l2cap_chan_hold_unless_zero(c);
185 mutex_unlock(&conn->chan_lock);
190 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
192 struct l2cap_chan *c;
194 list_for_each_entry(c, &chan_list, global_l) {
195 if (c->sport == psm && !bacmp(&c->src, src))
201 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
205 write_lock(&chan_list_lock);
207 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
217 u16 p, start, end, incr;
219 if (chan->src_type == BDADDR_BREDR) {
220 start = L2CAP_PSM_DYN_START;
221 end = L2CAP_PSM_AUTO_END;
224 start = L2CAP_PSM_LE_DYN_START;
225 end = L2CAP_PSM_LE_DYN_END;
230 for (p = start; p <= end; p += incr)
231 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
232 chan->psm = cpu_to_le16(p);
233 chan->sport = cpu_to_le16(p);
240 write_unlock(&chan_list_lock);
243 EXPORT_SYMBOL_GPL(l2cap_add_psm);
245 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
247 write_lock(&chan_list_lock);
249 /* Override the defaults (which are for conn-oriented) */
250 chan->omtu = L2CAP_DEFAULT_MTU;
251 chan->chan_type = L2CAP_CHAN_FIXED;
255 write_unlock(&chan_list_lock);
260 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 if (conn->hcon->type == LE_LINK)
265 dyn_end = L2CAP_CID_LE_DYN_END;
267 dyn_end = L2CAP_CID_DYN_END;
269 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
270 if (!__l2cap_get_chan_by_scid(conn, cid))
277 static void l2cap_state_change(struct l2cap_chan *chan, int state)
279 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
280 state_to_string(state));
283 chan->ops->state_change(chan, state, 0);
286 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 chan->ops->state_change(chan, chan->state, err);
293 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
295 chan->ops->state_change(chan, chan->state, err);
298 static void __set_retrans_timer(struct l2cap_chan *chan)
300 if (!delayed_work_pending(&chan->monitor_timer) &&
301 chan->retrans_timeout) {
302 l2cap_set_timer(chan, &chan->retrans_timer,
303 msecs_to_jiffies(chan->retrans_timeout));
307 static void __set_monitor_timer(struct l2cap_chan *chan)
309 __clear_retrans_timer(chan);
310 if (chan->monitor_timeout) {
311 l2cap_set_timer(chan, &chan->monitor_timer,
312 msecs_to_jiffies(chan->monitor_timeout));
316 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
321 skb_queue_walk(head, skb) {
322 if (bt_cb(skb)->l2cap.txseq == seq)
329 /* ---- L2CAP sequence number lists ---- */
/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
340 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
342 size_t alloc_size, i;
344 /* Allocated size is a power of 2 to map sequence numbers
345 * (which may be up to 14 bits) in to a smaller array that is
346 * sized for the negotiated ERTM transmit windows.
348 alloc_size = roundup_pow_of_two(size);
350 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
354 seq_list->mask = alloc_size - 1;
355 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
356 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
357 for (i = 0; i < alloc_size; i++)
358 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
363 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
365 kfree(seq_list->list);
368 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
371 /* Constant-time check for list membership */
372 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
375 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
377 u16 seq = seq_list->head;
378 u16 mask = seq_list->mask;
380 seq_list->head = seq_list->list[seq & mask];
381 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
383 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
384 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
385 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
391 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
398 for (i = 0; i <= seq_list->mask; i++)
399 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
401 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
402 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
405 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
407 u16 mask = seq_list->mask;
409 /* All appends happen in constant time */
411 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
414 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
415 seq_list->head = seq;
417 seq_list->list[seq_list->tail & mask] = seq;
419 seq_list->tail = seq;
420 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
423 static void l2cap_chan_timeout(struct work_struct *work)
425 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
427 struct l2cap_conn *conn = chan->conn;
430 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
432 mutex_lock(&conn->chan_lock);
433 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
434 * this work. No need to call l2cap_chan_hold(chan) here again.
436 l2cap_chan_lock(chan);
438 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
439 reason = ECONNREFUSED;
440 else if (chan->state == BT_CONNECT &&
441 chan->sec_level != BT_SECURITY_SDP)
442 reason = ECONNREFUSED;
446 l2cap_chan_close(chan, reason);
448 chan->ops->close(chan);
450 l2cap_chan_unlock(chan);
451 l2cap_chan_put(chan);
453 mutex_unlock(&conn->chan_lock);
456 struct l2cap_chan *l2cap_chan_create(void)
458 struct l2cap_chan *chan;
460 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
464 skb_queue_head_init(&chan->tx_q);
465 skb_queue_head_init(&chan->srej_q);
466 mutex_init(&chan->lock);
468 /* Set default lock nesting level */
469 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
471 write_lock(&chan_list_lock);
472 list_add(&chan->global_l, &chan_list);
473 write_unlock(&chan_list_lock);
475 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
476 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
477 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
478 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
480 chan->state = BT_OPEN;
482 kref_init(&chan->kref);
484 /* This flag is cleared in l2cap_chan_ready() */
485 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
487 BT_DBG("chan %p", chan);
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
493 static void l2cap_chan_destroy(struct kref *kref)
495 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
497 BT_DBG("chan %p", chan);
499 write_lock(&chan_list_lock);
500 list_del(&chan->global_l);
501 write_unlock(&chan_list_lock);
506 void l2cap_chan_hold(struct l2cap_chan *c)
508 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
515 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
517 if (!kref_get_unless_zero(&c->kref))
523 void l2cap_chan_put(struct l2cap_chan *c)
525 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
527 kref_put(&c->kref, l2cap_chan_destroy);
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
533 chan->fcs = L2CAP_FCS_CRC16;
534 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 chan->remote_max_tx = chan->max_tx;
538 chan->remote_tx_win = chan->tx_win;
539 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 chan->sec_level = BT_SECURITY_LOW;
541 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
545 chan->conf_state = 0;
546 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
548 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
552 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
555 chan->sdu_last_frag = NULL;
557 chan->tx_credits = 0;
558 chan->rx_credits = le_max_credits;
559 chan->mps = min_t(u16, chan->imtu, le_default_mps);
561 skb_queue_head_init(&chan->tx_q);
564 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
566 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
567 __le16_to_cpu(chan->psm), chan->dcid);
569 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
573 switch (chan->chan_type) {
574 case L2CAP_CHAN_CONN_ORIENTED:
575 /* Alloc CID for connection-oriented socket */
576 chan->scid = l2cap_alloc_cid(conn);
577 if (conn->hcon->type == ACL_LINK)
578 chan->omtu = L2CAP_DEFAULT_MTU;
581 case L2CAP_CHAN_CONN_LESS:
582 /* Connectionless socket */
583 chan->scid = L2CAP_CID_CONN_LESS;
584 chan->dcid = L2CAP_CID_CONN_LESS;
585 chan->omtu = L2CAP_DEFAULT_MTU;
588 case L2CAP_CHAN_FIXED:
589 /* Caller will set CID and CID specific MTU values */
593 /* Raw socket can send/recv signalling messages only */
594 chan->scid = L2CAP_CID_SIGNALING;
595 chan->dcid = L2CAP_CID_SIGNALING;
596 chan->omtu = L2CAP_DEFAULT_MTU;
599 chan->local_id = L2CAP_BESTEFFORT_ID;
600 chan->local_stype = L2CAP_SERV_BESTEFFORT;
601 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
602 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
603 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
604 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
606 l2cap_chan_hold(chan);
608 /* Only keep a reference for fixed channels if they requested it */
609 if (chan->chan_type != L2CAP_CHAN_FIXED ||
610 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
611 hci_conn_hold(conn->hcon);
613 list_add(&chan->list, &conn->chan_l);
616 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
618 mutex_lock(&conn->chan_lock);
619 __l2cap_chan_add(conn, chan);
620 mutex_unlock(&conn->chan_lock);
623 void l2cap_chan_del(struct l2cap_chan *chan, int err)
625 struct l2cap_conn *conn = chan->conn;
627 __clear_chan_timer(chan);
629 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
630 state_to_string(chan->state));
632 chan->ops->teardown(chan, err);
635 struct amp_mgr *mgr = conn->hcon->amp_mgr;
636 /* Delete from channel list */
637 list_del(&chan->list);
639 l2cap_chan_put(chan);
643 /* Reference was only held for non-fixed channels or
644 * fixed channels that explicitly requested it using the
645 * FLAG_HOLD_HCI_CONN flag.
647 if (chan->chan_type != L2CAP_CHAN_FIXED ||
648 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
649 hci_conn_drop(conn->hcon);
651 if (mgr && mgr->bredr_chan == chan)
652 mgr->bredr_chan = NULL;
655 if (chan->hs_hchan) {
656 struct hci_chan *hs_hchan = chan->hs_hchan;
658 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
659 amp_disconnect_logical_link(hs_hchan);
662 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
666 case L2CAP_MODE_BASIC:
669 case L2CAP_MODE_LE_FLOWCTL:
670 skb_queue_purge(&chan->tx_q);
673 case L2CAP_MODE_ERTM:
674 __clear_retrans_timer(chan);
675 __clear_monitor_timer(chan);
676 __clear_ack_timer(chan);
678 skb_queue_purge(&chan->srej_q);
680 l2cap_seq_list_free(&chan->srej_list);
681 l2cap_seq_list_free(&chan->retrans_list);
685 case L2CAP_MODE_STREAMING:
686 skb_queue_purge(&chan->tx_q);
692 EXPORT_SYMBOL_GPL(l2cap_chan_del);
694 static void l2cap_conn_update_id_addr(struct work_struct *work)
696 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
697 id_addr_update_work);
698 struct hci_conn *hcon = conn->hcon;
699 struct l2cap_chan *chan;
701 mutex_lock(&conn->chan_lock);
703 list_for_each_entry(chan, &conn->chan_l, list) {
704 l2cap_chan_lock(chan);
705 bacpy(&chan->dst, &hcon->dst);
706 chan->dst_type = bdaddr_dst_type(hcon);
707 l2cap_chan_unlock(chan);
710 mutex_unlock(&conn->chan_lock);
713 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
715 struct l2cap_conn *conn = chan->conn;
716 struct l2cap_le_conn_rsp rsp;
719 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
720 result = L2CAP_CR_AUTHORIZATION;
722 result = L2CAP_CR_BAD_PSM;
724 l2cap_state_change(chan, BT_DISCONN);
726 rsp.dcid = cpu_to_le16(chan->scid);
727 rsp.mtu = cpu_to_le16(chan->imtu);
728 rsp.mps = cpu_to_le16(chan->mps);
729 rsp.credits = cpu_to_le16(chan->rx_credits);
730 rsp.result = cpu_to_le16(result);
732 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
736 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
738 struct l2cap_conn *conn = chan->conn;
739 struct l2cap_conn_rsp rsp;
742 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
743 result = L2CAP_CR_SEC_BLOCK;
745 result = L2CAP_CR_BAD_PSM;
747 l2cap_state_change(chan, BT_DISCONN);
749 rsp.scid = cpu_to_le16(chan->dcid);
750 rsp.dcid = cpu_to_le16(chan->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
754 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
757 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
759 struct l2cap_conn *conn = chan->conn;
761 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
763 switch (chan->state) {
765 chan->ops->teardown(chan, 0);
770 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
771 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
772 l2cap_send_disconn_req(chan, reason);
774 l2cap_chan_del(chan, reason);
778 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
779 if (conn->hcon->type == ACL_LINK)
780 l2cap_chan_connect_reject(chan);
781 else if (conn->hcon->type == LE_LINK)
782 l2cap_chan_le_connect_reject(chan);
785 l2cap_chan_del(chan, reason);
790 l2cap_chan_del(chan, reason);
794 chan->ops->teardown(chan, 0);
798 EXPORT_SYMBOL(l2cap_chan_close);
800 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
802 switch (chan->chan_type) {
804 switch (chan->sec_level) {
805 case BT_SECURITY_HIGH:
806 case BT_SECURITY_FIPS:
807 return HCI_AT_DEDICATED_BONDING_MITM;
808 case BT_SECURITY_MEDIUM:
809 return HCI_AT_DEDICATED_BONDING;
811 return HCI_AT_NO_BONDING;
814 case L2CAP_CHAN_CONN_LESS:
815 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
816 if (chan->sec_level == BT_SECURITY_LOW)
817 chan->sec_level = BT_SECURITY_SDP;
819 if (chan->sec_level == BT_SECURITY_HIGH ||
820 chan->sec_level == BT_SECURITY_FIPS)
821 return HCI_AT_NO_BONDING_MITM;
823 return HCI_AT_NO_BONDING;
825 case L2CAP_CHAN_CONN_ORIENTED:
826 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
827 if (chan->sec_level == BT_SECURITY_LOW)
828 chan->sec_level = BT_SECURITY_SDP;
830 if (chan->sec_level == BT_SECURITY_HIGH ||
831 chan->sec_level == BT_SECURITY_FIPS)
832 return HCI_AT_NO_BONDING_MITM;
834 return HCI_AT_NO_BONDING;
838 switch (chan->sec_level) {
839 case BT_SECURITY_HIGH:
840 case BT_SECURITY_FIPS:
841 return HCI_AT_GENERAL_BONDING_MITM;
842 case BT_SECURITY_MEDIUM:
843 return HCI_AT_GENERAL_BONDING;
845 return HCI_AT_NO_BONDING;
851 /* Service level security */
852 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
854 struct l2cap_conn *conn = chan->conn;
857 if (conn->hcon->type == LE_LINK)
858 return smp_conn_security(conn->hcon, chan->sec_level);
860 auth_type = l2cap_get_auth_type(chan);
862 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
866 static u8 l2cap_get_ident(struct l2cap_conn *conn)
870 /* Get next available identificator.
871 * 1 - 128 are used by kernel.
872 * 129 - 199 are reserved.
873 * 200 - 254 are used by utilities like l2ping, etc.
876 mutex_lock(&conn->ident_lock);
878 if (++conn->tx_ident > 128)
883 mutex_unlock(&conn->ident_lock);
888 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
891 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
894 BT_DBG("code 0x%2.2x", code);
899 /* Use NO_FLUSH if supported or we have an LE link (which does
900 * not support auto-flushing packets) */
901 if (lmp_no_flush_capable(conn->hcon->hdev) ||
902 conn->hcon->type == LE_LINK)
903 flags = ACL_START_NO_FLUSH;
907 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
908 skb->priority = HCI_PRIO_MAX;
910 hci_send_acl(conn->hchan, skb, flags);
913 static bool __chan_is_moving(struct l2cap_chan *chan)
915 return chan->move_state != L2CAP_MOVE_STABLE &&
916 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
919 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
921 struct hci_conn *hcon = chan->conn->hcon;
924 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
927 if (chan->hs_hcon && !__chan_is_moving(chan)) {
929 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
936 /* Use NO_FLUSH for LE links (where this is the only option) or
937 * if the BR/EDR link supports it and flushing has not been
938 * explicitly requested (through FLAG_FLUSHABLE).
940 if (hcon->type == LE_LINK ||
941 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
942 lmp_no_flush_capable(hcon->hdev)))
943 flags = ACL_START_NO_FLUSH;
947 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
948 hci_send_acl(chan->conn->hchan, skb, flags);
951 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
953 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
954 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
956 if (enh & L2CAP_CTRL_FRAME_TYPE) {
959 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
960 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
967 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
968 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
975 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
977 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
978 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
980 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
983 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
984 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
991 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
992 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
999 static inline void __unpack_control(struct l2cap_chan *chan,
1000 struct sk_buff *skb)
1002 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1003 __unpack_extended_control(get_unaligned_le32(skb->data),
1004 &bt_cb(skb)->l2cap);
1005 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1007 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1008 &bt_cb(skb)->l2cap);
1009 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1013 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1017 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1018 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1020 if (control->sframe) {
1021 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1022 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1023 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1025 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1026 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1032 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1036 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1037 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1039 if (control->sframe) {
1040 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1041 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1042 packed |= L2CAP_CTRL_FRAME_TYPE;
1044 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1045 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1051 static inline void __pack_control(struct l2cap_chan *chan,
1052 struct l2cap_ctrl *control,
1053 struct sk_buff *skb)
1055 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1056 put_unaligned_le32(__pack_extended_control(control),
1057 skb->data + L2CAP_HDR_SIZE);
1059 put_unaligned_le16(__pack_enhanced_control(control),
1060 skb->data + L2CAP_HDR_SIZE);
1064 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1066 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1067 return L2CAP_EXT_HDR_SIZE;
1069 return L2CAP_ENH_HDR_SIZE;
1072 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1075 struct sk_buff *skb;
1076 struct l2cap_hdr *lh;
1077 int hlen = __ertm_hdr_size(chan);
1079 if (chan->fcs == L2CAP_FCS_CRC16)
1080 hlen += L2CAP_FCS_SIZE;
1082 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1085 return ERR_PTR(-ENOMEM);
1087 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1088 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1089 lh->cid = cpu_to_le16(chan->dcid);
1091 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1092 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1094 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1096 if (chan->fcs == L2CAP_FCS_CRC16) {
1097 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1098 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1101 skb->priority = HCI_PRIO_MAX;
1105 static void l2cap_send_sframe(struct l2cap_chan *chan,
1106 struct l2cap_ctrl *control)
1108 struct sk_buff *skb;
1111 BT_DBG("chan %p, control %p", chan, control);
1113 if (!control->sframe)
1116 if (__chan_is_moving(chan))
1119 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1123 if (control->super == L2CAP_SUPER_RR)
1124 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1125 else if (control->super == L2CAP_SUPER_RNR)
1126 set_bit(CONN_RNR_SENT, &chan->conn_state);
1128 if (control->super != L2CAP_SUPER_SREJ) {
1129 chan->last_acked_seq = control->reqseq;
1130 __clear_ack_timer(chan);
1133 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1134 control->final, control->poll, control->super);
1136 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 control_field = __pack_extended_control(control);
1139 control_field = __pack_enhanced_control(control);
1141 skb = l2cap_create_sframe_pdu(chan, control_field);
1143 l2cap_do_send(chan, skb);
1146 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1148 struct l2cap_ctrl control;
1150 BT_DBG("chan %p, poll %d", chan, poll);
1152 memset(&control, 0, sizeof(control));
1154 control.poll = poll;
1156 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1157 control.super = L2CAP_SUPER_RNR;
1159 control.super = L2CAP_SUPER_RR;
1161 control.reqseq = chan->buffer_seq;
1162 l2cap_send_sframe(chan, &control);
1165 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1167 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1170 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1173 static bool __amp_capable(struct l2cap_chan *chan)
1175 struct l2cap_conn *conn = chan->conn;
1176 struct hci_dev *hdev;
1177 bool amp_available = false;
1179 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1182 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1185 read_lock(&hci_dev_list_lock);
1186 list_for_each_entry(hdev, &hci_dev_list, list) {
1187 if (hdev->amp_type != AMP_TYPE_BREDR &&
1188 test_bit(HCI_UP, &hdev->flags)) {
1189 amp_available = true;
1193 read_unlock(&hci_dev_list_lock);
1195 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1196 return amp_available;
1201 static bool l2cap_check_efs(struct l2cap_chan *chan)
1203 /* Check EFS parameters */
1207 void l2cap_send_conn_req(struct l2cap_chan *chan)
1209 struct l2cap_conn *conn = chan->conn;
1210 struct l2cap_conn_req req;
1212 req.scid = cpu_to_le16(chan->scid);
1213 req.psm = chan->psm;
1215 chan->ident = l2cap_get_ident(conn);
1217 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1219 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1222 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1224 struct l2cap_create_chan_req req;
1225 req.scid = cpu_to_le16(chan->scid);
1226 req.psm = chan->psm;
1227 req.amp_id = amp_id;
1229 chan->ident = l2cap_get_ident(chan->conn);
1231 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1235 static void l2cap_move_setup(struct l2cap_chan *chan)
1237 struct sk_buff *skb;
1239 BT_DBG("chan %p", chan);
1241 if (chan->mode != L2CAP_MODE_ERTM)
1244 __clear_retrans_timer(chan);
1245 __clear_monitor_timer(chan);
1246 __clear_ack_timer(chan);
1248 chan->retry_count = 0;
1249 skb_queue_walk(&chan->tx_q, skb) {
1250 if (bt_cb(skb)->l2cap.retries)
1251 bt_cb(skb)->l2cap.retries = 1;
1256 chan->expected_tx_seq = chan->buffer_seq;
1258 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1259 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1260 l2cap_seq_list_clear(&chan->retrans_list);
1261 l2cap_seq_list_clear(&chan->srej_list);
1262 skb_queue_purge(&chan->srej_q);
1264 chan->tx_state = L2CAP_TX_STATE_XMIT;
1265 chan->rx_state = L2CAP_RX_STATE_MOVE;
1267 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1270 static void l2cap_move_done(struct l2cap_chan *chan)
1272 u8 move_role = chan->move_role;
1273 BT_DBG("chan %p", chan);
1275 chan->move_state = L2CAP_MOVE_STABLE;
1276 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1278 if (chan->mode != L2CAP_MODE_ERTM)
1281 switch (move_role) {
1282 case L2CAP_MOVE_ROLE_INITIATOR:
1283 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1284 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1286 case L2CAP_MOVE_ROLE_RESPONDER:
1287 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1292 static void l2cap_chan_ready(struct l2cap_chan *chan)
1294 /* The channel may have already been flagged as connected in
1295 * case of receiving data before the L2CAP info req/rsp
1296 * procedure is complete.
1298 if (chan->state == BT_CONNECTED)
1301 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1302 chan->conf_state = 0;
1303 __clear_chan_timer(chan);
1305 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1306 chan->ops->suspend(chan);
1308 chan->state = BT_CONNECTED;
1310 chan->ops->ready(chan);
1313 static void l2cap_le_connect(struct l2cap_chan *chan)
1315 struct l2cap_conn *conn = chan->conn;
1316 struct l2cap_le_conn_req req;
1318 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1321 req.psm = chan->psm;
1322 req.scid = cpu_to_le16(chan->scid);
1323 req.mtu = cpu_to_le16(chan->imtu);
1324 req.mps = cpu_to_le16(chan->mps);
1325 req.credits = cpu_to_le16(chan->rx_credits);
1327 chan->ident = l2cap_get_ident(conn);
1329 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1333 static void l2cap_le_start(struct l2cap_chan *chan)
1335 struct l2cap_conn *conn = chan->conn;
1337 if (!smp_conn_security(conn->hcon, chan->sec_level))
1341 l2cap_chan_ready(chan);
1345 if (chan->state == BT_CONNECT)
1346 l2cap_le_connect(chan);
1349 static void l2cap_start_connection(struct l2cap_chan *chan)
1351 if (__amp_capable(chan)) {
1352 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1353 a2mp_discover_amp(chan);
1354 } else if (chan->conn->hcon->type == LE_LINK) {
1355 l2cap_le_start(chan);
1357 l2cap_send_conn_req(chan);
1361 static void l2cap_request_info(struct l2cap_conn *conn)
1363 struct l2cap_info_req req;
1365 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1368 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1370 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1371 conn->info_ident = l2cap_get_ident(conn);
1373 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1375 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link either is not encrypted (no key-size
 * requirement applies) or its encryption key meets the minimum size.
 */
1379 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1381 /* The minimum encryption key size needs to be enforced by the
1382 * host stack before establishing any L2CAP connections. The
1383 * specification in theory allows a minimum of 1, but to align
1384 * BR/EDR and LE transports, a minimum of 7 is chosen.
1386 * This check might also be called for unencrypted connections
1387 * that have no key size requirements. Ensure that the link is
1388 * actually encrypted before enforcing a key size.
1390 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1391 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
/* Drive connection establishment for @chan: LE links delegate to
 * l2cap_le_start(); BR/EDR links first exchange the feature mask, then
 * check security/pending state and key size before connecting.
 */
1394 static void l2cap_do_start(struct l2cap_chan *chan)
1396 struct l2cap_conn *conn = chan->conn;
1398 if (conn->hcon->type == LE_LINK) {
1399 l2cap_le_start(chan);
/* BR/EDR: the info request must be sent (and completed) first. */
1403 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1404 l2cap_request_info(conn);
1408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1411 if (!l2cap_chan_check_security(chan, true) ||
1412 !__l2cap_no_conn_pending(chan))
/* Refuse to proceed over a weakly-encrypted link; arm disconnect
 * timer instead (else path, lines elided in listing).
 */
1415 if (l2cap_check_enc_key_size(conn->hcon))
1416 l2cap_start_connection(chan);
1418 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether @mode (ERTM/streaming) is supported by both the remote
 * feature mask @feat_mask and the local feature mask. Returns the
 * matching feature bit (non-zero) when supported.
 */
1421 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1423 u32 local_feat_mask = l2cap_feat_mask;
/* ERTM/streaming are advertised locally unless disabled (guard elided). */
1425 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1428 case L2CAP_MODE_ERTM:
1429 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1430 case L2CAP_MODE_STREAMING:
1431 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate local disconnection of @chan: stop ERTM timers, send an
 * L2CAP disconnect request (except for the A2MP fixed channel, which
 * only changes state), and move the channel to BT_DISCONN with @err.
 */
1437 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1439 struct l2cap_conn *conn = chan->conn;
1440 struct l2cap_disconn_req req;
/* ERTM timers must not fire once we are tearing the channel down. */
1445 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1446 __clear_retrans_timer(chan);
1447 __clear_monitor_timer(chan);
1448 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU — just flip the state and return. */
1451 if (chan->scid == L2CAP_CID_A2MP) {
1452 l2cap_state_change(chan, BT_DISCONN);
1456 req.dcid = cpu_to_le16(chan->dcid);
1457 req.scid = cpu_to_le16(chan->scid);
1458 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1461 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1464 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (under chan_lock) and advance its setup:
 * non-connection-oriented channels become ready immediately; BT_CONNECT
 * channels are (re)started or closed; BT_CONNECT2 channels answer the
 * pending incoming connect request and, on success, start configuration.
 */
1465 static void l2cap_conn_start(struct l2cap_conn *conn)
1467 struct l2cap_chan *chan, *tmp;
1469 BT_DBG("conn %p", conn);
1471 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_close() may unlink entries. */
1473 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1474 l2cap_chan_lock(chan);
1476 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1477 l2cap_chan_ready(chan);
1478 l2cap_chan_unlock(chan);
1482 if (chan->state == BT_CONNECT) {
1483 if (!l2cap_chan_check_security(chan, true) ||
1484 !__l2cap_no_conn_pending(chan)) {
1485 l2cap_chan_unlock(chan);
/* Close channels whose requested mode the peer cannot support. */
1489 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1490 && test_bit(CONF_STATE2_DEVICE,
1491 &chan->conf_state)) {
1492 l2cap_chan_close(chan, ECONNRESET);
1493 l2cap_chan_unlock(chan);
/* Key-size gate: weakly encrypted links are refused. */
1497 if (l2cap_check_enc_key_size(conn->hcon))
1498 l2cap_start_connection(chan);
1500 l2cap_chan_close(chan, ECONNREFUSED);
1502 } else if (chan->state == BT_CONNECT2) {
1503 struct l2cap_conn_rsp rsp;
/* Response carries the peer's view: its scid is our dcid. */
1505 rsp.scid = cpu_to_le16(chan->dcid);
1506 rsp.dcid = cpu_to_le16(chan->scid);
1508 if (l2cap_chan_check_security(chan, false)) {
1509 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1510 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1511 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1512 chan->ops->defer(chan);
1515 l2cap_state_change(chan, BT_CONFIG);
1516 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1517 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied — report authentication pending. */
1520 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1521 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1524 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Send our configuration request at most once, and only on success. */
1527 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1528 rsp.result != L2CAP_CR_SUCCESS) {
1529 l2cap_chan_unlock(chan);
1533 set_bit(CONF_REQ_SENT, &chan->conf_state);
1534 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1535 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1536 chan->num_conf_req++;
1539 l2cap_chan_unlock(chan);
1542 mutex_unlock(&conn->chan_lock);
/* LE link just came up: trigger pending SMP security and, when acting as
 * slave with an out-of-range connection interval, request a connection
 * parameter update from the master.
 */
1545 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1547 struct hci_conn *hcon = conn->hcon;
1548 struct hci_dev *hdev = hcon->hdev;
1550 BT_DBG("%s conn %p", hdev->name, conn);
1552 /* For outgoing pairing which doesn't necessarily have an
1553 * associated socket (e.g. mgmt_pair_device).
1556 smp_conn_security(hcon, hcon->pending_sec_level);
1558 /* For LE slave connections, make sure the connection interval
1559 * is in the range of the minimum and maximum interval that has
1560 * been configured for this connection. If not, then trigger
1561 * the connection update procedure.
1563 if (hcon->role == HCI_ROLE_SLAVE &&
1564 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1565 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1566 struct l2cap_conn_param_update_req req;
1568 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1569 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1570 req.latency = cpu_to_le16(hcon->le_conn_latency);
1571 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1573 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1574 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Physical link is ready: request the BR/EDR feature mask, advance every
 * channel on the connection, run the LE-specific ready work, then release
 * any received frames queued while the link was being set up.
 */
1578 static void l2cap_conn_ready(struct l2cap_conn *conn)
1580 struct l2cap_chan *chan;
1581 struct hci_conn *hcon = conn->hcon;
1583 BT_DBG("conn %p", conn);
1585 if (hcon->type == ACL_LINK)
1586 l2cap_request_info(conn);
1588 mutex_lock(&conn->chan_lock);
1590 list_for_each_entry(chan, &conn->chan_l, list) {
1592 l2cap_chan_lock(chan);
/* A2MP fixed channel has its own setup path — skip it here. */
1594 if (chan->scid == L2CAP_CID_A2MP) {
1595 l2cap_chan_unlock(chan);
1599 if (hcon->type == LE_LINK) {
1600 l2cap_le_start(chan);
1601 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Raw/connless channels wait only for the feature exchange. */
1602 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1603 l2cap_chan_ready(chan);
1604 } else if (chan->state == BT_CONNECT) {
1605 l2cap_do_start(chan);
1608 l2cap_chan_unlock(chan);
1611 mutex_unlock(&conn->chan_lock);
1613 if (hcon->type == LE_LINK)
1614 l2cap_le_conn_ready(conn);
/* Process frames that arrived before the connection was ready. */
1616 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1619 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel that insisted on a reliable link
 * (FLAG_FORCE_RELIABLE); other channels tolerate the loss silently.
 */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1622 struct l2cap_chan *chan;
1624 BT_DBG("conn %p", conn);
1626 mutex_lock(&conn->chan_lock);
1628 list_for_each_entry(chan, &conn->chan_l, list) {
1629 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 l2cap_chan_set_err(chan, err);
1633 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired without a response: treat the feature-mask
 * exchange as done anyway and resume channel setup.
 */
1636 static void l2cap_info_timeout(struct work_struct *work)
1638 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1641 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1642 conn->info_ident = 0;
1644 l2cap_conn_start(conn);
1649 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650 * callback is called during registration. The ->remove callback is called
1651 * during unregistration.
1652 * An l2cap_user object can either be explicitly unregistered or when the
1653 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655 * External modules must own a reference to the l2cap_conn object if they intend
1656 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657 * any time if they don't.
/* Register an external l2cap_user on @conn. Runs under the hci_dev lock
 * to serialize against l2cap_conn_del(); calls user->probe() and links
 * the user into conn->users on success.
 */
1660 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1662 struct hci_dev *hdev = conn->hcon->hdev;
1665 /* We need to check whether l2cap_conn is registered. If it is not, we
1666 * must not register the l2cap_user. l2cap_conn_del() unregisters
1667 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1668 * relies on the parent hci_conn object to be locked. This itself relies
1669 * on the hci_dev object to be locked. So we must lock the hci device
/* A non-empty user->list means this user is already registered. */
1674 if (!list_empty(&user->list)) {
1679 /* conn->hchan is NULL after l2cap_conn_del() was called */
1685 ret = user->probe(conn, user);
1689 list_add(&user->list, &conn->users);
1693 hci_dev_unlock(hdev);
1696 EXPORT_SYMBOL(l2cap_register_user);
/* Explicitly unregister @user from @conn under the hci_dev lock; a user
 * not currently on a list is ignored. Invokes the user's remove callback.
 */
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1700 struct hci_dev *hdev = conn->hcon->hdev;
1704 if (list_empty(&user->list))
/* list_del_init keeps user->list re-registerable afterwards. */
1707 list_del_init(&user->list);
1708 user->remove(conn, user);
1711 hci_dev_unlock(hdev);
1713 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from @conn, calling each remove
 * callback. Used during connection teardown.
 */
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1717 struct l2cap_user *user;
1719 while (!list_empty(&conn->users)) {
1720 user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 list_del_init(&user->list);
1722 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon with error @err:
 * flush pending RX, cancel deferred work, unregister users, close every
 * channel, detach from the HCI channel, and drop the conn reference.
 */
1726 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1728 struct l2cap_conn *conn = hcon->l2cap_data;
1729 struct l2cap_chan *chan, *l;
1734 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame and queued-but-unprocessed RX. */
1736 kfree_skb(conn->rx_skb);
1738 skb_queue_purge(&conn->pending_rx);
1740 /* We can not call flush_work(&conn->pending_rx_work) here since we
1741 * might block if we are running on a worker from the same workqueue
1742 * pending_rx_work is waiting on.
1744 if (work_pending(&conn->pending_rx_work))
1745 cancel_work_sync(&conn->pending_rx_work);
1747 if (work_pending(&conn->id_addr_update_work))
1748 cancel_work_sync(&conn->id_addr_update_work);
1750 l2cap_unregister_all_users(conn);
1752 /* Force the connection to be immediately dropped */
1753 hcon->disc_timeout = 0;
1755 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it cannot vanish mid-iteration. */
1758 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1759 l2cap_chan_hold(chan);
1760 l2cap_chan_lock(chan);
1762 l2cap_chan_del(chan, err);
1764 chan->ops->close(chan);
1766 l2cap_chan_unlock(chan);
1767 l2cap_chan_put(chan);
1770 mutex_unlock(&conn->chan_lock);
1772 hci_chan_del(conn->hchan);
/* The info timer only runs after the request was actually sent. */
1774 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1775 cancel_delayed_work_sync(&conn->info_timer);
1777 hcon->l2cap_data = NULL;
1779 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (the conn itself is freed on an elided line of this listing).
 */
1782 static void l2cap_conn_free(struct kref *ref)
1784 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1786 hci_conn_put(conn->hcon);
/* Take a reference on @conn. NOTE(review): the return statement sits on
 * a line elided from this listing (embedded numbering gap 1793-1794).
 */
1790 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1792 kref_get(&conn->ref);
1795 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; l2cap_conn_free() runs when it hits zero. */
1797 void l2cap_conn_put(struct l2cap_conn *conn)
1799 kref_put(&conn->ref, l2cap_conn_free);
1801 EXPORT_SYMBOL(l2cap_conn_put);
1803 /* ---- Socket interface ---- */
1805 /* Find socket with psm and source / destination bdaddr.
1806 * Returns closest match.
/* Look up a global channel matching @psm/@src/@dst for the given state
 * and link type. An exact src+dst match wins immediately; otherwise the
 * closest wildcard (BDADDR_ANY) match is remembered in c1 and returned
 * with a reference taken (or NULL if it is going away).
 */
1808 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1813 struct l2cap_chan *c, *tmp, *c1 = NULL;
1815 read_lock(&chan_list_lock);
1817 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1818 if (state && c->state != state)
/* Transport filter: BR/EDR channels for ACL, non-BR/EDR for LE. */
1821 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1824 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1827 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1828 int src_match, dst_match;
1829 int src_any, dst_any;
1832 src_match = !bacmp(&c->src, src);
1833 dst_match = !bacmp(&c->dst, dst);
1834 if (src_match && dst_match) {
/* Refuse a channel whose refcount already dropped to zero. */
1835 if (!l2cap_chan_hold_unless_zero(c))
1838 read_unlock(&chan_list_lock);
1843 src_any = !bacmp(&c->src, BDADDR_ANY);
1844 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1845 if ((src_match && dst_any) || (src_any && dst_match) ||
1846 (src_any && dst_any))
1852 c1 = l2cap_chan_hold_unless_zero(c1);
1854 read_unlock(&chan_list_lock);
/* ERTM monitor timer: feed L2CAP_EV_MONITOR_TO into the TX state machine.
 * The early unlock/put pair handles the elided bail-out condition (lines
 * 1867-1868 of the original are missing from this listing).
 */
1859 static void l2cap_monitor_timeout(struct work_struct *work)
1861 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1862 monitor_timer.work);
1864 BT_DBG("chan %p", chan);
1866 l2cap_chan_lock(chan);
1869 l2cap_chan_unlock(chan);
1870 l2cap_chan_put(chan);
1874 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
/* Drop the reference taken when the timer was armed. */
1876 l2cap_chan_unlock(chan);
1877 l2cap_chan_put(chan);
/* ERTM retransmission timer: feed L2CAP_EV_RETRANS_TO into the TX state
 * machine. Mirrors l2cap_monitor_timeout(); the early unlock/put pair
 * belongs to an elided bail-out condition.
 */
1880 static void l2cap_retrans_timeout(struct work_struct *work)
1882 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1883 retrans_timer.work);
1885 BT_DBG("chan %p", chan);
1887 l2cap_chan_lock(chan);
1890 l2cap_chan_unlock(chan);
1891 l2cap_chan_put(chan);
1895 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1896 l2cap_chan_unlock(chan);
1897 l2cap_chan_put(chan);
/* Streaming mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately — no retransmission, no ack bookkeeping beyond
 * sequence numbering and an optional FCS trailer.
 */
1900 static void l2cap_streaming_send(struct l2cap_chan *chan,
1901 struct sk_buff_head *skbs)
1903 struct sk_buff *skb;
1904 struct l2cap_ctrl *control;
1906 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while an AMP channel move is in progress. */
1908 if (__chan_is_moving(chan))
1911 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1913 while (!skb_queue_empty(&chan->tx_q)) {
1915 skb = skb_dequeue(&chan->tx_q);
1917 bt_cb(skb)->l2cap.retries = 1;
1918 control = &bt_cb(skb)->l2cap;
/* Streaming frames never acknowledge anything: reqseq stays 0. */
1920 control->reqseq = 0;
1921 control->txseq = chan->next_tx_seq;
1923 __pack_control(chan, control, skb);
1925 if (chan->fcs == L2CAP_FCS_CRC16) {
1926 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1927 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1930 l2cap_do_send(chan, skb);
1932 BT_DBG("Sent txseq %u", control->txseq);
1934 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1935 chan->frames_sent++;
/* ERTM transmit loop: send I-frames from tx_send_head while the remote
 * TX window has room and the state machine is in XMIT. Each frame keeps
 * its original in the TX queue (a clone goes on the wire) so it can be
 * retransmitted until acknowledged.
 */
1939 static int l2cap_ertm_send(struct l2cap_chan *chan)
1941 struct sk_buff *skb, *tx_skb;
1942 struct l2cap_ctrl *control;
1945 BT_DBG("chan %p", chan);
1947 if (chan->state != BT_CONNECTED)
/* Remote signalled RNR — hold transmission until it clears. */
1950 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1953 if (__chan_is_moving(chan))
1956 while (chan->tx_send_head &&
1957 chan->unacked_frames < chan->remote_tx_win &&
1958 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1960 skb = chan->tx_send_head;
1962 bt_cb(skb)->l2cap.retries = 1;
1963 control = &bt_cb(skb)->l2cap;
/* Piggyback the F-bit if one is owed to the peer. */
1965 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acknowledges everything up to buffer_seq. */
1968 control->reqseq = chan->buffer_seq;
1969 chan->last_acked_seq = chan->buffer_seq;
1970 control->txseq = chan->next_tx_seq;
1972 __pack_control(chan, control, skb);
1974 if (chan->fcs == L2CAP_FCS_CRC16) {
1975 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1976 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1979 /* Clone after data has been modified. Data is assumed to be
1980 read-only (for locking purposes) on cloned sk_buffs.
1982 tx_skb = skb_clone(skb, GFP_KERNEL);
1987 __set_retrans_timer(chan);
1989 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1990 chan->unacked_frames++;
1991 chan->frames_sent++;
1994 if (skb_queue_is_last(&chan->tx_q, skb))
1995 chan->tx_send_head = NULL;
1997 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1999 l2cap_do_send(chan, tx_skb);
2000 BT_DBG("Sent txseq %u", control->txseq);
2003 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2004 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list: locate the
 * original frame in the TX queue, refresh its control field and FCS, and
 * send a fresh clone/copy. Exceeding max_tx retries disconnects the
 * channel.
 */
2009 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2011 struct l2cap_ctrl control;
2012 struct sk_buff *skb;
2013 struct sk_buff *tx_skb;
2016 BT_DBG("chan %p", chan);
2018 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2021 if (__chan_is_moving(chan))
2024 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2025 seq = l2cap_seq_list_pop(&chan->retrans_list);
2027 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2029 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2034 bt_cb(skb)->l2cap.retries++;
2035 control = bt_cb(skb)->l2cap;
/* Give up and tear the channel down after max_tx attempts. */
2037 if (chan->max_tx != 0 &&
2038 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2039 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2040 l2cap_send_disconn_req(chan, ECONNRESET);
2041 l2cap_seq_list_clear(&chan->retrans_list);
/* Refresh the acknowledgement and optional F-bit before resend. */
2045 control.reqseq = chan->buffer_seq;
2046 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2051 if (skb_cloned(skb)) {
2052 /* Cloned sk_buffs are read-only, so we need a
2055 tx_skb = skb_copy(skb, GFP_KERNEL);
2057 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this resend round entirely. */
2061 l2cap_seq_list_clear(&chan->retrans_list);
2065 /* Update skb contents */
2066 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2067 put_unaligned_le32(__pack_extended_control(&control),
2068 tx_skb->data + L2CAP_HDR_SIZE);
2070 put_unaligned_le16(__pack_enhanced_control(&control),
2071 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the frame minus the trailing FCS field. */
2075 if (chan->fcs == L2CAP_FCS_CRC16) {
2076 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2077 tx_skb->len - L2CAP_FCS_SIZE);
2078 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2082 l2cap_do_send(chan, tx_skb);
2084 BT_DBG("Resent txseq %d", control.txseq);
2086 chan->last_acked_seq = chan->buffer_seq;
/* Queue a single sequence number (control->reqseq) for retransmission
 * and run the resend machinery.
 */
2090 static void l2cap_retransmit(struct l2cap_chan *chan,
2091 struct l2cap_ctrl *control)
2093 BT_DBG("chan %p, control %p", chan, control);
2095 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2096 l2cap_ertm_resend(chan);
/* Retransmit all unacknowledged frames starting at control->reqseq:
 * rebuild retrans_list from the TX queue (stopping at tx_send_head,
 * which marks the first never-sent frame) and resend.
 */
2099 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2100 struct l2cap_ctrl *control)
2102 struct sk_buff *skb;
2104 BT_DBG("chan %p, control %p", chan, control);
2107 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2109 l2cap_seq_list_clear(&chan->retrans_list);
2111 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2114 if (chan->unacked_frames) {
/* First walk: find the frame whose txseq matches reqseq. */
2115 skb_queue_walk(&chan->tx_q, skb) {
2116 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2117 skb == chan->tx_send_head)
/* Second walk: queue everything from there up to tx_send_head. */
2121 skb_queue_walk_from(&chan->tx_q, skb) {
2122 if (skb == chan->tx_send_head)
2125 l2cap_seq_list_append(&chan->retrans_list,
2126 bt_cb(skb)->l2cap.txseq);
2129 l2cap_ertm_resend(chan);
/* Decide how to acknowledge received I-frames: send RNR when locally
 * busy, piggyback the ack on outgoing I-frames when possible, send an
 * explicit RR once ~3/4 of the ack window is outstanding, or just arm
 * the ack timer.
 */
2133 static void l2cap_send_ack(struct l2cap_chan *chan)
2135 struct l2cap_ctrl control;
2136 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2137 chan->last_acked_seq);
2140 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2141 chan, chan->last_acked_seq, chan->buffer_seq);
2143 memset(&control, 0, sizeof(control));
/* Locally busy in plain RECV state: tell the peer via RNR. */
2146 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2147 chan->rx_state == L2CAP_RX_STATE_RECV) {
2148 __clear_ack_timer(chan);
2149 control.super = L2CAP_SUPER_RNR;
2150 control.reqseq = chan->buffer_seq;
2151 l2cap_send_sframe(chan, &control);
2153 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2154 l2cap_ertm_send(chan);
2155 /* If any i-frames were sent, they included an ack */
2156 if (chan->buffer_seq == chan->last_acked_seq)
2160 /* Ack now if the window is 3/4ths full.
2161 * Calculate without mul or div
/* threshold = ack_win * 3, later shifted (shift elided in listing). */
2163 threshold = chan->ack_win;
2164 threshold += threshold << 1;
2167 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2170 if (frames_to_ack >= threshold) {
2171 __clear_ack_timer(chan);
2172 control.super = L2CAP_SUPER_RR;
2173 control.reqseq = chan->buffer_seq;
2174 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the acknowledgement to the ack timer. */
2179 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, chaining extra
 * fragment skbs (each at most conn->mtu) onto skb's frag_list when the
 * payload exceeds the first buffer. Returns 0 or a negative errno.
 */
2183 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2184 struct msghdr *msg, int len,
2185 int count, struct sk_buff *skb)
2187 struct l2cap_conn *conn = chan->conn;
2188 struct sk_buff **frag;
/* First @count bytes land directly in the head skb. */
2191 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2197 /* Continuation fragments (no L2CAP header) */
2198 frag = &skb_shinfo(skb)->frag_list;
2200 struct sk_buff *tmp;
2202 count = min_t(unsigned int, conn->mtu, len);
2204 tmp = chan->ops->alloc_skb(chan, 0, count,
2205 msg->msg_flags & MSG_DONTWAIT);
2207 return PTR_ERR(tmp);
2211 if (copy_from_iter(skb_put(*frag, count), count,
2212 &msg->msg_iter) != count)
/* Keep the parent skb's accounting in sync with the chain. */
2218 skb->len += (*frag)->len;
2219 skb->data_len += (*frag)->len;
2221 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus PSM, followed
 * by the user payload from @msg. Returns the skb or an ERR_PTR.
 */
2227 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2228 struct msghdr *msg, size_t len)
2230 struct l2cap_conn *conn = chan->conn;
2231 struct sk_buff *skb;
2232 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2233 struct l2cap_hdr *lh;
2235 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2236 __le16_to_cpu(chan->psm), len);
/* Head skb holds at most one HCI fragment worth of payload. */
2238 count = min_t(unsigned int, (conn->mtu - hlen), len);
2240 skb = chan->ops->alloc_skb(chan, hlen, count,
2241 msg->msg_flags & MSG_DONTWAIT);
2245 /* Create L2CAP header */
2246 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers the PSM field in addition to the payload. */
2248 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2249 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2251 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2252 if (unlikely(err < 0)) {
2254 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: bare L2CAP header followed by the
 * user payload from @msg. Returns the skb or an ERR_PTR.
 */
2259 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2260 struct msghdr *msg, size_t len)
2262 struct l2cap_conn *conn = chan->conn;
2263 struct sk_buff *skb;
2265 struct l2cap_hdr *lh;
2267 BT_DBG("chan %p len %zu", chan, len);
2269 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2271 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2272 msg->msg_flags & MSG_DONTWAIT);
2276 /* Create L2CAP header */
2277 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2278 lh->cid = cpu_to_le16(chan->dcid);
2279 lh->len = cpu_to_le16(len);
2281 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2282 if (unlikely(err < 0)) {
2284 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, control field
 * placeholder (filled at transmit time), optional SDU-length field, the
 * payload, with room reserved for an FCS. Returns the skb or an ERR_PTR.
 */
2289 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2290 struct msghdr *msg, size_t len,
2293 struct l2cap_conn *conn = chan->conn;
2294 struct sk_buff *skb;
2295 int err, count, hlen;
2296 struct l2cap_hdr *lh;
2298 BT_DBG("chan %p len %zu", chan, len);
2301 return ERR_PTR(-ENOTCONN);
/* Header size depends on extended vs enhanced control field. */
2303 hlen = __ertm_hdr_size(chan);
/* SDU length field is only present on the first segment (elided guard). */
2306 hlen += L2CAP_SDULEN_SIZE;
2308 if (chan->fcs == L2CAP_FCS_CRC16)
2309 hlen += L2CAP_FCS_SIZE;
2311 count = min_t(unsigned int, (conn->mtu - hlen), len);
2313 skb = chan->ops->alloc_skb(chan, hlen, count,
2314 msg->msg_flags & MSG_DONTWAIT);
2318 /* Create L2CAP header */
2319 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2320 lh->cid = cpu_to_le16(chan->dcid);
2321 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2323 /* Control header is populated later */
2324 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2325 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2327 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2330 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2332 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2333 if (unlikely(err < 0)) {
2335 return ERR_PTR(err);
/* Stash per-frame metadata used by the ERTM send/resend paths. */
2338 bt_cb(skb)->l2cap.fcs = chan->fcs;
2339 bt_cb(skb)->l2cap.retries = 0;
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs appended to
 * @seg_queue, tagging each with the proper SAR value (unsegmented, or
 * start/continue/end). Returns 0 or a negative errno.
 */
2343 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2344 struct sk_buff_head *seg_queue,
2345 struct msghdr *msg, size_t len)
2347 struct sk_buff *skb;
2352 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2354 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2355 * so fragmented skbs are not used. The HCI layer's handling
2356 * of fragmented skbs is not compatible with ERTM's queueing.
2359 /* PDU size is derived from the HCI MTU */
2360 pdu_len = chan->conn->mtu;
2362 /* Constrain PDU size for BR/EDR connections */
2364 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2366 /* Adjust for largest possible L2CAP overhead. */
2368 pdu_len -= L2CAP_FCS_SIZE;
2370 pdu_len -= __ertm_hdr_size(chan);
2372 /* Remote device may have requested smaller PDUs */
2373 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2375 if (len <= pdu_len) {
2376 sar = L2CAP_SAR_UNSEGMENTED;
2380 sar = L2CAP_SAR_START;
/* Per-segment loop (loop header elided in this listing). */
2385 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2388 __skb_queue_purge(seg_queue);
2389 return PTR_ERR(skb);
2392 bt_cb(skb)->l2cap.sar = sar;
2393 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk gets SAR_END; otherwise keep continuing. */
2399 if (len <= pdu_len) {
2400 sar = L2CAP_SAR_END;
2403 sar = L2CAP_SAR_CONTINUE;
/* Build one LE credit-based flow control PDU: L2CAP header, optional
 * SDU-length field (first segment only, guard elided), then payload.
 * Returns the skb or an ERR_PTR.
 */
2410 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2412 size_t len, u16 sdulen)
2414 struct l2cap_conn *conn = chan->conn;
2415 struct sk_buff *skb;
2416 int err, count, hlen;
2417 struct l2cap_hdr *lh;
2419 BT_DBG("chan %p len %zu", chan, len);
2422 return ERR_PTR(-ENOTCONN);
2424 hlen = L2CAP_HDR_SIZE;
2427 hlen += L2CAP_SDULEN_SIZE;
2429 count = min_t(unsigned int, (conn->mtu - hlen), len);
2431 skb = chan->ops->alloc_skb(chan, hlen, count,
2432 msg->msg_flags & MSG_DONTWAIT);
2436 /* Create L2CAP header */
2437 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2438 lh->cid = cpu_to_le16(chan->dcid);
2439 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2442 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2444 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2445 if (unlikely(err < 0)) {
2447 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs on @seg_queue. The first PDU
 * reserves room for the 2-byte SDU length header, so subsequent PDUs get
 * that space back for payload.
 */
2453 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2454 struct sk_buff_head *seg_queue,
2455 struct msghdr *msg, size_t len)
2457 struct sk_buff *skb;
2461 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First segment: payload budget shrinks by the SDU-length field. */
2464 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2470 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2472 __skb_queue_purge(seg_queue);
2473 return PTR_ERR(skb);
2476 __skb_queue_tail(seg_queue, skb);
/* Later segments carry no SDU-length field — reclaim its bytes. */
2482 pdu_len += L2CAP_SDULEN_SIZE;
/* Entry point for sending user data on @chan. Dispatches on channel
 * type/mode: connectionless G-frames, LE credit-based flow control,
 * basic B-frames, or ERTM/streaming segmentation plus the TX state
 * machine. Returns bytes sent or a negative errno.
 */
2489 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2491 struct sk_buff *skb;
2493 struct sk_buff_head seg_queue;
2498 /* Connectionless channel */
2499 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2500 skb = l2cap_create_connless_pdu(chan, msg, len);
2502 return PTR_ERR(skb);
2504 /* Channel lock is released before requesting new skb and then
2505 * reacquired thus we need to recheck channel state.
2507 if (chan->state != BT_CONNECTED) {
2512 l2cap_do_send(chan, skb);
2516 switch (chan->mode) {
2517 case L2CAP_MODE_LE_FLOWCTL:
2518 /* Check outgoing MTU */
2519 if (len > chan->omtu)
/* No credits left: cannot transmit until the peer grants more. */
2522 if (!chan->tx_credits)
2525 __skb_queue_head_init(&seg_queue);
2527 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2529 if (chan->state != BT_CONNECTED) {
2530 __skb_queue_purge(&seg_queue);
2537 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
/* Send as many queued PDUs as current credits allow. */
2539 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2540 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
/* Out of credits: ask the socket layer to stop feeding us. */
2544 if (!chan->tx_credits)
2545 chan->ops->suspend(chan);
2551 case L2CAP_MODE_BASIC:
2552 /* Check outgoing MTU */
2553 if (len > chan->omtu)
2556 /* Create a basic PDU */
2557 skb = l2cap_create_basic_pdu(chan, msg, len);
2559 return PTR_ERR(skb);
2561 /* Channel lock is released before requesting new skb and then
2562 * reacquired thus we need to recheck channel state.
2564 if (chan->state != BT_CONNECTED) {
2569 l2cap_do_send(chan, skb);
2573 case L2CAP_MODE_ERTM:
2574 case L2CAP_MODE_STREAMING:
2575 /* Check outgoing MTU */
2576 if (len > chan->omtu) {
2581 __skb_queue_head_init(&seg_queue);
2583 /* Do segmentation before calling in to the state machine,
2584 * since it's possible to block while waiting for memory
2587 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2589 /* The channel could have been closed while segmenting,
2590 * check that it is still connected.
2592 if (chan->state != BT_CONNECTED) {
2593 __skb_queue_purge(&seg_queue);
2600 if (chan->mode == L2CAP_MODE_ERTM)
2601 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2603 l2cap_streaming_send(chan, &seg_queue);
2607 /* If the skbs were not queued for sending, they'll still be in
2608 * seg_queue and need to be purged.
2610 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode. */
2614 BT_DBG("bad state %1.1x", chan->mode);
2620 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every sequence number between expected_tx_seq
 * and @txseq that is not already buffered in srej_q, recording each in
 * srej_list, then advance expected_tx_seq past @txseq.
 */
2622 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2624 struct l2cap_ctrl control;
2627 BT_DBG("chan %p, txseq %u", chan, txseq);
2629 memset(&control, 0, sizeof(control));
2631 control.super = L2CAP_SUPER_SREJ;
2633 for (seq = chan->expected_tx_seq; seq != txseq;
2634 seq = __next_seq(chan, seq)) {
/* Frames already received out of order need no SREJ. */
2635 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2636 control.reqseq = seq;
2637 l2cap_send_sframe(chan, &control);
2638 l2cap_seq_list_append(&chan->srej_list, seq);
2642 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list); no-op when the list is empty.
 */
2645 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2647 struct l2cap_ctrl control;
2649 BT_DBG("chan %p", chan);
2651 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2654 memset(&control, 0, sizeof(control));
2656 control.super = L2CAP_SUPER_SREJ;
2657 control.reqseq = chan->srej_list.tail;
2658 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding missing frame except @txseq,
 * rotating through srej_list exactly once (entries are popped and
 * re-appended, so the captured initial head bounds the loop).
 */
2661 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2663 struct l2cap_ctrl control;
2667 BT_DBG("chan %p, txseq %u", chan, txseq);
2669 memset(&control, 0, sizeof(control));
2671 control.super = L2CAP_SUPER_SREJ;
2673 /* Capture initial list head to allow only one pass through the list. */
2674 initial_head = chan->srej_list.head;
2677 seq = l2cap_seq_list_pop(&chan->srej_list);
/* Stop at the frame that just arrived, or when the list empties. */
2678 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2681 control.reqseq = seq;
2682 l2cap_send_sframe(chan, &control);
2683 l2cap_seq_list_append(&chan->srej_list, seq);
2684 } while (chan->srej_list.head != initial_head);
/* Process an acknowledgement up to (but not including) @reqseq: free
 * every acked frame from the TX queue, update expected_ack_seq and the
 * unacked count, and stop the retransmission timer when nothing remains.
 */
2687 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2689 struct sk_buff *acked_skb;
2692 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or this ack adds no new information. */
2694 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2697 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2698 chan->expected_ack_seq, chan->unacked_frames);
2700 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2701 ackseq = __next_seq(chan, ackseq)) {
2703 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2705 skb_unlink(acked_skb, &chan->tx_q);
2706 kfree_skb(acked_skb);
2707 chan->unacked_frames--;
2711 chan->expected_ack_seq = reqseq;
2713 if (chan->unacked_frames == 0)
2714 __clear_retrans_timer(chan);
2716 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: rewind expected_tx_seq, drop all
 * out-of-order buffered frames and pending SREJs, and return to RECV.
 */
2719 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2721 BT_DBG("chan %p", chan);
2723 chan->expected_tx_seq = chan->buffer_seq;
2724 l2cap_seq_list_clear(&chan->srej_list);
2725 skb_queue_purge(&chan->srej_q);
2726 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, incoming acknowledgements, explicit polls and the
 * retransmission timeout (switch statement braces elided in listing).
 */
2729 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2730 struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event)
2733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2737 case L2CAP_EV_DATA_REQUEST:
2738 if (chan->tx_send_head == NULL)
2739 chan->tx_send_head = skb_peek(skbs);
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2742 l2cap_ertm_send(chan);
2744 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2745 BT_DBG("Enter LOCAL_BUSY");
2746 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2748 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2749 /* The SREJ_SENT state must be aborted if we are to
2750 * enter the LOCAL_BUSY state.
2752 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() emits the RNR when locally busy. */
2755 l2cap_send_ack(chan);
2758 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2759 BT_DBG("Exit LOCAL_BUSY");
2760 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR earlier, poll the peer and await its F-bit. */
2762 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2763 struct l2cap_ctrl local_control;
2765 memset(&local_control, 0, sizeof(local_control));
2766 local_control.sframe = 1;
2767 local_control.super = L2CAP_SUPER_RR;
2768 local_control.poll = 1;
2769 local_control.reqseq = chan->buffer_seq;
2770 l2cap_send_sframe(chan, &local_control);
2772 chan->retry_count = 1;
2773 __set_monitor_timer(chan);
2774 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2777 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2778 l2cap_process_reqseq(chan, control->reqseq);
2780 case L2CAP_EV_EXPLICIT_POLL:
2781 l2cap_send_rr_or_rnr(chan, 1);
2782 chan->retry_count = 1;
2783 __set_monitor_timer(chan);
2784 __clear_ack_timer(chan);
2785 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2787 case L2CAP_EV_RETRANS_TO:
/* Retransmission timeout: poll with P=1 and wait for the F-bit. */
2788 l2cap_send_rr_or_rnr(chan, 1);
2789 chan->retry_count = 1;
2790 __set_monitor_timer(chan);
2791 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2793 case L2CAP_EV_RECV_FBIT:
2794 /* Nothing to process */
/* ERTM TX state machine, WAIT_F state: queue (but do not send) new data
 * while waiting for the peer's F-bit; returning to XMIT on F-bit receipt
 * and escalating to disconnect after max_tx monitor expiries.
 */
2801 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2802 struct l2cap_ctrl *control,
2803 struct sk_buff_head *skbs, u8 event)
2805 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2809 case L2CAP_EV_DATA_REQUEST:
2810 if (chan->tx_send_head == NULL)
2811 chan->tx_send_head = skb_peek(skbs);
2812 /* Queue data, but don't send. */
2813 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2815 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2816 BT_DBG("Enter LOCAL_BUSY");
2817 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2819 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2820 /* The SREJ_SENT state must be aborted if we are to
2821 * enter the LOCAL_BUSY state.
2823 l2cap_abort_rx_srej_sent(chan);
2826 l2cap_send_ack(chan);
2829 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2830 BT_DBG("Exit LOCAL_BUSY");
2831 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2833 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2834 struct l2cap_ctrl local_control;
2835 memset(&local_control, 0, sizeof(local_control));
2836 local_control.sframe = 1;
2837 local_control.super = L2CAP_SUPER_RR;
2838 local_control.poll = 1;
2839 local_control.reqseq = chan->buffer_seq;
2840 l2cap_send_sframe(chan, &local_control);
2842 chan->retry_count = 1;
2843 __set_monitor_timer(chan);
2844 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2847 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2848 l2cap_process_reqseq(chan, control->reqseq);
2852 case L2CAP_EV_RECV_FBIT:
/* F-bit answers our poll: stop monitoring, resume normal XMIT. */
2853 if (control && control->final) {
2854 __clear_monitor_timer(chan);
2855 if (chan->unacked_frames > 0)
2856 __set_retrans_timer(chan);
2857 chan->retry_count = 0;
2858 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): format spec "0x2.2%x" is malformed (upstream quirk);
 * the intended form is "0x%2.2x". Left byte-identical here.
 */
2859 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2862 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling — ignore. */
2865 case L2CAP_EV_MONITOR_TO:
2866 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2867 l2cap_send_rr_or_rnr(chan, 1);
2868 __set_monitor_timer(chan);
2869 chan->retry_count++;
/* Retry budget exhausted: give up on the link. */
2871 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a TX event to the handler for the channel's current ERTM
 * transmit state (XMIT or WAIT_F).
 */
2879 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2880 struct sk_buff_head *skbs, u8 event)
2882 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2883 chan, control, skbs, event, chan->tx_state);
2885 switch (chan->tx_state) {
2886 case L2CAP_TX_STATE_XMIT:
2887 l2cap_tx_state_xmit(chan, control, skbs, event);
2889 case L2CAP_TX_STATE_WAIT_F:
2890 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed the reqseq (and final bit) of a received frame into the TX state
 * machine so acknowledged frames can be released.
 */
2898 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2899 struct l2cap_ctrl *control)
2901 BT_DBG("chan %p, control %p", chan, control);
2902 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the final (F) bit of a received frame into the TX state
 * machine, without processing its reqseq.
 */
2905 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2906 struct l2cap_ctrl *control)
2908 BT_DBG("chan %p, control %p", chan, control);
2909 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2912 /* Copy frame to all raw sockets on that connection */
2913 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2915 struct sk_buff *nskb;
2916 struct l2cap_chan *chan;
2918 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under chan_lock and deliver a
 * clone of the skb to every RAW channel except the one the frame
 * arrived on.
 */
2920 mutex_lock(&conn->chan_lock);
2922 list_for_each_entry(chan, &conn->chan_l, list) {
2923 if (chan->chan_type != L2CAP_CHAN_RAW)
2926 /* Don't send frame to the channel it came from */
2927 if (bt_cb(skb)->l2cap.chan == chan)
2930 nskb = skb_clone(skb, GFP_KERNEL);
/* If the channel's recv op rejects the clone, it must be freed —
 * presumably handled on an elided line; verify in full source.
 */
2933 if (chan->ops->recv(chan, nskb))
2937 mutex_unlock(&conn->chan_lock);
2940 /* ---- L2CAP signalling commands ---- */
/* Build an skb carrying one L2CAP signalling command: L2CAP header +
 * command header + dlen bytes of payload.  Payload that does not fit in
 * conn->mtu is carried in continuation fragments chained on frag_list.
 * Returns the skb, or NULL on failure (elided error paths).
 */
2941 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2942 u8 ident, u16 dlen, void *data)
2944 struct sk_buff *skb, **frag;
2945 struct l2cap_cmd_hdr *cmd;
2946 struct l2cap_hdr *lh;
2949 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2950 conn, code, ident, dlen);
/* MTU must at least hold the two headers. */
2952 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2955 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2956 count = min_t(unsigned int, conn->mtu, len);
2958 skb = bt_skb_alloc(count, GFP_KERNEL);
2962 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2963 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2965 if (conn->hcon->type == LE_LINK)
2966 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2968 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2970 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2973 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first skb. */
2976 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2977 memcpy(skb_put(skb, count), data, count);
2983 /* Continuation fragments (no L2CAP header) */
2984 frag = &skb_shinfo(skb)->frag_list;
2986 count = min_t(unsigned int, conn->mtu, len);
2988 *frag = bt_skb_alloc(count, GFP_KERNEL);
2992 memcpy(skb_put(*frag, count), data, count);
2997 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total length and
 * store the option type, payload length, and value.  1/2/4-byte values
 * are read with unaligned little-endian loads; larger payloads are
 * returned as a pointer to the raw bytes cast into *val.
 */
3007 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3010 struct l2cap_conf_opt *opt = *ptr;
3013 len = L2CAP_CONF_OPT_SIZE + opt->len;
3021 *val = *((u8 *) opt->val);
3025 *val = get_unaligned_le16(opt->val);
3029 *val = get_unaligned_le32(opt->val);
/* Payload longer than 4 bytes: hand back a pointer, not a copy. */
3033 *val = (unsigned long) opt->val;
3037 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance *ptr past it.
 * The size argument bounds the remaining buffer space; oversized
 * options are silently skipped (preventing buffer overruns when
 * building config requests/responses).
 */
3041 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3043 struct l2cap_conf_opt *opt = *ptr;
3045 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3047 if (size < L2CAP_CONF_OPT_SIZE + len)
3055 *((u8 *) opt->val) = val;
3059 put_unaligned_le16(val, opt->val);
3063 put_unaligned_le32(val, opt->val);
/* len > 4: val is a pointer to the payload bytes. */
3067 memcpy(opt->val, (void *) val, len);
3071 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification from the channel's local QoS
 * parameters (ERTM uses the negotiated service type; streaming always
 * uses best-effort) and append it as an EFS config option.
 */
3074 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3076 struct l2cap_conf_efs efs;
3078 switch (chan->mode) {
3079 case L2CAP_MODE_ERTM:
3080 efs.id = chan->local_id;
3081 efs.stype = chan->local_stype;
3082 efs.msdu = cpu_to_le16(chan->local_msdu);
3083 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3084 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3085 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3088 case L2CAP_MODE_STREAMING:
3090 efs.stype = L2CAP_SERV_BESTEFFORT;
3091 efs.msdu = cpu_to_le16(chan->local_msdu);
3092 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3101 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3102 (unsigned long) &efs, size);
/* Deferred-work ack timer: if any received frames have not yet been
 * acknowledged, send an RR/RNR (poll bit clear).  Drops the channel
 * reference taken when the timer was armed.
 */
3105 static void l2cap_ack_timeout(struct work_struct *work)
3107 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3111 BT_DBG("chan %p", chan);
3113 l2cap_chan_lock(chan);
/* Frames received since the last ack we sent. */
3115 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3116 chan->last_acked_seq);
3119 l2cap_send_rr_or_rnr(chan, 0);
3121 l2cap_chan_unlock(chan);
3122 l2cap_chan_put(chan);
/* Reset a channel's sequence-number and queue state for ERTM or
 * streaming mode.  For ERTM proper, additionally initialize RX/TX
 * states and allocate the SREJ and retransmission sequence lists;
 * the srej_list is freed again if the retrans_list allocation fails.
 * Returns 0 or a negative errno (return lines elided in this view).
 */
3125 int l2cap_ertm_init(struct l2cap_chan *chan)
3129 chan->next_tx_seq = 0;
3130 chan->expected_tx_seq = 0;
3131 chan->expected_ack_seq = 0;
3132 chan->unacked_frames = 0;
3133 chan->buffer_seq = 0;
3134 chan->frames_sent = 0;
3135 chan->last_acked_seq = 0;
3137 chan->sdu_last_frag = NULL;
3140 skb_queue_head_init(&chan->tx_q);
/* AMP move state starts out on the BR/EDR controller, stable. */
3142 chan->local_amp_id = AMP_ID_BREDR;
3143 chan->move_id = AMP_ID_BREDR;
3144 chan->move_state = L2CAP_MOVE_STABLE;
3145 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM machinery below. */
3147 if (chan->mode != L2CAP_MODE_ERTM)
3150 chan->rx_state = L2CAP_RX_STATE_RECV;
3151 chan->tx_state = L2CAP_TX_STATE_XMIT;
3153 skb_queue_head_init(&chan->srej_q);
3155 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3159 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3161 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 */
3166 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3169 case L2CAP_MODE_STREAMING:
3170 case L2CAP_MODE_ERTM:
3171 if (l2cap_mode_supported(mode, remote_feat_mask))
3175 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when we expose the A2MP fixed
 * channel locally and the remote advertises the feature.
 */
3179 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3181 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3182 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow specification is usable only when we expose the A2MP
 * fixed channel locally and the remote advertises the feature.
 */
3185 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3187 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3188 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP controller they are derived from the controller's best-effort
 * flush timeout (clamped to 16 bits); on BR/EDR the spec defaults are
 * used.
 */
3191 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3192 struct l2cap_conf_rfc *rfc)
3194 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3195 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3197 /* Class 1 devices have must have ERTM timeouts
3198 * exceeding the Link Supervision Timeout. The
3199 * default Link Supervision Timeout for AMP
3200 * controllers is 10 seconds.
3202 * Class 1 devices use 0xffffffff for their
3203 * best-effort flush timeout, so the clamping logic
3204 * will result in a timeout that meets the above
3205 * requirement. ERTM timeouts are 16-bit values, so
3206 * the maximum timeout is 65.535 seconds.
3209 /* Convert timeout to milliseconds and round */
3210 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3212 /* This is the recommended formula for class 2 devices
3213 * that start ERTM timers when packets are sent to the
3216 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (cap value elided in this view). */
3218 if (ertm_to > 0xffff)
3221 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3222 rfc->monitor_timeout = rfc->retrans_timeout;
3224 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3225 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: if the requested window exceeds the default and
 * extended window size is supported, enable the extended control field;
 * otherwise clamp the window to the default.  ack_win starts equal to
 * tx_win and is narrowed later during configuration.
 */
3229 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3231 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3232 __l2cap_ews_supported(chan->conn)) {
3233 /* use extended control field */
3234 set_bit(FLAG_EXT_CTRL, &chan->flags);
3235 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3237 chan->tx_win = min_t(u16, chan->tx_win,
3238 L2CAP_DEFAULT_TX_WINDOW);
3239 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3241 chan->ack_win = chan->tx_win;
/* Build a Configuration Request for the channel into data (bounded by
 * data_size; every option append is clamped against endptr - ptr).
 * Emits MTU, RFC, and — when enabled — EFS, EWS, and FCS options
 * according to the channel mode.  Returns the request length
 * (return lines elided in this view).
 */
3244 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3246 struct l2cap_conf_req *req = data;
3247 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3248 void *ptr = req->data;
3249 void *endptr = data + data_size;
3252 BT_DBG("chan %p", chan);
/* Mode selection is only done for the very first request. */
3254 if (chan->num_conf_req || chan->num_conf_rsp)
3257 switch (chan->mode) {
3258 case L2CAP_MODE_STREAMING:
3259 case L2CAP_MODE_ERTM:
3260 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3263 if (__l2cap_efs_supported(chan->conn))
3264 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3268 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only send an MTU option when it differs from the default. */
3273 if (chan->imtu != L2CAP_DEFAULT_MTU)
3274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3276 switch (chan->mode) {
3277 case L2CAP_MODE_BASIC:
/* An RFC option is pointless if the peer supports neither ERTM
 * nor streaming mode.
 */
3281 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3282 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3285 rfc.mode = L2CAP_MODE_BASIC;
3287 rfc.max_transmit = 0;
3288 rfc.retrans_timeout = 0;
3289 rfc.monitor_timeout = 0;
3290 rfc.max_pdu_size = 0;
3292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3293 (unsigned long) &rfc, endptr - ptr);
3296 case L2CAP_MODE_ERTM:
3297 rfc.mode = L2CAP_MODE_ERTM;
3298 rfc.max_transmit = chan->max_tx;
3300 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap the PDU size so a full PDU (headers + SDU-len + FCS)
 * still fits in the link MTU.
 */
3302 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3303 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3305 rfc.max_pdu_size = cpu_to_le16(size);
3307 l2cap_txwin_setup(chan);
3309 rfc.txwin_size = min_t(u16, chan->tx_win,
3310 L2CAP_DEFAULT_TX_WINDOW);
3312 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3313 (unsigned long) &rfc, endptr - ptr);
3315 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3316 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* A window larger than the RFC field allows goes in EWS. */
3318 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3319 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3320 chan->tx_win, endptr - ptr);
3322 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3323 if (chan->fcs == L2CAP_FCS_NONE ||
3324 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3325 chan->fcs = L2CAP_FCS_NONE;
3326 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3327 chan->fcs, endptr - ptr);
3331 case L2CAP_MODE_STREAMING:
3332 l2cap_txwin_setup(chan);
3333 rfc.mode = L2CAP_MODE_STREAMING;
3335 rfc.max_transmit = 0;
3336 rfc.retrans_timeout = 0;
3337 rfc.monitor_timeout = 0;
3339 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3340 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3342 rfc.max_pdu_size = cpu_to_le16(size);
3344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3345 (unsigned long) &rfc, endptr - ptr);
3347 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3348 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3350 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3351 if (chan->fcs == L2CAP_FCS_NONE ||
3352 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3353 chan->fcs = L2CAP_FCS_NONE;
3354 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3355 chan->fcs, endptr - ptr);
3360 req->dcid = cpu_to_le16(chan->dcid);
3361 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request in chan->conf_req and
 * build the Configuration Response into data (bounded by data_size).
 * Each recognized option is validated (olen checks guard the memcpy
 * calls against malformed lengths); unknown non-hint options are
 * collected and answered with CONF_UNKNOWN.  Returns the response
 * length or a negative errno (return lines elided in this view).
 */
3366 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3368 struct l2cap_conf_rsp *rsp = data;
3369 void *ptr = rsp->data;
3370 void *endptr = data + data_size;
3371 void *req = chan->conf_req;
3372 int len = chan->conf_len;
3373 int type, hint, olen;
3375 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3376 struct l2cap_conf_efs efs;
3378 u16 mtu = L2CAP_DEFAULT_MTU;
3379 u16 result = L2CAP_CONF_SUCCESS;
3382 BT_DBG("chan %p", chan);
/* --- Pass 1: decode every option in the request --- */
3384 while (len >= L2CAP_CONF_OPT_SIZE) {
3385 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may ignore if unknown. */
3389 hint = type & L2CAP_CONF_HINT;
3390 type &= L2CAP_CONF_MASK;
3393 case L2CAP_CONF_MTU:
3399 case L2CAP_CONF_FLUSH_TO:
3402 chan->flush_to = val;
3405 case L2CAP_CONF_QOS:
3408 case L2CAP_CONF_RFC:
/* Length check before copying — rejects malformed options. */
3409 if (olen != sizeof(rfc))
3411 memcpy(&rfc, (void *) val, olen);
3414 case L2CAP_CONF_FCS:
3417 if (val == L2CAP_FCS_NONE)
3418 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3421 case L2CAP_CONF_EFS:
3422 if (olen != sizeof(efs))
3425 memcpy(&efs, (void *) val, olen);
3428 case L2CAP_CONF_EWS:
/* EWS requires A2MP support; refuse the connection otherwise. */
3431 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3432 return -ECONNREFUSED;
3433 set_bit(FLAG_EXT_CTRL, &chan->flags);
3434 set_bit(CONF_EWS_RECV, &chan->conf_state);
3435 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3436 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
3442 result = L2CAP_CONF_UNKNOWN;
3443 *((u8 *) ptr++) = type;
3448 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* --- Mode reconciliation between our mode and the peer's RFC --- */
3451 switch (chan->mode) {
3452 case L2CAP_MODE_STREAMING:
3453 case L2CAP_MODE_ERTM:
3454 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3455 chan->mode = l2cap_select_mode(rfc.mode,
3456 chan->conn->feat_mask);
3461 if (__l2cap_efs_supported(chan->conn))
3462 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3464 return -ECONNREFUSED;
3467 if (chan->mode != rfc.mode)
3468 return -ECONNREFUSED;
/* Mode mismatch: unacceptable; a second round also fails hard. */
3474 if (chan->mode != rfc.mode) {
3475 result = L2CAP_CONF_UNACCEPT;
3476 rfc.mode = chan->mode;
3478 if (chan->num_conf_rsp == 1)
3479 return -ECONNREFUSED;
3481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3482 (unsigned long) &rfc, endptr - ptr);
3485 if (result == L2CAP_CONF_SUCCESS) {
3486 /* Configure output options and let the other side know
3487 * which ones we don't like. */
3489 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3490 result = L2CAP_CONF_UNACCEPT;
3493 set_bit(CONF_MTU_DONE, &chan->conf_state);
3495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must match ours unless one side is NOTRAFIC. */
3498 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3499 efs.stype != L2CAP_SERV_NOTRAFIC &&
3500 efs.stype != chan->local_stype) {
3502 result = L2CAP_CONF_UNACCEPT;
3504 if (chan->num_conf_req >= 1)
3505 return -ECONNREFUSED;
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3509 (unsigned long) &efs, endptr - ptr);
3511 /* Send PENDING Conf Rsp */
3512 result = L2CAP_CONF_PENDING;
3513 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3518 case L2CAP_MODE_BASIC:
3519 chan->fcs = L2CAP_FCS_NONE;
3520 set_bit(CONF_MODE_DONE, &chan->conf_state);
3523 case L2CAP_MODE_ERTM:
3524 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3525 chan->remote_tx_win = rfc.txwin_size;
3527 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3529 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size against our link MTU. */
3531 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3532 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3533 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3534 rfc.max_pdu_size = cpu_to_le16(size);
3535 chan->remote_mps = size;
3537 __l2cap_set_ertm_timeouts(chan, &rfc);
3539 set_bit(CONF_MODE_DONE, &chan->conf_state);
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3542 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3545 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3546 chan->remote_id = efs.id;
3547 chan->remote_stype = efs.stype;
3548 chan->remote_msdu = le16_to_cpu(efs.msdu);
3549 chan->remote_flush_to =
3550 le32_to_cpu(efs.flush_to);
3551 chan->remote_acc_lat =
3552 le32_to_cpu(efs.acc_lat);
3553 chan->remote_sdu_itime =
3554 le32_to_cpu(efs.sdu_itime);
3555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3557 (unsigned long) &efs, endptr - ptr);
3561 case L2CAP_MODE_STREAMING:
3562 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3563 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3564 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3565 rfc.max_pdu_size = cpu_to_le16(size);
3566 chan->remote_mps = size;
3568 set_bit(CONF_MODE_DONE, &chan->conf_state);
3570 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3571 (unsigned long) &rfc, endptr - ptr);
3576 result = L2CAP_CONF_UNACCEPT;
3578 memset(&rfc, 0, sizeof(rfc));
3579 rfc.mode = chan->mode;
3582 if (result == L2CAP_CONF_SUCCESS)
3583 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3585 rsp->scid = cpu_to_le16(chan->dcid);
3586 rsp->result = cpu_to_le16(result);
3587 rsp->flags = cpu_to_le16(0);
/* Parse a Configuration Response from the peer and build the follow-up
 * Configuration Request into data (bounded by size).  On SUCCESS or
 * PENDING results, latch the negotiated ERTM/streaming parameters into
 * the channel.  Returns the new request length or a negative errno.
 */
3592 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3593 void *data, size_t size, u16 *result)
3595 struct l2cap_conf_req *req = data;
3596 void *ptr = req->data;
3597 void *endptr = data + size;
3600 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3601 struct l2cap_conf_efs efs;
3603 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3605 while (len >= L2CAP_CONF_OPT_SIZE) {
3606 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3611 case L2CAP_CONF_MTU:
/* Too-small MTU: mark unacceptable, propose our minimum. */
3614 if (val < L2CAP_DEFAULT_MIN_MTU) {
3615 *result = L2CAP_CONF_UNACCEPT;
3616 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3619 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3623 case L2CAP_CONF_FLUSH_TO:
3626 chan->flush_to = val;
3627 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3628 chan->flush_to, endptr - ptr);
3631 case L2CAP_CONF_RFC:
/* Length check guards the memcpy against malformed options. */
3632 if (olen != sizeof(rfc))
3634 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not have its mode changed by the peer. */
3635 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3636 rfc.mode != chan->mode)
3637 return -ECONNREFUSED;
3639 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3640 (unsigned long) &rfc, endptr - ptr);
3643 case L2CAP_CONF_EWS:
3646 chan->ack_win = min_t(u16, val, chan->ack_win);
3647 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3648 chan->tx_win, endptr - ptr);
3651 case L2CAP_CONF_EFS:
3652 if (olen != sizeof(efs))
3654 memcpy(&efs, (void *)val, olen);
3655 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3656 efs.stype != L2CAP_SERV_NOTRAFIC &&
3657 efs.stype != chan->local_stype)
3658 return -ECONNREFUSED;
3659 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3660 (unsigned long) &efs, endptr - ptr);
3663 case L2CAP_CONF_FCS:
3666 if (*result == L2CAP_CONF_PENDING)
3667 if (val == L2CAP_FCS_NONE)
3668 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be reconciled with a different peer mode. */
3674 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3675 return -ECONNREFUSED;
3677 chan->mode = rfc.mode;
3679 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3681 case L2CAP_MODE_ERTM:
3682 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3683 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3684 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Without the extended control field, the RFC window bounds
 * our ack window.
 */
3685 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3686 chan->ack_win = min_t(u16, chan->ack_win,
3689 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3690 chan->local_msdu = le16_to_cpu(efs.msdu);
3691 chan->local_sdu_itime =
3692 le32_to_cpu(efs.sdu_itime);
3693 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3694 chan->local_flush_to =
3695 le32_to_cpu(efs.flush_to);
3699 case L2CAP_MODE_STREAMING:
3700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3704 req->dcid = cpu_to_le16(chan->dcid);
3705 req->flags = cpu_to_le16(0);
/* Build a bare Configuration Response header (scid/result/flags, no
 * options) for the channel.  Returns the response length (return line
 * elided in this view).
 */
3710 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3711 u16 result, u16 flags)
3713 struct l2cap_conf_rsp *rsp = data;
3714 void *ptr = rsp->data;
3716 BT_DBG("chan %p", chan);
3718 rsp->scid = cpu_to_le16(chan->dcid);
3719 rsp->result = cpu_to_le16(result);
3720 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. by a userspace defer-setup
 * decision), advertising our MTU, MPS, and initial credits.
 */
3725 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3727 struct l2cap_le_conn_rsp rsp;
3728 struct l2cap_conn *conn = chan->conn;
3730 BT_DBG("chan %p", chan);
3732 rsp.dcid = cpu_to_le16(chan->scid);
3733 rsp.mtu = cpu_to_le16(chan->imtu);
3734 rsp.mps = cpu_to_le16(chan->mps);
3735 rsp.credits = cpu_to_le16(chan->rx_credits);
3736 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3738 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or Create Channel) Response with
 * a SUCCESS result, then kick off configuration by sending our first
 * Configuration Request — unless one was already sent.
 */
3742 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3744 struct l2cap_conn_rsp rsp;
3745 struct l2cap_conn *conn = chan->conn;
3749 rsp.scid = cpu_to_le16(chan->dcid);
3750 rsp.dcid = cpu_to_le16(chan->scid);
3751 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP
 * (the distinguishing condition is on an elided line).
 */
3755 rsp_code = L2CAP_CREATE_CHAN_RSP;
3757 rsp_code = L2CAP_CONN_RSP;
3759 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3761 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* test_and_set guarantees a single Config Request is sent. */
3763 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3766 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3767 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3768 chan->num_conf_req++;
/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and latch them into the channel.
 * Sane defaults cover peers that omitted the options entirely.
 */
3771 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3775 /* Use sane default values in case a misbehaving remote device
3776 * did not send an RFC or extended window size option.
3778 u16 txwin_ext = chan->ack_win;
3779 struct l2cap_conf_rfc rfc = {
3781 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3782 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3783 .max_pdu_size = cpu_to_le16(chan->imtu),
3784 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3787 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
3789 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3792 while (len >= L2CAP_CONF_OPT_SIZE) {
3793 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3798 case L2CAP_CONF_RFC:
3799 if (olen != sizeof(rfc))
3801 memcpy(&rfc, (void *)val, olen);
3803 case L2CAP_CONF_EWS:
3812 case L2CAP_MODE_ERTM:
3813 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3814 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3815 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control: EWS bounds the window; otherwise the
 * RFC txwin_size does.
 */
3816 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3817 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3819 chan->ack_win = min_t(u16, chan->ack_win,
3822 case L2CAP_MODE_STREAMING:
3823 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (matched by ident), treat the feature-mask
 * exchange as done and proceed with starting channels.
 */
3827 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3828 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3831 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject truncated packets before touching rej fields. */
3833 if (cmd_len < sizeof(*rej))
3836 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3839 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3840 cmd->ident == conn->info_ident) {
3841 cancel_delayed_work(&conn->info_timer);
3843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3844 conn->info_ident = 0;
3846 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, run security checks, create and register the new channel,
 * and send the Connection Response (PEND or SUCCESS).  If the feature
 * exchange has not completed, an Information Request is issued first.
 * Returns the new channel or NULL on rejection.
 */
3852 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3853 struct l2cap_cmd_hdr *cmd,
3854 u8 *data, u8 rsp_code, u8 amp_id)
3856 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3857 struct l2cap_conn_rsp rsp;
3858 struct l2cap_chan *chan = NULL, *pchan;
3859 int result, status = L2CAP_CS_NO_INFO;
3861 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3862 __le16 psm = req->psm;
3864 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3866 /* Check if we have socket listening on psm */
3867 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3868 &conn->hcon->dst, ACL_LINK);
3870 result = L2CAP_CR_BAD_PSM;
3874 mutex_lock(&conn->chan_lock);
3875 l2cap_chan_lock(pchan);
3877 /* Check if the ACL is secure enough (if not SDP) */
3878 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3879 !hci_conn_check_link_mode(conn->hcon)) {
3880 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3881 result = L2CAP_CR_SEC_BLOCK;
3885 result = L2CAP_CR_NO_MEM;
3887 /* Check if we already have channel with that dcid */
3888 if (__l2cap_get_chan_by_dcid(conn, scid))
3891 chan = pchan->ops->new_connection(pchan);
3895 /* For certain devices (ex: HID mouse), support for authentication,
3896 * pairing and bonding is optional. For such devices, inorder to avoid
3897 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3898 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3900 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3902 bacpy(&chan->src, &conn->hcon->src);
3903 bacpy(&chan->dst, &conn->hcon->dst);
3904 chan->src_type = bdaddr_src_type(conn->hcon);
3905 chan->dst_type = bdaddr_dst_type(conn->hcon);
3908 chan->local_amp_id = amp_id;
3910 __l2cap_chan_add(conn, chan);
3914 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident for the (possibly deferred) response. */
3916 chan->ident = cmd->ident;
3918 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3919 if (l2cap_chan_check_security(chan, false)) {
3920 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3921 l2cap_state_change(chan, BT_CONNECT2);
3922 result = L2CAP_CR_PEND;
3923 status = L2CAP_CS_AUTHOR_PEND;
3924 chan->ops->defer(chan);
3926 /* Force pending result for AMP controllers.
3927 * The connection will succeed after the
3928 * physical link is up.
3930 if (amp_id == AMP_ID_BREDR) {
3931 l2cap_state_change(chan, BT_CONFIG);
3932 result = L2CAP_CR_SUCCESS;
3934 l2cap_state_change(chan, BT_CONNECT2);
3935 result = L2CAP_CR_PEND;
3937 status = L2CAP_CS_NO_INFO;
3940 l2cap_state_change(chan, BT_CONNECT2);
3941 result = L2CAP_CR_PEND;
3942 status = L2CAP_CS_AUTHEN_PEND;
3945 l2cap_state_change(chan, BT_CONNECT2);
3946 result = L2CAP_CR_PEND;
3947 status = L2CAP_CS_NO_INFO;
3951 l2cap_chan_unlock(pchan);
3952 mutex_unlock(&conn->chan_lock);
3953 l2cap_chan_put(pchan);
3956 rsp.scid = cpu_to_le16(scid);
3957 rsp.dcid = cpu_to_le16(dcid);
3958 rsp.result = cpu_to_le16(result);
3959 rsp.status = cpu_to_le16(status);
3960 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* PEND + NO_INFO means the feature exchange is still outstanding:
 * start it now and arm the info timer.
 */
3962 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3963 struct l2cap_info_req info;
3964 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3966 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3967 conn->info_ident = l2cap_get_ident(conn);
3969 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3971 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3972 sizeof(info), &info);
/* Immediately successful connection: begin configuration. */
3975 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3976 result == L2CAP_CR_SUCCESS) {
3978 set_bit(CONF_REQ_SENT, &chan->conf_state);
3979 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3980 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3981 chan->num_conf_req++;
/* Signalling handler for a Connection Request PDU: validate the length,
 * notify the management layer of the (first) connection, then delegate
 * to l2cap_connect().
 */
3987 static int l2cap_connect_req(struct l2cap_conn *conn,
3988 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3990 struct hci_dev *hdev = conn->hcon->hdev;
3991 struct hci_conn *hcon = conn->hcon;
3993 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device-connected to mgmt only once per connection. */
3997 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3998 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3999 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4000 hci_dev_unlock(hdev);
4002 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response).  Find the
 * channel by scid (or by command ident while still pending), then on
 * SUCCESS move it to BT_CONFIG and send our first Configuration
 * Request; on PEND mark it pending; on failure tear it down.
 */
4006 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4007 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4010 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4011 u16 scid, dcid, result, status;
4012 struct l2cap_chan *chan;
4016 if (cmd_len < sizeof(*rsp))
4019 scid = __le16_to_cpu(rsp->scid);
4020 dcid = __le16_to_cpu(rsp->dcid);
4021 result = __le16_to_cpu(rsp->result);
4022 status = __le16_to_cpu(rsp->status);
4024 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4025 dcid, scid, result, status);
4027 mutex_lock(&conn->chan_lock);
4030 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid match: the response may carry ident only (scid == 0). */
4036 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Guard against racing teardown: only proceed if a reference
 * can still be taken.
 */
4043 chan = l2cap_chan_hold_unless_zero(chan);
4051 l2cap_chan_lock(chan);
4054 case L2CAP_CR_SUCCESS:
4055 l2cap_state_change(chan, BT_CONFIG);
4058 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4060 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4063 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4064 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4065 chan->num_conf_req++;
4069 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4073 l2cap_chan_del(chan, ECONNREFUSED);
4077 l2cap_chan_unlock(chan);
4078 l2cap_chan_put(chan);
4081 mutex_unlock(&conn->chan_lock);
4086 static inline void set_default_fcs(struct l2cap_chan *chan)
4088 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Basic/LE modes never use an FCS; otherwise default to CRC16
 * unless the peer asked for no FCS during configuration.
 */
4091 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4092 chan->fcs = L2CAP_FCS_NONE;
4093 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4094 chan->fcs = L2CAP_FCS_CRC16;
/* Complete the locally-pending EFS negotiation: clear the local
 * pending flag, mark output configuration done, and send a SUCCESS
 * Configuration Response.
 */
4097 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4098 u8 ident, u16 flags)
4100 struct l2cap_conn *conn = chan->conn;
4102 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4105 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4106 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4108 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4109 l2cap_build_conf_rsp(chan, data,
4110 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing back the
 * offending source/destination CIDs.
 */
4113 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4116 struct l2cap_cmd_rej_cid rej;
4118 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4119 rej.scid = __cpu_to_le16(scid);
4120 rej.dcid = __cpu_to_le16(dcid);
4122 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configuration Request PDU.  Options may arrive split across
 * several requests (continuation flag); they are accumulated in
 * chan->conf_req until complete, then parsed and answered.  Once both
 * input and output configuration are done, ERTM state is initialized
 * and the channel becomes ready.
 */
4125 static inline int l2cap_config_req(struct l2cap_conn *conn,
4126 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4129 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4132 struct l2cap_chan *chan;
4135 if (cmd_len < sizeof(*req))
4138 dcid = __le16_to_cpu(req->dcid);
4139 flags = __le16_to_cpu(req->flags);
4141 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4143 chan = l2cap_get_chan_by_scid(conn, dcid);
4145 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is valid only while connecting/configuring/connected. */
4149 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4150 chan->state != BT_CONNECTED) {
4151 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4156 /* Reject if config buffer is too small. */
4157 len = cmd_len - sizeof(*req);
4158 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4159 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4160 l2cap_build_conf_rsp(chan, rsp,
4161 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
4166 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4167 chan->conf_len += len;
4169 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4170 /* Incomplete config. Send empty response. */
4171 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4172 l2cap_build_conf_rsp(chan, rsp,
4173 L2CAP_CONF_SUCCESS, flags), rsp);
4177 /* Complete config. */
4178 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4180 l2cap_send_disconn_req(chan, ECONNRESET);
4184 chan->ident = cmd->ident;
4185 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4186 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4187 chan->num_conf_rsp++;
4189 /* Reset config buffer. */
4192 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish bringing the channel up. */
4195 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4196 set_default_fcs(chan);
4198 if (chan->mode == L2CAP_MODE_ERTM ||
4199 chan->mode == L2CAP_MODE_STREAMING)
4200 err = l2cap_ertm_init(chan);
4203 l2cap_send_disconn_req(chan, -err);
4205 l2cap_chan_ready(chan);
/* We have answered the peer but not yet sent our own request. */
4210 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4212 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4213 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4214 chan->num_conf_req++;
4217 /* Got Conf Rsp PENDING from remote side and assume we sent
4218 Conf Rsp PENDING in the code above */
4219 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4220 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4222 /* check compatibility */
4224 /* Send rsp for BR/EDR channel */
4226 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4228 chan->ident = cmd->ident;
4232 l2cap_chan_unlock(chan);
4233 l2cap_chan_put(chan);
/*
 * Handle an incoming L2CAP Configuration Response on a BR/EDR link.
 * Validates the PDU length, looks up the channel by scid, then branches
 * on the result code: SUCCESS caches remote RFC options, PENDING may
 * trigger an EFS response / AMP logical-link creation, UNACCEPT re-sends
 * a corrected Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP),
 * and other results tear the channel down with ECONNRESET.
 * NOTE(review): this listing has elided lines (gaps in the embedded
 * numbering); comments cover only the statements visible here.
 */
4237 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4238 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4241 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4242 u16 scid, flags, result;
4243 struct l2cap_chan *chan;
/* Remaining bytes after the fixed header are configuration options */
4244 int len = cmd_len - sizeof(*rsp);
/* Reject malformed PDUs that are shorter than the fixed response header */
4247 if (cmd_len < sizeof(*rsp))
4250 scid = __le16_to_cpu(rsp->scid);
4251 flags = __le16_to_cpu(rsp->flags);
4252 result = __le16_to_cpu(rsp->result);
4254 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
/* Takes a reference and the channel lock; dropped at the bottom */
4257 chan = l2cap_get_chan_by_scid(conn, scid);
4262 case L2CAP_CONF_SUCCESS:
4263 l2cap_conf_rfc_get(chan, rsp->data, len);
4264 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4267 case L2CAP_CONF_PENDING:
4268 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4270 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4273 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4274 buf, sizeof(buf), &result);
/* Negative parse result: unrecoverable options, drop the link */
4276 l2cap_send_disconn_req(chan, ECONNRESET);
4280 if (!chan->hs_hcon) {
4281 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4284 if (l2cap_check_efs(chan)) {
/* AMP case: defer the response until the logical link exists */
4285 amp_create_logical_link(chan);
4286 chan->ident = cmd->ident;
4292 case L2CAP_CONF_UNACCEPT:
4293 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Rejected options larger than our request buffer cannot be renegotiated */
4296 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4297 l2cap_send_disconn_req(chan, ECONNRESET);
4301 /* throw out any old stored conf requests */
4302 result = L2CAP_CONF_SUCCESS;
4303 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4304 req, sizeof(req), &result);
4306 l2cap_send_disconn_req(chan, ECONNRESET);
/* Retry configuration with the adjusted options */
4310 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4311 L2CAP_CONF_REQ, len, req);
4312 chan->num_conf_req++;
4313 if (result != L2CAP_CONF_SUCCESS)
4319 l2cap_chan_set_err(chan, ECONNRESET);
4321 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4322 l2cap_send_disconn_req(chan, ECONNRESET);
4326 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4329 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finalize FCS/ERTM and mark channel ready */
4331 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4332 set_default_fcs(chan);
4334 if (chan->mode == L2CAP_MODE_ERTM ||
4335 chan->mode == L2CAP_MODE_STREAMING)
4336 err = l2cap_ertm_init(chan);
4339 l2cap_send_disconn_req(chan, -err);
4341 l2cap_chan_ready(chan);
/* Release the lock and reference taken by l2cap_get_chan_by_scid() */
4345 l2cap_chan_unlock(chan);
4346 l2cap_chan_put(chan);
/*
 * Handle an incoming L2CAP Disconnection Request: validate the exact PDU
 * size, find the local channel addressed by the peer's dcid, echo a
 * Disconnection Response, then shut down and delete the channel with
 * ECONNRESET. Sends a Command Reject if the CID is unknown.
 * NOTE(review): listing is elided (gaps in embedded numbering); comments
 * cover only the visible statements.
 */
4350 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4351 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4354 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4355 struct l2cap_disconn_rsp rsp;
4357 struct l2cap_chan *chan;
4359 if (cmd_len != sizeof(*req))
4362 scid = __le16_to_cpu(req->scid);
4363 dcid = __le16_to_cpu(req->dcid);
4365 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4367 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look the channel up by dcid */
4369 chan = __l2cap_get_chan_by_scid(conn, dcid);
4371 mutex_unlock(&conn->chan_lock);
4372 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a reference across the teardown so the channel cannot vanish */
4376 l2cap_chan_hold(chan);
4377 l2cap_chan_lock(chan);
4379 rsp.dcid = cpu_to_le16(chan->scid);
4380 rsp.scid = cpu_to_le16(chan->dcid);
4381 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4383 chan->ops->set_shutdown(chan);
4385 l2cap_chan_del(chan, ECONNRESET);
4387 chan->ops->close(chan);
4389 l2cap_chan_unlock(chan);
4390 l2cap_chan_put(chan);
4392 mutex_unlock(&conn->chan_lock);
/*
 * Handle an incoming L2CAP Disconnection Response: the peer confirmed our
 * earlier Disconnection Request, so delete and close the channel (error 0).
 * Only acts if the channel is still in BT_DISCONN; otherwise the response
 * is stale and is ignored after dropping locks/refs.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4397 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4398 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4401 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4403 struct l2cap_chan *chan;
4405 if (cmd_len != sizeof(*rsp))
4408 scid = __le16_to_cpu(rsp->scid);
4409 dcid = __le16_to_cpu(rsp->dcid);
4411 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4413 mutex_lock(&conn->chan_lock);
4415 chan = __l2cap_get_chan_by_scid(conn, scid);
4417 mutex_unlock(&conn->chan_lock);
4421 l2cap_chan_hold(chan);
4422 l2cap_chan_lock(chan);
/* Response arriving in any state other than BT_DISCONN is stale */
4424 if (chan->state != BT_DISCONN) {
4425 l2cap_chan_unlock(chan);
4426 l2cap_chan_put(chan);
4427 mutex_unlock(&conn->chan_lock);
/* Clean teardown: 0 means no error reported to the socket layer */
4431 l2cap_chan_del(chan, 0);
4433 chan->ops->close(chan);
4435 l2cap_chan_unlock(chan);
4436 l2cap_chan_put(chan);
4438 mutex_unlock(&conn->chan_lock);
/*
 * Handle an L2CAP Information Request. Answers two known queries:
 * FEAT_MASK (our feature bitmask, extended with EXT_FLOW/EXT_WINDOW when
 * the A2MP fixed channel is locally supported) and FIXED_CHAN (8-byte
 * fixed-channel map, only byte 0 populated). Any other type gets a
 * NOTSUPP response.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4443 static inline int l2cap_information_req(struct l2cap_conn *conn,
4444 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4447 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4450 if (cmd_len != sizeof(*req))
4453 type = __le16_to_cpu(req->type);
4455 BT_DBG("type 0x%4.4x", type);
4457 if (type == L2CAP_IT_FEAT_MASK) {
4459 u32 feat_mask = l2cap_feat_mask;
4460 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4461 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4462 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4464 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Advertise extended flow spec / window only with local A2MP support */
4466 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4467 feat_mask |= L2CAP_FEAT_EXT_FLOW
4468 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data is unaligned wire data; use the unaligned accessor */
4470 put_unaligned_le32(feat_mask, rsp->data);
4471 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4473 } else if (type == L2CAP_IT_FIXED_CHAN) {
4475 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4477 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4478 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4479 rsp->data[0] = conn->local_fixed_chan;
/* Remaining 7 bytes of the fixed-channel map are reserved/zero */
4480 memset(rsp->data + 1, 0, 7);
4481 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4484 struct l2cap_info_rsp rsp;
4485 rsp.type = cpu_to_le16(type);
4486 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4487 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/*
 * Handle an L2CAP Information Response to one of our own requests.
 * Since info req/rsp are not bound to a channel, the ident must match
 * conn->info_ident and the done-flag must not be set yet. On a FEAT_MASK
 * reply that advertises fixed channels, chains a FIXED_CHAN request;
 * otherwise marks info exchange done and kicks l2cap_conn_start().
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4494 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4495 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4498 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4501 if (cmd_len < sizeof(*rsp))
4504 type = __le16_to_cpu(rsp->type);
4505 result = __le16_to_cpu(rsp->result);
4507 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4509 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4510 if (cmd->ident != conn->info_ident ||
4511 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* Response arrived in time: stop the info retransmission timer */
4514 cancel_delayed_work(&conn->info_timer);
4516 if (result != L2CAP_IR_SUCCESS) {
4517 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4518 conn->info_ident = 0;
4520 l2cap_conn_start(conn);
4526 case L2CAP_IT_FEAT_MASK:
4527 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with a FIXED_CHAN query */
4529 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4530 struct l2cap_info_req req;
4531 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4533 conn->info_ident = l2cap_get_ident(conn);
4535 l2cap_send_cmd(conn, conn->info_ident,
4536 L2CAP_INFO_REQ, sizeof(req), &req);
4538 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4539 conn->info_ident = 0;
4541 l2cap_conn_start(conn);
4545 case L2CAP_IT_FIXED_CHAN:
4546 conn->remote_fixed_chan = rsp->data[0];
4547 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4548 conn->info_ident = 0;
4550 l2cap_conn_start(conn);
/*
 * Handle an AMP Create Channel Request. amp_id 0 (AMP_ID_BREDR) degrades
 * to an ordinary BR/EDR l2cap_connect(); any other id must name a powered
 * HCI_AMP controller with an existing AMP_LINK to the peer, in which case
 * the new channel is parked on the amp_mgr pending logical-link setup.
 * On a bad controller id, replies with result L2CAP_CR_BAD_AMP.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4557 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4558 struct l2cap_cmd_hdr *cmd,
4559 u16 cmd_len, void *data)
4561 struct l2cap_create_chan_req *req = data;
4562 struct l2cap_create_chan_rsp rsp;
4563 struct l2cap_chan *chan;
4564 struct hci_dev *hdev;
4567 if (cmd_len != sizeof(*req))
/* A2MP must be locally enabled to accept Create Channel at all */
4570 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4573 psm = le16_to_cpu(req->psm);
4574 scid = le16_to_cpu(req->scid);
4576 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4578 /* For controller id 0 make BR/EDR connection */
4579 if (req->amp_id == AMP_ID_BREDR) {
4580 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4585 /* Validate AMP controller id */
4586 hdev = hci_dev_get(req->amp_id);
4590 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4595 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4598 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4599 struct hci_conn *hs_hcon;
/* The physical AMP link to the peer must already exist */
4601 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4605 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4610 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4612 mgr->bredr_chan = chan;
4613 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP channels */
4614 chan->fcs = L2CAP_FCS_NONE;
4615 conn->mtu = hdev->block_mtu;
/* Error path: reject the request with BAD_AMP */
4624 rsp.scid = cpu_to_le16(scid);
4625 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4626 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4628 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/*
 * Send an L2CAP Move Channel Request asking the peer to move this channel
 * to controller dest_amp_id. Records the signalling ident on the channel
 * (to match the response) and arms the move timeout.
 */
4634 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4636 struct l2cap_move_chan_req req;
4639 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4641 ident = l2cap_get_ident(chan->conn);
4642 chan->ident = ident;
4644 req.icid = cpu_to_le16(chan->scid);
4645 req.dest_amp_id = dest_amp_id;
4647 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4650 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send an L2CAP Move Channel Response with the given result code, reusing
 * the ident saved from the peer's request and the peer's CID (dcid).
 */
4653 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4655 struct l2cap_move_chan_rsp rsp;
4657 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4659 rsp.icid = cpu_to_le16(chan->dcid);
4660 rsp.result = cpu_to_le16(result);
4662 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/*
 * Send an L2CAP Move Channel Confirm with a fresh ident (stored so the
 * Confirm Response can be matched) and arm the move timeout while waiting
 * for that response.
 */
4666 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4668 struct l2cap_move_chan_cfm cfm;
4670 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4672 chan->ident = l2cap_get_ident(chan->conn);
4674 cfm.icid = cpu_to_le16(chan->scid);
4675 cfm.result = cpu_to_le16(result);
4677 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4680 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/*
 * Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when no
 * matching channel could be found, so there is no chan to hang state on.
 */
4683 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4685 struct l2cap_move_chan_cfm cfm;
4687 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4689 cfm.icid = cpu_to_le16(icid);
4690 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4692 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/*
 * Send a Move Channel Confirm Response echoing the icid, using the ident
 * from the peer's Confirm so the PDUs pair up.
 */
4696 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4699 struct l2cap_move_chan_cfm_rsp rsp;
4701 BT_DBG("icid 0x%4.4x", icid);
4703 rsp.icid = cpu_to_le16(icid);
4704 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/*
 * Detach the channel from its AMP high-speed logical link by clearing the
 * hs_hchan/hs_hcon pointers. Actual link release is still a placeholder.
 */
4707 static void __release_logical_link(struct l2cap_chan *chan)
4709 chan->hs_hchan = NULL;
4710 chan->hs_hcon = NULL;
4712 /* Placeholder - release the logical link */
/*
 * React to a failed AMP logical-link setup. If the channel never reached
 * BT_CONNECTED the whole channel creation failed, so disconnect it.
 * Otherwise the failure aborts an in-progress move: a responder answers
 * NOT_SUPP, while an initiator cleans up and (outside the two waiting
 * states) sends an UNCONFIRMED Move Confirm.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4715 static void l2cap_logical_fail(struct l2cap_chan *chan)
4717 /* Logical link setup failed */
4718 if (chan->state != BT_CONNECTED) {
4719 /* Create channel failure, disconnect */
4720 l2cap_send_disconn_req(chan, ECONNRESET);
4724 switch (chan->move_role) {
4725 case L2CAP_MOVE_ROLE_RESPONDER:
4726 l2cap_move_done(chan);
4727 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4729 case L2CAP_MOVE_ROLE_INITIATOR:
4730 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4731 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4732 /* Remote has only sent pending or
4733 * success responses, clean up
4735 l2cap_move_done(chan);
4738 /* Other amp move states imply that the move
4739 * has already aborted
4741 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/*
 * Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, link the hs_hcon to our l2cap_conn, send the deferred EFS
 * configure response, and — if the inbound configuration already finished
 * — set FCS defaults, init ERTM, and mark the channel ready (or
 * disconnect on ERTM init failure).
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4746 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4747 struct hci_chan *hchan)
4749 struct l2cap_conf_rsp rsp;
4751 chan->hs_hchan = hchan;
4752 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident still holds the config-request ident we deferred */
4754 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4756 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4759 set_default_fcs(chan);
4761 err = l2cap_ertm_init(chan);
4763 l2cap_send_disconn_req(chan, -err);
4765 l2cap_chan_ready(chan);
/*
 * Advance an in-progress channel move once the AMP logical link is ready.
 * WAIT_LOGICAL_COMP just transitions to waiting for the peer's success
 * response; WAIT_LOGICAL_CFM either defers on local busy, confirms (as
 * initiator) or responds success (as responder). Any other move state is
 * unexpected: the link is released and the move state reset to STABLE.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4769 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4770 struct hci_chan *hchan)
4772 chan->hs_hcon = hchan->conn;
4773 chan->hs_hcon->l2cap_data = chan->conn;
4775 BT_DBG("move_state %d", chan->move_state);
4777 switch (chan->move_state) {
4778 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4779 /* Move confirm will be sent after a success
4780 * response is received
4782 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4784 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4785 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4786 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4787 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4788 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4789 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4790 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4791 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4792 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4796 /* Move was not in expected state, free the channel */
4797 __release_logical_link(chan);
4799 chan->move_state = L2CAP_MOVE_STABLE;
/*
 * Logical-link completion callback (caller holds the channel lock, per the
 * original comment). On failure status, runs the failure path and releases
 * the link. On success, dispatches to the create-completion path for
 * channels not yet connected (unless they are BR/EDR-local) or to the
 * move-completion path for connected channels.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4803 /* Call with chan locked */
4804 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4807 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4810 l2cap_logical_fail(chan);
4811 __release_logical_link(chan);
4815 if (chan->state != BT_CONNECTED) {
4816 /* Ignore logical link if channel is on BR/EDR */
4817 if (chan->local_amp_id != AMP_ID_BREDR)
4818 l2cap_logical_finish_create(chan, hchan);
4820 l2cap_logical_finish_move(chan, hchan);
/*
 * Begin moving a channel between controllers as the initiator. Channels
 * currently on BR/EDR only move when policy prefers AMP, then wait for
 * physical-link preparation (placeholder). Channels already on AMP set up
 * the move and immediately request a move back (dest id 0 = BR/EDR).
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4824 void l2cap_move_start(struct l2cap_chan *chan)
4826 BT_DBG("chan %p", chan);
4828 if (chan->local_amp_id == AMP_ID_BREDR) {
4829 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4831 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4832 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4833 /* Placeholder - start physical link setup */
4835 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4836 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4838 l2cap_move_setup(chan);
/* dest 0 = AMP_ID_BREDR: move the channel back to BR/EDR */
4839 l2cap_send_move_chan_req(chan, 0);
/*
 * Continue channel creation after physical-link setup. Outgoing channels
 * (BT_CONNECT) either send a Create Channel Request on success or revert
 * to a plain BR/EDR Connect Request. Incoming channels answer the pending
 * Create Channel with SUCCESS or NO_MEM, and on success move to BT_CONFIG
 * and kick off configuration.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4843 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4844 u8 local_amp_id, u8 remote_amp_id)
4846 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4847 local_amp_id, remote_amp_id);
/* AMP channels never use an FCS */
4849 chan->fcs = L2CAP_FCS_NONE;
4851 /* Outgoing channel on AMP */
4852 if (chan->state == BT_CONNECT) {
4853 if (result == L2CAP_CR_SUCCESS) {
4854 chan->local_amp_id = local_amp_id;
4855 l2cap_send_create_chan_req(chan, remote_amp_id);
4857 /* Revert to BR/EDR connect */
4858 l2cap_send_conn_req(chan);
4864 /* Incoming channel on AMP */
4865 if (__l2cap_no_conn_pending(chan)) {
4866 struct l2cap_conn_rsp rsp;
4868 rsp.scid = cpu_to_le16(chan->dcid);
4869 rsp.dcid = cpu_to_le16(chan->scid);
4871 if (result == L2CAP_CR_SUCCESS) {
4872 /* Send successful response */
4873 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4874 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4876 /* Send negative response */
4877 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4878 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident pairs this response with the original request */
4881 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4884 if (result == L2CAP_CR_SUCCESS) {
4885 l2cap_state_change(chan, BT_CONFIG);
4886 set_bit(CONF_REQ_SENT, &chan->conf_state);
4887 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4889 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4890 chan->num_conf_req++;
/*
 * As move initiator: prepare the channel for moving, record the intended
 * local controller, enter WAIT_RSP, and send the Move Channel Request.
 */
4895 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4898 l2cap_move_setup(chan);
4899 chan->move_id = local_amp_id;
4900 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4902 l2cap_send_move_chan_req(chan, remote_amp_id);
/*
 * As move responder after physical-link setup: if the logical hci_chan is
 * already BT_CONNECTED, bind it and answer SUCCESS (also notifying via
 * l2cap_logical_cfm); otherwise wait for logical-link confirmation. When
 * no logical link is available, answer NOT_ALLOWED.
 * NOTE(review): hchan acquisition is still a placeholder (hchan stays
 * NULL in the visible code); elided lines presumably guard the deref.
 */
4905 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4907 struct hci_chan *hchan = NULL;
4909 /* Placeholder - get hci_chan for logical link */
4912 if (hchan->state == BT_CONNECTED) {
4913 /* Logical link is ready to go */
4914 chan->hs_hcon = hchan->conn;
4915 chan->hs_hcon->l2cap_data = chan->conn;
4916 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4917 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4919 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4921 /* Wait for logical link to be ready */
4922 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4925 /* Logical link not available */
4926 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/*
 * Abort a channel move: a responder still owes the peer a response
 * (BAD_ID for -EINVAL, otherwise NOT_ALLOWED). Reset role/state to
 * NONE/STABLE and restart ERTM transmission on the current link.
 */
4930 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4932 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4934 if (result == -EINVAL)
4935 rsp_result = L2CAP_MR_BAD_ID;
4937 rsp_result = L2CAP_MR_NOT_ALLOWED;
4939 l2cap_send_move_chan_rsp(chan, rsp_result);
4942 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4943 chan->move_state = L2CAP_MOVE_STABLE;
4945 /* Restart data transmission */
4946 l2cap_ertm_send(chan);
/*
 * Physical-link completion callback (caller holds the channel lock, per
 * the original comment). Ignores dying channels; routes not-yet-connected
 * channels to the creation path, failures to move-cancel, and otherwise
 * dispatches by move role (initiate / respond / cancel).
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4949 /* Invoke with locked chan */
4950 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4952 u8 local_amp_id = chan->local_amp_id;
4953 u8 remote_amp_id = chan->remote_amp_id;
4955 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4956 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do */
4958 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4961 if (chan->state != BT_CONNECTED) {
4962 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4963 } else if (result != L2CAP_MR_SUCCESS) {
4964 l2cap_do_move_cancel(chan, result);
4966 switch (chan->move_role) {
4967 case L2CAP_MOVE_ROLE_INITIATOR:
4968 l2cap_do_move_initiate(chan, local_amp_id,
4971 case L2CAP_MOVE_ROLE_RESPONDER:
4972 l2cap_do_move_respond(chan, result);
4975 l2cap_do_move_cancel(chan, result);
/*
 * Handle an incoming Move Channel Request. After validating A2MP support,
 * the channel, its mode (ERTM/streaming only), policy and the destination
 * controller id, it detects move collisions (larger bd_addr wins), then
 * becomes responder: a move to BR/EDR answers SUCCESS (or PEND when the
 * local side is busy), a move to an AMP answers PEND while physical-link
 * acceptance happens (placeholder).
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
4981 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4982 struct l2cap_cmd_hdr *cmd,
4983 u16 cmd_len, void *data)
4985 struct l2cap_move_chan_req *req = data;
4986 struct l2cap_move_chan_rsp rsp;
4987 struct l2cap_chan *chan;
4989 u16 result = L2CAP_MR_NOT_ALLOWED;
4991 if (cmd_len != sizeof(*req))
4994 icid = le16_to_cpu(req->icid);
4996 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4998 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
/* icid names the channel by its CID on the initiator == our dcid */
5001 chan = l2cap_get_chan_by_dcid(conn, icid);
5003 rsp.icid = cpu_to_le16(icid);
5004 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5005 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the request ident so l2cap_send_move_chan_rsp() can reuse it */
5010 chan->ident = cmd->ident;
5012 if (chan->scid < L2CAP_CID_DYN_START ||
5013 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5014 (chan->mode != L2CAP_MODE_ERTM &&
5015 chan->mode != L2CAP_MODE_STREAMING)) {
5016 result = L2CAP_MR_NOT_ALLOWED;
5017 goto send_move_response;
/* Moving to the controller we are already on makes no sense */
5020 if (chan->local_amp_id == req->dest_amp_id) {
5021 result = L2CAP_MR_SAME_ID;
5022 goto send_move_response;
5025 if (req->dest_amp_id != AMP_ID_BREDR) {
5026 struct hci_dev *hdev;
5027 hdev = hci_dev_get(req->dest_amp_id);
5028 if (!hdev || hdev->dev_type != HCI_AMP ||
5029 !test_bit(HCI_UP, &hdev->flags)) {
5033 result = L2CAP_MR_BAD_ID;
5034 goto send_move_response;
5039 /* Detect a move collision. Only send a collision response
5040 * if this side has "lost", otherwise proceed with the move.
5041 * The winner has the larger bd_addr.
5043 if ((__chan_is_moving(chan) ||
5044 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5045 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5046 result = L2CAP_MR_COLLISION;
5047 goto send_move_response;
5050 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5051 l2cap_move_setup(chan);
5052 chan->move_id = req->dest_amp_id;
5055 if (req->dest_amp_id == AMP_ID_BREDR) {
5056 /* Moving to BR/EDR */
5057 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5058 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5059 result = L2CAP_MR_PEND;
5061 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5062 result = L2CAP_MR_SUCCESS;
5065 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5066 /* Placeholder - uncomment when amp functions are available */
5067 /*amp_accept_physical(chan, req->dest_amp_id);*/
5068 result = L2CAP_MR_PEND;
5072 l2cap_send_move_chan_rsp(chan, result);
5074 l2cap_chan_unlock(chan);
5075 l2cap_chan_put(chan);
/*
 * Process a SUCCESS/PEND Move Channel Response as initiator. Re-arms the
 * ERTX timer on PEND, then steps the move state machine: waiting states
 * chain toward sending a CONFIRMED Move Confirm once both the peer's
 * success and the logical link are in place; in WAIT_RSP the hci_chan is
 * inspected directly (acquisition still a placeholder). Any other state
 * means the move failed and an UNCONFIRMED confirm is sent.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5080 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5082 struct l2cap_chan *chan;
5083 struct hci_chan *hchan = NULL;
5085 chan = l2cap_get_chan_by_scid(conn, icid);
/* Unknown icid: still obliged to send an unconfirmed confirm */
5087 l2cap_send_move_chan_cfm_icid(conn, icid);
5091 __clear_chan_timer(chan);
5092 if (result == L2CAP_MR_PEND)
5093 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5095 switch (chan->move_state) {
5096 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5097 /* Move confirm will be sent when logical link
5100 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5102 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5103 if (result == L2CAP_MR_PEND) {
5105 } else if (test_bit(CONN_LOCAL_BUSY,
5106 &chan->conn_state)) {
5107 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5109 /* Logical link is up or moving to BR/EDR,
5112 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5113 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5116 case L2CAP_MOVE_WAIT_RSP:
5118 if (result == L2CAP_MR_SUCCESS) {
5119 /* Remote is ready, send confirm immediately
5120 * after logical link is ready
5122 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5124 /* Both logical link and move success
5125 * are required to confirm
5127 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5130 /* Placeholder - get hci_chan for logical link */
5132 /* Logical link not available */
5133 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5137 /* If the logical link is not yet connected, do not
5138 * send confirmation.
5140 if (hchan->state != BT_CONNECTED)
5143 /* Logical link is already ready to go */
5145 chan->hs_hcon = hchan->conn;
5146 chan->hs_hcon->l2cap_data = chan->conn;
5148 if (result == L2CAP_MR_SUCCESS) {
5149 /* Can confirm now */
5150 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5152 /* Now only need move success
5155 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5158 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5161 /* Any other amp move state means the move failed. */
5162 chan->move_id = chan->local_amp_id;
5163 l2cap_move_done(chan);
5164 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5167 l2cap_chan_unlock(chan);
5168 l2cap_chan_put(chan);
/*
 * Process a failing Move Channel Response. The channel is found by the
 * signalling ident (icid is only a best guess when lookup fails). On a
 * COLLISION result the initiator flips to responder; any other failure
 * cancels the move. Ends by sending an UNCONFIRMED Move Confirm.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5171 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5174 struct l2cap_chan *chan;
5176 chan = l2cap_get_chan_by_ident(conn, ident);
5178 /* Could not locate channel, icid is best guess */
5179 l2cap_send_move_chan_cfm_icid(conn, icid);
5183 __clear_chan_timer(chan);
5185 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5186 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move */
5187 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5189 /* Cleanup - cancel move */
5190 chan->move_id = chan->local_amp_id;
5191 l2cap_move_done(chan);
5195 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5197 l2cap_chan_unlock(chan);
5198 l2cap_chan_put(chan);
/*
 * Handle a Move Channel Response PDU: validate length, then dispatch
 * SUCCESS/PEND results to l2cap_move_continue() and everything else to
 * l2cap_move_fail().
 */
5201 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5202 struct l2cap_cmd_hdr *cmd,
5203 u16 cmd_len, void *data)
5205 struct l2cap_move_chan_rsp *rsp = data;
5208 if (cmd_len != sizeof(*rsp))
5211 icid = le16_to_cpu(rsp->icid);
5212 result = le16_to_cpu(rsp->result);
5214 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5216 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5217 l2cap_move_continue(conn, icid, result);
5219 l2cap_move_fail(conn, cmd->ident, icid, result);
/*
 * Handle a Move Channel Confirm as responder. A confirm in
 * WAIT_CONFIRM commits the move (CONFIRMED adopts move_id, releasing the
 * logical link if we end up on BR/EDR; otherwise move_id is reset) and
 * completes it via l2cap_move_done(). A Confirm Response is always sent
 * back — even for an unknown icid, as the spec requires.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5224 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5225 struct l2cap_cmd_hdr *cmd,
5226 u16 cmd_len, void *data)
5228 struct l2cap_move_chan_cfm *cfm = data;
5229 struct l2cap_chan *chan;
5232 if (cmd_len != sizeof(*cfm))
5235 icid = le16_to_cpu(cfm->icid);
5236 result = le16_to_cpu(cfm->result);
5238 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5240 chan = l2cap_get_chan_by_dcid(conn, icid);
5242 /* Spec requires a response even if the icid was not found */
5243 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5247 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5248 if (result == L2CAP_MC_CONFIRMED) {
5249 chan->local_amp_id = chan->move_id;
5250 if (chan->local_amp_id == AMP_ID_BREDR)
5251 __release_logical_link(chan);
5253 chan->move_id = chan->local_amp_id;
5256 l2cap_move_done(chan);
5259 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5261 l2cap_chan_unlock(chan);
5262 l2cap_chan_put(chan);
/*
 * Handle a Move Channel Confirm Response as initiator: in
 * WAIT_CONFIRM_RSP the move is final — adopt move_id as the local
 * controller, release the logical link when landing on BR/EDR while one
 * is still attached, and finish via l2cap_move_done().
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5267 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5268 struct l2cap_cmd_hdr *cmd,
5269 u16 cmd_len, void *data)
5271 struct l2cap_move_chan_cfm_rsp *rsp = data;
5272 struct l2cap_chan *chan;
5275 if (cmd_len != sizeof(*rsp))
5278 icid = le16_to_cpu(rsp->icid);
5280 BT_DBG("icid 0x%4.4x", icid);
5282 chan = l2cap_get_chan_by_scid(conn, icid);
/* Response arrived: stop the move timeout */
5286 __clear_chan_timer(chan);
5288 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5289 chan->local_amp_id = chan->move_id;
5291 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5292 __release_logical_link(chan);
5294 l2cap_move_done(chan);
5297 l2cap_chan_unlock(chan);
5298 l2cap_chan_put(chan);
/*
 * Handle an LE Connection Parameter Update Request. Only valid when we
 * are the master; validates the PDU size, checks the proposed interval/
 * latency/timeout via hci_check_conn_params(), replies ACCEPTED or
 * REJECTED, and on acceptance pushes the update to the controller and
 * records the new parameters through mgmt_new_conn_param().
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5303 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5304 struct l2cap_cmd_hdr *cmd,
5305 u16 cmd_len, u8 *data)
5307 struct hci_conn *hcon = conn->hcon;
5308 struct l2cap_conn_param_update_req *req;
5309 struct l2cap_conn_param_update_rsp rsp;
5310 u16 min, max, latency, to_multiplier;
/* Only the master may grant parameter updates */
5313 if (hcon->role != HCI_ROLE_MASTER)
5316 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5319 req = (struct l2cap_conn_param_update_req *) data;
5320 min = __le16_to_cpu(req->min);
5321 max = __le16_to_cpu(req->max);
5322 latency = __le16_to_cpu(req->latency);
5323 to_multiplier = __le16_to_cpu(req->to_multiplier);
5325 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5326 min, max, latency, to_multiplier);
5328 memset(&rsp, 0, sizeof(rsp));
5330 err = hci_check_conn_params(min, max, latency, to_multiplier);
5332 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5334 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5336 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply to the link and report to the management interface */
5342 store_hint = hci_le_conn_update(hcon, min, max, latency,
5344 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5345 store_hint, min, max, latency,
/*
 * Handle an LE Credit Based Connection Response. A SUCCESS result must
 * carry sane values (mtu/mps >= 23, dcid inside the LE dynamic range and
 * not already in use) before the channel is made ready with the peer's
 * credits. AUTHENTICATION/ENCRYPTION failures retry at a higher security
 * level via SMP unless MITM protection already applied; anything else
 * deletes the channel with ECONNREFUSED.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5353 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5354 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5357 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5358 struct hci_conn *hcon = conn->hcon;
5359 u16 dcid, mtu, mps, credits, result;
5360 struct l2cap_chan *chan;
5363 if (cmd_len < sizeof(*rsp))
5366 dcid = __le16_to_cpu(rsp->dcid);
5367 mtu = __le16_to_cpu(rsp->mtu);
5368 mps = __le16_to_cpu(rsp->mps);
5369 credits = __le16_to_cpu(rsp->credits);
5370 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS; dcid must be LE-dynamic */
5372 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5373 dcid < L2CAP_CID_DYN_START ||
5374 dcid > L2CAP_CID_LE_DYN_END))
5377 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5378 dcid, mtu, mps, credits, result);
5380 mutex_lock(&conn->chan_lock);
/* Match the response to our pending request by signalling ident */
5382 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5390 l2cap_chan_lock(chan);
5393 case L2CAP_CR_SUCCESS:
/* Peer must not hand out a dcid we already track */
5394 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5402 chan->remote_mps = mps;
5403 chan->tx_credits = credits;
5404 l2cap_chan_ready(chan);
5407 case L2CAP_CR_AUTHENTICATION:
5408 case L2CAP_CR_ENCRYPTION:
5409 /* If we already have MITM protection we can't do
5412 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5413 l2cap_chan_del(chan, ECONNREFUSED);
5417 sec_level = hcon->sec_level + 1;
5418 if (chan->sec_level < sec_level)
5419 chan->sec_level = sec_level;
5421 /* We'll need to send a new Connect Request */
5422 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5424 smp_conn_security(hcon, chan->sec_level);
5428 l2cap_chan_del(chan, ECONNREFUSED);
5432 l2cap_chan_unlock(chan);
5435 mutex_unlock(&conn->chan_lock);
/*
 * Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline by reflecting the payload; unknown
 * opcodes are logged with BT_ERR. Note CONN_RSP and CREATE_CHAN_RSP share
 * a handler (deliberate fallthrough).
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5440 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5441 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5446 switch (cmd->code) {
5447 case L2CAP_COMMAND_REJ:
5448 l2cap_command_rej(conn, cmd, cmd_len, data);
5451 case L2CAP_CONN_REQ:
5452 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5455 case L2CAP_CONN_RSP:
5456 case L2CAP_CREATE_CHAN_RSP:
5457 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5460 case L2CAP_CONF_REQ:
5461 err = l2cap_config_req(conn, cmd, cmd_len, data);
5464 case L2CAP_CONF_RSP:
5465 l2cap_config_rsp(conn, cmd, cmd_len, data);
5468 case L2CAP_DISCONN_REQ:
5469 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5472 case L2CAP_DISCONN_RSP:
5473 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5476 case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back */
5477 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5480 case L2CAP_ECHO_RSP:
5483 case L2CAP_INFO_REQ:
5484 err = l2cap_information_req(conn, cmd, cmd_len, data);
5487 case L2CAP_INFO_RSP:
5488 l2cap_information_rsp(conn, cmd, cmd_len, data);
5491 case L2CAP_CREATE_CHAN_REQ:
5492 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5495 case L2CAP_MOVE_CHAN_REQ:
5496 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5499 case L2CAP_MOVE_CHAN_RSP:
5500 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5503 case L2CAP_MOVE_CHAN_CFM:
5504 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5507 case L2CAP_MOVE_CHAN_CFM_RSP:
5508 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5512 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/*
 * Handle an LE Credit Based Connection Request. Validates PDU size,
 * minimum mtu/mps (>= 23), the SPSM range (0x0001-0x00ff), finds a
 * listening channel for the PSM, enforces SMP security level, checks the
 * peer's scid is LE-dynamic and unused, then creates the child channel,
 * initializes LE flow control and either defers (FLAG_DEFER_SETUP, result
 * L2CAP_CR_PEND used internally to skip the response) or answers SUCCESS
 * with our mtu/mps/credits.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5520 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5521 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5524 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5525 struct l2cap_le_conn_rsp rsp;
5526 struct l2cap_chan *chan, *pchan;
5527 u16 dcid, scid, credits, mtu, mps;
5531 if (cmd_len != sizeof(*req))
5534 scid = __le16_to_cpu(req->scid);
5535 mtu = __le16_to_cpu(req->mtu);
5536 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE MTU/MPS */
5541 if (mtu < 23 || mps < 23)
5544 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5547 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5550 * Valid range: 0x0001-0x00ff
5552 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5554 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5555 result = L2CAP_CR_BAD_PSM;
5560 /* Check if we have socket listening on psm */
5561 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5562 &conn->hcon->dst, LE_LINK);
5564 result = L2CAP_CR_BAD_PSM;
5569 mutex_lock(&conn->chan_lock);
5570 l2cap_chan_lock(pchan);
5572 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5574 result = L2CAP_CR_AUTHENTICATION;
5576 goto response_unlock;
5579 /* Check for valid dynamic CID range */
5580 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5581 result = L2CAP_CR_INVALID_SCID;
5583 goto response_unlock;
5586 /* Check if we already have channel with that dcid */
5587 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5588 result = L2CAP_CR_SCID_IN_USE;
5590 goto response_unlock;
5593 chan = pchan->ops->new_connection(pchan);
5595 result = L2CAP_CR_NO_MEM;
5596 goto response_unlock;
5599 l2cap_le_flowctl_init(chan);
5601 bacpy(&chan->src, &conn->hcon->src);
5602 bacpy(&chan->dst, &conn->hcon->dst);
5603 chan->src_type = bdaddr_src_type(conn->hcon);
5604 chan->dst_type = bdaddr_dst_type(conn->hcon);
5608 chan->remote_mps = mps;
5609 chan->tx_credits = __le16_to_cpu(req->credits);
5611 __l2cap_chan_add(conn, chan);
5613 credits = chan->rx_credits;
5615 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5617 chan->ident = cmd->ident;
5619 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5620 l2cap_state_change(chan, BT_CONNECT2);
5621 /* The following result value is actually not defined
5622 * for LE CoC but we use it to let the function know
5623 * that it should bail out after doing its cleanup
5624 * instead of sending a response.
5626 result = L2CAP_CR_PEND;
5627 chan->ops->defer(chan);
5629 l2cap_chan_ready(chan);
5630 result = L2CAP_CR_SUCCESS;
5634 l2cap_chan_unlock(pchan);
5635 mutex_unlock(&conn->chan_lock);
5636 l2cap_chan_put(pchan);
/* Deferred setup: response will be sent later, not here */
5638 if (result == L2CAP_CR_PEND)
5643 rsp.mtu = cpu_to_le16(chan->imtu);
5644 rsp.mps = cpu_to_le16(chan->mps);
5650 rsp.dcid = cpu_to_le16(dcid);
5651 rsp.credits = cpu_to_le16(credits);
5652 rsp.result = cpu_to_le16(result);
5654 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/*
 * Handle an LE Flow Control Credit PDU. Adds the peer's credits to
 * tx_credits after checking the sum cannot exceed
 * LE_FLOWCTL_MAX_CREDITS (a spec violation that disconnects the channel
 * — but intentionally returns 0 so no Command Reject is also sent),
 * then drains the tx queue and resumes the channel if credits remain.
 * NOTE(review): listing is elided; comments cover only visible statements.
 */
5659 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5660 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5663 struct l2cap_le_credits *pkt;
5664 struct l2cap_chan *chan;
5665 u16 cid, credits, max_credits;
5667 if (cmd_len != sizeof(*pkt))
5670 pkt = (struct l2cap_le_credits *) data;
5671 cid = __le16_to_cpu(pkt->cid);
5672 credits = __le16_to_cpu(pkt->credits);
5674 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5676 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Peer may never push our credit count past the protocol maximum */
5680 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5681 if (credits > max_credits) {
5682 BT_ERR("LE credits overflow");
5683 l2cap_send_disconn_req(chan, ECONNRESET);
5685 /* Return 0 so that we don't trigger an unnecessary
5686 * command reject packet.
5691 chan->tx_credits += credits;
/* New credits may unblock queued outbound SDUs */
5693 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5694 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5698 if (chan->tx_credits)
5699 chan->ops->resume(chan);
5702 l2cap_chan_unlock(chan);
5703 l2cap_chan_put(chan);
/* Handle an LE Command Reject: look up the channel whose pending request
 * matches the rejected command's ident and tear it down with ECONNREFUSED.
 * NOTE(review): decimated listing -- some source lines are missing.
 */
5708 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5709 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5712 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5713 struct l2cap_chan *chan;
/* Packet must at least carry the fixed reject header. */
5715 if (cmd_len < sizeof(*rej))
5718 mutex_lock(&conn->chan_lock);
5720 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5724 l2cap_chan_lock(chan);
5725 l2cap_chan_del(chan, ECONNREFUSED);
5726 l2cap_chan_unlock(chan);
5729 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler based on the
 * command code. Unknown codes are logged; the caller sends the reject.
 * NOTE(review): decimated listing -- break statements and some cases'
 * surrounding lines are missing from view.
 */
5733 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5734 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5739 switch (cmd->code) {
5740 case L2CAP_COMMAND_REJ:
5741 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5744 case L2CAP_CONN_PARAM_UPDATE_REQ:
5745 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5748 case L2CAP_CONN_PARAM_UPDATE_RSP:
5751 case L2CAP_LE_CONN_RSP:
5752 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5755 case L2CAP_LE_CONN_REQ:
5756 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5759 case L2CAP_LE_CREDITS:
5760 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5763 case L2CAP_DISCONN_REQ:
5764 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5767 case L2CAP_DISCONN_RSP:
5768 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5772 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: sanity-check the
 * link type and command header, dispatch via l2cap_le_sig_cmd(), and send
 * a Command Reject if the handler returned an error.
 * NOTE(review): decimated listing -- drop/free paths are not visible.
 */
5780 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5781 struct sk_buff *skb)
5783 struct hci_conn *hcon = conn->hcon;
5784 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5788 if (hcon->type != LE_LINK)
5791 if (skb->len < L2CAP_CMD_HDR_SIZE)
5794 cmd = (void *) skb->data;
5795 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5797 len = le16_to_cpu(cmd->len);
5799 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Length must match the remaining payload and ident must be non-zero. */
5801 if (len != skb->len || !cmd->ident) {
5802 BT_DBG("corrupted command");
5806 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5808 struct l2cap_cmd_rej_unk rej;
5810 BT_ERR("Wrong link type (%d)", err);
5812 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5813 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb on the BR/EDR signaling channel. Unlike the LE path,
 * one skb may hold several commands, so the header is parsed in a loop
 * and each command dispatched to l2cap_bredr_sig_cmd().
 * NOTE(review): decimated listing -- loop advance of 'data'/'len' past
 * each command payload is not fully visible.
 */
5821 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5822 struct sk_buff *skb)
5824 struct hci_conn *hcon = conn->hcon;
5825 u8 *data = skb->data;
5827 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of every signaling frame. */
5830 l2cap_raw_recv(conn, skb);
5832 if (hcon->type != ACL_LINK)
5835 while (len >= L2CAP_CMD_HDR_SIZE) {
5837 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5838 data += L2CAP_CMD_HDR_SIZE;
5839 len -= L2CAP_CMD_HDR_SIZE;
5841 cmd_len = le16_to_cpu(cmd.len);
5843 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command must fit in what is left and carry a non-zero ident. */
5846 if (cmd_len > len || !cmd.ident) {
5847 BT_DBG("corrupted command");
5851 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5853 struct l2cap_cmd_rej_unk rej;
5855 BT_ERR("Wrong link type (%d)", err);
5857 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5858 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The FCS is computed over the L2CAP header (recovered via the negative
 * hdr_size offset) plus the payload, and compared to the trailing
 * little-endian value that skb_trim has just cut off.
 * NOTE(review): decimated listing -- return statements not visible.
 */
5870 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5872 u16 our_fcs, rcv_fcs;
/* Extended control fields imply the larger extended header. */
5875 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5876 hdr_size = L2CAP_EXT_HDR_SIZE;
5878 hdr_size = L2CAP_ENH_HDR_SIZE;
5880 if (chan->fcs == L2CAP_FCS_CRC16) {
5881 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5882 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5883 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5885 if (our_fcs != rcv_fcs)
/* Send the frame that carries the F-bit in response to a peer poll:
 * RNR when locally busy, otherwise pending I-frames, otherwise an RR.
 * NOTE(review): decimated listing -- some assignments (e.g. sframe flag)
 * are missing between the visible lines.
 */
5891 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5893 struct l2cap_ctrl control;
5895 BT_DBG("chan %p", chan);
5897 memset(&control, 0, sizeof(control));
5900 control.reqseq = chan->buffer_seq;
5901 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Local busy: tell the peer to hold off with an RNR. */
5903 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5904 control.super = L2CAP_SUPER_RNR;
5905 l2cap_send_sframe(chan, &control);
5908 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5909 chan->unacked_frames > 0)
5910 __set_retrans_timer(chan);
5912 /* Send pending iframes */
5913 l2cap_ertm_send(chan);
/* If no frame carried the F-bit above, send an explicit RR with it. */
5915 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5916 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5917 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5920 control.super = L2CAP_SUPER_RR;
5921 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list during SDU reassembly, keeping
 * *last_frag pointing at the tail so appends stay O(1), and updating
 * skb's aggregate len/data_len/truesize accounting.
 * NOTE(review): decimated listing -- the else branch structure is partly
 * missing between the visible lines.
 */
5925 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5926 struct sk_buff **last_frag)
5928 /* skb->len reflects data in skb as well as all fragments
5929 * skb->data_len reflects only data in fragments
5931 if (!skb_has_frag_list(skb))
5932 skb_shinfo(skb)->frag_list = new_frag;
5934 new_frag->next = NULL;
5936 (*last_frag)->next = new_frag;
5937 *last_frag = new_frag;
/* Keep the head skb's totals consistent with the appended fragment. */
5939 skb->len += new_frag->len;
5940 skb->data_len += new_frag->len;
5941 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the frame's SAR bits:
 * unsegmented frames are delivered directly; START frames pull the SDU
 * length and begin accumulation in chan->sdu; CONTINUE/END frames are
 * appended, and the complete SDU is handed to chan->ops->recv().
 * On error the partial SDU is freed and the reassembly state reset.
 * NOTE(review): decimated listing -- case labels for SAR_END, error
 * labels and several returns are missing between the visible lines.
 */
5944 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5945 struct l2cap_ctrl *control)
5949 switch (control->sar) {
5950 case L2CAP_SAR_UNSEGMENTED:
5954 err = chan->ops->recv(chan, skb);
5957 case L2CAP_SAR_START:
/* START frame must carry the 2-byte SDU length prefix. */
5961 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5964 chan->sdu_len = get_unaligned_le16(skb->data);
5965 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU may not exceed our negotiated MTU. */
5967 if (chan->sdu_len > chan->imtu) {
5972 if (skb->len >= chan->sdu_len)
5976 chan->sdu_last_frag = skb;
5982 case L2CAP_SAR_CONTINUE:
5986 append_skb_frag(chan->sdu, skb,
5987 &chan->sdu_last_frag);
/* A CONTINUE frame must not complete or overflow the SDU. */
5990 if (chan->sdu->len >= chan->sdu_len)
6000 append_skb_frag(chan->sdu, skb,
6001 &chan->sdu_last_frag);
6004 if (chan->sdu->len != chan->sdu_len)
6007 err = chan->ops->recv(chan, chan->sdu);
6010 /* Reassembly complete */
6012 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
6020 kfree_skb(chan->sdu);
6022 chan->sdu_last_frag = NULL;
/* Re-segment pending data after a channel move changes the usable MTU.
 * NOTE(review): only the signature is visible in this listing -- body
 * missing; presumably a placeholder/no-op here, verify in full source.
 */
6029 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM tx state machine.
 * Only meaningful for ERTM channels; other modes return immediately.
 */
6035 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6039 if (chan->mode != L2CAP_MODE_ERTM)
/* Translate the boolean into the corresponding tx state-machine event. */
6042 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6043 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames to
 * l2cap_reassemble_sdu() until a sequence gap (or local busy) stops us.
 * When the queue empties, return the rx state machine to RECV and ack.
 * NOTE(review): decimated listing -- loop exit and error handling lines
 * are missing between the visible lines.
 */
6046 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6049 /* Pass sequential frames to l2cap_reassemble_sdu()
6050 * until a gap is encountered.
6053 BT_DBG("chan %p", chan);
6055 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6056 struct sk_buff *skb;
6057 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6058 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6060 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6065 skb_unlink(skb, &chan->srej_q);
6066 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6067 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* All missing frames recovered: resume normal reception. */
6072 if (skb_queue_empty(&chan->srej_q)) {
6073 chan->rx_state = L2CAP_RX_STATE_RECV;
6074 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, look up the frame to
 * retransmit, enforce the max_tx retry limit, then retransmit -- with
 * F-bit handling when the SREJ carried a poll or final bit.
 * NOTE(review): decimated listing -- returns, some braces and the skb
 * NULL check around line 6096 are missing from view.
 */
6080 static void l2cap_handle_srej(struct l2cap_chan *chan,
6081 struct l2cap_ctrl *control)
6083 struct sk_buff *skb;
6085 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq references a frame never sent: protocol
 * violation, disconnect. */
6087 if (control->reqseq == chan->next_tx_seq) {
6088 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6089 l2cap_send_disconn_req(chan, ECONNRESET);
6093 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6096 BT_DBG("Seq %d not available for retransmission",
/* Too many retransmissions of this frame: give up on the link. */
6101 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6102 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6103 l2cap_send_disconn_req(chan, ECONNRESET);
6107 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6109 if (control->poll) {
6110 l2cap_pass_to_tx(chan, control);
6112 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6113 l2cap_retransmit(chan, control);
6114 l2cap_ertm_send(chan);
6116 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6117 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6118 chan->srej_save_reqseq = control->reqseq;
6121 l2cap_pass_to_tx_fbit(chan, control);
6123 if (control->final) {
/* Ignore a duplicate SREJ that was already acted upon. */
6124 if (chan->srej_save_reqseq != control->reqseq ||
6125 !test_and_clear_bit(CONN_SREJ_ACT,
6127 l2cap_retransmit(chan, control);
6129 l2cap_retransmit(chan, control);
6130 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6131 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6132 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq, enforce the retry
 * limit on the first rejected frame, then retransmit all unacked frames
 * (suppressed for a duplicate final-REJ already acted on).
 * NOTE(review): decimated listing -- returns and closing braces are
 * missing between the visible lines.
 */
6138 static void l2cap_handle_rej(struct l2cap_chan *chan,
6139 struct l2cap_ctrl *control)
6141 struct sk_buff *skb;
6143 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq references a frame never sent. */
6145 if (control->reqseq == chan->next_tx_seq) {
6146 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6147 l2cap_send_disconn_req(chan, ECONNRESET);
6151 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6153 if (chan->max_tx && skb &&
6154 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6155 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6156 l2cap_send_disconn_req(chan, ECONNRESET);
6160 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6162 l2cap_pass_to_tx(chan, control);
6164 if (control->final) {
/* Retransmit only if this final-REJ was not already handled. */
6165 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6166 l2cap_retransmit_all(chan, control);
6168 l2cap_retransmit_all(chan, control);
6169 l2cap_ertm_send(chan);
6170 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6171 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify the txseq of a received I-frame relative to the rx window:
 * expected, duplicate, unexpected (gap), or invalid -- with separate
 * SREJ-aware classifications while in the SREJ_SENT rx state. The
 * return value drives the rx state machines' handling of the frame.
 * NOTE(review): decimated listing -- closing braces of some if-blocks
 * are missing between the visible lines.
 */
6175 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6177 BT_DBG("chan %p, txseq %d", chan, txseq);
6179 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6180 chan->expected_tx_seq);
/* While SREJs are outstanding, classify against the SREJ bookkeeping. */
6182 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6183 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6185 /* See notes below regarding "double poll" and
6188 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6189 BT_DBG("Invalid/Ignore - after SREJ");
6190 return L2CAP_TXSEQ_INVALID_IGNORE;
6192 BT_DBG("Invalid - in window after SREJ sent");
6193 return L2CAP_TXSEQ_INVALID;
6197 if (chan->srej_list.head == txseq) {
6198 BT_DBG("Expected SREJ");
6199 return L2CAP_TXSEQ_EXPECTED_SREJ;
6202 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6203 BT_DBG("Duplicate SREJ - txseq already stored");
6204 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6207 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6208 BT_DBG("Unexpected SREJ - not requested");
6209 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
/* Normal-state classification below. */
6213 if (chan->expected_tx_seq == txseq) {
6214 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6216 BT_DBG("Invalid - txseq outside tx window");
6217 return L2CAP_TXSEQ_INVALID;
6220 return L2CAP_TXSEQ_EXPECTED;
6224 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6225 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6226 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6227 return L2CAP_TXSEQ_DUPLICATE;
6230 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6231 /* A source of invalid packets is a "double poll" condition,
6232 * where delays cause us to send multiple poll packets. If
6233 * the remote stack receives and processes both polls,
6234 * sequence numbers can wrap around in such a way that a
6235 * resent frame has a sequence number that looks like new data
6236 * with a sequence gap. This would trigger an erroneous SREJ
6239 * Fortunately, this is impossible with a tx window that's
6240 * less than half of the maximum sequence number, which allows
6241 * invalid frames to be safely ignored.
6243 * With tx window sizes greater than half of the tx window
6244 * maximum, the frame is invalid and cannot be ignored. This
6245 * causes a disconnect.
6248 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6249 BT_DBG("Invalid/Ignore - txseq outside tx window");
6250 return L2CAP_TXSEQ_INVALID_IGNORE;
6252 BT_DBG("Invalid - txseq outside tx window");
6253 return L2CAP_TXSEQ_INVALID;
6256 BT_DBG("Unexpected - txseq indicates missing frames");
6257 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM rx state machine handler for the normal RECV state. Dispatches
 * on the event (I-frame or one of the four S-frames); I-frames are
 * further dispatched on their txseq classification -- delivered,
 * queued behind an SREJ, discarded as duplicate, or treated as invalid.
 * Frames not consumed (skb_in_use stays false) are freed at the end.
 * NOTE(review): decimated listing -- breaks, skb_in_use assignments and
 * kfree_skb at the tail are missing between the visible lines.
 */
6261 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6262 struct l2cap_ctrl *control,
6263 struct sk_buff *skb, u8 event)
6265 struct l2cap_ctrl local_control;
6267 bool skb_in_use = false;
6269 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6273 case L2CAP_EV_RECV_IFRAME:
6274 switch (l2cap_classify_txseq(chan, control->txseq)) {
6275 case L2CAP_TXSEQ_EXPECTED:
6276 l2cap_pass_to_tx(chan, control);
/* In local busy we drop the frame; recovery will re-request it. */
6278 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6279 BT_DBG("Busy, discarding expected seq %d",
6284 chan->expected_tx_seq = __next_seq(chan,
6287 chan->buffer_seq = chan->expected_tx_seq;
6290 /* l2cap_reassemble_sdu may free skb, hence invalidate
6291 * control, so make a copy in advance to use it after
6292 * l2cap_reassemble_sdu returns and to avoid the race
6293 * condition, for example:
6295 * The current thread calls:
6296 * l2cap_reassemble_sdu
6297 * chan->ops->recv == l2cap_sock_recv_cb
6298 * __sock_queue_rcv_skb
6299 * Another thread calls:
6303 * Then the current thread tries to access control, but
6304 * it was freed by skb_free_datagram.
6306 local_control = *control;
6307 err = l2cap_reassemble_sdu(chan, skb, control);
6311 if (local_control.final) {
6312 if (!test_and_clear_bit(CONN_REJ_ACT,
6313 &chan->conn_state)) {
6314 local_control.final = 0;
6315 l2cap_retransmit_all(chan, &local_control);
6316 l2cap_ertm_send(chan);
6320 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6321 l2cap_send_ack(chan);
6323 case L2CAP_TXSEQ_UNEXPECTED:
6324 l2cap_pass_to_tx(chan, control);
6326 /* Can't issue SREJ frames in the local busy state.
6327 * Drop this frame, it will be seen as missing
6328 * when local busy is exited.
6330 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6331 BT_DBG("Busy, discarding unexpected seq %d",
6336 /* There was a gap in the sequence, so an SREJ
6337 * must be sent for each missing frame. The
6338 * current frame is stored for later use.
6340 skb_queue_tail(&chan->srej_q, skb);
6342 BT_DBG("Queued %p (queue len %d)", skb,
6343 skb_queue_len(&chan->srej_q));
6345 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6346 l2cap_seq_list_clear(&chan->srej_list);
6347 l2cap_send_srej(chan, control->txseq);
/* Enter recovery: subsequent frames go through SREJ_SENT handling. */
6349 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6351 case L2CAP_TXSEQ_DUPLICATE:
6352 l2cap_pass_to_tx(chan, control);
6354 case L2CAP_TXSEQ_INVALID_IGNORE:
6356 case L2CAP_TXSEQ_INVALID:
6358 l2cap_send_disconn_req(chan, ECONNRESET);
6362 case L2CAP_EV_RECV_RR:
6363 l2cap_pass_to_tx(chan, control);
6364 if (control->final) {
6365 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6367 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6368 !__chan_is_moving(chan)) {
6370 l2cap_retransmit_all(chan, control);
6373 l2cap_ertm_send(chan);
6374 } else if (control->poll) {
6375 l2cap_send_i_or_rr_or_rnr(chan);
6377 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6378 &chan->conn_state) &&
6379 chan->unacked_frames)
6380 __set_retrans_timer(chan);
6382 l2cap_ertm_send(chan);
6385 case L2CAP_EV_RECV_RNR:
6386 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6387 l2cap_pass_to_tx(chan, control);
6388 if (control && control->poll) {
6389 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6390 l2cap_send_rr_or_rnr(chan, 0);
6392 __clear_retrans_timer(chan);
6393 l2cap_seq_list_clear(&chan->retrans_list);
6395 case L2CAP_EV_RECV_REJ:
6396 l2cap_handle_rej(chan, control);
6398 case L2CAP_EV_RECV_SREJ:
6399 l2cap_handle_srej(chan, control);
/* Free any frame the handlers above did not take ownership of. */
6405 if (skb && !skb_in_use) {
6406 BT_DBG("Freeing %p", skb);
/* ERTM rx state machine handler for the SREJ_SENT (recovery) state.
 * Incoming I-frames are queued in srej_q by txseq classification; each
 * SREJ-requested frame that arrives may let queued in-order frames be
 * delivered via l2cap_rx_queued_iframes(). S-frames are handled with
 * SREJ-tail responses instead of plain acks.
 * NOTE(review): decimated listing -- breaks, skb_in_use assignments and
 * kfree_skb at the tail are missing between the visible lines.
 */
6413 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6414 struct l2cap_ctrl *control,
6415 struct sk_buff *skb, u8 event)
6418 u16 txseq = control->txseq;
6419 bool skb_in_use = false;
6421 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6425 case L2CAP_EV_RECV_IFRAME:
6426 switch (l2cap_classify_txseq(chan, txseq)) {
6427 case L2CAP_TXSEQ_EXPECTED:
6428 /* Keep frame for reassembly later */
6429 l2cap_pass_to_tx(chan, control);
6430 skb_queue_tail(&chan->srej_q, skb);
6432 BT_DBG("Queued %p (queue len %d)", skb,
6433 skb_queue_len(&chan->srej_q));
6435 chan->expected_tx_seq = __next_seq(chan, txseq);
6437 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The head of the SREJ list arrived: drop it from the list. */
6438 l2cap_seq_list_pop(&chan->srej_list);
6440 l2cap_pass_to_tx(chan, control);
6441 skb_queue_tail(&chan->srej_q, skb);
6443 BT_DBG("Queued %p (queue len %d)", skb,
6444 skb_queue_len(&chan->srej_q));
6446 err = l2cap_rx_queued_iframes(chan);
6451 case L2CAP_TXSEQ_UNEXPECTED:
6452 /* Got a frame that can't be reassembled yet.
6453 * Save it for later, and send SREJs to cover
6454 * the missing frames.
6456 skb_queue_tail(&chan->srej_q, skb);
6458 BT_DBG("Queued %p (queue len %d)", skb,
6459 skb_queue_len(&chan->srej_q));
6461 l2cap_pass_to_tx(chan, control);
6462 l2cap_send_srej(chan, control->txseq);
6464 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6465 /* This frame was requested with an SREJ, but
6466 * some expected retransmitted frames are
6467 * missing. Request retransmission of missing
6470 skb_queue_tail(&chan->srej_q, skb);
6472 BT_DBG("Queued %p (queue len %d)", skb,
6473 skb_queue_len(&chan->srej_q));
6475 l2cap_pass_to_tx(chan, control);
6476 l2cap_send_srej_list(chan, control->txseq);
6478 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6479 /* We've already queued this frame. Drop this copy. */
6480 l2cap_pass_to_tx(chan, control);
6482 case L2CAP_TXSEQ_DUPLICATE:
6483 /* Expecting a later sequence number, so this frame
6484 * was already received. Ignore it completely.
6487 case L2CAP_TXSEQ_INVALID_IGNORE:
6489 case L2CAP_TXSEQ_INVALID:
6491 l2cap_send_disconn_req(chan, ECONNRESET);
6495 case L2CAP_EV_RECV_RR:
6496 l2cap_pass_to_tx(chan, control);
6497 if (control->final) {
6498 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6500 if (!test_and_clear_bit(CONN_REJ_ACT,
6501 &chan->conn_state)) {
6503 l2cap_retransmit_all(chan, control);
6506 l2cap_ertm_send(chan);
6507 } else if (control->poll) {
6508 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6509 &chan->conn_state) &&
6510 chan->unacked_frames) {
6511 __set_retrans_timer(chan);
/* Answer the poll with an SREJ for the tail of the missing list. */
6514 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6515 l2cap_send_srej_tail(chan);
6517 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6518 &chan->conn_state) &&
6519 chan->unacked_frames)
6520 __set_retrans_timer(chan);
6522 l2cap_send_ack(chan);
6525 case L2CAP_EV_RECV_RNR:
6526 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6527 l2cap_pass_to_tx(chan, control);
6528 if (control->poll) {
6529 l2cap_send_srej_tail(chan);
6531 struct l2cap_ctrl rr_control;
6532 memset(&rr_control, 0, sizeof(rr_control));
6533 rr_control.sframe = 1;
6534 rr_control.super = L2CAP_SUPER_RR;
6535 rr_control.reqseq = chan->buffer_seq;
6536 l2cap_send_sframe(chan, &rr_control);
6540 case L2CAP_EV_RECV_REJ:
6541 l2cap_handle_rej(chan, control);
6543 case L2CAP_EV_RECV_SREJ:
6544 l2cap_handle_srej(chan, control);
/* Free any frame not queued/consumed above. */
6548 if (skb && !skb_in_use) {
6549 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return rx to RECV, pick the MTU of the
 * link now carrying the channel (high-speed block MTU vs ACL MTU) and
 * re-segment pending data for it.
 * NOTE(review): decimated listing -- the hs_hcon conditional around
 * line 6563 is partly missing.
 */
6556 static int l2cap_finish_move(struct l2cap_chan *chan)
6558 BT_DBG("chan %p", chan);
6560 chan->rx_state = L2CAP_RX_STATE_RECV;
6563 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6565 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6567 return l2cap_resegment(chan);
/* Rx handler for WAIT_P during a channel move: on receiving the peer's
 * poll, rewind the tx side to the peer's reqseq, finish the move, and
 * answer the poll with the F-bit; then process the event normally.
 * NOTE(review): decimated listing -- the poll check and returns are
 * missing between the visible lines.
 */
6570 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6571 struct l2cap_ctrl *control,
6572 struct sk_buff *skb, u8 event)
6576 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6582 l2cap_process_reqseq(chan, control->reqseq);
6584 if (!skb_queue_empty(&chan->tx_q))
6585 chan->tx_send_head = skb_peek(&chan->tx_q)
6587 chan->tx_send_head = NULL;
6589 /* Rewind next_tx_seq to the point expected
6592 chan->next_tx_seq = control->reqseq;
6593 chan->unacked_frames = 0;
6595 err = l2cap_finish_move(chan);
6599 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6600 l2cap_send_i_or_rr_or_rnr(chan);
6602 if (event == L2CAP_EV_RECV_IFRAME)
/* Re-dispatch the S-frame event through the normal RECV handler. */
6605 return l2cap_rx_state_recv(chan, control, NULL, event);
/* Rx handler for WAIT_F during a channel move: when the frame carries
 * the F-bit, rewind tx state to the peer's reqseq, adopt the MTU of the
 * post-move link, re-segment, then process the frame normally.
 * NOTE(review): decimated listing -- hs_hcon conditional and returns
 * are partly missing between the visible lines.
 */
6608 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6609 struct l2cap_ctrl *control,
6610 struct sk_buff *skb, u8 event)
/* Only the final (F=1) frame ends the WAIT_F state. */
6614 if (!control->final)
6617 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6619 chan->rx_state = L2CAP_RX_STATE_RECV;
6620 l2cap_process_reqseq(chan, control->reqseq);
6622 if (!skb_queue_empty(&chan->tx_q))
6623 chan->tx_send_head = skb_peek(&chan->tx_q);
6625 chan->tx_send_head = NULL;
6627 /* Rewind next_tx_seq to the point expected
6630 chan->next_tx_seq = control->reqseq;
6631 chan->unacked_frames = 0;
6634 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6636 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6638 err = l2cap_resegment(chan);
6641 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame that has actually been
 * sent but not yet acked, using modular sequence-number offsets.
 */
6646 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6648 /* Make sure reqseq is for a packet that has been sent but not acked */
6651 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6652 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM rx dispatcher: validate the frame's reqseq, then route
 * the event to the handler for the channel's current rx state. An
 * invalid reqseq is a protocol violation and disconnects the channel.
 * NOTE(review): decimated listing -- breaks and the else keyword are
 * missing between the visible lines.
 */
6655 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6656 struct sk_buff *skb, u8 event)
6660 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6661 control, skb, event, chan->rx_state);
6663 if (__valid_reqseq(chan, control->reqseq)) {
6664 switch (chan->rx_state) {
6665 case L2CAP_RX_STATE_RECV:
6666 err = l2cap_rx_state_recv(chan, control, skb, event);
6668 case L2CAP_RX_STATE_SREJ_SENT:
6669 err = l2cap_rx_state_srej_sent(chan, control, skb,
6672 case L2CAP_RX_STATE_WAIT_P:
6673 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6675 case L2CAP_RX_STATE_WAIT_F:
6676 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6683 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6684 control->reqseq, chan->next_tx_seq,
6685 chan->expected_ack_seq);
6686 l2cap_send_disconn_req(chan, ECONNRESET);
/* Receive path for streaming mode: deliver an expected-sequence frame
 * to reassembly; on any sequence gap, drop the in-progress SDU and
 * resynchronize expected_tx_seq to the received frame's txseq (streaming
 * mode never retransmits).
 * NOTE(review): decimated listing -- the terminate/drop labels and
 * return are missing between the visible lines.
 */
6692 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6693 struct sk_buff *skb)
6695 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
6696 * the txseq field in advance to use it after l2cap_reassemble_sdu
6697 * returns and to avoid the race condition, for example:
6699 * The current thread calls:
6700 * l2cap_reassemble_sdu
6701 * chan->ops->recv == l2cap_sock_recv_cb
6702 * __sock_queue_rcv_skb
6703 * Another thread calls:
6707 * Then the current thread tries to access control, but it was freed by
6708 * skb_free_datagram.
6710 u16 txseq = control->txseq;
6712 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6715 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
6716 l2cap_pass_to_tx(chan, control);
6718 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6719 __next_seq(chan, chan->buffer_seq));
6721 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6723 l2cap_reassemble_sdu(chan, skb, control);
/* Sequence gap: abandon the partial SDU and drop this frame. */
6726 kfree_skb(chan->sdu);
6729 chan->sdu_last_frag = NULL;
6733 BT_DBG("Freeing %p", skb);
/* Resync: next frames are judged against the received txseq. */
6738 chan->last_acked_seq = txseq;
6739 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, validate the payload length against MPS, then route
 * I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames (never valid in
 * streaming mode) to l2cap_rx() via the super->event mapping table.
 * NOTE(review): decimated listing -- drop labels, len initialization and
 * returns are missing between the visible lines.
 */
6744 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6746 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6750 __unpack_control(chan, skb);
6755 * We can just drop the corrupted I-frame here.
6756 * Receiver will miss it and start proper recovery
6757 * procedures and ask for retransmission.
6759 if (l2cap_check_fcs(chan, skb))
/* Exclude the SDU-length prefix and FCS trailer from the MPS check. */
6762 if (!control->sframe && control->sar == L2CAP_SAR_START)
6763 len -= L2CAP_SDULEN_SIZE;
6765 if (chan->fcs == L2CAP_FCS_CRC16)
6766 len -= L2CAP_FCS_SIZE;
6768 if (len > chan->mps) {
6769 l2cap_send_disconn_req(chan, ECONNRESET);
6773 if (chan->ops->filter) {
6774 if (chan->ops->filter(chan, skb))
6778 if (!control->sframe) {
6781 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6782 control->sar, control->reqseq, control->final,
6785 /* Validate F-bit - F=0 always valid, F=1 only
6786 * valid in TX WAIT_F
6788 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6791 if (chan->mode != L2CAP_MODE_STREAMING) {
6792 event = L2CAP_EV_RECV_IFRAME;
6793 err = l2cap_rx(chan, control, skb, event);
6795 err = l2cap_stream_rx(chan, control, skb);
6799 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame 'super' field onto rx state-machine events. */
6801 const u8 rx_func_to_event[4] = {
6802 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6803 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6806 /* Only I-frames are expected in streaming mode */
6807 if (chan->mode == L2CAP_MODE_STREAMING)
6810 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6811 control->reqseq, control->final, control->poll,
/* S-frames carry no payload: trailing bytes are a violation. */
6815 BT_ERR("Trailing bytes: %d in sframe", len);
6816 l2cap_send_disconn_req(chan, ECONNRESET);
6820 /* Validate F and P bits */
6821 if (control->final && (control->poll ||
6822 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6825 event = rx_func_to_event[control->super];
6826 if (l2cap_rx(chan, control, skb, event))
6827 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return flow-control credits to the LE peer once our rx credit count
 * drops below half of the initial allotment, topping the peer back up
 * to le_max_credits via an LE Flow Control Credit packet.
 */
6837 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6839 struct l2cap_conn *conn = chan->conn;
6840 struct l2cap_le_credits pkt;
6843 /* We return more credits to the sender only after the amount of
6844 * credits falls below half of the initial amount.
6846 if (chan->rx_credits >= (le_max_credits + 1) / 2)
6849 return_credits = le_max_credits - chan->rx_credits;
6851 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6853 chan->rx_credits += return_credits;
6855 pkt.cid = cpu_to_le16(chan->scid);
6856 pkt.credits = cpu_to_le16(return_credits);
6858 chan->ident = l2cap_get_ident(conn);
6860 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive path for LE credit-based flow control channels: consume one
 * rx credit per PDU, validate sizes against the MTU, and reassemble
 * multi-PDU SDUs (first PDU carries a 2-byte SDU length). Errors free
 * the skb internally and return 0 so the caller does not double-free.
 * NOTE(review): decimated listing -- the first/continuation branch
 * structure and failed label are partly missing.
 */
6863 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Receiving with no credits outstanding is a peer protocol violation. */
6867 if (!chan->rx_credits) {
6868 BT_ERR("No credits to receive LE L2CAP data");
6869 l2cap_send_disconn_req(chan, ECONNRESET);
6873 if (chan->imtu < skb->len) {
6874 BT_ERR("Too big LE L2CAP PDU");
6879 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6881 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: pull the announced SDU length. */
6888 sdu_len = get_unaligned_le16(skb->data);
6889 skb_pull(skb, L2CAP_SDULEN_SIZE);
6891 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6892 sdu_len, skb->len, chan->imtu);
6894 if (sdu_len > chan->imtu) {
6895 BT_ERR("Too big LE L2CAP SDU length received");
6900 if (skb->len > sdu_len) {
6901 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU fits in one PDU: deliver it directly. */
6906 if (skb->len == sdu_len)
6907 return chan->ops->recv(chan, skb);
6910 chan->sdu_len = sdu_len;
6911 chan->sdu_last_frag = skb;
6913 /* Detect if remote is not able to use the selected MPS */
6914 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6915 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6917 /* Adjust the number of credits */
6918 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6919 chan->mps = mps_len;
6920 l2cap_chan_le_send_credits(chan);
/* Continuation PDU: append and deliver once the SDU is complete. */
6926 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6927 chan->sdu->len, skb->len, chan->sdu_len);
6929 if (chan->sdu->len + skb->len > chan->sdu_len) {
6930 BT_ERR("Too much LE L2CAP data received");
6935 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6938 if (chan->sdu->len == chan->sdu_len) {
6939 err = chan->ops->recv(chan, chan->sdu);
6942 chan->sdu_last_frag = NULL;
6950 kfree_skb(chan->sdu);
6952 chan->sdu_last_frag = NULL;
6956 /* We can't return an error here since we took care of the skb
6957 * freeing internally. An error return would cause the caller to
6958 * do a double-free of the skb.
/* Route a data skb to the channel registered for the given source CID
 * (with special handling for the A2MP CID), then hand it to the
 * mode-specific receive path (LE flow control / basic / ERTM /
 * streaming). Unknown CIDs or bad states drop the packet.
 * NOTE(review): decimated listing -- drop/done labels, breaks and some
 * braces are missing between the visible lines.
 */
6963 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6964 struct sk_buff *skb)
6966 struct l2cap_chan *chan;
6968 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channels are created lazily on first inbound data. */
6970 if (cid == L2CAP_CID_A2MP) {
6971 chan = a2mp_channel_create(conn, skb);
6977 l2cap_chan_hold(chan);
6978 l2cap_chan_lock(chan);
6980 BT_DBG("unknown cid 0x%4.4x", cid);
6981 /* Drop packet and return */
6987 BT_DBG("chan %p, len %d", chan, skb->len);
6989 /* If we receive data on a fixed channel before the info req/rsp
6990 * procdure is done simply assume that the channel is supported
6991 * and mark it as ready.
6993 if (chan->chan_type == L2CAP_CHAN_FIXED)
6994 l2cap_chan_ready(chan);
6996 if (chan->state != BT_CONNECTED)
6999 switch (chan->mode) {
7000 case L2CAP_MODE_LE_FLOWCTL:
7001 if (l2cap_le_data_rcv(chan, skb) < 0)
7006 case L2CAP_MODE_BASIC:
7007 /* If socket recv buffers overflows we drop data here
7008 * which is *bad* because L2CAP has to be reliable.
7009 * But we don't have any other choice. L2CAP doesn't
7010 * provide flow control mechanism. */
7012 if (chan->imtu < skb->len) {
7013 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7017 if (!chan->ops->recv(chan, skb))
7021 case L2CAP_MODE_ERTM:
7022 case L2CAP_MODE_STREAMING:
7023 l2cap_data_rcv(chan, skb);
7027 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7035 l2cap_chan_unlock(chan);
7036 l2cap_chan_put(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * bound to the given PSM, stashing the sender's address and PSM in the
 * skb control block for recvmsg's msg_name.
 * NOTE(review): decimated listing -- drop/free paths are missing
 * between the visible lines.
 */
7039 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7040 struct sk_buff *skb)
7042 struct hci_conn *hcon = conn->hcon;
7043 struct l2cap_chan *chan;
/* Connectionless data only exists on BR/EDR links. */
7045 if (hcon->type != ACL_LINK)
7048 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7053 BT_DBG("chan %p, len %d", chan, skb->len);
7055 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7058 if (chan->imtu < skb->len)
7061 /* Store remote BD_ADDR and PSM for msg_name */
7062 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7063 bt_cb(skb)->l2cap.psm = psm;
7065 if (!chan->ops->recv(chan, skb)) {
7066 l2cap_chan_put(chan);
7071 l2cap_chan_put(chan);
/* Top-level L2CAP rx demultiplexer: queue frames that arrive before the
 * HCI connection is fully up, validate the basic header, reject data
 * from blacklisted LE peers, then dispatch by CID to the signaling,
 * connectionless or data channel handlers.
 * NOTE(review): decimated listing -- drop paths and breaks are missing
 * between the visible lines.
 */
7076 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7078 struct l2cap_hdr *lh = (void *) skb->data;
7079 struct hci_conn *hcon = conn->hcon;
/* Buffer early frames until the HCI link reaches BT_CONNECTED. */
7083 if (hcon->state != BT_CONNECTED) {
7084 BT_DBG("queueing pending rx skb");
7085 skb_queue_tail(&conn->pending_rx, skb);
7089 skb_pull(skb, L2CAP_HDR_SIZE);
7090 cid = __le16_to_cpu(lh->cid);
7091 len = __le16_to_cpu(lh->len);
7093 if (len != skb->len) {
7098 /* Since we can't actively block incoming LE connections we must
7099 * at least ensure that we ignore incoming data from them.
7101 if (hcon->type == LE_LINK &&
7102 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7103 bdaddr_dst_type(hcon))) {
7108 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7111 case L2CAP_CID_SIGNALING:
7112 l2cap_sig_channel(conn, skb);
7115 case L2CAP_CID_CONN_LESS:
7116 psm = get_unaligned((__le16 *) skb->data);
7117 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7118 l2cap_conless_channel(conn, psm, skb);
7121 case L2CAP_CID_LE_SIGNALING:
7122 l2cap_le_sig_channel(conn, skb);
7126 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler that replays frames queued in pending_rx (those
 * that arrived before the HCI connection was ready) through the normal
 * l2cap_recv_frame() path.
 */
7131 static void process_pending_rx(struct work_struct *work)
7133 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7135 struct sk_buff *skb;
7139 while ((skb = skb_dequeue(&conn->pending_rx)))
7140 l2cap_recv_frame(conn, skb);
/* Allocate and initialize an l2cap_conn for an HCI connection: create
 * the HCI channel, set the MTU from the link type, advertise the local
 * fixed-channel mask (A2MP/SMP where enabled) and set up the locks,
 * lists, timers and workqueue items the connection uses.
 * NOTE(review): decimated listing -- early-exit checks, the LE_LINK
 * case label and returns are missing between the visible lines.
 */
7143 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7145 struct l2cap_conn *conn = hcon->l2cap_data;
7146 struct hci_chan *hchan;
7151 hchan = hci_chan_create(hcon);
7155 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: release the HCI channel created above. */
7157 hci_chan_del(hchan);
7161 kref_init(&conn->ref);
7162 hcon->l2cap_data = conn;
7163 conn->hcon = hci_conn_get(hcon);
7164 conn->hchan = hchan;
7166 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Pick the MTU from the controller's LE or ACL buffer size. */
7168 switch (hcon->type) {
7170 if (hcon->hdev->le_mtu) {
7171 conn->mtu = hcon->hdev->le_mtu;
7176 conn->mtu = hcon->hdev->acl_mtu;
7180 conn->feat_mask = 0;
7182 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7184 if (hcon->type == ACL_LINK &&
7185 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7186 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7188 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7189 (bredr_sc_enabled(hcon->hdev) ||
7190 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7191 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7193 mutex_init(&conn->ident_lock);
7194 mutex_init(&conn->chan_lock);
7196 INIT_LIST_HEAD(&conn->chan_l);
7197 INIT_LIST_HEAD(&conn->users);
7199 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7201 skb_queue_head_init(&conn->pending_rx);
7202 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7203 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7205 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7210 static bool is_valid_psm(u16 psm, u8 dst_type) {
7214 if (bdaddr_type_is_le(dst_type))
7215 return (psm <= 0x00ff);
7217 /* PSM must be odd and lsb of upper byte must be 0 */
7218 return ((psm & 0x0101) == 0x0001);
/* Establish an outgoing L2CAP channel on @chan towards @dst/@dst_type.
 * @psm identifies connection-oriented services, @cid fixed channels;
 * the combinations allowed are exactly those validated below.  Creates
 * (or reuses) the underlying HCI link, attaches the channel to the
 * connection and starts the connect state machine.  Returns 0 on
 * success or a negative errno.
 * NOTE(review): several lines (hci_dev_lock(), error labels, returns)
 * appear elided in this excerpt.
 */
7221 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7222 bdaddr_t *dst, u8 dst_type)
7224 struct l2cap_conn *conn;
7225 struct hci_conn *hcon;
7226 struct hci_dev *hdev;
7229 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7230 dst_type, __le16_to_cpu(psm));
/* Find a local adapter that can reach @dst */
7232 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7234 return -EHOSTUNREACH;
/* Validate the psm/cid combination against the channel type */
7238 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7239 chan->chan_type != L2CAP_CHAN_RAW) {
7244 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7249 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Only certain channel modes are valid for an outgoing connect; LE
 * flow-control channels are initialised here.
 */
7254 switch (chan->mode) {
7255 case L2CAP_MODE_BASIC:
7257 case L2CAP_MODE_LE_FLOWCTL:
7258 l2cap_le_flowctl_init(chan);
7260 case L2CAP_MODE_ERTM:
7261 case L2CAP_MODE_STREAMING:
/* Reject channels already in progress (handling partly elided) */
7270 switch (chan->state) {
7274 /* Already connecting */
7279 /* Already connected */
7293 /* Set destination address and psm */
7294 bacpy(&chan->dst, dst);
7295 chan->dst_type = dst_type;
7300 if (bdaddr_type_is_le(dst_type)) {
7301 /* Convert from L2CAP channel address type to HCI address type
7303 if (dst_type == BDADDR_LE_PUBLIC)
7304 dst_type = ADDR_LE_DEV_PUBLIC;
7306 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising, connect directly taking the slave role;
 * otherwise use the connect-by-scanning path.
 */
7308 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7309 hcon = hci_connect_le(hdev, dst, dst_type,
7311 HCI_LE_CONN_TIMEOUT,
7312 HCI_ROLE_SLAVE, NULL);
7314 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7316 HCI_LE_CONN_TIMEOUT);
/* BR/EDR: derive the authentication requirements from the channel */
7319 u8 auth_type = l2cap_get_auth_type(chan);
7320 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7324 err = PTR_ERR(hcon);
7328 conn = l2cap_conn_add(hcon);
7330 hci_conn_drop(hcon);
7335 mutex_lock(&conn->chan_lock);
7336 l2cap_chan_lock(chan);
/* A fixed channel with this destination CID must not already exist */
7338 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7339 hci_conn_drop(hcon);
7344 /* Update source addr of the socket */
7345 bacpy(&chan->src, &hcon->src);
7346 chan->src_type = bdaddr_src_type(hcon);
7348 __l2cap_chan_add(conn, chan);
7350 /* l2cap_chan_add takes its own ref so we can drop this one */
7351 hci_conn_drop(hcon);
7353 l2cap_state_change(chan, BT_CONNECT);
7354 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7356 /* Release chan->sport so that it can be reused by other
7357 * sockets (as it's only used for listening sockets).
7359 write_lock(&chan_list_lock);
7361 write_unlock(&chan_list_lock);
/* If the HCI link is already up we can progress immediately */
7363 if (hcon->state == BT_CONNECTED) {
7364 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7365 __clear_chan_timer(chan);
7366 if (l2cap_chan_check_security(chan, true))
7367 l2cap_state_change(chan, BT_CONNECTED);
7369 l2cap_do_start(chan);
7375 l2cap_chan_unlock(chan);
7376 mutex_unlock(&conn->chan_lock);
7378 hci_dev_unlock(hdev);
7384 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection
 * from @bdaddr.  Scans the global list of listening channels; an exact
 * local-address match builds mask lm1, a BDADDR_ANY wildcard builds
 * lm2, and HCI_LM_MASTER is added when the channel permits role switch.
 * NOTE(review): the statement setting 'exact' appears elided in this
 * excerpt; the exact-match mask takes precedence in the return.
 */
7386 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7388 int exact = 0, lm1 = 0, lm2 = 0;
7389 struct l2cap_chan *c;
7391 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7393 /* Find listening sockets and check their link_mode */
7394 read_lock(&chan_list_lock);
7395 list_for_each_entry(c, &chan_list, global_l) {
7396 if (c->state != BT_LISTEN)
7399 if (!bacmp(&c->src, &hdev->bdaddr)) {
7400 lm1 |= HCI_LM_ACCEPT;
7401 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7402 lm1 |= HCI_LM_MASTER;
7404 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7405 lm2 |= HCI_LM_ACCEPT;
7406 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7407 lm2 |= HCI_LM_MASTER;
7410 read_unlock(&chan_list_lock);
7412 return exact ? lm1 : lm2;
7415 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7416 * from an existing channel in the list or from the beginning of the
7417 * global list (by passing NULL as first parameter).
7419 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7420 struct hci_conn *hcon)
7422 u8 src_type = bdaddr_src_type(hcon);
7424 read_lock(&chan_list_lock);
/* Resume the walk after @c, or start from the head when @c is NULL */
7427 c = list_next_entry(c, global_l);
7429 c = list_entry(chan_list.next, typeof(*c), global_l);
7431 list_for_each_entry_from(c, &chan_list, global_l) {
/* Only fixed channels in listening state are candidates */
7432 if (c->chan_type != L2CAP_CHAN_FIXED)
7434 if (c->state != BT_LISTEN)
/* Source must match the link's local address or be a wildcard */
7436 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7438 if (src_type != c->src_type)
/* Take a reference before dropping the list lock so the caller can
 * safely use the channel.  NOTE(review): handling of a zero-refcount
 * channel (hold_unless_zero failing) appears elided in this excerpt.
 */
7441 c = l2cap_chan_hold_unless_zero(c);
7442 read_unlock(&chan_list_lock);
/* No matching channel found */
7446 read_unlock(&chan_list_lock);
/* HCI connect-complete callback for ACL and LE links.  On failure the
 * l2cap_conn is torn down; on success the conn is created, all global
 * fixed channels listening on this local address are notified, and the
 * connection is marked ready.
 */
7451 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7453 struct hci_dev *hdev = hcon->hdev;
7454 struct l2cap_conn *conn;
7455 struct l2cap_chan *pchan;
7458 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7461 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status means the connection attempt failed */
7464 l2cap_conn_del(hcon, bt_to_errno(status));
7468 conn = l2cap_conn_add(hcon);
7472 dst_type = bdaddr_dst_type(hcon);
7474 /* If device is blocked, do not create channels for it */
7475 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7478 /* Find fixed channels and notify them of the new connection. We
7479 * use multiple individual lookups, continuing each time where
7480 * we left off, because the list lock would prevent calling the
7481 * potentially sleeping l2cap_chan_lock() function.
7483 pchan = l2cap_global_fixed_chan(NULL, hcon);
7485 struct l2cap_chan *chan, *next;
7487 /* Client fixed channels should override server ones */
7488 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7491 l2cap_chan_lock(pchan);
/* Clone the listening channel and bind it to this connection */
7492 chan = pchan->ops->new_connection(pchan);
7494 bacpy(&chan->src, &hcon->src);
7495 bacpy(&chan->dst, &hcon->dst);
7496 chan->src_type = bdaddr_src_type(hcon);
7497 chan->dst_type = dst_type;
7499 __l2cap_chan_add(conn, chan);
7502 l2cap_chan_unlock(pchan);
/* Advance the iteration before releasing the reference on pchan */
7504 next = l2cap_global_fixed_chan(pchan, hcon);
7505 l2cap_chan_put(pchan);
7509 l2cap_conn_ready(conn);
/* HCI callback asking which reason code to report for a disconnect.
 * Falls back to "remote user terminated" when no l2cap_conn is attached
 * (the guard condition appears elided in this excerpt); otherwise the
 * reason recorded on the connection is returned.
 */
7512 int l2cap_disconn_ind(struct hci_conn *hcon)
7514 struct l2cap_conn *conn = hcon->l2cap_data;
7516 BT_DBG("hcon %p", hcon);
7519 return HCI_ERROR_REMOTE_USER_TERM;
7520 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the l2cap_conn for ACL
 * and LE links, translating the HCI reason code to an errno.
 */
7523 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7525 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7528 BT_DBG("hcon %p reason %d", hcon, reason);
7530 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption on a MEDIUM-security channel arms a grace timer,
 * while HIGH/FIPS channels are closed immediately; regaining encryption
 * clears the grace timer for MEDIUM-security channels.
 */
7533 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7535 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7538 if (encrypt == 0x00) {
7539 if (chan->sec_level == BT_SECURITY_MEDIUM) {
/* Give the link a chance to re-encrypt before closing */
7540 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7541 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7542 chan->sec_level == BT_SECURITY_FIPS)
7543 l2cap_chan_close(chan, ECONNREFUSED);
7545 if (chan->sec_level == BT_SECURITY_MEDIUM)
7546 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback.  Walks every
 * channel on the connection and advances or tears down its state machine
 * according to the new security status.
 */
7550 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7552 struct l2cap_conn *conn = hcon->l2cap_data;
7553 struct l2cap_chan *chan;
7558 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7560 mutex_lock(&conn->chan_lock);
7562 list_for_each_entry(chan, &conn->chan_l, list) {
7563 l2cap_chan_lock(chan);
7565 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7566 state_to_string(chan->state));
/* The A2MP fixed channel is exempt from this security handling */
7568 if (chan->scid == L2CAP_CID_A2MP) {
7569 l2cap_chan_unlock(chan);
/* Encryption came up successfully: adopt the link's security level */
7573 if (!status && encrypt)
7574 chan->sec_level = hcon->sec_level;
/* Skip channels that are not waiting on a security procedure */
7576 if (!__l2cap_no_conn_pending(chan)) {
7577 l2cap_chan_unlock(chan);
/* Established channels just resume and re-check encryption status */
7581 if (!status && (chan->state == BT_CONNECTED ||
7582 chan->state == BT_CONFIG)) {
7583 chan->ops->resume(chan);
7584 l2cap_check_encryption(chan, encrypt);
7585 l2cap_chan_unlock(chan);
/* Outgoing connect held for security: proceed only when security
 * succeeded and the encryption key is long enough; otherwise arm the
 * disconnect timer.
 */
7589 if (chan->state == BT_CONNECT) {
7590 if (!status && l2cap_check_enc_key_size(hcon))
7591 l2cap_start_connection(chan);
7593 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect held for security (not an LE flow-control channel):
 * send the pending connect response now that the outcome is known.
 */
7594 } else if (chan->state == BT_CONNECT2 &&
7595 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7596 struct l2cap_conn_rsp rsp;
7599 if (!status && l2cap_check_enc_key_size(hcon)) {
7600 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Userspace wants to authorise first: answer "pending" */
7601 res = L2CAP_CR_PEND;
7602 stat = L2CAP_CS_AUTHOR_PEND;
7603 chan->ops->defer(chan);
7605 l2cap_state_change(chan, BT_CONFIG);
7606 res = L2CAP_CR_SUCCESS;
7607 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and schedule disconnect */
7610 l2cap_state_change(chan, BT_DISCONN);
7611 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7612 res = L2CAP_CR_SEC_BLOCK;
7613 stat = L2CAP_CS_NO_INFO;
7616 rsp.scid = cpu_to_le16(chan->dcid);
7617 rsp.dcid = cpu_to_le16(chan->scid);
7618 rsp.result = cpu_to_le16(res);
7619 rsp.status = cpu_to_le16(stat);
7620 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately follow up with our configuration request */
7623 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7624 res == L2CAP_CR_SUCCESS) {
7626 set_bit(CONF_REQ_SENT, &chan->conf_state);
7627 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7629 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7631 chan->num_conf_req++;
7635 l2cap_chan_unlock(chan);
7638 mutex_unlock(&conn->chan_lock);
/* Entry point for ACL data from the HCI layer.  Reassembles possibly
 * fragmented ACL packets into complete L2CAP frames using conn->rx_skb
 * and conn->rx_len, and hands complete frames to l2cap_recv_frame().
 * NOTE(review): several switch-case labels, braces and return
 * statements appear elided in this excerpt.
 */
7641 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7643 struct l2cap_conn *conn = hcon->l2cap_data;
7644 struct l2cap_hdr *hdr;
7647 /* For AMP controller do not create l2cap conn */
7648 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7652 conn = l2cap_conn_add(hcon);
7657 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start-of-packet fragment */
7661 case ACL_START_NO_FLUSH:
/* A start frame while reassembly is in progress means we lost data:
 * drop the partial frame and mark the connection unreliable.
 */
7664 BT_ERR("Unexpected start frame (len %d)", skb->len);
7665 kfree_skb(conn->rx_skb);
7666 conn->rx_skb = NULL;
7668 l2cap_conn_unreliable(conn, ECOMM);
7671 /* Start fragment always begin with Basic L2CAP header */
7672 if (skb->len < L2CAP_HDR_SIZE) {
7673 BT_ERR("Frame is too short (len %d)", skb->len);
7674 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length announced by the Basic L2CAP header */
7678 hdr = (struct l2cap_hdr *) skb->data;
7679 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7681 if (len == skb->len) {
7682 /* Complete frame received */
7683 l2cap_recv_frame(conn, skb);
7687 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7689 if (skb->len > len) {
7690 BT_ERR("Frame is too long (len %d, expected len %d)",
7692 l2cap_conn_unreliable(conn, ECOMM);
7696 /* Allocate skb for the complete frame (with header) */
7697 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7701 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments */
7703 conn->rx_len = len - skb->len;
/* Continuation fragment */
7707 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7709 if (!conn->rx_len) {
7710 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7711 l2cap_conn_unreliable(conn, ECOMM);
7715 if (skb->len > conn->rx_len) {
7716 BT_ERR("Fragment is too long (len %d, expected %d)",
7717 skb->len, conn->rx_len);
7718 kfree_skb(conn->rx_skb);
7719 conn->rx_skb = NULL;
7721 l2cap_conn_unreliable(conn, ECOMM);
7725 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7727 conn->rx_len -= skb->len;
7729 if (!conn->rx_len) {
7730 /* Complete frame received. l2cap_recv_frame
7731 * takes ownership of the skb so set the global
7732 * rx_skb pointer to NULL first.
7734 struct sk_buff *rx_skb = conn->rx_skb;
7735 conn->rx_skb = NULL;
7736 l2cap_recv_frame(conn, rx_skb);
/* Callbacks registered with the HCI core via hci_register_cb() in
 * l2cap_init().  NOTE(review): the .name initialiser line appears
 * elided in this excerpt.
 */
7745 static struct hci_cb l2cap_cb = {
7747 .connect_cfm = l2cap_connect_cfm,
7748 .disconn_cfm = l2cap_disconn_cfm,
7749 .security_cfm = l2cap_security_cfm,
/* seq_file show handler: dump one line per global L2CAP channel with
 * its addresses, state, PSM, CIDs, MTUs, security level and mode.
 */
7752 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7754 struct l2cap_chan *c;
7756 read_lock(&chan_list_lock);
7758 list_for_each_entry(c, &chan_list, global_l) {
7759 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7760 &c->src, c->src_type, &c->dst, c->dst_type,
7761 c->state, __le16_to_cpu(c->psm),
7762 c->scid, c->dcid, c->imtu, c->omtu,
7763 c->sec_level, c->mode);
7766 read_unlock(&chan_list_lock);
/* debugfs open: bind l2cap_debugfs_show to the file via single_open() */
7771 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7773 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file single-open).
 * NOTE(review): the .read = seq_read initialiser appears elided in this
 * excerpt.
 */
7776 static const struct file_operations l2cap_debugfs_fops = {
7777 .open = l2cap_debugfs_open,
7779 .llseek = seq_lseek,
7780 .release = single_release,
/* dentry of the "l2cap" debugfs file, created in l2cap_init() */
7783 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, hook into the HCI core,
 * and create debugfs entries (the "l2cap" channel dump plus the LE
 * credit/MPS tunables).  Debugfs setup is skipped when bt_debugfs is
 * unavailable.  NOTE(review): error handling for l2cap_init_sockets()
 * and the trailing return appear elided in this excerpt.
 */
7785 int __init l2cap_init(void)
7789 err = l2cap_init_sockets();
7793 hci_register_cb(&l2cap_cb);
7795 if (IS_ERR_OR_NULL(bt_debugfs))
7798 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7799 NULL, &l2cap_debugfs_fops);
7801 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7803 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
/* Module teardown: mirror of l2cap_init() in reverse order */
7809 void l2cap_exit(void)
7811 debugfs_remove(l2cap_debugfs);
7812 hci_unregister_cb(&l2cap_cb);
7813 l2cap_cleanup_sockets();
/* Expose disable_ertm as a module parameter (boot/sysfs tunable, 0644) */
7816 module_param(disable_ertm, bool, 0644);
7817 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");