/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"
/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}
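/*
 * Reserve a free Tx descriptor id for the skb and record the mapping in
 * wl->tx_frames. Returns the id, or -EBUSY once all wl->num_tx_desc
 * descriptors are in flight.
 */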
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}
void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);
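/*
 * Inspect an AP-mode frame about to be handed to the FW; only
 * authentication frames matter here. The peer station is pre-registered
 * and the device stays on-channel so the connection can complete.
 */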
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
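/*
 * Worked example of the threshold above: one AP (ap_count == 1) with a
 * single connected STA yields 2*1 + 1 == 3 active links (global +
 * broadcast + the STA), so active_link_count must exceed 3, i.e. a
 * second peer must be connected, before a PS link is started.
 */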
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
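/*
 * Illustrative example: with WL1271_TX_ALIGN_TO == 4, a 1021-byte packet
 * pads to ALIGN(1021, 4) == 1024. On a chip that takes the block-size
 * branch instead (e.g. wl128x over SDIO with a 256-byte block size),
 * 1021 also pads to 1024, but 1025 would pad to 1280.
 */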
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = skb_push(skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
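/*
 * Fill the wl1271_tx_hw_descr that was reserved in front of the frame:
 * packet lifetime, TID/AC, session counter, rate-policy index and the
 * HW checksum/length fields.
 */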
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	hosttime = (ktime_get_boot_ns() >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
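/*
 * Returns the aligned number of bytes consumed in the aggregation buffer
 * on success, -EAGAIN when the aggregation buffer is full and must be
 * flushed first, or -EBUSY when the FW is out of memory blocks.
 */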
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rates indication are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}
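/*
 * Illustrative example: if rate_set has bits 0 and 2 set, the result is
 * bitrates[0].hw_value | bitrates[2].hw_value for the given band; a bit
 * at position HW_HT_RATES_OFFSET + n maps to CONF_HW_BIT_RATE_MCS_0 << n.
 */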
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}
static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}
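/*
 * Pop one skb from the link's queue for the given AC, keeping the
 * global and per-vif queue counters consistent under wl->wl_lock.
 */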
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}
static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < wl->num_links; i++) {
		h = (start_hlid + i) % wl->num_links;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}
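/*
 * Dequeue priority: resume the round robin from the last served vif,
 * then try the system link, then make a fresh pass over the vif list;
 * fall back to the first non-empty low-priority link, and finally to
 * the dummy packet if the FW requested one.
 */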
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before the restarting wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it. */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}
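/*
 * Put an skb back at the head of its link queue (or re-arm the dummy
 * packet flag) and rewind last_tx_hlid so the round robin picks the
 * same packet up again first.
 */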
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery
 * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
 * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
 * within prepare_tx_frame code but there's nothing we should do about those
 * as well.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
					     wl->aggr_buf, buf_offset, true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}
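/*
 * wl1271_tx_work() is the workqueue entry point: it wraps
 * wlcore_tx_work_locked() with wl->mutex and a pm_runtime reference,
 * and escalates bus errors to the recovery work.
 */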
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}
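/*
 * Note: the magic numbers above follow the wl12xx FW rate-class layout
 * (hence the TODOs): the low indices presumably cover the MCS entries,
 * with index 0 the short-GI variant.
 */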
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}
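/*
 * The FW keeps a free-running result counter; the host acks by writing
 * the value back, and (fw_counter - wl->tx_results_count) entries are
 * consumed from the circular tx_results_queue.
 */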
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
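/* Tx flush timeout, in microseconds (500 ms); see wl1271_tx_flush() */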
#define WL1271_TX_FLUSH_TIMEOUT 500000
/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < wl->num_links; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
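/*
 * Queue stop/wake bookkeeping: each mac80211 hw queue carries a bitmask
 * of wlcore_queue_stop_reason bits. The queue is stopped when the first
 * reason bit is set and only woken again once the mask drops to zero.
 */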
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}
void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure all vifs in mac80211 we don't
	 * know are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return stopped;
}
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif,
					      u8 queue,
					      enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}