3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
33 #include "wl12xx_80211.h"
40 #include "vendor_cmd.h"
/* How many times to retry booting the firmware before giving up. */
45 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters. A value of -1 (or NULL for fwlog_param)
 * means "not set on the command line"; wlcore_adjust_conf() only
 * overrides the platform defaults when these differ from -1/NULL.
 */
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery = -1;
/* Forward declarations for helpers defined later in this file. */
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 struct ieee80211_vif *vif,
54 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that the STA peer is now authorized.
 * Valid only for STA-type vifs; bails out unless the vif is associated,
 * and uses test_and_set_bit() so the peer-state command is sent at most
 * once per association (WLVIF_FLAG_STA_STATE_SENT acts as the latch).
 * NOTE(review): presumably called with wl->mutex held (command path) —
 * confirm at call sites.
 */
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
62 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* Atomically check-and-mark so we never send the state twice */
68 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
75 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the new DFS region from the
 * regulatory request and push the updated regdomain to the firmware.
 */
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 struct regulatory_request *request)
82 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 struct wl1271 *wl = hw->priv;
85 /* copy the current dfs region */
87 wl->dfs_region = request->dfs_region;
89 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming for a vif via the ACX command, and
 * keep the WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the
 * requested state.
 */
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
97 /* we should hold wl->mutex */
98 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
103 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
111 * this function is being called when the rx_streaming interval
112 * has been changed or rx_streaming should be disabled
/*
 * Re-applies (or disables) RX streaming according to the new
 * configuration; only acts if streaming was already started.
 */
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 int period = wl->conf.rx_streaming.interval;
119 /* don't reconfigure if rx_streaming is disabled */
120 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 /* reconfigure/disable according to new streaming_period */
125 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 (wl->conf.rx_streaming.always ||
127 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 ret = wl1271_set_rx_streaming(wl, wlvif, true);
130 ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 /* don't cancel_work_sync since we might deadlock */
132 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif when the enabling
 * conditions hold (started flag clear, STA associated, and either
 * "always" streaming or soft-gemini coexistence active). On success,
 * arms rx_streaming_timer to stop streaming after the configured
 * duration of inactivity.
 */
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 rx_streaming_enable_work);
143 struct wl1271 *wl = wlvif->wl;
145 mutex_lock(&wl->mutex);
/* Bail out if already started or preconditions no longer hold */
147 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 (!wl->conf.rx_streaming.always &&
150 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 if (!wl->conf.rx_streaming.interval)
/* Wake the chip from ELP before issuing FW commands */
156 ret = wl1271_ps_elp_wakeup(wl);
160 ret = wl1271_set_rx_streaming(wl, wlvif, true);
164 /* stop it after some time of inactivity */
165 mod_timer(&wlvif->rx_streaming_timer,
166 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
169 wl1271_ps_elp_sleep(wl);
171 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * rx_streaming inactivity timer). No-op if streaming is not started.
 */
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
177 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 rx_streaming_disable_work);
179 struct wl1271 *wl = wlvif->wl;
181 mutex_lock(&wl->mutex);
183 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/* Wake the chip from ELP before issuing the disable command */
186 ret = wl1271_ps_elp_wakeup(wl);
190 ret = wl1271_set_rx_streaming(wl, wlvif, false);
195 wl1271_ps_elp_sleep(wl);
197 mutex_unlock(&wl->mutex);
/*
 * Timer callback (timer context — cannot sleep): defer the actual
 * RX-streaming disable to the mac80211 workqueue.
 */
200 static void wl1271_rx_streaming_timer(unsigned long data)
202 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 struct wl1271 *wl = wlvif->wl;
204 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
207 /* wl->mutex must be taken */
/*
 * Re-arm the Tx stuck-detection watchdog, but only while there are
 * still Tx blocks allocated in the FW (i.e. Tx is in flight).
 */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
210 /* if the watchdog is not armed, don't do anything */
211 if (wl->tx_allocated_blocks == 0)
/* cancel + requeue restarts the timeout window from now */
214 cancel_delayed_work(&wl->tx_watchdog_work);
215 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Deferred work: forward a station rate-control update to the
 * chip-specific hw op. Skipped unless the core is fully ON.
 */
219 static void wlcore_rc_update_work(struct work_struct *work)
222 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
224 struct wl1271 *wl = wlvif->wl;
226 mutex_lock(&wl->mutex);
228 if (unlikely(wl->state != WLCORE_STATE_ON))
/* Wake from ELP before talking to the chip */
231 ret = wl1271_ps_elp_wakeup(wl);
235 wlcore_hw_sta_rc_update(wl, wlvif);
237 wl1271_ps_elp_sleep(wl);
239 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no Tx blocks were freed by the FW for the
 * configured timeout. Benign causes (ROC in progress, active scan,
 * AP buffering frames for sleeping stations) just re-arm the timer;
 * otherwise Tx is considered stuck and a full FW recovery is queued.
 */
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
244 struct delayed_work *dwork;
247 dwork = container_of(work, struct delayed_work, work);
248 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
250 mutex_lock(&wl->mutex);
252 if (unlikely(wl->state != WLCORE_STATE_ON))
255 /* Tx went out in the meantime - everything is ok */
256 if (unlikely(wl->tx_allocated_blocks == 0))
260 * if a ROC is in progress, we might not have any Tx for a long
261 * time (e.g. pending Tx on the non-ROC channels)
/* Any bit set in roc_map means a remain-on-channel is active */
263 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 wl->conf.tx.tx_watchdog_timeout);
266 wl12xx_rearm_tx_watchdog_locked(wl);
271 * if a scan is in progress, we might not have any Tx for a long
274 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 wl->conf.tx.tx_watchdog_timeout);
277 wl12xx_rearm_tx_watchdog_locked(wl);
282 * AP might cache a frame for a long time for a sleeping station,
283 * so rearm the timer if there's an AP interface with stations. If
284 * Tx is genuinely stuck we will most hopefully discover it when all
285 * stations are removed due to inactivity.
287 if (wl->active_sta_count) {
288 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
290 wl->conf.tx.tx_watchdog_timeout,
291 wl->active_sta_count);
292 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation left: declare Tx stuck and recover the FW */
296 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_queue_recovery_work(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Override the platform/default configuration with values supplied via
 * the optional module parameters (fwlog mode/size, bug_on_recovery,
 * no_recovery). Unset parameters leave the defaults untouched.
 */
304 static void wlcore_adjust_conf(struct wl1271 *wl)
306 /* Adjust settings according to optional module parameters */
308 /* Firmware Logger params */
309 if (fwlog_mem_blocks != -1) {
/* Accept only values within the FW's supported block range */
310 if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
315 "Illegal fwlog_mem_blocks=%d using default %d",
316 fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
/* Map the fwlog_param string onto mode/output settings */
321 if (!strcmp(fwlog_param, "continuous")) {
322 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 } else if (!strcmp(fwlog_param, "ondemand")) {
324 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 } else if (!strcmp(fwlog_param, "dbgpins")) {
326 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 } else if (!strcmp(fwlog_param, "disable")) {
329 wl->conf.fwlog.mem_blocks = 0;
330 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
332 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
336 if (bug_on_recovery != -1)
337 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
339 if (no_recovery != -1)
340 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Per-link PS regulation for AP mode: start host-side high-level PS
 * for a station link that the FW reports asleep with many queued
 * packets, or end it when the station wakes / the queue drains.
 */
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 struct wl12xx_vif *wlvif,
/* fw_ps: FW says this link's station is in power-save */
349 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
352 * Wake up from high level PS if the STA is asleep with too little
353 * packets in FW or if the STA is awake.
355 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_end(wl, wlvif, hlid);
359 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 * Make an exception if this is the only connected link. In this
361 * case FW-memory congestion is less of a problem.
362 * Note that a single connected STA means 2*ap_count + 1 active links,
363 * since we must account for the global and broadcast AP links
364 * for each AP. The "fw_ps" check assures us the other link is a STA
365 * connected to the AP. Otherwise the FW would not set the PSM bit.
367 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached per-link FW power-save bitmap from the FW status
 * and re-run PS regulation for every station link of this AP vif.
 */
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 struct wl12xx_vif *wlvif,
374 struct wl_fw_status *status)
376 unsigned long cur_fw_ps_map;
379 cur_fw_ps_map = status->link_ps_bitmap;
380 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 wl1271_debug(DEBUG_PSM,
382 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 wl->ap_fw_ps_map, cur_fw_ps_map,
384 wl->ap_fw_ps_map ^ cur_fw_ps_map);
386 wl->ap_fw_ps_map = cur_fw_ps_map;
/* Regulate PS for each connected station link of this vif */
389 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status area: converts the raw status,
 * reconciles per-queue and per-link freed-packet counters (which are
 * 8-bit in FW and therefore wrap), updates the global Tx block
 * accounting, re-arms or cancels the Tx watchdog, updates AP link PS
 * state and the host/chipset time offset.
 * NOTE(review): presumably called with wl->mutex held (IRQ work path)
 * — confirm at call sites.
 */
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
396 struct wl12xx_vif *wlvif;
398 u32 old_tx_blk_count = wl->tx_blocks_available;
399 int avail, freed_blocks;
402 struct wl1271_link *lnk;
404 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
406 wl->fw_status_len, false);
/* Translate the chip-specific raw layout into struct wl_fw_status */
410 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
412 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 "drv_rx_counter = %d, tx_results_counter = %d)",
415 status->fw_rx_counter,
416 status->drv_rx_counter,
417 status->tx_results_counter);
419 for (i = 0; i < NUM_TX_QUEUES; i++) {
420 /* prevent wrap-around in freed-packets counter */
421 wl->tx_allocated_pkts[i] -=
422 (status->counters.tx_released_pkts[i] -
423 wl->tx_pkts_freed[i]) & 0xff;
425 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* Same wrap-safe delta computation, but per active link */
429 for_each_set_bit(i, wl->links_map, wl->num_links) {
433 /* prevent wrap-around in freed-packets counter */
434 diff = (status->counters.tx_lnk_free_pkts[i] -
435 lnk->prev_freed_pkts) & 0xff;
440 lnk->allocated_pkts -= diff;
441 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
443 /* accumulate the prev_freed_pkts counter */
444 lnk->total_freed_pkts += diff;
447 /* prevent wrap-around in total blocks counter */
448 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 freed_blocks = status->total_released_blks -
/* 32-bit counter wrapped: add 2^32 to get the true delta */
452 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 status->total_released_blks;
455 wl->tx_blocks_freed = status->total_released_blks;
457 wl->tx_allocated_blocks -= freed_blocks;
460 * If the FW freed some blocks:
461 * If we still have allocated blocks - re-arm the timer, Tx is
462 * not stuck. Otherwise, cancel the timer (no Tx currently).
465 if (wl->tx_allocated_blocks)
466 wl12xx_rearm_tx_watchdog_locked(wl);
468 cancel_delayed_work(&wl->tx_watchdog_work);
471 avail = status->tx_total - wl->tx_allocated_blocks;
474 * The FW might change the total number of TX memblocks before
475 * we get a notification about blocks being released. Thus, the
476 * available blocks calculation might yield a temporary result
477 * which is lower than the actual available blocks. Keeping in
478 * mind that only blocks that were allocated can be moved from
479 * TX to RX, tx_blocks_available should never decrease here.
481 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
484 /* if more blocks are available now, tx work can be scheduled */
485 if (wl->tx_blocks_available > old_tx_blk_count)
486 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
488 /* for AP update num of allocated TX blocks per link and ps status */
489 wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 wl12xx_irq_update_links_status(wl, wlvif, status);
493 /* update the host-chipset time offset */
495 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 (s64)(status->fw_localtime);
498 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain both deferred skb queues into mac80211: received frames go up
 * the RX path, completed transmissions get their tx-status reported.
 */
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
507 /* Pass all received frames to the network stack */
508 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 ieee80211_rx_ni(wl->hw, skb);
511 /* Return sent skbs to the network stack */
512 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that keeps flushing the deferred queues until the RX queue
 * is empty (new frames may be queued while we flush).
 */
516 static void wl1271_netstack_work(struct work_struct *work)
519 container_of(work, struct wl1271, netstack_work);
522 wl1271_flush_deferred_work(wl);
523 } while (skb_queue_len(&wl->deferred_rx_queue));
/* Upper bound on status-processing iterations per IRQ invocation */
526 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt processing loop, called with wl->mutex held. Reads
 * the FW status and dispatches on the pending interrupt bits:
 * watchdogs trigger recovery, DATA drives the RX/TX paths, EVENT_A/B
 * are handed to the event handler. Loops until no work remains or
 * WL1271_IRQ_MAX_LOOPS is reached (single pass for edge-triggered
 * IRQs to avoid racing the hardirq).
 */
528 static int wlcore_irq_locked(struct wl1271 *wl)
532 int loopcount = WL1271_IRQ_MAX_LOOPS;
534 unsigned int defer_count;
538 * In case edge triggered interrupt must be used, we cannot iterate
539 * more than once without introducing race conditions with the hardirq.
541 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
544 wl1271_debug(DEBUG_IRQ, "IRQ work");
546 if (unlikely(wl->state != WLCORE_STATE_ON))
549 ret = wl1271_ps_elp_wakeup(wl);
553 while (!done && loopcount--) {
555 * In order to avoid a race with the hardirq, clear the flag
556 * before acknowledging the chip. Since the mutex is held,
557 * wl1271_ps_elp_wakeup cannot be called concurrently.
559 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 smp_mb__after_atomic();
562 ret = wlcore_fw_status(wl, wl->fw_status);
/* Handle Tx completions reported directly in the status area */
566 wlcore_hw_tx_immediate_compl(wl);
568 intr = wl->fw_status->intr;
569 intr &= WLCORE_ALL_INTR_MASK;
575 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 wl1271_error("HW watchdog interrupt received! starting recovery.");
577 wl->watchdog_recovery = true;
580 /* restarting the chip. ignore any other interrupt. */
584 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 wl1271_error("SW watchdog interrupt received! "
586 "starting recovery.");
587 wl->watchdog_recovery = true;
590 /* restarting the chip. ignore any other interrupt. */
594 if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
597 ret = wlcore_rx(wl, wl->fw_status);
601 /* Check if any tx blocks were freed */
602 spin_lock_irqsave(&wl->wl_lock, flags);
603 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 wl1271_tx_total_queue_count(wl) > 0) {
605 spin_unlock_irqrestore(&wl->wl_lock, flags);
607 * In order to avoid starvation of the TX path,
608 * call the work function directly.
610 ret = wlcore_tx_work_locked(wl);
614 spin_unlock_irqrestore(&wl->wl_lock, flags);
617 /* check for tx results */
618 ret = wlcore_hw_tx_delayed_compl(wl);
622 /* Make sure the deferred queues don't get too long */
623 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 skb_queue_len(&wl->deferred_rx_queue);
625 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 wl1271_flush_deferred_work(wl);
629 if (intr & WL1271_ACX_INTR_EVENT_A) {
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 ret = wl1271_event_handle(wl, 0);
636 if (intr & WL1271_ACX_INTR_EVENT_B) {
637 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 ret = wl1271_event_handle(wl, 1);
643 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 wl1271_debug(DEBUG_IRQ,
645 "WL1271_ACX_INTR_INIT_COMPLETE");
647 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
651 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup, defers work if
 * the device is suspended (marking it pending and disabling the IRQ),
 * otherwise runs the locked IRQ loop and queues recovery on failure.
 * Afterwards, Tx work is queued only if it was not already handled
 * inline (WL1271_FLAG_TX_PENDING protocol with wl1271_op_tx()).
 */
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
661 struct wl1271 *wl = cookie;
663 /* complete the ELP completion */
664 spin_lock_irqsave(&wl->wl_lock, flags);
665 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
667 complete(wl->elp_compl);
668 wl->elp_compl = NULL;
671 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 /* don't enqueue a work right now. mark it as pending */
673 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 disable_irq_nosync(wl->irq);
676 pm_wakeup_event(wl->dev, 0);
677 spin_unlock_irqrestore(&wl->wl_lock, flags);
680 spin_unlock_irqrestore(&wl->wl_lock, flags);
682 /* TX might be handled here, avoid redundant work */
683 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 cancel_work_sync(&wl->tx_work);
686 mutex_lock(&wl->mutex);
688 ret = wlcore_irq_locked(wl);
690 wl12xx_queue_recovery_work(wl);
692 spin_lock_irqsave(&wl->wl_lock, flags);
693 /* In case TX was not handled here, queue TX work */
694 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 wl1271_tx_total_queue_count(wl) > 0)
697 ieee80211_queue_work(wl->hw, &wl->tx_work);
698 spin_unlock_irqrestore(&wl->wl_lock, flags);
700 mutex_unlock(&wl->mutex);
/* Accumulator for counting active interfaces (see wl12xx_get_vif_count) */
705 struct vif_counter_data {
708 struct ieee80211_vif *cur_vif;
/* set to true if cur_vif was seen among the active interfaces */
709 bool cur_vif_running;
/*
 * Iterator callback for ieee80211_iterate_active_interfaces(): counts
 * vifs and flags whether the "current" vif is among the active ones.
 */
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 struct ieee80211_vif *vif)
715 struct vif_counter_data *counter = data;
718 if (counter->cur_vif == vif)
719 counter->cur_vif_running = true;
722 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Fill *data with the number of active interfaces and whether cur_vif
 * is currently running, by iterating mac80211's active interfaces.
 */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 struct ieee80211_vif *cur_vif,
725 struct vif_counter_data *data)
727 memset(data, 0, sizeof(*data));
728 data->cur_vif = cur_vif;
730 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image: PLT firmware when in
 * PLT mode, multi-role firmware when more than one vif was active last
 * time (cached count, since wl->mutex is held here), otherwise the
 * single-role image. Skips the load if the right type is already in
 * memory; otherwise copies the image into a vmalloc'd buffer on wl.
 */
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
736 const struct firmware *fw;
738 enum wl12xx_fw_type fw_type;
742 fw_type = WL12XX_FW_TYPE_PLT;
743 fw_name = wl->plt_fw_name;
746 * we can't call wl12xx_get_vif_count() here because
747 * wl->mutex is taken, so use the cached last_vif_count value
749 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 fw_type = WL12XX_FW_TYPE_MULTI;
751 fw_name = wl->mr_fw_name;
753 fw_type = WL12XX_FW_TYPE_NORMAL;
754 fw_name = wl->sr_fw_name;
/* Already have this firmware type loaded - nothing to do */
758 if (wl->fw_type == fw_type)
761 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
763 ret = reject_firmware(&fw, fw_name, wl->dev);
766 wl1271_error("could not get firmware %s: %d", fw_name, ret);
/* FW images are downloaded in 32-bit chunks; size must be aligned */
771 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Mark type NONE until the copy fully succeeds */
778 wl->fw_type = WL12XX_FW_TYPE_NONE;
779 wl->fw_len = fw->size;
780 wl->fw = vmalloc(wl->fw_len);
783 wl1271_error("could not allocate memory for the firmware");
788 memcpy(wl->fw, fw->data, wl->fw_len);
790 wl->fw_type = fw_type;
792 release_firmware(fw);
/*
 * Kick off FW recovery: flips the state to RESTARTING (so a second
 * failure cannot queue recovery recursively), wakes the chip, masks
 * further interrupts and schedules the recovery work item.
 */
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
799 /* Avoid a recursive recovery */
800 if (wl->state == WLCORE_STATE_ON) {
/* An unintended recovery indicates a real FW problem - warn */
801 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 wl->state = WLCORE_STATE_RESTARTING;
805 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 wl1271_ps_elp_wakeup(wl);
807 wlcore_disable_interrupts_nosync(wl);
808 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to maxlen bytes of FW log data to wl->fwlog (capped so the
 * buffer never exceeds PAGE_SIZE). Returns the number of bytes copied.
 */
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
816 /* Make sure we have enough room */
817 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
819 /* Fill the FW log file, consumed by the sysfs fwlog entry */
820 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 wl->fwlog_size += len;
/*
 * On FW panic, walk the firmware's linked list of log memory blocks
 * and copy their contents into the host-side fwlog buffer, waking any
 * sysfs readers at the end. Temporarily switches the chip partition
 * to address each block, restoring the previous partition on exit.
 */
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
828 struct wlcore_partition_set part, old_part;
/* Nothing to read if the FW logger is unsupported or disabled */
835 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 (wl->conf.fwlog.mem_blocks == 0))
839 wl1271_info("Reading FW panic log");
841 block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
846 * Make sure the chip is awake and the logger isn't active.
847 * Do not send a stop fwlog command if the fw is hanged or if
848 * dbgpins are used (due to some fw bug).
850 if (wl1271_ps_elp_wakeup(wl))
852 if (!wl->watchdog_recovery &&
853 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 wl12xx_cmd_stop_fwlog(wl);
856 /* Read the first memory block address */
857 ret = wlcore_fw_status(wl, wl->fw_status);
861 addr = wl->fw_status->log_start_addr;
/* In continuous mode each block also carries an RX descriptor header */
865 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 end_of_log = wl->fwlog_end;
869 offset = sizeof(addr);
873 old_part = wl->curr_part;
874 memset(&part, 0, sizeof(part));
876 /* Traverse the memory blocks linked list */
878 part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 part.mem.size = PAGE_SIZE;
881 ret = wlcore_set_partition(wl, &part);
883 wl1271_error("%s: set_partition start=0x%X size=%d",
884 __func__, part.mem.start, part.mem.size);
888 memset(block, 0, wl->fw_mem_block_size);
889 ret = wlcore_read_hwaddr(wl, addr, block,
890 wl->fw_mem_block_size, false);
896 * Memory blocks are linked to one another. The first 4 bytes
897 * of each memory block hold the hardware address of the next
898 * one. The last memory block points to the first one in
899 * on demand mode and is equal to 0x2000000 in continuous mode.
901 addr = le32_to_cpup((__le32 *)block);
/* Stop if the host buffer is full (copy returned 0 bytes) */
903 if (!wl12xx_copy_fwlog(wl, block + offset,
904 wl->fw_mem_block_size - offset))
906 } while (addr && (addr != end_of_log));
/* Let any blocked sysfs fwlog reader consume the new data */
908 wake_up_interruptible(&wl->fwlog_waitq);
912 wlcore_set_partition(wl, &old_part);
/*
 * Preserve the link's freed-packets counter in the station's private
 * data across a reconfiguration, padding the sequence number during
 * recovery to cover frames transmitted but not yet reported by the FW
 * (GEM ciphers need a larger padding).
 */
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 u8 hlid, struct ieee80211_sta *sta)
918 struct wl1271_station *wl_sta;
919 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
921 wl_sta = (void *)sta->drv_priv;
922 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
925 * increment the initial seq number on recovery to account for
926 * transmitted packets that we haven't yet got in the FW status
928 if (wlvif->encryption_type == KEY_GEM)
929 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
931 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based variant of wlcore_save_freed_pkts(): looks up the
 * mac80211 station by MAC address for the given vif and, if found,
 * saves its freed-packets counter. Warns on an invalid hlid/address.
 */
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 struct wl12xx_vif *wlvif,
937 u8 hlid, const u8 *addr)
939 struct ieee80211_sta *sta;
940 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
942 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 is_zero_ether_addr(addr)))
947 sta = ieee80211_find_sta(vif, addr);
949 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic information for a recovery: FW version, the FW
 * program counter and interrupt status registers (read via the BOOT
 * partition), and the running recovery count. Restores the WORK
 * partition before returning.
 */
953 static void wlcore_print_recovery(struct wl1271 *wl)
959 wl1271_info("Hardware recovery in progress. FW ver: %s",
960 wl->chip.fw_ver_str);
962 /* change partitions momentarily so we can read the FW pc */
963 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
971 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
975 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 pc, hint_sts, ++wl->recovery_count);
978 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work item: dumps the FW panic log and recovery diagnostics
 * (unless the recovery was intentional), optionally BUGs or refuses to
 * recover per configuration, then tears down every vif (saving STA
 * sequence counters first), stops the core and asks mac80211 to
 * restart the hardware.
 */
982 static void wl1271_recovery_work(struct work_struct *work)
985 container_of(work, struct wl1271, recovery_work);
986 struct wl12xx_vif *wlvif;
987 struct ieee80211_vif *vif;
989 mutex_lock(&wl->mutex);
991 if (wl->state == WLCORE_STATE_OFF || wl->plt)
994 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 wl12xx_read_fwlog_panic(wl);
997 wlcore_print_recovery(wl);
/* Debug aid: crash the kernel on unexpected recoveries if configured */
1000 BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1003 if (wl->conf.recovery.no_recovery) {
1004 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1008 /* Prevent spurious TX during FW restart */
1009 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1011 /* reboot the chipset */
1012 while (!list_empty(&wl->wlvif_list)) {
1013 wlvif = list_first_entry(&wl->wlvif_list,
1014 struct wl12xx_vif, list);
1015 vif = wl12xx_wlvif_to_vif(wlvif);
/* Preserve STA seq numbers so they survive the FW restart */
1017 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 vif->bss_conf.bssid);
1023 __wl1271_op_remove_interface(wl, vif, false);
1026 wlcore_op_stop_locked(wl);
1028 ieee80211_restart_hw(wl->hw);
1031 * Its safe to enable TX now - the queues are stopped after a request
1032 * to restart the HW.
1034 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1037 wl->watchdog_recovery = false;
1038 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 mutex_unlock(&wl->mutex);
/* Wake the firmware out of ELP by writing to the ELP control register. */
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1044 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status/result buffers (raw FW status,
 * converted FW status, Tx result interface). On allocation failure the
 * earlier buffers are freed again before returning.
 */
1047 static int wl1271_setup(struct wl1271 *wl)
1049 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 if (!wl->raw_fw_status)
1053 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1057 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* error path: undo the allocations that succeeded */
1063 kfree(wl->fw_status);
1064 kfree(wl->raw_fw_status);
/*
 * Power the chip on: sleep before/after power-on per the chip's timing
 * requirements, reset the IO layer, select the BOOT partition and wake
 * the ELP module. Powers back off on failure.
 */
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1072 msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 ret = wl1271_power_on(wl);
1076 msleep(WL1271_POWER_ON_SLEEP);
1077 wl1271_io_reset(wl);
1080 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1084 /* ELP module wake up */
1085 ret = wlcore_fw_wakeup(wl);
/* error path: leave the chip powered off */
1093 wl1271_power_off(wl);
/*
 * Bring the chip up to the point where firmware can be booted: power
 * on, configure the bus block size (working around an sdio quirk),
 * allocate the status buffers and fetch the right firmware image.
 * Frees the status buffers if the firmware fetch fails.
 */
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1101 ret = wl12xx_set_power_on(wl)
1106 * For wl127x based devices we could use the default block
1107 * size (512 bytes), but due to a bug in the sdio driver, we
1108 * need to set it explicitly after the chip is powered on. To
1109 * simplify the code and since the performance impact is
1110 * negligible, we use the same block size for all different
1113 * Check if the bus supports blocksize alignment and, if it
1114 * doesn't, make sure we don't have the quirk.
1116 if (!wl1271_set_block_size(wl))
1117 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1119 /* TODO: make sure the lower driver has set things up correctly */
1121 ret = wl1271_setup(wl);
1125 ret = wl12xx_fetch_firmware(wl, plt);
/* fetch failed: release the buffers allocated by wl1271_setup() */
1127 kfree(wl->fw_status);
1128 kfree(wl->raw_fw_status);
1129 kfree(wl->tx_res_if);
/*
 * Enter PLT (Production Line Testing) mode: boots the chip with the
 * PLT firmware, retrying up to WL1271_BOOT_RETRIES times. Only valid
 * from the OFF state. On success updates wiphy hw/fw version info; on
 * final failure powers off and resets plt_mode.
 */
1136 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1138 int retries = WL1271_BOOT_RETRIES;
1139 struct wiphy *wiphy = wl->hw->wiphy;
/* Human-readable names for the PLT modes, indexed by plt_mode */
1141 static const char* const PLT_MODE[] = {
1150 mutex_lock(&wl->mutex);
1152 wl1271_notice("power up");
1154 if (wl->state != WLCORE_STATE_OFF) {
1155 wl1271_error("cannot go into PLT state because not "
1156 "in off state: %d", wl->state);
1161 /* Indicate to lower levels that we are now in PLT mode */
1163 wl->plt_mode = plt_mode;
1167 ret = wl12xx_chip_wakeup(wl, true);
/* PLT_CHIP_AWAKE skips full PLT FW initialization */
1171 if (plt_mode != PLT_CHIP_AWAKE) {
1172 ret = wl->ops->plt_init(wl);
1177 wl->state = WLCORE_STATE_ON;
1178 wl1271_notice("firmware booted in PLT mode %s (%s)",
1180 wl->chip.fw_ver_str);
1182 /* update hw/fw version info in wiphy struct */
1183 wiphy->hw_version = wl->chip.id;
1184 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1185 sizeof(wiphy->fw_version));
/* retry path: power off before the next boot attempt */
1190 wl1271_power_off(wl);
1194 wl->plt_mode = PLT_OFF;
1196 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1197 WL1271_BOOT_RETRIES);
1199 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down. Interrupts are disabled
 * before changing state so the IRQ handler cannot observe a half-torn
 * down device; pending work items are flushed before the final power
 * off.
 */
1204 int wl1271_plt_stop(struct wl1271 *wl)
1208 wl1271_notice("power down");
1211 * Interrupts must be disabled before setting the state to OFF.
1212 * Otherwise, the interrupt handler might be called and exit without
1213 * reading the interrupt status.
1215 wlcore_disable_interrupts(wl);
1216 mutex_lock(&wl->mutex);
1218 mutex_unlock(&wl->mutex);
1221 * This will not necessarily enable interrupts as interrupts
1222 * may have been disabled when op_stop was called. It will,
1223 * however, balance the above call to disable_interrupts().
1225 wlcore_enable_interrupts(wl);
1227 wl1271_error("cannot power down because not in PLT "
1228 "state: %d", wl->state);
1233 mutex_unlock(&wl->mutex);
/* Flush/cancel everything that might still touch the hardware */
1235 wl1271_flush_deferred_work(wl);
1236 cancel_work_sync(&wl->netstack_work);
1237 cancel_work_sync(&wl->recovery_work);
1238 cancel_delayed_work_sync(&wl->elp_work);
1239 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1241 mutex_lock(&wl->mutex);
1242 wl1271_power_off(wl);
1244 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1245 wl->state = WLCORE_STATE_OFF;
1247 wl->plt_mode = PLT_OFF;
1249 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback. Maps the skb to a link (hlid) and AC queue,
 * drops it if the link is invalid or the queue is hard-stopped, and
 * otherwise appends it to the per-link Tx queue. Applies the high
 * watermark (soft queue stop) and kicks the Tx work unless the IRQ
 * path is already handling Tx (WL1271_FLAG_TX_PENDING).
 */
1255 static void wl1271_op_tx(struct ieee80211_hw *hw,
1256 struct ieee80211_tx_control *control,
1257 struct sk_buff *skb)
1259 struct wl1271 *wl = hw->priv;
1260 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1261 struct ieee80211_vif *vif = info->control.vif;
1262 struct wl12xx_vif *wlvif = NULL;
1263 unsigned long flags;
/* Frames without a vif (e.g. during teardown) cannot be mapped */
1268 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1269 ieee80211_free_txskb(hw, skb);
1273 wlvif = wl12xx_vif_to_data(vif);
1274 mapping = skb_get_queue_mapping(skb);
1275 q = wl1271_tx_get_queue(mapping);
1277 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1279 spin_lock_irqsave(&wl->wl_lock, flags);
1282 * drop the packet if the link is invalid or the queue is stopped
1283 * for any reason but watermark. Watermark is a "soft"-stop so we
1284 * allow these packets through.
1286 if (hlid == WL12XX_INVALID_LINK_ID ||
1287 (!test_bit(hlid, wlvif->links_map)) ||
1288 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1289 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1290 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1291 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1292 ieee80211_free_txskb(hw, skb);
1296 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1298 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* Track queue depth both globally and per-vif */
1300 wl->tx_queue_count[q]++;
1301 wlvif->tx_queue_count[q]++;
1304 * The workqueue is slow to process the tx_queue and we need stop
1305 * the queue here, otherwise the queue will get too long.
1307 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1308 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1309 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1310 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1311 wlcore_stop_queue_locked(wl, wlvif, q,
1312 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1316 * The chip specific setup must run before the first TX packet -
1317 * before that, the tx_work will not be initialized!
1320 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1321 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1322 ieee80211_queue_work(wl->hw, &wl->tx_work);
1325 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet that the FW requests when it is
 * low on RX memory blocks. Only one may be pending at a time; if the
 * FW Tx path is idle the packet is pushed out immediately, otherwise
 * the threaded IRQ handler will schedule the Tx work.
 */
1328 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1330 unsigned long flags;
1333 /* no need to queue a new dummy packet if one is already pending */
1334 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1337 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1339 spin_lock_irqsave(&wl->wl_lock, flags);
1340 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1341 wl->tx_queue_count[q]++;
1342 spin_unlock_irqrestore(&wl->wl_lock, flags);
1344 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1345 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1346 return wlcore_tx_work_locked(wl);
1349 * If the FW TX is busy, TX work will be scheduled by the threaded
1350 * interrupt handler function
1356 * The size of the dummy packet should be at least 1400 bytes. However, in
1357 * order to minimize the number of bus transactions, aligning it to 512 bytes
1358 * boundaries could be beneficial, performance wise
1360 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Allocate and pre-format the dummy packet skb: a zeroed NULL-function
 * data frame (to-DS) padded to TOTAL_TX_DUMMY_PACKET_SIZE, with room
 * reserved for the Tx HW descriptor and the management TID set.
 */
1362 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1364 struct sk_buff *skb;
1365 struct ieee80211_hdr_3addr *hdr;
1366 unsigned int dummy_packet_size;
/* Payload = total size minus HW descriptor and 802.11 header */
1368 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1369 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1371 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1373 wl1271_warning("Failed to allocate a dummy packet skb");
/* Leave headroom for the Tx HW descriptor prepended later */
1377 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1379 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1380 memset(hdr, 0, sizeof(*hdr));
1381 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1382 IEEE80211_STYPE_NULLFUNC |
1383 IEEE80211_FCTL_TODS);
1385 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1387 /* Dummy packets require the TID to be management */
1388 skb->priority = WL1271_TID_MGMT;
1390 /* Initialize all fields that might be used */
1391 skb_set_queue_mapping(skb, 0);
1392 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a single cfg80211 WoWLAN packet pattern against FW limits:
 * the number of filter "fields" (contiguous masked segments, split at the
 * Ethernet/IP header boundary) must not exceed
 * WL1271_RX_FILTER_MAX_FIELDS, and the flattened fields buffer must fit
 * in WL1271_RX_FILTER_MAX_FIELDS_SIZE.  NOTE(review): return statements
 * are elided in this view; by convention this returns 0 for a valid
 * pattern and a negative errno otherwise — TODO confirm.
 */
1400 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1402 int num_fields = 0, in_field = 0, fields_size = 0;
1403 int i, pattern_len = 0;
/* A pattern without a mask cannot be translated into FW fields. */
1406 wl1271_warning("No mask in WoWLAN pattern");
1411 * The pattern is broken up into segments of bytes at different offsets
1412 * that need to be checked by the FW filter. Each segment is called
1413 * a field in the FW API. We verify that the total number of fields
1414 * required for this pattern won't exceed FW limits (8)
1415 * as well as the total fields buffer won't exceed the FW limit.
1416 * Note that if there's a pattern which crosses Ethernet/IP header
1417 * boundary a new field is required.
1419 for (i = 0; i < p->pattern_len; i++) {
/* Walk the mask bit-per-byte; a set bit means this byte is matched. */
1420 if (test_bit(i, (unsigned long *)p->mask)) {
/* Crossing the Ethernet header boundary closes the current field. */
1425 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1427 fields_size += pattern_len +
1428 RX_FILTER_FIELD_OVERHEAD;
1436 fields_size += pattern_len +
1437 RX_FILTER_FIELD_OVERHEAD;
/* Account for a field still open when the pattern ends. */
1444 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1448 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1449 wl1271_warning("RX Filter too complex. Too many segments");
1453 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1454 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; returns NULL on allocation failure.
 * Free with wl1271_rx_filter_free(). */
1461 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1463 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter: release each field's pattern buffer, then
 * (in the elided tail) presumably the filter itself — TODO confirm.
 * A NULL filter is expected to be tolerated by the elided guard. */
1466 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1473 for (i = 0; i < filter->num_fields; i++)
1474 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset + flags + pattern bytes) to an RX filter.
 * The pattern is copied into a freshly allocated buffer owned by the
 * filter (freed by wl1271_rx_filter_free()).  Fails when the filter
 * already holds WL1271_RX_FILTER_MAX_FIELDS fields or the allocation
 * fails; error return values are elided in this view.
 */
1479 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1480 u16 offset, u8 flags,
1481 const u8 *pattern, u8 len)
1483 struct wl12xx_rx_filter_field *field;
1485 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1486 wl1271_warning("Max fields per RX filter. can't alloc another");
1490 field = &filter->fields[filter->num_fields];
1492 field->pattern = kzalloc(len, GFP_KERNEL);
1493 if (!field->pattern) {
1494 wl1271_warning("Failed to allocate RX filter pattern");
/* Only count the field once its pattern buffer exists. */
1498 filter->num_fields++;
/* Offset is stored little-endian as expected by the FW interface. */
1500 field->offset = cpu_to_le16(offset);
1501 field->flags = flags;
1503 memcpy(field->pattern, pattern, len);
/* Compute the flattened (wire-format) size of all fields in the filter:
 * per field, the struct size minus the pattern pointer plus the actual
 * pattern length — matching the layout written by
 * wl1271_rx_filter_flatten_fields(). */
1508 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1510 int i, fields_size = 0;
1512 for (i = 0; i < filter->num_fields; i++)
1513 fields_size += filter->fields[i].len +
1514 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into a contiguous buffer for the FW:
 * each entry is the field struct with the pattern bytes inlined in place
 * of the pattern pointer.  The caller must size buf with
 * wl1271_rx_filter_get_fields_size().
 */
1520 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1524 struct wl12xx_rx_filter_field *field;
1526 for (i = 0; i < filter->num_fields; i++) {
1527 field = (struct wl12xx_rx_filter_field *)buf;
1529 field->offset = filter->fields[i].offset;
1530 field->flags = filter->fields[i].flags;
1531 field->len = filter->fields[i].len;
/* Inline the pattern bytes where the pattern pointer sits in the struct. */
1533 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
/* Advance past struct-minus-pointer plus the inlined pattern. */
1534 buf += sizeof(struct wl12xx_rx_filter_field) -
1535 sizeof(u8 *) + field->len;
1540  * Allocates an RX filter returned through f
1541  * which needs to be freed using rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN pattern into a wl12xx RX filter: scan the
 * mask for contiguous matched runs, split runs at the Ethernet/IP header
 * boundary, and emit one filter field per run with the appropriate
 * header-relative offset and flag.  On success *f receives the filter
 * (action = FILTER_SIGNAL); on error the partially-built filter is
 * freed.  Return values are elided in this view.
 */
1544 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1545 struct wl12xx_rx_filter **f)
1548 struct wl12xx_rx_filter *filter;
1552 filter = wl1271_rx_filter_alloc();
1554 wl1271_warning("Failed to alloc rx filter");
1560 while (i < p->pattern_len) {
/* Skip unmasked bytes — they don't participate in matching. */
1561 if (!test_bit(i, (unsigned long *)p->mask)) {
/* Find the end of this contiguous masked run. */
1566 for (j = i; j < p->pattern_len; j++) {
1567 if (!test_bit(j, (unsigned long *)p->mask))
/* A run crossing the Ethernet header boundary must be split:
 * FW fields are addressed relative to a single header. */
1570 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1571 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1575 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1577 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* Past the Ethernet header: offset becomes IP-header-relative. */
1579 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1580 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1585 ret = wl1271_rx_filter_alloc_field(filter,
1588 &p->pattern[i], len);
/* Matching packets should wake the host. */
1595 filter->action = FILTER_SIGNAL;
/* Error path: release everything allocated so far. */
1601 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filters from the WoWLAN configuration.
 * With no patterns (or wow->any): clear all filters and disable the
 * default-filter drop behavior.  Otherwise: validate every pattern
 * first (so current FW state is untouched on bad input), then clear
 * existing filters, install one filter per pattern, and finally set the
 * default action to DROP so only matching packets wake the host.
 * Returns 0 or a negative errno (error paths partially elided here).
 */
1607 static int wl1271_configure_wowlan(struct wl1271 *wl,
1608 struct cfg80211_wowlan *wow)
1612 if (!wow || wow->any || !wow->n_patterns) {
1613 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1618 ret = wl1271_rx_filter_clear_all(wl);
1625 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1628 /* Validate all incoming patterns before clearing current FW state */
1629 for (i = 0; i < wow->n_patterns; i++) {
1630 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1632 wl1271_warning("Bad wowlan pattern %d", i);
1637 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1641 ret = wl1271_rx_filter_clear_all(wl);
1645 /* Translate WoWLAN patterns into filters */
1646 for (i = 0; i < wow->n_patterns; i++) {
1647 struct cfg80211_pkt_pattern *p;
1648 struct wl12xx_rx_filter *filter = NULL;
1650 p = &wow->patterns[i];
1652 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1654 wl1271_warning("Failed to create an RX filter from "
1655 "wowlan pattern %d", i);
1659 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* The FW keeps its own copy; the host-side filter can be freed. */
1661 wl1271_rx_filter_free(filter);
/* Default action DROP: unmatched traffic won't wake the host. */
1666 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time configuration for a STA vif: install WoWLAN filters and,
 * when the suspend wake-up parameters differ from the runtime ones,
 * switch the FW wake-up conditions/listen interval to the suspend
 * values.  No-op when the STA is not associated.
 */
1672 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1673 struct wl12xx_vif *wlvif,
1674 struct cfg80211_wowlan *wow)
1678 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1681 ret = wl1271_configure_wowlan(wl, wow);
/* Skip the ACX call when suspend settings equal the active ones. */
1685 if ((wl->conf.conn.suspend_wake_up_event ==
1686 wl->conf.conn.wake_up_event) &&
1687 (wl->conf.conn.suspend_listen_interval ==
1688 wl->conf.conn.listen_interval))
1691 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1692 wl->conf.conn.suspend_wake_up_event,
1693 wl->conf.conn.suspend_listen_interval);
1696 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Suspend-time configuration for an AP vif: enable beacon filtering and
 * install WoWLAN filters.  No-op when the AP has not been started.
 */
1702 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1703 struct wl12xx_vif *wlvif,
1704 struct cfg80211_wowlan *wow)
1708 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1711 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1715 ret = wl1271_configure_wowlan(wl, wow);
/* Dispatch suspend configuration by BSS type; other vif types are left
 * untouched (the elided tail presumably returns 0 — TODO confirm). */
1724 static int wl1271_configure_suspend(struct wl1271 *wl,
1725 struct wl12xx_vif *wlvif,
1726 struct cfg80211_wowlan *wow)
1728 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1729 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1730 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1731 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo per-vif suspend configuration on resume: remove WoWLAN filters,
 * restore the runtime wake-up conditions for an associated STA, and
 * disable the beacon filtering enabled at suspend (AP path, per the
 * final wl1271_acx_beacon_filter_opt(..., false) call — the branch
 * structure around it is elided here).
 */
1735 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1738 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1739 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* Only STA and AP vifs carry suspend state to undo. */
1741 if ((!is_ap) && (!is_sta))
1744 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1745 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* Passing NULL clears the WoWLAN filters installed at suspend. */
1748 wl1271_configure_wowlan(wl, NULL);
/* Nothing to restore if suspend used the runtime parameters. */
1751 if ((wl->conf.conn.suspend_wake_up_event ==
1752 wl->conf.conn.wake_up_event) &&
1753 (wl->conf.conn.suspend_listen_interval ==
1754 wl->conf.conn.listen_interval))
1757 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1758 wl->conf.conn.wake_up_event,
1759 wl->conf.conn.listen_interval);
1762 wl1271_error("resume: wake up conditions failed: %d",
1766 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend handler.  Refuses to suspend while a recovery is
 * pending, flushes TX, configures each vif for wake-on-WLAN, disables
 * fast-link notifications and optionally RX-BA filtering, then quiesces
 * the driver: disables/re-enables interrupts to drain the IRQ path,
 * sets WL1271_FLAG_SUSPENDED to block new threaded-IRQ work, and
 * flushes/cancels outstanding work items.  Several error-path lines are
 * elided in this view.
 */
1770 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1771 struct cfg80211_wowlan *wow)
1773 struct wl1271 *wl = hw->priv;
1774 struct wl12xx_vif *wlvif;
1777 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1780 /* we want to perform the recovery before suspending */
1781 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1782 wl1271_warning("postponing suspend to perform recovery");
/* Drain pending TX before touching FW suspend state. */
1786 wl1271_tx_flush(wl);
1788 mutex_lock(&wl->mutex);
1790 ret = wl1271_ps_elp_wakeup(wl);
1792 mutex_unlock(&wl->mutex);
1796 wl->wow_enabled = true;
1797 wl12xx_for_each_wlvif(wl, wlvif) {
/* P2P management vifs have no suspend configuration. */
1798 if (wlcore_is_p2p_mgmt(wlvif))
1801 ret = wl1271_configure_suspend(wl, wlvif, wow);
1803 mutex_unlock(&wl->mutex);
1804 wl1271_warning("couldn't prepare device to suspend");
1809 /* disable fast link flow control notifications from FW */
1810 ret = wlcore_hw_interrupt_notify(wl, false);
1814 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1815 ret = wlcore_hw_rx_ba_filter(wl,
1816 !!wl->conf.conn.suspend_rx_ba_activity);
1821 wl1271_ps_elp_sleep(wl);
1822 mutex_unlock(&wl->mutex);
1825 wl1271_warning("couldn't prepare device to suspend");
1829 /* flush any remaining work */
1830 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1833 * disable and re-enable interrupts in order to flush
1836 wlcore_disable_interrupts(wl);
1839 * set suspended flag to avoid triggering a new threaded_irq
1840 * work. no need for spinlock as interrupts are disabled.
1842 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1844 wlcore_enable_interrupts(wl);
1845 flush_work(&wl->tx_work);
1846 flush_delayed_work(&wl->elp_work);
1849 * Cancel the watchdog even if above tx_flush failed. We will detect
1850 * it on resume anyway.
1852 cancel_delayed_work(&wl->tx_watchdog_work);
/*
 * mac80211 resume handler — the mirror of wl1271_op_suspend().
 * Re-enables IRQ-work enqueuing, runs any IRQ work that was postponed
 * while suspended (but never talks to the HW if a recovery is pending —
 * that recovery is re-queued instead), undoes per-vif suspend
 * configuration, restores FW notifications, and arms the TX watchdog
 * re-init flag so a stale watchdog cannot trigger a spurious recovery.
 */
1857 static int wl1271_op_resume(struct ieee80211_hw *hw)
1859 struct wl1271 *wl = hw->priv;
1860 struct wl12xx_vif *wlvif;
1861 unsigned long flags;
1862 bool run_irq_work = false, pending_recovery;
1865 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1867 WARN_ON(!wl->wow_enabled);
1870 * re-enable irq_work enqueuing, and call irq_work directly if
1871 * there is a pending work.
1873 spin_lock_irqsave(&wl->wl_lock, flags);
1874 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1875 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1876 run_irq_work = true;
1877 spin_unlock_irqrestore(&wl->wl_lock, flags);
1879 mutex_lock(&wl->mutex);
1881 /* test the recovery flag before calling any SDIO functions */
1882 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1886 wl1271_debug(DEBUG_MAC80211,
1887 "run postponed irq_work directly");
1889 /* don't talk to the HW if recovery is pending */
1890 if (!pending_recovery) {
1891 ret = wlcore_irq_locked(wl);
/* IRQ handling failed — schedule a full recovery. */
1893 wl12xx_queue_recovery_work(wl);
1896 wlcore_enable_interrupts(wl);
1899 if (pending_recovery) {
1900 wl1271_warning("queuing forgotten recovery on resume");
1901 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1905 ret = wl1271_ps_elp_wakeup(wl);
1909 wl12xx_for_each_wlvif(wl, wlvif) {
1910 if (wlcore_is_p2p_mgmt(wlvif))
1913 wl1271_configure_resume(wl, wlvif);
/* Re-enable the fast-link notifications disabled at suspend. */
1916 ret = wlcore_hw_interrupt_notify(wl, true);
1920 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1921 ret = wlcore_hw_rx_ba_filter(wl, false);
1926 wl1271_ps_elp_sleep(wl);
1929 wl->wow_enabled = false;
1932 * Set a flag to re-init the watchdog on the first Tx after resume.
1933 * That way we avoid possible conditions where Tx-complete interrupts
1934 * fail to arrive and we perform a spurious recovery.
1936 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1937 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback — intentionally does not boot the chip.
 * Booting is deferred to add_interface because the MAC address (known
 * only once an interface exists) must be set before FW download.
 */
1943 static int wl1271_op_start(struct ieee80211_hw *hw)
1945 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1948 * We have to delay the booting of the hardware because
1949 * we need to know the local MAC address before downloading and
1950 * initializing the firmware. The MAC address cannot be changed
1951 * after boot, and without the proper MAC address, the firmware
1952 * will not function properly.
1954 * The MAC address is first known when the corresponding interface
1955 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held: transition to WLCORE_STATE_OFF,
 * disable interrupts, drain/cancel all work (dropping the mutex for the
 * _sync cancels to avoid deadlock), reset TX state, power off, and
 * re-initialize all driver bookkeeping so a later add_interface starts
 * from a clean slate.  Saves the Reg-Domain channel configuration so it
 * can be re-applied after recovery.
 */
1961 static void wlcore_op_stop_locked(struct wl1271 *wl)
1965 if (wl->state == WLCORE_STATE_OFF) {
/* Already off: just rebalance the interrupt-disable done by a
 * recovery that will no longer run. */
1966 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1968 wlcore_enable_interrupts(wl);
1974 * this must be before the cancel_work calls below, so that the work
1975 * functions don't perform further work.
1977 wl->state = WLCORE_STATE_OFF;
1980 * Use the nosync variant to disable interrupts, so the mutex could be
1981 * held while doing so without deadlocking.
1983 wlcore_disable_interrupts_nosync(wl);
/* Drop the mutex: the _sync cancels below may wait on work items
 * that themselves take wl->mutex. */
1985 mutex_unlock(&wl->mutex);
1987 wlcore_synchronize_interrupts(wl);
1988 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1989 cancel_work_sync(&wl->recovery_work);
1990 wl1271_flush_deferred_work(wl);
1991 cancel_delayed_work_sync(&wl->scan_complete_work);
1992 cancel_work_sync(&wl->netstack_work);
1993 cancel_work_sync(&wl->tx_work);
1994 cancel_delayed_work_sync(&wl->elp_work);
1995 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1997 /* let's notify MAC80211 about the remaining pending TX frames */
1998 mutex_lock(&wl->mutex);
1999 wl12xx_tx_reset(wl);
2001 wl1271_power_off(wl);
2003 * In case a recovery was scheduled, interrupts were disabled to avoid
2004 * an interrupt storm. Now that the power is down, it is safe to
2005 * re-enable interrupts to balance the disable depth
2007 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2008 wlcore_enable_interrupts(wl);
/* Reset all runtime state to boot defaults. */
2010 wl->band = IEEE80211_BAND_2GHZ;
2013 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2014 wl->channel_type = NL80211_CHAN_NO_HT;
2015 wl->tx_blocks_available = 0;
2016 wl->tx_allocated_blocks = 0;
2017 wl->tx_results_count = 0;
2018 wl->tx_packets_count = 0;
2019 wl->time_offset = 0;
2020 wl->ap_fw_ps_map = 0;
2022 wl->sleep_auth = WL1271_PSM_ILLEGAL;
2023 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2024 memset(wl->links_map, 0, sizeof(wl->links_map));
2025 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2026 memset(wl->session_ids, 0, sizeof(wl->session_ids));
2027 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2028 wl->active_sta_count = 0;
2029 wl->active_link_count = 0;
2031 /* The system link is always allocated */
2032 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2033 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2034 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2037 * this is performed after the cancel_work calls and the associated
2038 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2039 * get executed before all these vars have been reset.
2043 wl->tx_blocks_freed = 0;
2045 for (i = 0; i < NUM_TX_QUEUES; i++) {
2046 wl->tx_pkts_freed[i] = 0;
2047 wl->tx_allocated_pkts[i] = 0;
2050 wl1271_debugfs_reset(wl);
/* Free per-boot FW status buffers; reallocated on next boot. */
2052 kfree(wl->raw_fw_status);
2053 wl->raw_fw_status = NULL;
2054 kfree(wl->fw_status);
2055 wl->fw_status = NULL;
2056 kfree(wl->tx_res_if);
2057 wl->tx_res_if = NULL;
2058 kfree(wl->target_mem_map);
2059 wl->target_mem_map = NULL;
2062 * FW channels must be re-calibrated after recovery,
2063 * save current Reg-Domain channel configuration and clear it.
2065 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2066 sizeof(wl->reg_ch_conf_pending));
2067 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take wl->mutex and delegate to the locked
 * implementation. */
2070 static void wlcore_op_stop(struct ieee80211_hw *hw)
2072 struct wl1271 *wl = hw->priv;
2074 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2076 mutex_lock(&wl->mutex);
2078 wlcore_op_stop_locked(wl);
2080 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch times out: report failure to
 * mac80211 via ieee80211_chswitch_done(vif, false) and tell the FW to
 * stop the channel switch.  Does nothing if the device is not ON or the
 * switch already completed (CS_PROGRESS flag cleared).
 */
2083 static void wlcore_channel_switch_work(struct work_struct *work)
2085 struct delayed_work *dwork;
2087 struct ieee80211_vif *vif;
2088 struct wl12xx_vif *wlvif;
2091 dwork = container_of(work, struct delayed_work, work);
2092 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2095 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2097 mutex_lock(&wl->mutex);
2099 if (unlikely(wl->state != WLCORE_STATE_ON))
2102 /* check the channel switch is still ongoing */
2103 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2106 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = the switch did not succeed. */
2107 ieee80211_chswitch_done(vif, false);
2109 ret = wl1271_ps_elp_wakeup(wl);
2113 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2115 wl1271_ps_elp_sleep(wl);
2117 mutex_unlock(&wl->mutex);
/*
 * Delayed work that declares a connection loss to mac80211 once the
 * grace period expires.  Skipped if the device is not ON or the STA is
 * no longer marked associated.
 */
2120 static void wlcore_connection_loss_work(struct work_struct *work)
2122 struct delayed_work *dwork;
2124 struct ieee80211_vif *vif;
2125 struct wl12xx_vif *wlvif;
2127 dwork = container_of(work, struct delayed_work, work);
2128 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2131 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2133 mutex_lock(&wl->mutex);
2135 if (unlikely(wl->state != WLCORE_STATE_ON))
2138 /* Call mac80211 connection loss */
2139 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2142 vif = wl12xx_wlvif_to_vif(wlvif);
2143 ieee80211_connection_loss(vif);
2145 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels the ROC (remain-on-channel) held for a
 * pending-auth station once WLCORE_PEND_AUTH_ROC_TIMEOUT has elapsed
 * since the last auth reply.  Re-checks the timestamp under the mutex
 * in case a newer auth reply re-armed the timeout while the work was
 * queued.
 */
2148 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2150 struct delayed_work *dwork;
2152 struct wl12xx_vif *wlvif;
2153 unsigned long time_spare;
2156 dwork = container_of(work, struct delayed_work, work);
2157 wlvif = container_of(dwork, struct wl12xx_vif,
2158 pending_auth_complete_work);
2161 mutex_lock(&wl->mutex);
2163 if (unlikely(wl->state != WLCORE_STATE_ON))
2167 * Make sure a second really passed since the last auth reply. Maybe
2168 * a second auth reply arrived while we were stuck on the mutex.
2169 * Check for a little less than the timeout to protect from scheduler
2172 time_spare = jiffies +
2173 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
/* A newer auth reply pushed the deadline forward — bail out. */
2174 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2177 ret = wl1271_ps_elp_wakeup(wl);
2181 /* cancel the ROC if active */
2182 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2184 wl1271_ps_elp_sleep(wl);
2186 mutex_unlock(&wl->mutex);
/* Allocate a free rate-policy slot from the bitmap; on success the slot
 * index is presumably stored through *idx in an elided line — TODO
 * confirm.  Fails when all WL12XX_MAX_RATE_POLICIES slots are taken. */
2189 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2191 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2192 WL12XX_MAX_RATE_POLICIES);
2193 if (policy >= WL12XX_MAX_RATE_POLICIES)
2196 __set_bit(policy, wl->rate_policies_map);
/* Return a rate-policy slot to the bitmap and poison *idx with the
 * out-of-range sentinel so double-free is caught by the WARN_ON. */
2201 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2203 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2206 __clear_bit(*idx, wl->rate_policies_map);
2207 *idx = WL12XX_MAX_RATE_POLICIES;
/* Allocate a free keep-alive (KLV) template slot from the bitmap —
 * same pattern as wl12xx_allocate_rate_policy(); the store through
 * *idx is elided in this view. */
2210 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2212 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2213 WLCORE_MAX_KLV_TEMPLATES);
2214 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2217 __set_bit(policy, wl->klv_templates_map);
/* Release a keep-alive template slot; poisons *idx with the sentinel
 * value so a stale index trips the WARN_ON on reuse. */
2222 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2224 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2227 __clear_bit(*idx, wl->klv_templates_map);
2228 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map a vif's BSS type (plus its P2P flag, tested in elided lines) to
 * the FW role type: AP/P2P-GO, STA/P2P-CL, or IBSS.  Returns
 * WL12XX_INVALID_ROLE_TYPE for an unknown BSS type.
 */
2231 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2233 switch (wlvif->bss_type) {
2234 case BSS_TYPE_AP_BSS:
2236 return WL1271_ROLE_P2P_GO;
2238 return WL1271_ROLE_AP;
2240 case BSS_TYPE_STA_BSS:
2242 return WL1271_ROLE_P2P_CL;
2244 return WL1271_ROLE_STA;
2247 return WL1271_ROLE_IBSS;
2250 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2252 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the per-vif driver state for a newly added interface:
 * derive the BSS type from the mac80211 interface type, invalidate all
 * role/link ids, allocate rate policies (and, for STA/IBSS, a keep-alive
 * template), seed rate sets, copy the global-to-per-vif settings
 * (band/channel/power/channel-type), and set up the vif's work items
 * and RX-streaming timer.  Error handling for failed allocations is
 * elided in this view.
 */
2255 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2257 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2260 /* clear everything but the persistent data */
2261 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2263 switch (ieee80211_vif_type_p2p(vif)) {
2264 case NL80211_IFTYPE_P2P_CLIENT:
2267 case NL80211_IFTYPE_STATION:
2268 case NL80211_IFTYPE_P2P_DEVICE:
2269 wlvif->bss_type = BSS_TYPE_STA_BSS;
2271 case NL80211_IFTYPE_ADHOC:
2272 wlvif->bss_type = BSS_TYPE_IBSS;
2274 case NL80211_IFTYPE_P2P_GO:
2277 case NL80211_IFTYPE_AP:
2278 wlvif->bss_type = BSS_TYPE_AP_BSS;
/* Unknown interface type: mark invalid (caller rejects it). */
2281 wlvif->bss_type = MAX_BSS_TYPE;
2285 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2286 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2287 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2289 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2290 wlvif->bss_type == BSS_TYPE_IBSS) {
2291 /* init sta/ibss data */
2292 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2293 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2294 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2295 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2296 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2297 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2298 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2299 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP path: broadcast/global links start invalid; rate policies
 * cover mgmt, bcast, and one unicast policy per AC. */
2302 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2303 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2304 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2305 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2306 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2307 wl12xx_allocate_rate_policy(wl,
2308 &wlvif->ap.ucast_rate_idx[i]);
2309 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2311 * TODO: check if basic_rate shouldn't be
2312 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2313 * instead (the same thing for STA above).
2315 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2316 /* TODO: this seems to be used only for STA, check it */
2317 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2320 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2321 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2322 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2325 * mac80211 configures some values globally, while we treat them
2326 * per-interface. thus, on init, we have to copy them from wl
2328 wlvif->band = wl->band;
2329 wlvif->channel = wl->channel;
2330 wlvif->power_level = wl->power_level;
2331 wlvif->channel_type = wl->channel_type;
2333 INIT_WORK(&wlvif->rx_streaming_enable_work,
2334 wl1271_rx_streaming_enable_work);
2335 INIT_WORK(&wlvif->rx_streaming_disable_work,
2336 wl1271_rx_streaming_disable_work);
2337 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2338 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2339 wlcore_channel_switch_work);
2340 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2341 wlcore_connection_loss_work);
2342 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2343 wlcore_pending_auth_complete_work);
2344 INIT_LIST_HEAD(&wlvif->list);
2346 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2347 (unsigned long) wlvif);
/*
 * Wake the chip, boot the firmware, and run HW init — retried up to
 * WL1271_BOOT_RETRIES times (retry-loop control lines are elided in
 * this view).  On a failed attempt interrupts are disabled, deferred
 * work flushed, and the chip powered off before retrying.  On success:
 * publishes hw/fw version to wiphy, disables 5 GHz channels when 11a is
 * unsupported per the NVS, and moves the driver to WLCORE_STATE_ON.
 */
2351 static int wl12xx_init_fw(struct wl1271 *wl)
2353 int retries = WL1271_BOOT_RETRIES;
2354 bool booted = false;
2355 struct wiphy *wiphy = wl->hw->wiphy;
2360 ret = wl12xx_chip_wakeup(wl, false);
2364 ret = wl->ops->boot(wl);
2368 ret = wl1271_hw_init(wl);
2376 mutex_unlock(&wl->mutex);
2377 /* Unlocking the mutex in the middle of handling is
2378 inherently unsafe. In this case we deem it safe to do,
2379 because we need to let any possibly pending IRQ out of
2380 the system (and while we are WLCORE_STATE_OFF the IRQ
2381 work function will not do anything.) Also, any other
2382 possible concurrent operations will fail due to the
2383 current state, hence the wl1271 struct should be safe. */
2384 wlcore_disable_interrupts(wl);
2385 wl1271_flush_deferred_work(wl);
2386 cancel_work_sync(&wl->netstack_work);
2387 mutex_lock(&wl->mutex);
2389 wl1271_power_off(wl);
2393 wl1271_error("firmware boot failed despite %d retries",
2394 WL1271_BOOT_RETRIES);
2398 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2400 /* update hw/fw version info in wiphy struct */
2401 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy may leave fw_version unterminated when
 * fw_ver_str fills the buffer — relies on sizes elsewhere. */
2402 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2403 sizeof(wiphy->fw_version));
2406 * Now we know if 11a is supported (info from the NVS), so disable
2407 * 11a channels if not supported
2409 if (!wl->enable_11a)
2410 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2412 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2413 wl->enable_11a ? "" : "not ");
2415 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid link id, i.e. the device
 * role has been started. */
2420 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2422 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2426  * Check whether a fw switch (i.e. moving from one loaded
2427  * fw to another) is needed. This function is also responsible
2428  * for updating wl->last_vif_count, so it must be called before
2429  * loading a non-plt fw (so the correct fw (single-role/multi-role)
/*
 * Returns true when the vif count crossing the 1-vif boundary requires
 * switching between the single-role and multi-role firmware images.
 * Never requests a switch while a vif change is already in progress,
 * while the device is OFF (the right fw will be picked at boot), or
 * when only one fw image exists (!wl->mr_fw_name).
 */
2432 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2433 struct vif_counter_data vif_counter_data,
2436 enum wl12xx_fw_type current_fw = wl->fw_type;
2437 u8 vif_count = vif_counter_data.counter;
2439 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2442 /* increase the vif count if this is a new vif */
2443 if (add && !vif_counter_data.cur_vif_running)
2446 wl->last_vif_count = vif_count;
2448 /* no need for fw change if the device is OFF */
2449 if (wl->state == WLCORE_STATE_OFF)
2452 /* no need for fw change if a single fw is used */
2453 if (!wl->mr_fw_name)
2456 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2458 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2465  * Enter "forced psm". Make sure the sta is in psm against the ap,
2466  * to make the fw switch a bit more disconnection-persistent.
/* Put every STA vif into power-save mode before an intended fw switch,
 * so the AP buffers frames and the connection is more likely to survive
 * the brief recovery. */
2468 static void wl12xx_force_active_psm(struct wl1271 *wl)
2470 struct wl12xx_vif *wlvif;
2472 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2473 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for the active-interface iteration in
 * wlcore_allocate_hw_queue_base(): a bitmap of hw-queue bases already
 * in use, the vif being added, and whether that vif is itself among the
 * iterated (already-running) interfaces. */
2477 struct wlcore_hw_queue_iter_data {
2478 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2480 struct ieee80211_vif *vif;
2481 /* is the current vif among those iterated */
/*
 * Per-interface iterator callback: mark the hw-queue base used by each
 * active vif in the bitmap.  P2P_DEVICE vifs have no hw queues and are
 * skipped; encountering the vif currently being added sets cur_running
 * instead of claiming a new base.
 */
2485 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2486 struct ieee80211_vif *vif)
2488 struct wlcore_hw_queue_iter_data *iter_data = data;
2490 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2491 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2494 if (iter_data->cur_running || vif == iter_data->vif) {
2495 iter_data->cur_running = true;
/* hw_queue[0] / NUM_TX_QUEUES recovers the base slot index. */
2499 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to the vif.
 * Reuses the vif's existing base when mac80211 already runs it
 * (resume/recovery); otherwise picks the first free base slot from the
 * bitmap built by wlcore_hw_queue_iter().  AP vifs additionally get a
 * CAB (content-after-beacon) queue from the reserved range after all
 * regular queues; other vifs get IEEE80211_INVAL_HW_QUEUE.
 * P2P_DEVICE vifs get no queues at all.
 */
2502 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2503 struct wl12xx_vif *wlvif)
2505 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2506 struct wlcore_hw_queue_iter_data iter_data = {};
2509 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2510 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2514 iter_data.vif = vif;
2516 /* mark all bits taken by active interfaces */
2517 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2518 IEEE80211_IFACE_ITER_RESUME_ALL,
2519 wlcore_hw_queue_iter, &iter_data);
2521 /* the current vif is already running in mac80211 (resume/recovery) */
2522 if (iter_data.cur_running) {
2523 wlvif->hw_queue_base = vif->hw_queue[0];
2524 wl1271_debug(DEBUG_MAC80211,
2525 "using pre-allocated hw queue base %d",
2526 wlvif->hw_queue_base);
2528 /* interface type might have changed type */
2529 goto adjust_cab_queue;
2532 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2533 WLCORE_NUM_MAC_ADDRESSES);
2534 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2537 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2538 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2539 wlvif->hw_queue_base);
2541 for (i = 0; i < NUM_TX_QUEUES; i++) {
/* Fresh base: clear any stale stop reasons and publish the
 * queue mapping to mac80211. */
2542 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2543 /* register hw queues in mac80211 */
2544 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2548 /* the last places are reserved for cab queues per interface */
2549 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2550 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2551 wlvif->hw_queue_base / NUM_TX_QUEUES;
2553 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface handler.  Rejects new interfaces in PLT mode
 * or during recovery, initializes per-vif state, allocates hw queues,
 * triggers a firmware switch (via an intended recovery) when the vif
 * count requires it, boots the firmware on first interface (this is
 * where the MAC address finally becomes known — see wl1271_op_start),
 * and enables the FW role(s) for the vif.  Error-path lines are elided
 * in this view.
 */
2558 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2559 struct ieee80211_vif *vif)
2561 struct wl1271 *wl = hw->priv;
2562 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2563 struct vif_counter_data vif_count;
2568 wl1271_error("Adding Interface not allowed while in PLT mode");
2572 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2573 IEEE80211_VIF_SUPPORTS_UAPSD |
2574 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2576 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2577 ieee80211_vif_type_p2p(vif), vif->addr);
2579 wl12xx_get_vif_count(hw, vif, &vif_count);
2581 mutex_lock(&wl->mutex);
2582 ret = wl1271_ps_elp_wakeup(wl);
2587 * in some very corner case HW recovery scenarios its possible to
2588 * get here before __wl1271_op_remove_interface is complete, so
2589 * opt out if that is the case.
2591 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2592 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2598 ret = wl12xx_init_vif_data(wl, vif);
2603 role_type = wl12xx_get_role_type(wl, wlvif);
2604 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2609 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* Single-role <-> multi-role fw switch is done via an intended
 * recovery; force PSM first so the connection survives it. */
2613 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2614 wl12xx_force_active_psm(wl);
2615 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2616 mutex_unlock(&wl->mutex);
2617 wl1271_recovery_work(&wl->recovery_work);
2622 * TODO: after the nvs issue will be solved, move this block
2623 * to start(), and make sure here the driver is ON.
2625 if (wl->state == WLCORE_STATE_OFF) {
2627 * we still need this in order to configure the fw
2628 * while uploading the nvs
2630 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2632 ret = wl12xx_init_fw(wl);
2637 if (!wlcore_is_p2p_mgmt(wlvif)) {
2638 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2639 role_type, &wlvif->role_id);
2643 ret = wl1271_init_vif_specific(wl, vif);
/* P2P management path: enable a DEVICE role instead. */
2648 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2649 &wlvif->dev_role_id);
2653 /* needed mainly for configuring rate policies */
2654 ret = wl1271_sta_hw_init(wl, wlvif);
2659 list_add(&wlvif->list, &wl->wlvif_list);
2660 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2662 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2667 wl1271_ps_elp_sleep(wl);
2669 mutex_unlock(&wl->mutex);
/*
 * Tear down one virtual interface. Caller holds wl->mutex.
 *
 * Idles any scan owned by this vif, disables its firmware role(s)
 * (skipped while a hardware recovery is in progress), resets the
 * vif's TX state, frees rate policies/keys, and finally cancels the
 * vif's pending works — dropping wl->mutex around the *_sync cancels
 * to avoid deadlocking against works that take the mutex themselves.
 */
2674 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2675 struct ieee80211_vif *vif,
2676 bool reset_tx_queues)
2678 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2680 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2682 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2684 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2687 /* because of hardware recovery, we may get here twice */
2688 if (wl->state == WLCORE_STATE_OFF)
2691 wl1271_info("down");
2693 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2694 wl->scan_wlvif == wlvif) {
2696 * Rearm the tx watchdog just before idling scan. This
2697 * prevents just-finished scans from triggering the watchdog
2699 wl12xx_rearm_tx_watchdog_locked(wl);
2701 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2702 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2703 wl->scan_wlvif = NULL;
2704 wl->scan.req = NULL;
2705 ieee80211_scan_completed(wl->hw, true);
2708 if (wl->sched_vif == wlvif)
2709 wl->sched_vif = NULL;
2711 if (wl->roc_vif == vif) {
2713 ieee80211_remain_on_channel_expired(wl->hw);
2716 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2717 /* disable active roles */
2718 ret = wl1271_ps_elp_wakeup(wl);
2722 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2723 wlvif->bss_type == BSS_TYPE_IBSS) {
2724 if (wl12xx_dev_role_started(wlvif))
2725 wl12xx_stop_dev(wl, wlvif);
2728 if (!wlcore_is_p2p_mgmt(wlvif)) {
2729 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2733 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2738 wl1271_ps_elp_sleep(wl);
2741 wl12xx_tx_reset_wlvif(wl, wlvif);
2743 /* clear all hlids (except system_hlid) */
2744 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2746 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2747 wlvif->bss_type == BSS_TYPE_IBSS) {
2748 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2749 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2750 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2751 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2752 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2754 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2755 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2756 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2757 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2758 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2759 wl12xx_free_rate_policy(wl,
2760 &wlvif->ap.ucast_rate_idx[i]);
2761 wl1271_free_ap_keys(wl, wlvif);
/* drop the cached probe-request template and unlink the vif */
2764 dev_kfree_skb(wlvif->probereq);
2765 wlvif->probereq = NULL;
2766 if (wl->last_wlvif == wlvif)
2767 wl->last_wlvif = NULL;
2768 list_del(&wlvif->list);
2769 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2770 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2771 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2779 * Last AP, have more stations. Configure sleep auth according to STA.
2780 * Don't do this on unintended recovery.
2782 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2783 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2786 if (wl->ap_count == 0 && is_ap) {
2787 /* mask ap events */
2788 wl->event_mask &= ~wl->ap_event_mask;
2789 wl1271_event_unmask(wl);
2792 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2793 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2794 /* Configure for power according to debugfs */
2795 if (sta_auth != WL1271_PSM_ILLEGAL)
2796 wl1271_acx_sleep_auth(wl, sta_auth);
2797 /* Configure for ELP power saving */
2799 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/*
 * Drop the mutex before synchronously cancelling the vif works;
 * those works may themselves acquire wl->mutex.
 */
2803 mutex_unlock(&wl->mutex);
2805 del_timer_sync(&wlvif->rx_streaming_timer);
2806 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2807 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2808 cancel_work_sync(&wlvif->rc_update_work);
2809 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2810 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2811 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2813 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback. Validates that the vif is still
 * known to the driver (recovery may already have torn it down), then
 * delegates to __wl1271_op_remove_interface() and, if the remaining
 * vif mix requires a different firmware, queues an intended recovery
 * to restart with the right FW.
 */
2816 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2817 struct ieee80211_vif *vif)
2819 struct wl1271 *wl = hw->priv;
2820 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2821 struct wl12xx_vif *iter;
2822 struct vif_counter_data vif_count;
2824 wl12xx_get_vif_count(hw, vif, &vif_count);
2825 mutex_lock(&wl->mutex);
2827 if (wl->state == WLCORE_STATE_OFF ||
2828 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2832 * wl->vif can be null here if someone shuts down the interface
2833 * just when hardware recovery has been started.
2835 wl12xx_for_each_wlvif(wl, iter) {
2839 __wl1271_op_remove_interface(wl, vif, true);
2842 WARN_ON(iter != wlvif);
2843 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2844 wl12xx_force_active_psm(wl);
2845 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2846 wl12xx_queue_recovery_work(wl);
2849 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface callback: implemented as remove + re-add
 * with the new nl80211 iftype. The VIF_CHANGE_IN_PROGRESS flag marks
 * the window so other paths can tell this is not a real teardown.
 */
2852 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2853 struct ieee80211_vif *vif,
2854 enum nl80211_iftype new_type, bool p2p)
2856 struct wl1271 *wl = hw->priv;
2859 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2860 wl1271_op_remove_interface(hw, vif);
2862 vif->type = new_type;
2864 ret = wl1271_op_add_interface(hw, vif);
2866 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware role-start ("join") for a STA or IBSS vif.
 * JOIN clears WPA/WPA2 keys in the chipset as a side effect, so the
 * cached encryption type is reset; joining while associated is only
 * expected on roaming (new keys follow).
 */
2870 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2873 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2876 * One of the side effects of the JOIN command is that is clears
2877 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2878 * to a WPA/WPA2 access point will therefore kill the data-path.
2879 * Currently the only valid scenario for JOIN during association
2880 * is on roaming, in which case we will also be given new keys.
2881 * Keep the below message for now, unless it starts bothering
2882 * users who really like to roam a lot :)
2884 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2885 wl1271_info("JOIN while associated.");
2887 /* clear encryption type */
2888 wlvif->encryption_type = KEY_NONE;
/* IBSS and STA use different role-start commands */
2891 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2893 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame skb (IEs start at
 * @offset) and cache it in wlvif->ssid/ssid_len. Errors out if the
 * IE is missing or longer than IEEE80211_MAX_SSID_LEN.
 */
2898 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2902 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2906 wl1271_error("No SSID in IEs!");
2911 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2912 wl1271_error("SSID is too long!");
2916 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (EID byte + length byte) */
2917 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Derive the vif's SSID from the AP probe request mac80211 built for
 * this STA connection. Only supported for STA vifs.
 */
2921 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2923 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2924 struct sk_buff *skb;
2927 /* we currently only support setting the ssid from the ap probe req */
2928 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2931 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs in a probe request start at u.probe_req.variable */
2935 ieoffset = offsetof(struct ieee80211_mgmt,
2936 u.probe_req.variable);
2937 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Transition a STA vif into the associated state: cache the AID /
 * beacon parameters from bss_conf, install the PS-poll and probe-req
 * templates for firmware connection maintenance, enable connection
 * monitoring and keep-alive, and sync PSM + rate policies with the
 * firmware. Command ordering below is significant (see inline notes).
 */
2943 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2944 struct ieee80211_bss_conf *bss_conf,
2950 wlvif->aid = bss_conf->aid;
2951 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2952 wlvif->beacon_int = bss_conf->beacon_int;
2953 wlvif->wmm_enabled = bss_conf->qos;
2955 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2958 * with wl1271, we don't need to update the
2959 * beacon_int and dtim_period, because the firmware
2960 * updates it by itself when the first beacon is
2961 * received after a join.
2963 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2968 * Get a template for hardware connection maintenance
2970 dev_kfree_skb(wlvif->probereq);
2971 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2974 ieoffset = offsetof(struct ieee80211_mgmt,
2975 u.probe_req.variable);
2976 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2978 /* enable the connection monitoring feature */
2979 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2984 * The join command disable the keep-alive mode, shut down its process,
2985 * and also clear the template config, so we need to reset it all after
2986 * the join. The acx_aid starts the keep-alive process, and the order
2987 * of the commands below is relevant.
2989 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2993 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2997 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3001 ret = wl1271_acx_keep_alive_config(wl, wlvif,
3002 wlvif->sta.klv_template_id,
3003 ACX_KEEP_ALIVE_TPL_VALID);
3008 * The default fw psm configuration is AUTO, while mac80211 default
3009 * setting is off (ACTIVE), so sync the fw with the correct value.
3011 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3017 wl1271_tx_enabled_rates_get(wl,
3020 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear the association/IBSS-joined state,
 * free the cached probe-request template, disable connection
 * monitoring / keep-alive / beacon filtering, abort any channel
 * switch in progress, and invalidate the keep-alive template.
 */
3028 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3031 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3033 /* make sure we are connected (sta) joined */
3035 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3038 /* make sure we are joined (ibss) */
3040 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3044 /* use defaults when not associated */
3047 /* free probe-request template */
3048 dev_kfree_skb(wlvif->probereq);
3049 wlvif->probereq = NULL;
3051 /* disable connection monitor features */
3052 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3056 /* Disable the keep-alive feature */
3057 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3061 /* disable beacon filtering */
3062 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* abort an in-flight channel switch and tell mac80211 it failed */
3067 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3068 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3070 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3071 ieee80211_chswitch_done(vif, false);
3072 cancel_delayed_work(&wlvif->channel_switch_work);
3075 /* invalidate keep-alive template */
3076 wl1271_acx_keep_alive_config(wl, wlvif,
3077 wlvif->sta.klv_template_id,
3078 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the vif's basic and current rate sets to the per-band mask. */
3083 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3085 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3086 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track STA idle transitions via WLVIF_FLAG_ACTIVE. Entering active
 * state stops a scheduled scan owned by this vif, since the firmware
 * only supports sched_scan while idle.
 */
3089 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3092 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3094 if (idle == cur_idle)
3098 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3100 /* The current firmware only supports sched_scan in idle */
3101 if (wl->sched_vif == wlvif)
3102 wl->ops->sched_scan_stop(wl, wlvif);
3104 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply per-vif configuration changes. Currently only TX power is
 * handled; p2p management vifs are skipped entirely.
 */
3108 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3109 struct ieee80211_conf *conf, u32 changed)
3113 if (wlcore_is_p2p_mgmt(wlvif))
3116 if (conf->power_level != wlvif->power_level) {
3117 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache the value only after the firmware accepted it */
3121 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback: records the global power level and fans
 * the change set out to every vif via wl12xx_config_vif(), bracketed
 * by an ELP wakeup/sleep cycle under wl->mutex.
 */
3127 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3129 struct wl1271 *wl = hw->priv;
3130 struct wl12xx_vif *wlvif;
3131 struct ieee80211_conf *conf = &hw->conf;
3134 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3136 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3138 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3141 mutex_lock(&wl->mutex);
3143 if (changed & IEEE80211_CONF_CHANGE_POWER)
3144 wl->power_level = conf->power_level;
3146 if (unlikely(wl->state != WLCORE_STATE_ON))
3149 ret = wl1271_ps_elp_wakeup(wl);
3153 /* configure each interface */
3154 wl12xx_for_each_wlvif(wl, wlvif) {
3155 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3161 wl1271_ps_elp_sleep(wl);
3164 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast and consumed
 * (then freed) in configure_filter.
 */
3169 struct wl1271_filter_params {
3172 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback. Copies the hardware multicast
 * address list into a freshly allocated wl1271_filter_params and
 * returns it encoded as u64 (mac80211 passes it back to
 * configure_filter, which owns and frees it). If the list exceeds the
 * firmware's group limit, filtering is disabled instead.
 * GFP_ATOMIC: this callback may run in atomic context.
 */
3175 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176 struct netdev_hw_addr_list *mc_list)
3178 struct wl1271_filter_params *fp;
3179 struct netdev_hw_addr *ha;
3181 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3183 wl1271_error("Out of memory setting filters.");
3187 /* update multicast filtering parameters */
3188 fp->mc_list_length = 0;
3189 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190 fp->enabled = false;
3193 netdev_hw_addr_list_for_each(ha, mc_list) {
3194 memcpy(fp->mc_list[fp->mc_list_length],
3195 ha->addr, ETH_ALEN);
3196 fp->mc_list_length++;
3200 return (u64)(unsigned long)fp;
/* RX filter flags this driver actually honors in configure_filter. */
3203 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3205 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback. Applies the multicast group
 * table (built by prepare_multicast, passed in via @multicast) to
 * every non-AP, non-p2p-mgmt vif. FIF_ALLMULTI disables the group
 * table; otherwise the collected list is programmed. The firmware has
 * no generic filter API — filtering follows active roles/ROC.
 */
3209 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3210 unsigned int changed,
3211 unsigned int *total, u64 multicast)
3213 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3214 struct wl1271 *wl = hw->priv;
3215 struct wl12xx_vif *wlvif;
3219 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3220 " total %x", changed, *total);
3222 mutex_lock(&wl->mutex);
3224 *total &= WL1271_SUPPORTED_FILTERS;
3225 changed &= WL1271_SUPPORTED_FILTERS;
3227 if (unlikely(wl->state != WLCORE_STATE_ON))
3230 ret = wl1271_ps_elp_wakeup(wl);
3234 wl12xx_for_each_wlvif(wl, wlvif) {
3235 if (wlcore_is_p2p_mgmt(wlvif))
3238 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3239 if (*total & FIF_ALLMULTI)
3240 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3244 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3247 fp->mc_list_length);
3254 * the fw doesn't provide an api to configure the filters. instead,
3255 * the filters configuration is based on the active roles / ROC
3260 wl1271_ps_elp_sleep(wl);
3263 mutex_unlock(&wl->mutex);
/*
 * Record an AP group/pairwise key for later installation (keys set
 * before the AP role is started are replayed by
 * wl1271_ap_init_hwenc()). Rejects oversized keys, duplicate key ids,
 * and a full recorded_keys table.
 */
3267 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3268 u8 id, u8 key_type, u8 key_size,
3269 const u8 *key, u8 hlid, u32 tx_seq_32,
3272 struct wl1271_ap_key *ap_key;
3275 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3277 if (key_size > MAX_KEY_SIZE)
3281 * Find next free entry in ap_keys. Also check we are not replacing
3284 for (i = 0; i < MAX_NUM_KEYS; i++) {
3285 if (wlvif->ap.recorded_keys[i] == NULL)
3288 if (wlvif->ap.recorded_keys[i]->id == id) {
3289 wl1271_warning("trying to record key replacement");
3294 if (i == MAX_NUM_KEYS)
3297 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3302 ap_key->key_type = key_type;
3303 ap_key->key_size = key_size;
3304 memcpy(ap_key->key, key, key_size);
3305 ap_key->hlid = hlid;
3306 ap_key->tx_seq_32 = tx_seq_32;
3307 ap_key->tx_seq_16 = tx_seq_16;
3309 wlvif->ap.recorded_keys[i] = ap_key;
/* Free all recorded AP keys and NULL out the table entries. */
3313 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3317 for (i = 0; i < MAX_NUM_KEYS; i++) {
3318 kfree(wlvif->ap.recorded_keys[i]);
3319 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay keys recorded before the AP role started into the firmware.
 * Group keys (no hlid recorded) are installed on the broadcast hlid.
 * If any WEP key was added, the default WEP key index is pushed too.
 * The recorded table is freed at the end in every case.
 */
3323 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3326 struct wl1271_ap_key *key;
3327 bool wep_key_added = false;
3329 for (i = 0; i < MAX_NUM_KEYS; i++) {
3331 if (wlvif->ap.recorded_keys[i] == NULL)
3334 key = wlvif->ap.recorded_keys[i];
3336 if (hlid == WL12XX_INVALID_LINK_ID)
3337 hlid = wlvif->ap.bcast_hlid;
3339 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3340 key->id, key->key_type,
3341 key->key_size, key->key,
3342 hlid, key->tx_seq_32,
3347 if (key->key_type == KEY_WEP)
3348 wep_key_added = true;
3351 if (wep_key_added) {
3352 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3353 wlvif->ap.bcast_hlid);
3359 wl1271_free_ap_keys(wl, wlvif);
/*
 * Install or remove a key for a vif. AP path: record the key if the
 * AP is not yet started (replayed later by wl1271_ap_init_hwenc),
 * otherwise program it immediately; group keys target the broadcast
 * hlid. STA path: resolves the peer address (broadcast for group
 * keys) and silently ignores removals the hardware can't do (unicast
 * keys are cleared by the next JOIN; removals after the hlid is gone).
 */
3363 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3364 u16 action, u8 id, u8 key_type,
3365 u8 key_size, const u8 *key, u32 tx_seq_32,
3366 u16 tx_seq_16, struct ieee80211_sta *sta)
3369 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3372 struct wl1271_station *wl_sta;
3376 wl_sta = (struct wl1271_station *)sta->drv_priv;
3377 hlid = wl_sta->hlid;
3379 hlid = wlvif->ap.bcast_hlid;
3382 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3384 * We do not support removing keys after AP shutdown.
3385 * Pretend we do to make mac80211 happy.
3387 if (action != KEY_ADD_OR_REPLACE)
3390 ret = wl1271_record_ap_key(wl, wlvif, id,
3392 key, hlid, tx_seq_32,
3395 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3396 id, key_type, key_size,
3397 key, hlid, tx_seq_32,
3405 static const u8 bcast_addr[ETH_ALEN] = {
3406 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3409 addr = sta ? sta->addr : bcast_addr;
3411 if (is_zero_ether_addr(addr)) {
3412 /* We dont support TX only encryption */
3416 /* The wl1271 does not allow to remove unicast keys - they
3417 will be cleared automatically on next CMD_JOIN. Ignore the
3418 request silently, as we dont want the mac80211 to emit
3419 an error message. */
3420 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3423 /* don't remove key if hlid was already deleted */
3424 if (action == KEY_REMOVE &&
3425 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3428 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3429 id, key_type, key_size,
3430 key, addr, tx_seq_32,
/*
 * mac80211 set_key callback. GEM/TKIP ciphers change the firmware's
 * spare-block accounting, so TX queues are stopped and flushed before
 * the key change and restarted after, keeping the host in sync with
 * FW block bookkeeping. The real work is in the HW-specific
 * wlcore_hw_set_key op.
 */
3440 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3441 struct ieee80211_vif *vif,
3442 struct ieee80211_sta *sta,
3443 struct ieee80211_key_conf *key_conf)
3445 struct wl1271 *wl = hw->priv;
3447 bool might_change_spare =
3448 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3449 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3451 if (might_change_spare) {
3453 * stop the queues and flush to ensure the next packets are
3454 * in sync with FW spare block accounting
3456 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3457 wl1271_tx_flush(wl);
3460 mutex_lock(&wl->mutex);
3462 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3464 goto out_wake_queues;
3467 ret = wl1271_ps_elp_wakeup(wl);
3469 goto out_wake_queues;
3471 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3473 wl1271_ps_elp_sleep(wl);
3476 if (might_change_spare)
3477 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3479 mutex_unlock(&wl->mutex);
/*
 * Shared set_key implementation used by the HW-specific backends
 * (hence the EXPORT_SYMBOL_GPL). Resolves the target hlid, seeds the
 * key's TX sequence counter from the link's freed-packet count, maps
 * the nl80211 cipher to a firmware key type, and dispatches SET_KEY /
 * DISABLE_KEY to wl1271_set_key(). For STA vifs, a change of the
 * unicast (or WEP) cipher also rebuilds the ARP response template.
 */
3484 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3485 struct ieee80211_vif *vif,
3486 struct ieee80211_sta *sta,
3487 struct ieee80211_key_conf *key_conf)
3489 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3496 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3498 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3499 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3500 key_conf->cipher, key_conf->keyidx,
3501 key_conf->keylen, key_conf->flags);
3502 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3504 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3506 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3507 hlid = wl_sta->hlid;
3509 hlid = wlvif->ap.bcast_hlid;
3512 hlid = wlvif->sta.hlid;
/* derive the initial PN/IV from packets already freed on this link */
3514 if (hlid != WL12XX_INVALID_LINK_ID) {
3515 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3516 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3517 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3520 switch (key_conf->cipher) {
3521 case WLAN_CIPHER_SUITE_WEP40:
3522 case WLAN_CIPHER_SUITE_WEP104:
3525 key_conf->hw_key_idx = key_conf->keyidx;
3527 case WLAN_CIPHER_SUITE_TKIP:
3528 key_type = KEY_TKIP;
3529 key_conf->hw_key_idx = key_conf->keyidx;
3531 case WLAN_CIPHER_SUITE_CCMP:
3533 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3535 case WL1271_CIPHER_SUITE_GEM:
3539 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3546 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3547 key_conf->keyidx, key_type,
3548 key_conf->keylen, key_conf->key,
3549 tx_seq_32, tx_seq_16, sta);
3551 wl1271_error("Could not add or replace key");
3556 * reconfiguring arp response if the unicast (or common)
3557 * encryption key type was changed
3559 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3560 (sta || key_type == KEY_WEP) &&
3561 wlvif->encryption_type != key_type) {
3562 wlvif->encryption_type = key_type;
3563 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3565 wl1271_warning("build arp rsp failed: %d", ret);
3572 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3573 key_conf->keyidx, key_type,
3574 key_conf->keylen, key_conf->key,
3577 wl1271_error("Could not remove key");
3583 wl1271_error("Unsupported key cmd 0x%x", cmd);
3589 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key callback: caches the default key
 * index and, when WEP is the active cipher, pushes it to firmware.
 * Unsetting (negative index) is not handled.
 */
3591 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3592 struct ieee80211_vif *vif,
3595 struct wl1271 *wl = hw->priv;
3596 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3599 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3602 /* we don't handle unsetting of default key */
3606 mutex_lock(&wl->mutex);
3608 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3613 ret = wl1271_ps_elp_wakeup(wl);
3617 wlvif->default_key = key_idx;
3619 /* the default WEP key needs to be configured at least once */
3620 if (wlvif->encryption_type == KEY_WEP) {
3621 ret = wl12xx_cmd_set_default_wep_key(wl,
3629 wl1271_ps_elp_sleep(wl);
3632 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain to firmware, for chips with the
 * REGDOMAIN_CONF quirk. A failed firmware command triggers recovery.
 */
3635 void wlcore_regdomain_config(struct wl1271 *wl)
3639 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3642 mutex_lock(&wl->mutex);
3644 if (unlikely(wl->state != WLCORE_STATE_ON))
3647 ret = wl1271_ps_elp_wakeup(wl);
3651 ret = wlcore_cmd_regdomain_config_locked(wl);
3653 wl12xx_queue_recovery_work(wl);
3657 wl1271_ps_elp_sleep(wl);
3659 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan callback. Rejects a scan while any role holds ROC
 * (firmware limitation) and while the device is not fully on — in the
 * latter case the error code matters, since returning -EBUSY would
 * make cfg80211 wait for an ieee80211_scan_completed that will never
 * come.
 */
3662 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3663 struct ieee80211_vif *vif,
3664 struct ieee80211_scan_request *hw_req)
3666 struct cfg80211_scan_request *req = &hw_req->req;
3667 struct wl1271 *wl = hw->priv;
3672 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first requested SSID is used for the firmware scan */
3675 ssid = req->ssids[0].ssid;
3676 len = req->ssids[0].ssid_len;
3679 mutex_lock(&wl->mutex);
3681 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3683 * We cannot return -EBUSY here because cfg80211 will expect
3684 * a call to ieee80211_scan_completed if we do - in this case
3685 * there won't be any call.
3691 ret = wl1271_ps_elp_wakeup(wl);
3695 /* fail if there is any role in ROC */
3696 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3697 /* don't allow scanning right now */
3702 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3704 wl1271_ps_elp_sleep(wl);
3706 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan callback: stops an in-flight firmware scan,
 * rearms the TX watchdog, resets scan bookkeeping and reports the
 * scan as aborted to mac80211. The scan-complete work is cancelled
 * only after wl->mutex is dropped, as it takes the mutex itself.
 */
3711 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3712 struct ieee80211_vif *vif)
3714 struct wl1271 *wl = hw->priv;
3715 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3718 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3720 mutex_lock(&wl->mutex);
3722 if (unlikely(wl->state != WLCORE_STATE_ON))
3725 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3728 ret = wl1271_ps_elp_wakeup(wl);
3732 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3733 ret = wl->ops->scan_stop(wl, wlvif);
3739 * Rearm the tx watchdog just before idling scan. This
3740 * prevents just-finished scans from triggering the watchdog
3742 wl12xx_rearm_tx_watchdog_locked(wl);
3744 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3745 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3746 wl->scan_wlvif = NULL;
3747 wl->scan.req = NULL;
3748 ieee80211_scan_completed(wl->hw, true);
3751 wl1271_ps_elp_sleep(wl);
3753 mutex_unlock(&wl->mutex);
3755 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start callback: delegates to the HW-specific
 * sched_scan_start op and records which vif owns the scheduled scan.
 */
3758 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3759 struct ieee80211_vif *vif,
3760 struct cfg80211_sched_scan_request *req,
3761 struct ieee80211_scan_ies *ies)
3763 struct wl1271 *wl = hw->priv;
3764 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3767 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3769 mutex_lock(&wl->mutex);
3771 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3776 ret = wl1271_ps_elp_wakeup(wl);
3780 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3784 wl->sched_vif = wlvif;
3787 wl1271_ps_elp_sleep(wl);
3789 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop callback: stops the scheduled scan via the
 * HW-specific op under wl->mutex with an ELP wakeup.
 */
3793 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3794 struct ieee80211_vif *vif)
3796 struct wl1271 *wl = hw->priv;
3797 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3800 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3802 mutex_lock(&wl->mutex);
3804 if (unlikely(wl->state != WLCORE_STATE_ON))
3807 ret = wl1271_ps_elp_wakeup(wl);
3811 wl->ops->sched_scan_stop(wl, wlvif);
3813 wl1271_ps_elp_sleep(wl);
3815 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold callback: pushes the fragmentation
 * threshold to firmware via ACX; a failure is only logged.
 */
3820 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3822 struct wl1271 *wl = hw->priv;
3825 mutex_lock(&wl->mutex);
3827 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3832 ret = wl1271_ps_elp_wakeup(wl);
3836 ret = wl1271_acx_frag_threshold(wl, value);
3838 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3840 wl1271_ps_elp_sleep(wl);
3843 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold callback: applies the RTS threshold to
 * every vif; per-vif failures are only logged, not propagated.
 */
3848 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3850 struct wl1271 *wl = hw->priv;
3851 struct wl12xx_vif *wlvif;
3854 mutex_lock(&wl->mutex);
3856 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3861 ret = wl1271_ps_elp_wakeup(wl);
3865 wl12xx_for_each_wlvif(wl, wlvif) {
3866 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3868 wl1271_warning("set rts threshold failed: %d", ret);
3870 wl1271_ps_elp_sleep(wl);
3873 mutex_unlock(&wl->mutex);
/*
 * Remove one information element (by EID) from a management frame
 * skb in place: the bytes after the IE are shifted down with memmove
 * and the skb is trimmed by the IE's length.
 */
3878 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3881 const u8 *next, *end = skb->data + skb->len;
3882 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3883 skb->len - ieoffset);
3888 memmove(ie, next, end - next);
3889 skb_trim(skb, skb->len - len);
/*
 * Same as wl12xx_remove_ie() but for a vendor-specific IE identified
 * by OUI + OUI type.
 */
3892 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3893 unsigned int oui, u8 oui_type,
3897 const u8 *next, *end = skb->data + skb->len;
3898 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3899 skb->data + ieoffset,
3900 skb->len - ieoffset);
3905 memmove(ie, next, end - next);
3906 skb_trim(skb, skb->len - len);
/*
 * Install the probe-response template supplied by usermode (via
 * ieee80211_proberesp_get) into firmware and mark it as explicitly
 * set, so beacon-derived probe responses won't overwrite it.
 */
3909 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3910 struct ieee80211_vif *vif)
3912 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3913 struct sk_buff *skb;
3916 skb = ieee80211_proberesp_get(wl->hw, vif);
3920 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3921 CMD_TEMPL_AP_PROBE_RESPONSE,
3930 wl1271_debug(DEBUG_AP, "probe response updated");
3931 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Build and install a probe-response template for hidden-SSID APs.
 * If the vif already has a real SSID cached, the caller's frame is
 * used as-is; otherwise the (empty) SSID IE in the probe-response
 * data is replaced by the SSID from bss_conf, rebuilding the frame in
 * a stack buffer bounded by WL1271_CMD_TEMPL_MAX_SIZE.
 */
3937 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3938 struct ieee80211_vif *vif,
3940 size_t probe_rsp_len,
3943 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3944 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3945 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3946 int ssid_ie_offset, ie_offset, templ_len;
3949 /* no need to change probe response if the SSID is set correctly */
3950 if (wlvif->ssid_len > 0)
3951 return wl1271_cmd_template_set(wl, wlvif->role_id,
3952 CMD_TEMPL_AP_PROBE_RESPONSE,
/* guard the stack buffer before splicing in the real SSID */
3957 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3958 wl1271_error("probe_rsp template too big");
3962 /* start searching from IE offset */
3963 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3965 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3966 probe_rsp_len - ie_offset);
3968 wl1271_error("No SSID in beacon!");
3972 ssid_ie_offset = ptr - probe_rsp_data;
3973 ptr += (ptr[1] + 2);
/* copy everything before the SSID IE unchanged */
3975 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3977 /* insert SSID from bss_conf */
3978 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3979 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3980 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3981 bss_conf->ssid, bss_conf->ssid_len);
3982 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the rest of the original frame after the new SSID IE */
3984 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3985 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3986 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3988 return wl1271_cmd_template_set(wl, wlvif->role_id,
3989 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to firmware: slot time,
 * preamble length and CTS protection, each gated on its
 * BSS_CHANGED_* bit.
 */
3995 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3996 struct ieee80211_vif *vif,
3997 struct ieee80211_bss_conf *bss_conf,
4000 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4003 if (changed & BSS_CHANGED_ERP_SLOT) {
4004 if (bss_conf->use_short_slot)
4005 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4007 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4009 wl1271_warning("Set slot time failed %d", ret);
4014 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4015 if (bss_conf->use_short_preamble)
4016 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4018 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4021 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4022 if (bss_conf->use_cts_prot)
4023 ret = wl1271_acx_cts_protect(wl, wlvif,
4026 ret = wl1271_acx_cts_protect(wl, wlvif,
4027 CTSPROTECT_DISABLE);
4029 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Push the current beacon to firmware and, unless usermode already
 * installed an explicit probe-response template, derive one from the
 * beacon: strip the TIM IE and the P2P vendor IE, rewrite the frame
 * control to PROBE_RESP, and install it. Also refreshes the cached
 * SSID and WMM state from the beacon contents. The beacon skb is
 * freed on every path.
 */
4038 static int wlcore_set_beacon_template(struct wl1271 *wl,
4039 struct ieee80211_vif *vif,
4042 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4043 struct ieee80211_hdr *hdr;
4046 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4047 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4055 wl1271_debug(DEBUG_MASTER, "beacon updated");
4057 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4059 dev_kfree_skb(beacon);
4062 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4063 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4065 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4070 dev_kfree_skb(beacon);
/* WMM state is derived from the Microsoft WMM vendor IE presence */
4074 wlvif->wmm_enabled =
4075 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4076 WLAN_OUI_TYPE_MICROSOFT_WMM,
4077 beacon->data + ieoffset,
4078 beacon->len - ieoffset);
4081 * In case we already have a probe-resp beacon set explicitly
4082 * by usermode, don't use the beacon data.
4084 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4087 /* remove TIM ie from probe response */
4088 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4091 * remove p2p ie from probe response.
4092 * the fw reponds to probe requests that don't include
4093 * the p2p ie. probe requests with p2p ie will be passed,
4094 * and will be responded by the supplicant (the spec
4095 * forbids including the p2p ie when responding to probe
4096 * requests that didn't include it).
4098 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4099 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon frame into a probe response frame */
4101 hdr = (struct ieee80211_hdr *) beacon->data;
4102 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4103 IEEE80211_STYPE_PROBE_RESP);
4105 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4110 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4111 CMD_TEMPL_PROBE_RESPONSE,
4116 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: beacon interval, an
 * AP-supplied probe-response template, and the beacon itself
 * (re-enabling DFS master operation if beaconing was disabled).
 * Errors are reported once at the end.
 */
4124 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4125 struct ieee80211_vif *vif,
4126 struct ieee80211_bss_conf *bss_conf,
4129 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4130 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4133 if (changed & BSS_CHANGED_BEACON_INT) {
4134 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4135 bss_conf->beacon_int);
4137 wlvif->beacon_int = bss_conf->beacon_int;
4140 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4141 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4143 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4146 if (changed & BSS_CHANGED_BEACON) {
4147 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4151 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4153 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4160 wl1271_error("beacon info change failed: %d", ret);
4164 /* AP mode changes */
4165 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4166 struct ieee80211_vif *vif,
4167 struct ieee80211_bss_conf *bss_conf,
4170 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4173 if (changed & BSS_CHANGED_BASIC_RATES) {
4174 u32 rates = bss_conf->basic_rates;
4176 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4178 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4179 wlvif->basic_rate_set);
4181 ret = wl1271_init_ap_rates(wl, wlvif);
4183 wl1271_error("AP rate policy change failed %d", ret);
4187 ret = wl1271_ap_init_templates(wl, vif);
4191 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4195 ret = wlcore_set_beacon_template(wl, vif, true);
4200 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4204 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4205 if (bss_conf->enable_beacon) {
4206 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4207 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4211 ret = wl1271_ap_init_hwenc(wl, wlvif);
4215 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4216 wl1271_debug(DEBUG_AP, "started AP");
4219 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4221 * AP might be in ROC in case we have just
4222 * sent auth reply. handle it.
4224 if (test_bit(wlvif->role_id, wl->roc_map))
4225 wl12xx_croc(wl, wlvif->role_id);
4227 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4231 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4232 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4234 wl1271_debug(DEBUG_AP, "stopped AP");
4239 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4243 /* Handle HT information change */
4244 if ((changed & BSS_CHANGED_HT) &&
4245 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4246 ret = wl1271_acx_set_ht_information(wl, wlvif,
4247 bss_conf->ht_operation_mode);
4249 wl1271_warning("Set ht information failed %d", ret);
/*
 * Handle a BSSID change on a STA vif: recompute basic/peer rate
 * policies from bss_conf, stop any scheduled scan owned by this vif
 * (not supported while connected), rebuild the null-data and QoS
 * null-data templates, refresh the SSID and mark the vif in use.
 */
4258 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4259 struct ieee80211_bss_conf *bss_conf,
4265 wl1271_debug(DEBUG_MAC80211,
4266 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4267 bss_conf->bssid, bss_conf->aid,
4268 bss_conf->beacon_int,
4269 bss_conf->basic_rates, sta_rate_set);
4271 wlvif->beacon_int = bss_conf->beacon_int;
4272 rates = bss_conf->basic_rates;
4273 wlvif->basic_rate_set =
4274 wl1271_tx_enabled_rates_get(wl, rates,
4277 wl1271_tx_min_rate_get(wl,
4278 wlvif->basic_rate_set);
4282 wl1271_tx_enabled_rates_get(wl,
4286 /* we only support sched_scan while not connected */
4287 if (wl->sched_vif == wlvif)
4288 wl->ops->sched_scan_stop(wl, wlvif);
4290 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4294 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4298 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4302 wlcore_set_ssid(wl, wlvif);
4304 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * wlcore_clear_bssid - undo wlcore_set_bssid when the BSSID is cleared.
 * Reverts the vif to the band's minimum rates, re-pushes the rate
 * policies, stops the STA role if this vif was a connected STA, and
 * clears the IN_USE flag.
 * NOTE(review): error-path lines are elided in this extract.
 */
4309 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4313 /* revert back to minimum rates for the current band */
4314 wl1271_set_band_rate(wl, wlvif);
4315 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4317 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4321 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4322 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4323 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4328 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4331 /* STA/IBSS mode changes */
/*
 * wl1271_bss_info_changed_sta - handle mac80211 bss_info_changed for
 * STA and IBSS vifs. Dispatches on the 'changed' bitmask: IBSS
 * join/leave, idle, CQM thresholds, BSSID set/clear, beacon filtering,
 * ERP, association state, power-save mode, HT caps/information and ARP
 * filtering (ordering matters: HT and ARP handling are done after join,
 * per the inline comments below).
 * NOTE(review): many interior lines (declarations, error checks, the
 * do_join handling) are elided in this extract; comments are limited to
 * the visible code.
 */
4332 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4333 struct ieee80211_vif *vif,
4334 struct ieee80211_bss_conf *bss_conf,
4337 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4338 bool do_join = false;
4339 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4340 bool ibss_joined = false;
4341 u32 sta_rate_set = 0;
4343 struct ieee80211_sta *sta;
4344 bool sta_exists = false;
4345 struct ieee80211_sta_ht_cap sta_ht_cap;
4348 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4354 if (changed & BSS_CHANGED_IBSS) {
4355 if (bss_conf->ibss_joined) {
4356 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* leaving the IBSS: drop association state and stop the STA role */
4359 wlcore_unset_assoc(wl, wlvif);
4360 wl12xx_cmd_role_stop_sta(wl, wlvif);
4364 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4367 /* Need to update the SSID (for filtering etc) */
4368 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4371 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4372 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4373 bss_conf->enable_beacon ? "enabled" : "disabled");
4378 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4379 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4381 if (changed & BSS_CHANGED_CQM) {
4382 bool enable = false;
4383 if (bss_conf->cqm_rssi_thold)
/* (re)arm the firmware RSSI/SNR trigger with the new CQM threshold */
4385 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4386 bss_conf->cqm_rssi_thold,
4387 bss_conf->cqm_rssi_hyst);
4390 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4393 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4394 BSS_CHANGED_ASSOC)) {
/* snapshot the AP's rates/HT caps while under RCU (elided here) */
4396 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4398 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4400 /* save the supp_rates of the ap */
4401 sta_rate_set = sta->supp_rates[wlvif->band];
4402 if (sta->ht_cap.ht_supported)
4404 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4405 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4406 sta_ht_cap = sta->ht_cap;
4413 if (changed & BSS_CHANGED_BSSID) {
4414 if (!is_zero_ether_addr(bss_conf->bssid)) {
4415 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4420 /* Need to update the BSSID (for filtering etc) */
/* zero BSSID means disassociation: tear the firmware state down */
4423 ret = wlcore_clear_bssid(wl, wlvif);
4429 if (changed & BSS_CHANGED_IBSS) {
4430 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4431 bss_conf->ibss_joined);
4433 if (bss_conf->ibss_joined) {
4434 u32 rates = bss_conf->basic_rates;
4435 wlvif->basic_rate_set =
4436 wl1271_tx_enabled_rates_get(wl, rates,
4439 wl1271_tx_min_rate_get(wl,
4440 wlvif->basic_rate_set);
4442 /* by default, use 11b + OFDM rates */
4443 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4444 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4450 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4451 /* enable beacon filtering */
4452 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4457 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4462 ret = wlcore_join(wl, wlvif);
4464 wl1271_warning("cmd join failed %d", ret);
4469 if (changed & BSS_CHANGED_ASSOC) {
4470 if (bss_conf->assoc) {
4471 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* mac80211 may have authorized us before assoc completed */
4476 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4477 wl12xx_set_authorized(wl, wlvif);
4479 wlcore_unset_assoc(wl, wlvif);
4483 if (changed & BSS_CHANGED_PS) {
4484 if ((bss_conf->ps) &&
4485 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4486 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* config decides between always-on PS and firmware-managed PS */
4490 if (wl->conf.conn.forced_ps) {
4491 ps_mode = STATION_POWER_SAVE_MODE;
4492 ps_mode_str = "forced";
4494 ps_mode = STATION_AUTO_PS_MODE;
4495 ps_mode_str = "auto";
4498 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4500 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4502 wl1271_warning("enter %s ps failed %d",
4504 } else if (!bss_conf->ps &&
4505 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4506 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4508 ret = wl1271_ps_set_mode(wl, wlvif,
4509 STATION_ACTIVE_MODE);
4511 wl1271_warning("exit auto ps failed %d", ret);
4515 /* Handle new association with HT. Do this after join. */
4518 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4520 ret = wlcore_hw_set_peer_cap(wl,
4526 wl1271_warning("Set ht cap failed %d", ret);
4532 ret = wl1271_acx_set_ht_information(wl, wlvif,
4533 bss_conf->ht_operation_mode);
4535 wl1271_warning("Set ht information failed %d",
4542 /* Handle arp filtering. Done after join. */
4543 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4544 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4545 __be32 addr = bss_conf->arp_addr_list[0];
4546 wlvif->sta.qos = bss_conf->qos;
4547 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4549 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4550 wlvif->ip_addr = addr;
4552 * The template should have been configured only upon
4553 * association. however, it seems that the correct ip
4554 * isn't being set (when sending), so we have to
4555 * reconfigure the template upon every ip change.
4557 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4559 wl1271_warning("build arp rsp failed: %d", ret);
4563 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4564 (ACX_ARP_FILTER_ARP_FILTERING |
4565 ACX_ARP_FILTER_AUTO_ARP),
/* no usable ARP address: disable the firmware ARP filter */
4569 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * wl1271_op_bss_info_changed - mac80211 .bss_info_changed entry point.
 * Cancels a pending connection-loss work on association changes
 * (STA case), flushes TX before disabling AP beacons, then under
 * wl->mutex and with the chip awake updates TX power and delegates to
 * the AP or STA handler based on the vif's bss_type.
 * NOTE(review): goto-out error paths and labels are elided in this
 * extract.
 */
4580 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4581 struct ieee80211_vif *vif,
4582 struct ieee80211_bss_conf *bss_conf,
4585 struct wl1271 *wl = hw->priv;
4586 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4587 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4590 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4591 wlvif->role_id, (int)changed);
4594 * make sure to cancel pending disconnections if our association
4597 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4598 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4600 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4601 !bss_conf->enable_beacon)
4602 wl1271_tx_flush(wl);
4604 mutex_lock(&wl->mutex);
4606 if (unlikely(wl->state != WLCORE_STATE_ON))
4609 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4612 ret = wl1271_ps_elp_wakeup(wl);
4616 if ((changed & BSS_CHANGED_TXPOWER) &&
4617 bss_conf->txpower != wlvif->power_level) {
4619 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4623 wlvif->power_level = bss_conf->txpower;
4627 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4629 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4631 wl1271_ps_elp_sleep(wl);
4634 mutex_unlock(&wl->mutex);
/*
 * wlcore_op_add_chanctx - mac80211 .add_chanctx hook.
 * Only logs the new channel context; no firmware work is visible here.
 */
4637 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4638 struct ieee80211_chanctx_conf *ctx)
4640 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4641 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4642 cfg80211_get_chandef_type(&ctx->def));
/*
 * wlcore_op_remove_chanctx - mac80211 .remove_chanctx hook.
 * Logging only, mirroring wlcore_op_add_chanctx.
 */
4646 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4647 struct ieee80211_chanctx_conf *ctx)
4649 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4650 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4651 cfg80211_get_chandef_type(&ctx->def));
/*
 * wlcore_op_change_chanctx - mac80211 .change_chanctx hook.
 * Under wl->mutex, walks every vif bound to this context and starts
 * radar detection (CAC) on AP vifs when the RADAR flag changed, radar
 * is newly enabled and the channel's DFS state is USABLE.
 */
4654 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4655 struct ieee80211_chanctx_conf *ctx,
4658 struct wl1271 *wl = hw->priv;
4659 struct wl12xx_vif *wlvif;
4661 int channel = ieee80211_frequency_to_channel(
4662 ctx->def.chan->center_freq);
4664 wl1271_debug(DEBUG_MAC80211,
4665 "mac80211 change chanctx %d (type %d) changed 0x%x",
4666 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4668 mutex_lock(&wl->mutex);
4670 ret = wl1271_ps_elp_wakeup(wl);
4674 wl12xx_for_each_wlvif(wl, wlvif) {
4675 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* skip vifs that are assigned to a different channel context */
4678 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4684 /* start radar if needed */
4685 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4686 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4687 ctx->radar_enabled && !wlvif->radar_enabled &&
4688 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4689 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4690 wlcore_hw_set_cac(wl, wlvif, true);
4691 wlvif->radar_enabled = true;
4695 wl1271_ps_elp_sleep(wl);
4697 mutex_unlock(&wl->mutex);
/*
 * wlcore_op_assign_vif_chanctx - mac80211 .assign_vif_chanctx hook.
 * Records the context's band/channel/channel-type on the vif, refreshes
 * the default band rates, and starts radar detection (CAC) if the
 * context requires it and the channel's DFS state is USABLE.
 * NOTE(review): the out/return paths are elided in this extract.
 */
4700 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4701 struct ieee80211_vif *vif,
4702 struct ieee80211_chanctx_conf *ctx)
4704 struct wl1271 *wl = hw->priv;
4705 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4706 int channel = ieee80211_frequency_to_channel(
4707 ctx->def.chan->center_freq);
4710 wl1271_debug(DEBUG_MAC80211,
4711 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4712 wlvif->role_id, channel,
4713 cfg80211_get_chandef_type(&ctx->def),
4714 ctx->radar_enabled, ctx->def.chan->dfs_state);
4716 mutex_lock(&wl->mutex);
4718 if (unlikely(wl->state != WLCORE_STATE_ON))
4721 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4724 ret = wl1271_ps_elp_wakeup(wl);
4728 wlvif->band = ctx->def.chan->band;
4729 wlvif->channel = channel;
4730 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4732 /* update default rates according to the band */
4733 wl1271_set_band_rate(wl, wlvif);
4735 if (ctx->radar_enabled &&
4736 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4737 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4738 wlcore_hw_set_cac(wl, wlvif, true);
4739 wlvif->radar_enabled = true;
4742 wl1271_ps_elp_sleep(wl);
4744 mutex_unlock(&wl->mutex);
/*
 * wlcore_op_unassign_vif_chanctx - mac80211 .unassign_vif_chanctx hook.
 * Flushes pending TX, then under wl->mutex stops radar detection (CAC)
 * if it was running on this vif.
 */
4749 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4750 struct ieee80211_vif *vif,
4751 struct ieee80211_chanctx_conf *ctx)
4753 struct wl1271 *wl = hw->priv;
4754 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4757 wl1271_debug(DEBUG_MAC80211,
4758 "mac80211 unassign chanctx (role %d) %d (type %d)",
4760 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4761 cfg80211_get_chandef_type(&ctx->def));
4763 wl1271_tx_flush(wl);
4765 mutex_lock(&wl->mutex);
4767 if (unlikely(wl->state != WLCORE_STATE_ON))
4770 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4773 ret = wl1271_ps_elp_wakeup(wl);
4777 if (wlvif->radar_enabled) {
4778 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4779 wlcore_hw_set_cac(wl, wlvif, false);
4780 wlvif->radar_enabled = false;
4783 wl1271_ps_elp_sleep(wl);
4785 mutex_unlock(&wl->mutex);
/*
 * __wlcore_switch_vif_chan - move one AP vif to a new channel context.
 * Caller context: only valid for AP vifs (WARN_ON_ONCE otherwise) and
 * expects beaconing to already be disabled on the vif. Stops CAC on the
 * old channel if active, updates band/channel/channel-type from the new
 * context, then restarts CAC if the new context has radar enabled.
 */
4788 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4789 struct wl12xx_vif *wlvif,
4790 struct ieee80211_chanctx_conf *new_ctx)
4792 int channel = ieee80211_frequency_to_channel(
4793 new_ctx->def.chan->center_freq);
4795 wl1271_debug(DEBUG_MAC80211,
4796 "switch vif (role %d) %d -> %d chan_type: %d",
4797 wlvif->role_id, wlvif->channel, channel,
4798 cfg80211_get_chandef_type(&new_ctx->def));
4800 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4803 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4805 if (wlvif->radar_enabled) {
4806 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4807 wlcore_hw_set_cac(wl, wlvif, false);
4808 wlvif->radar_enabled = false;
4811 wlvif->band = new_ctx->def.chan->band;
4812 wlvif->channel = channel;
4813 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4815 /* start radar if needed */
4816 if (new_ctx->radar_enabled) {
4817 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4818 wlcore_hw_set_cac(wl, wlvif, true);
4819 wlvif->radar_enabled = true;
/*
 * wlcore_op_switch_vif_chanctx - mac80211 .switch_vif_chanctx hook.
 * Under wl->mutex and with the chip awake, applies
 * __wlcore_switch_vif_chan() to every vif in the switch request.
 */
4826 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4827 struct ieee80211_vif_chanctx_switch *vifs,
4829 enum ieee80211_chanctx_switch_mode mode)
4831 struct wl1271 *wl = hw->priv;
4834 wl1271_debug(DEBUG_MAC80211,
4835 "mac80211 switch chanctx n_vifs %d mode %d",
4838 mutex_lock(&wl->mutex);
4840 ret = wl1271_ps_elp_wakeup(wl);
4844 for (i = 0; i < n_vifs; i++) {
4845 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4847 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4852 wl1271_ps_elp_sleep(wl);
4854 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_conf_tx - mac80211 .conf_tx hook: program one EDCA queue.
 * Rejects p2p-mgmt vifs, picks UPSD-trigger vs legacy PS scheme, then
 * pushes the AC (cw_min/cw_max/aifs/txop) and TID configuration to the
 * firmware. The txop mac80211 hands us is in 32us units, hence the
 * "<< 5" conversion below.
 */
4859 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4860 struct ieee80211_vif *vif, u16 queue,
4861 const struct ieee80211_tx_queue_params *params)
4863 struct wl1271 *wl = hw->priv;
4864 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4868 if (wlcore_is_p2p_mgmt(wlvif))
4871 mutex_lock(&wl->mutex);
4873 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4876 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4878 ps_scheme = CONF_PS_SCHEME_LEGACY;
4880 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4883 ret = wl1271_ps_elp_wakeup(wl);
4888 * the txop is confed in units of 32us by the mac80211,
4891 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4892 params->cw_min, params->cw_max,
4893 params->aifs, params->txop << 5);
4897 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4898 CONF_CHANNEL_TYPE_EDCF,
4899 wl1271_tx_get_queue(queue),
4900 ps_scheme, CONF_ACK_POLICY_LEGACY,
4904 wl1271_ps_elp_sleep(wl);
4907 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_tsf - mac80211 .get_tsf hook.
 * Queries the firmware TSF via wl12xx_acx_tsf_info(); mactime stays
 * ULLONG_MAX if the chip is off or the query cannot run.
 */
4912 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4913 struct ieee80211_vif *vif)
4916 struct wl1271 *wl = hw->priv;
4917 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4918 u64 mactime = ULLONG_MAX;
4921 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4923 mutex_lock(&wl->mutex);
4925 if (unlikely(wl->state != WLCORE_STATE_ON))
4928 ret = wl1271_ps_elp_wakeup(wl);
4932 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4937 wl1271_ps_elp_sleep(wl);
4940 mutex_unlock(&wl->mutex);
/*
 * wl1271_op_get_survey - mac80211 .get_survey hook.
 * Reports only the current operating channel (no noise/usage stats are
 * filled in the visible code).
 */
4944 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4945 struct survey_info *survey)
4947 struct ieee80211_conf *conf = &hw->conf;
4952 survey->channel = conf->chandef.chan;
/*
 * wl1271_allocate_sta - allocate a firmware link (HLID) for a new AP
 * peer. Fails when the AP station limit is reached or no link is free;
 * on success restores the peer's saved packet counter (recovery/resume),
 * marks the HLID in the vif's station map, records the MAC address and
 * bumps the active station count.
 */
4957 static int wl1271_allocate_sta(struct wl1271 *wl,
4958 struct wl12xx_vif *wlvif,
4959 struct ieee80211_sta *sta)
4961 struct wl1271_station *wl_sta;
4965 if (wl->active_sta_count >= wl->max_ap_stations) {
4966 wl1271_warning("could not allocate HLID - too much stations");
4970 wl_sta = (struct wl1271_station *)sta->drv_priv;
4971 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4973 wl1271_warning("could not allocate HLID - too many links");
4977 /* use the previous security seq, if this is a recovery/resume */
4978 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4980 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4981 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4982 wl->active_sta_count++;
/*
 * wl1271_free_sta - release an AP peer's firmware link (HLID).
 * Clears the HLID from the station and PS maps, preserves the last-used
 * PN for recovery/suspend, frees the link, and rearms the TX watchdog
 * once the last station is gone so the FW can flush STA-buffered
 * packets before the watchdog complains.
 */
4986 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4988 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4991 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4992 __clear_bit(hlid, &wl->ap_ps_map);
4993 __clear_bit(hlid, &wl->ap_fw_ps_map);
4996 * save the last used PN in the private part of iee80211_sta,
4997 * in case of recovery/suspend
4999 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5001 wl12xx_free_link(wl, wlvif, &hlid);
5002 wl->active_sta_count--;
5005 * rearm the tx watchdog when the last STA is freed - give the FW a
5006 * chance to return STA-buffered packets before complaining.
5008 if (wl->active_sta_count == 0)
5009 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * wl12xx_sta_add - add an AP peer: allocate an HLID, then tell the
 * firmware about the peer. On cmd failure the HLID is freed again so
 * allocation and firmware state stay in sync.
 */
5012 static int wl12xx_sta_add(struct wl1271 *wl,
5013 struct wl12xx_vif *wlvif,
5014 struct ieee80211_sta *sta)
5016 struct wl1271_station *wl_sta;
5020 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5022 ret = wl1271_allocate_sta(wl, wlvif, sta);
5026 wl_sta = (struct wl1271_station *)sta->drv_priv;
5027 hlid = wl_sta->hlid;
5029 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5031 wl1271_free_sta(wl, wlvif, hlid);
/*
 * wl12xx_sta_remove - remove an AP peer: validate its HLID is mapped,
 * tell the firmware to drop the peer, then free the local link state.
 * NOTE(review): the line assigning 'id' (presumably id = wl_sta->hlid)
 * is elided in this extract — confirm against the full source.
 */
5036 static int wl12xx_sta_remove(struct wl1271 *wl,
5037 struct wl12xx_vif *wlvif,
5038 struct ieee80211_sta *sta)
5040 struct wl1271_station *wl_sta;
5043 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5045 wl_sta = (struct wl1271_station *)sta->drv_priv;
5047 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5050 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5054 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * wlcore_roc_if_possible - start a remain-on-channel on this vif's role,
 * but only if no other role is currently ROCing (only one ROC is
 * supported at a time) and the vif has a valid role id.
 */
5058 static void wlcore_roc_if_possible(struct wl1271 *wl,
5059 struct wl12xx_vif *wlvif)
5061 if (find_first_bit(wl->roc_map,
5062 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5065 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5068 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * wlcore_update_inconn_sta - track stations that are mid-connection on
 * an AP vif and keep the role ROCed while any connection (or a pending
 * auth reply, when wl_sta is NULL) is in flight. When the last
 * in-connection station/auth-reply completes, the ROC is cancelled.
 */
5072 * when wl_sta is NULL, we treat this call as if coming from a
5073 * pending auth reply.
5074 * wl->mutex must be taken and the FW must be awake when the call
5077 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5078 struct wl1271_station *wl_sta, bool in_conn)
5081 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-flight connection: grab the ROC for this role */
5084 if (!wlvif->ap_pending_auth_reply &&
5085 !wlvif->inconn_count)
5086 wlcore_roc_if_possible(wl, wlvif);
5089 wl_sta->in_connection = true;
5090 wlvif->inconn_count++;
5092 wlvif->ap_pending_auth_reply = true;
5095 if (wl_sta && !wl_sta->in_connection)
5098 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5101 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5105 wl_sta->in_connection = false;
5106 wlvif->inconn_count--;
5108 wlvif->ap_pending_auth_reply = false;
/* nothing left in flight: release the ROC if we hold it */
5111 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5112 test_bit(wlvif->role_id, wl->roc_map))
5113 wl12xx_croc(wl, wlvif->role_id);
/*
 * wl12xx_update_sta_state - apply a mac80211 station state transition
 * to the firmware. Handles, in order: AP add/remove of a peer, AP
 * authorize (set peer state + HT caps, then clear the in-connection
 * tracking), STA authorize/deauthorize, saving/restoring the security
 * sequence counter across disassoc/assoc (suspend/resume), and ROC
 * bookkeeping around auth on a STA vif.
 * NOTE(review): several condition lines (is_ap/is_sta guards) are
 * elided in this extract; comments reflect only the visible branches.
 */
5117 static int wl12xx_update_sta_state(struct wl1271 *wl,
5118 struct wl12xx_vif *wlvif,
5119 struct ieee80211_sta *sta,
5120 enum ieee80211_sta_state old_state,
5121 enum ieee80211_sta_state new_state)
5123 struct wl1271_station *wl_sta;
5124 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5125 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5128 wl_sta = (struct wl1271_station *)sta->drv_priv;
5130 /* Add station (AP mode) */
5132 old_state == IEEE80211_STA_NOTEXIST &&
5133 new_state == IEEE80211_STA_NONE) {
5134 ret = wl12xx_sta_add(wl, wlvif, sta);
5138 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5141 /* Remove station (AP mode) */
5143 old_state == IEEE80211_STA_NONE &&
5144 new_state == IEEE80211_STA_NOTEXIST) {
5146 wl12xx_sta_remove(wl, wlvif, sta);
5148 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5151 /* Authorize station (AP mode) */
5153 new_state == IEEE80211_STA_AUTHORIZED) {
5154 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5158 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5163 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5166 /* Authorize station */
5168 new_state == IEEE80211_STA_AUTHORIZED) {
5169 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5170 ret = wl12xx_set_authorized(wl, wlvif);
/* de-authorize: drop the authorized/state-sent flags */
5176 old_state == IEEE80211_STA_AUTHORIZED &&
5177 new_state == IEEE80211_STA_ASSOC) {
5178 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5179 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5182 /* save seq number on disassoc (suspend) */
5184 old_state == IEEE80211_STA_ASSOC &&
5185 new_state == IEEE80211_STA_AUTH) {
5186 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5187 wlvif->total_freed_pkts = 0;
5190 /* restore seq number on assoc (resume) */
5192 old_state == IEEE80211_STA_AUTH &&
5193 new_state == IEEE80211_STA_ASSOC) {
5194 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5197 /* clear ROCs on failure or authorization */
5199 (new_state == IEEE80211_STA_AUTHORIZED ||
5200 new_state == IEEE80211_STA_NOTEXIST)) {
5201 if (test_bit(wlvif->role_id, wl->roc_map))
5202 wl12xx_croc(wl, wlvif->role_id);
/* STA starting auth: ROC the role if no other ROC is active */
5206 old_state == IEEE80211_STA_NOTEXIST &&
5207 new_state == IEEE80211_STA_NONE) {
5208 if (find_first_bit(wl->roc_map,
5209 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5210 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5211 wl12xx_roc(wl, wlvif, wlvif->role_id,
5212 wlvif->band, wlvif->channel);
/*
 * wl12xx_op_sta_state - mac80211 .sta_state hook; thin locked wrapper
 * around wl12xx_update_sta_state(). For downward transitions the
 * error (if any) is not propagated — mac80211 requires teardown
 * transitions to succeed.
 */
5218 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5219 struct ieee80211_vif *vif,
5220 struct ieee80211_sta *sta,
5221 enum ieee80211_sta_state old_state,
5222 enum ieee80211_sta_state new_state)
5224 struct wl1271 *wl = hw->priv;
5225 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5228 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5229 sta->aid, old_state, new_state);
5231 mutex_lock(&wl->mutex);
5233 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5238 ret = wl1271_ps_elp_wakeup(wl);
5242 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5244 wl1271_ps_elp_sleep(wl);
5246 mutex_unlock(&wl->mutex);
5247 if (new_state < old_state)
/*
 * wl1271_op_ampdu_action - mac80211 .ampdu_action hook.
 * Only RX BA sessions are driver-managed: RX_START validates the BA
 * session limit and per-link bitmap before asking the firmware to open
 * a receiver session; RX_STOP tears it down (tolerating already-closed
 * sessions during reconfig). All TX AMPDU actions fall through — the
 * firmware manages BA initiator sessions on its own.
 * NOTE(review): out/error labels and some default-case lines are elided
 * in this extract.
 */
5252 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5253 struct ieee80211_vif *vif,
5254 struct ieee80211_ampdu_params *params)
5256 struct wl1271 *wl = hw->priv;
5257 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5259 u8 hlid, *ba_bitmap;
5260 struct ieee80211_sta *sta = params->sta;
5261 enum ieee80211_ampdu_mlme_action action = params->action;
5262 u16 tid = params->tid;
5263 u16 *ssn = &params->ssn;
5265 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5268 /* sanity check - the fields in FW are only 8bits wide */
5269 if (WARN_ON(tid > 0xFF))
5272 mutex_lock(&wl->mutex);
5274 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the firmware link for this session: own link for STA,
 * the peer's link for AP */
5279 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5280 hlid = wlvif->sta.hlid;
5281 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5282 struct wl1271_station *wl_sta;
5284 wl_sta = (struct wl1271_station *)sta->drv_priv;
5285 hlid = wl_sta->hlid;
5291 ba_bitmap = &wl->links[hlid].ba_bitmap;
5293 ret = wl1271_ps_elp_wakeup(wl);
5297 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5301 case IEEE80211_AMPDU_RX_START:
5302 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5307 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5309 wl1271_error("exceeded max RX BA sessions");
5313 if (*ba_bitmap & BIT(tid)) {
5315 wl1271_error("cannot enable RX BA session on active "
5320 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5325 *ba_bitmap |= BIT(tid);
5326 wl->ba_rx_session_count++;
5330 case IEEE80211_AMPDU_RX_STOP:
5331 if (!(*ba_bitmap & BIT(tid))) {
5333 * this happens on reconfig - so only output a debug
5334 * message for now, and don't fail the function.
5336 wl1271_debug(DEBUG_MAC80211,
5337 "no active RX BA session on tid: %d",
5343 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5346 *ba_bitmap &= ~BIT(tid);
5347 wl->ba_rx_session_count--;
5352 * The BA initiator session management in FW independently.
5353 * Falling break here on purpose for all TX APDU commands.
5355 case IEEE80211_AMPDU_TX_START:
5356 case IEEE80211_AMPDU_TX_STOP_CONT:
5357 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5358 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5359 case IEEE80211_AMPDU_TX_OPERATIONAL:
5364 wl1271_error("Incorrect ampdu action id=%x\n", action);
5368 wl1271_ps_elp_sleep(wl);
5371 mutex_unlock(&wl->mutex);
/*
 * wl12xx_set_bitrate_mask - mac80211 .set_bitrate_mask hook.
 * Stores the per-band legacy rate masks on the vif; if the vif is a
 * not-yet-associated STA it immediately refreshes the band rates and
 * re-pushes the rate policies to the firmware (associated STAs get the
 * mask applied on the next (re)association instead).
 */
5376 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5377 struct ieee80211_vif *vif,
5378 const struct cfg80211_bitrate_mask *mask)
5380 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5381 struct wl1271 *wl = hw->priv;
5384 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5385 mask->control[NL80211_BAND_2GHZ].legacy,
5386 mask->control[NL80211_BAND_5GHZ].legacy);
5388 mutex_lock(&wl->mutex);
5390 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5391 wlvif->bitrate_masks[i] =
5392 wl1271_tx_enabled_rates_get(wl,
5393 mask->control[i].legacy,
5396 if (unlikely(wl->state != WLCORE_STATE_ON))
5399 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5400 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5402 ret = wl1271_ps_elp_wakeup(wl);
5406 wl1271_set_band_rate(wl, wlvif);
5408 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5409 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5411 wl1271_ps_elp_sleep(wl);
5414 mutex_unlock(&wl->mutex);
/*
 * wl12xx_op_channel_switch - mac80211 .channel_switch hook (STA side).
 * Flushes TX, then if still associated asks the lower driver to perform
 * the CSA and arms channel_switch_work to report failure if the switch
 * has not completed ~5 seconds after the expected switch time. If the
 * chip is already off, the switch is completed (as failed) immediately.
 */
5419 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5420 struct ieee80211_vif *vif,
5421 struct ieee80211_channel_switch *ch_switch)
5423 struct wl1271 *wl = hw->priv;
5424 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5427 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5429 wl1271_tx_flush(wl);
5431 mutex_lock(&wl->mutex);
5433 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5434 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5435 ieee80211_chswitch_done(vif, false);
5437 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5441 ret = wl1271_ps_elp_wakeup(wl);
5445 /* TODO: change mac80211 to pass vif as param */
5447 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5448 unsigned long delay_usec;
5450 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5454 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5456 /* indicate failure 5 seconds after channel switch time */
5457 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5459 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5460 usecs_to_jiffies(delay_usec) +
5461 msecs_to_jiffies(5000));
5465 wl1271_ps_elp_sleep(wl);
5468 mutex_unlock(&wl->mutex);
/*
 * wlcore_get_beacon_ie - fetch a pointer to information element 'eid'
 * inside this vif's current beacon template. Builds the beacon via
 * ieee80211_beacon_get() and searches its variable part with
 * cfg80211_find_ie().
 * NOTE(review): the NULL-beacon check and skb release are not visible
 * in this extract — confirm the returned pointer's lifetime against the
 * full source.
 */
5471 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5472 struct wl12xx_vif *wlvif,
5475 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5476 struct sk_buff *beacon =
5477 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5482 return cfg80211_find_ie(eid,
5483 beacon->data + ieoffset,
5484 beacon->len - ieoffset);
/*
 * wlcore_get_csa_count - read the CSA countdown value from the beacon's
 * Channel Switch Announcement IE. The IE payload starts after the
 * 2-byte id/len header, hence &ie[2].
 */
5487 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5491 const struct ieee80211_channel_sw_ie *ie_csa;
5493 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5497 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5498 *csa_count = ie_csa->count;
/*
 * wlcore_op_channel_switch_beacon - mac80211 .channel_switch_beacon
 * hook (AP-side CSA). Extracts the CSA count from our own beacon to
 * seed the switch request, then asks the lower driver to perform the
 * channel switch and marks CS in progress.
 */
5503 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5504 struct ieee80211_vif *vif,
5505 struct cfg80211_chan_def *chandef)
5507 struct wl1271 *wl = hw->priv;
5508 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5509 struct ieee80211_channel_switch ch_switch = {
5511 .chandef = *chandef,
5515 wl1271_debug(DEBUG_MAC80211,
5516 "mac80211 channel switch beacon (role %d)",
5519 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5521 wl1271_error("error getting beacon (for CSA counter)");
5525 mutex_lock(&wl->mutex);
5527 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5532 ret = wl1271_ps_elp_wakeup(wl);
5536 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5540 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5543 wl1271_ps_elp_sleep(wl);
5545 mutex_unlock(&wl->mutex);
/*
 * wlcore_op_flush - mac80211 .flush hook. The queues/drop arguments are
 * ignored in the visible code; all pending TX is flushed uncondi-
 * tionally.
 */
5548 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5549 u32 queues, bool drop)
5551 struct wl1271 *wl = hw->priv;
5553 wl1271_tx_flush(wl);
/*
 * wlcore_op_remain_on_channel - mac80211 .remain_on_channel hook.
 * Only one ROC at a time is supported: returns busy if a ROC vif or an
 * active ROC role already exists. Otherwise starts the device role on
 * the requested channel and schedules roc_complete_work to expire the
 * ROC after 'duration' ms.
 */
5556 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5557 struct ieee80211_vif *vif,
5558 struct ieee80211_channel *chan,
5560 enum ieee80211_roc_type type)
5562 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5563 struct wl1271 *wl = hw->priv;
5564 int channel, ret = 0;
5566 channel = ieee80211_frequency_to_channel(chan->center_freq);
5568 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5569 channel, wlvif->role_id);
5571 mutex_lock(&wl->mutex);
5573 if (unlikely(wl->state != WLCORE_STATE_ON))
5576 /* return EBUSY if we can't ROC right now */
5577 if (WARN_ON(wl->roc_vif ||
5578 find_first_bit(wl->roc_map,
5579 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5584 ret = wl1271_ps_elp_wakeup(wl);
5588 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5593 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5594 msecs_to_jiffies(duration));
5596 wl1271_ps_elp_sleep(wl);
5598 mutex_unlock(&wl->mutex);
/*
 * __wlcore_roc_completed - tear down the current ROC state.
 * No-op if the ROC already completed (roc_vif cleared) or the vif is no
 * longer initialized; otherwise stops the device role.
 * Caller context: expects wl->mutex held and the FW awake (see callers).
 */
5602 static int __wlcore_roc_completed(struct wl1271 *wl)
5604 struct wl12xx_vif *wlvif;
5607 /* already completed */
5608 if (unlikely(!wl->roc_vif))
5611 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5613 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5616 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * wlcore_roc_completed - locked wrapper around __wlcore_roc_completed():
 * takes wl->mutex, wakes the chip, completes the ROC, then sleeps again.
 */
5625 static int wlcore_roc_completed(struct wl1271 *wl)
5629 wl1271_debug(DEBUG_MAC80211, "roc complete");
5631 mutex_lock(&wl->mutex);
5633 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5638 ret = wl1271_ps_elp_wakeup(wl);
5642 ret = __wlcore_roc_completed(wl);
5644 wl1271_ps_elp_sleep(wl);
5646 mutex_unlock(&wl->mutex);
/*
 * wlcore_roc_complete_work - delayed work that expires a ROC after its
 * requested duration, then notifies mac80211 that the ROC has ended.
 */
5651 static void wlcore_roc_complete_work(struct work_struct *work)
5653 struct delayed_work *dwork;
5657 dwork = container_of(work, struct delayed_work, work);
5658 wl = container_of(dwork, struct wl1271, roc_complete_work);
5660 ret = wlcore_roc_completed(wl);
5662 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * wlcore_op_cancel_remain_on_channel - mac80211
 * .cancel_remain_on_channel hook. Flushes TX, cancels the pending
 * expiry work (cancel, not flush — flush_work could deadlock since we
 * may be called from the same workqueue) and completes the ROC now.
 */
5665 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5667 struct wl1271 *wl = hw->priv;
5669 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5672 wl1271_tx_flush(wl);
5675 * we can't just flush_work here, because it might deadlock
5676 * (as we might get called from the same workqueue)
5678 cancel_delayed_work_sync(&wl->roc_complete_work);
5679 wlcore_roc_completed(wl);
/*
 * wlcore_op_sta_rc_update - mac80211 .sta_rc_update hook.
 * Only bandwidth changes are of interest; since this callback runs in
 * atomic context, the new bandwidth is stashed on the vif and the
 * actual firmware update is deferred to rc_update_work.
 */
5684 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5685 struct ieee80211_vif *vif,
5686 struct ieee80211_sta *sta,
5689 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5691 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5693 if (!(changed & IEEE80211_RC_BW_CHANGED))
5696 /* this callback is atomic, so schedule a new work */
5697 wlvif->rc_update_bw = sta->bandwidth;
5698 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * wlcore_op_sta_statistics - mac80211 .sta_statistics hook.
 * Queries the firmware's averaged RSSI and reports it as the signal
 * level; sinfo is left untouched if the chip is off or the query fails.
 */
5701 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5702 struct ieee80211_vif *vif,
5703 struct ieee80211_sta *sta,
5704 struct station_info *sinfo)
5706 struct wl1271 *wl = hw->priv;
5707 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5711 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5713 mutex_lock(&wl->mutex);
5715 if (unlikely(wl->state != WLCORE_STATE_ON))
5718 ret = wl1271_ps_elp_wakeup(wl);
5722 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5726 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5727 sinfo->signal = rssi_dbm;
5730 wl1271_ps_elp_sleep(wl);
5733 mutex_unlock(&wl->mutex);
/*
 * wl1271_tx_frames_pending - mac80211 .tx_frames_pending hook.
 * A frame counts as pending if it sits in the driver TX queues or has
 * been handed to the firmware but not yet completed.
 */
5736 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5738 struct wl1271 *wl = hw->priv;
5741 mutex_lock(&wl->mutex);
5743 if (unlikely(wl->state != WLCORE_STATE_ON))
5746 /* packets are considered pending if in the TX queue or the FW */
5747 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5749 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz legacy bitrate table: 11b rates (with short-preamble flags
 * where applicable) followed by the OFDM rates, each mapped to the
 * matching CONF_HW_BIT_RATE_* firmware rate bit.
 * NOTE(review): the .bitrate fields and entry braces are elided in this
 * extract (numbering gaps) — the table is incomplete as shown.
 */
5754 /* can't be const, mac80211 writes to this */
5755 static struct ieee80211_rate wl1271_rates[] = {
5757 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5758 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5760 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5761 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5762 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5764 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5765 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5766 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5768 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5769 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5770 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5772 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5773 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5775 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5776 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5778 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5779 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5781 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5782 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5784 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5785 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5787 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5788 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5790 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5791 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5793 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5794 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5815 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz channel table: channels 1-14 (2412-2484 MHz), each capped at
 * WLCORE_MAX_TXPWR. Flags/power are re-initialized per device in
 * wl1271_init_ieee80211() because mac80211 mutates these entries.
 */
5816 static struct ieee80211_channel wl1271_channels[] = {
5799 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5800 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5801 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5802 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5803 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5804 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5805 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5806 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5807 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5808 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5809 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5810 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5811 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5812 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5815 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz band descriptor tying together the channel and rate tables
 * above; copied into wl->bands[] per device so each device can modify
 * its own instance (ht_cap is filled in per chip).
 */
5816 static struct ieee80211_supported_band wl1271_band_2ghz = {
5817 .channels = wl1271_channels,
5818 .n_channels = ARRAY_SIZE(wl1271_channels),
5819 .bitrates = wl1271_rates,
5820 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5823 /* 5 GHz data rates for WL1273 */
/*
 * OFDM-only rate table for the 5 GHz band (6-54 Mbps); no CCK rates and
 * therefore no short-preamble flags. hw_value maps to the firmware's
 * CONF_HW_BIT_RATE_* bitmask. NOTE(review): .bitrate fields elided in
 * this listing.
 */
5824 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5826 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5827 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5829 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5830 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5832 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5833 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5835 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5836 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5838 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5839 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5841 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5842 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5844 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5845 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5847 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5848 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5851 /* 5 GHz band channels for WL1273 */
/*
 * 5 GHz channel table (Japan 4.9/5.0 GHz channels 8-16, UNII-1/2/2e/3).
 * All entries start at WLCORE_MAX_TXPWR; regulatory flags are applied
 * later via the reg_notifier and re-cleared in wl1271_init_ieee80211().
 */
5852 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5853 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5854 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5855 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5856 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5857 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5858 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5859 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5860 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5861 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5862 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5863 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5864 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5865 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5866 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5867 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5868 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5869 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5870 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5871 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5872 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5873 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5874 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5875 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5876 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5877 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5878 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5879 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5880 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5881 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5882 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5883 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/*
 * 5 GHz band descriptor; like the 2.4 GHz one, this template is copied
 * into wl->bands[] per device (see wl1271_init_ieee80211()).
 */
5886 static struct ieee80211_supported_band wl1271_band_5ghz = {
5887 .channels = wl1271_channels_5ghz,
5888 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5889 .bitrates = wl1271_rates_5ghz,
5890 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 callback table for all wlcore-based chips. The .tx callback
 * is not visible in this listing; all other handlers are the wl1271_/
 * wl12xx_/wlcore_ functions defined in this file. CFG80211_TESTMODE_CMD
 * compiles to nothing when testmode support is disabled.
 */
5893 static const struct ieee80211_ops wl1271_ops = {
5894 .start = wl1271_op_start,
5895 .stop = wlcore_op_stop,
5896 .add_interface = wl1271_op_add_interface,
5897 .remove_interface = wl1271_op_remove_interface,
5898 .change_interface = wl12xx_op_change_interface,
5900 .suspend = wl1271_op_suspend,
5901 .resume = wl1271_op_resume,
5903 .config = wl1271_op_config,
5904 .prepare_multicast = wl1271_op_prepare_multicast,
5905 .configure_filter = wl1271_op_configure_filter,
5907 .set_key = wlcore_op_set_key,
5908 .hw_scan = wl1271_op_hw_scan,
5909 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5910 .sched_scan_start = wl1271_op_sched_scan_start,
5911 .sched_scan_stop = wl1271_op_sched_scan_stop,
5912 .bss_info_changed = wl1271_op_bss_info_changed,
5913 .set_frag_threshold = wl1271_op_set_frag_threshold,
5914 .set_rts_threshold = wl1271_op_set_rts_threshold,
5915 .conf_tx = wl1271_op_conf_tx,
5916 .get_tsf = wl1271_op_get_tsf,
5917 .get_survey = wl1271_op_get_survey,
5918 .sta_state = wl12xx_op_sta_state,
5919 .ampdu_action = wl1271_op_ampdu_action,
5920 .tx_frames_pending = wl1271_tx_frames_pending,
5921 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5922 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5923 .channel_switch = wl12xx_op_channel_switch,
5924 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
5925 .flush = wlcore_op_flush,
5926 .remain_on_channel = wlcore_op_remain_on_channel,
5927 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5928 .add_chanctx = wlcore_op_add_chanctx,
5929 .remove_chanctx = wlcore_op_remove_chanctx,
5930 .change_chanctx = wlcore_op_change_chanctx,
5931 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5932 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5933 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5934 .sta_rc_update = wlcore_op_sta_rc_update,
5935 .sta_statistics = wlcore_op_sta_statistics,
5936 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * wlcore_rate_to_idx - translate a firmware RX rate code into the
 * mac80211 rate-table index for @band via wl->band_rate_to_idx.
 * Logs an error for out-of-range or unsupported HW rates; the fallback
 * return value for those paths is elided in this listing (presumably 0
 * or a defined "unsupported" index — confirm against the full file).
 */
5940 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
/* Guard against a rate code beyond the per-chip translation table */
5946 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5947 wl1271_error("Illegal RX rate from HW: %d", rate);
5951 idx = wl->band_rate_to_idx[band][rate];
5952 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5953 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * wl12xx_derive_mac_addresses - populate wl->addresses[] from a base
 * OUI (upper 24 bits) and NIC (lower 24 bits) pair, incrementing the
 * NIC part per interface (increment elided in this listing). If the
 * chip exposes fewer fused addresses than WLCORE_NUM_MAC_ADDRESSES,
 * the last slot is synthesized from address 0 with the locally-
 * administered bit (bit 1 of octet 0) set. Registers the resulting
 * table with wiphy so mac80211 can hand out per-vif addresses.
 */
5960 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5964 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* A 24-bit NIC counter can wrap when incremented per derived address */
5967 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5968 wl1271_warning("NIC part of the MAC address wraps around!");
5970 for (i = 0; i < wl->num_mac_addr; i++) {
5971 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5972 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5973 wl->addresses[i].addr[2] = (u8) oui;
5974 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5975 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5976 wl->addresses[i].addr[5] = (u8) nic;
5980 /* we may be one address short at the most */
5981 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5984 * turn on the LAA bit in the first address and use it as
5987 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5988 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5989 memcpy(&wl->addresses[idx], &wl->addresses[0],
5990 sizeof(wl->addresses[0]));
/* BIT(1) in octet 0 marks a locally administered address */
5992 wl->addresses[idx].addr[0] |= BIT(1);
5995 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5996 wl->hw->wiphy->addresses = wl->addresses;
/*
 * wl12xx_get_hw_info - read chip identification data while powered on:
 * chip ID register, PG (production/geometry) version via the chip-
 * specific op, and the fused MAC address if the chip provides get_mac.
 * Fuse OUI/NIC default to 0 so wl1271_register_hw() can detect "no
 * fused address". Returns 0 or a negative errno (error paths elided).
 */
5999 static int wl12xx_get_hw_info(struct wl1271 *wl)
6003 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6007 wl->fuse_oui_addr = 0;
6008 wl->fuse_nic_addr = 0;
6010 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
/* get_mac is optional; chips without fuse support leave 0/0 above */
6014 if (wl->ops->get_mac)
6015 ret = wl->ops->get_mac(wl);
/*
 * wl1271_register_hw - derive the device MAC addresses and register the
 * hw with mac80211 (idempotent: returns early if already registered).
 * The address comes from the NVS bytes when present, otherwise from the
 * chip fuse (fuse holds the BD_ADDR; WLAN addresses follow it).
 * Returns 0 or the ieee80211_register_hw() error.
 */
6021 static int wl1271_register_hw(struct wl1271 *wl)
6024 u32 oui_addr = 0, nic_addr = 0;
6026 if (wl->mac80211_registered)
6029 if (wl->nvs_len >= 12) {
6030 /* NOTE: The wl->nvs->nvs element must be first, in
6031 * order to simplify the casting, we assume it is at
6032 * the beginning of the wl->nvs structure.
6034 u8 *nvs_ptr = (u8 *)wl->nvs;
/* NVS MAC layout: OUI in bytes 11,10,6; NIC in bytes 5,4,3 */
6037 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6039 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6042 /* if the MAC address is zeroed in the NVS derive from fuse */
6043 if (oui_addr == 0 && nic_addr == 0) {
6044 oui_addr = wl->fuse_oui_addr;
6045 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6046 nic_addr = wl->fuse_nic_addr + 1;
6049 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6051 ret = ieee80211_register_hw(wl->hw);
6053 wl1271_error("unable to register mac80211 hw: %d", ret);
6057 wl->mac80211_registered = true;
6059 wl1271_debugfs_init(wl);
6061 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - tear down the mac80211 registration; stops PLT
 * (production-line test) mode first so the chip is quiesced. A guard
 * condition around wl1271_plt_stop() is elided in this listing.
 */
6067 static void wl1271_unregister_hw(struct wl1271 *wl)
6070 wl1271_plt_stop(wl);
6072 ieee80211_unregister_hw(wl->hw);
6073 wl->mac80211_registered = false;
/*
 * wl1271_init_ieee80211 - configure all static mac80211/cfg80211
 * capabilities before registration: cipher suites, interface modes,
 * scan limits, hw flags, band tables (copied per device), queue layout,
 * probe-response offload, interface combinations and vendor commands.
 * Returns 0 (return statement elided in this listing).
 */
6077 static int wl1271_init_ieee80211(struct wl1271 *wl)
6080 static const u32 cipher_suites[] = {
6081 WLAN_CIPHER_SUITE_WEP40,
6082 WLAN_CIPHER_SUITE_WEP104,
6083 WLAN_CIPHER_SUITE_TKIP,
6084 WLAN_CIPHER_SUITE_CCMP,
/* TI-proprietary GEM cipher, used on some operator networks */
6085 WL1271_CIPHER_SUITE_GEM,
6088 /* The tx descriptor buffer */
6089 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom for firmware TKIP header insertion */
6091 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6092 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6095 /* FIXME: find a proper value */
6096 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6098 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6099 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6100 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6101 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6102 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6103 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6104 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6105 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6106 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6107 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6108 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6109 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6110 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6112 wl->hw->wiphy->cipher_suites = cipher_suites;
6113 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6115 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6116 BIT(NL80211_IFTYPE_AP) |
6117 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6118 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6119 BIT(NL80211_IFTYPE_P2P_GO);
/* FW supports one SSID per one-shot scan; 16 for scheduled scans */
6120 wl->hw->wiphy->max_scan_ssids = 1;
6121 wl->hw->wiphy->max_sched_scan_ssids = 16;
6122 wl->hw->wiphy->max_match_sets = 16;
6124 * Maximum length of elements in scanning probe request templates
6125 * should be the maximum length possible for a template, without
6126 * the IEEE80211 header of the template
6128 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6129 sizeof(struct ieee80211_header);
6131 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6132 sizeof(struct ieee80211_header);
/* Remain-on-channel cap in ms */
6134 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6136 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6137 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6138 WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6139 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6141 /* make sure all our channels fit in the scanned_ch bitmask */
6142 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6143 ARRAY_SIZE(wl1271_channels_5ghz) >
6144 WL1271_MAX_CHANNELS);
6146 * clear channel flags from the previous usage
6147 * and restore max_power & max_antenna_gain values.
6149 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6150 wl1271_band_2ghz.channels[i].flags = 0;
6151 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6152 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6155 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6156 wl1271_band_5ghz.channels[i].flags = 0;
6157 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6158 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6162 * We keep local copies of the band structs because we need to
6163 * modify them on a per-device basis.
6165 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6166 sizeof(wl1271_band_2ghz));
/* Per-chip HT capabilities are overlaid onto the copied band structs */
6167 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6168 &wl->ht_cap[IEEE80211_BAND_2GHZ],
6169 sizeof(*wl->ht_cap));
6170 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6171 sizeof(wl1271_band_5ghz));
6172 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6173 &wl->ht_cap[IEEE80211_BAND_5GHZ],
6174 sizeof(*wl->ht_cap));
6176 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6177 &wl->bands[IEEE80211_BAND_2GHZ];
6178 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6179 &wl->bands[IEEE80211_BAND_5GHZ];
6182 * allow 4 queues per mac address we support +
6183 * 1 cab queue per mac + one global offchannel Tx queue
6185 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6187 /* the last queue is the offchannel queue */
6188 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6189 wl->hw->max_rates = 1;
6191 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6193 /* the FW answers probe-requests in AP-mode */
6194 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6195 wl->hw->wiphy->probe_resp_offload =
6196 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6197 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6198 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6200 /* allowed interface combinations */
6201 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6202 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6204 /* register vendor commands */
6205 wlcore_set_vendor_commands(wl->hw->wiphy);
6207 SET_IEEE80211_DEV(wl->hw, wl->dev);
6209 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6210 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6212 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate the ieee80211_hw, the wl1271 core state and
 * all long-lived buffers (chip-private area, aggregation buffer, dummy
 * packet, fwlog page, mailbox, 32-bit scratch buffer). Initializes
 * queues, work items, locks and defaults so the core is ready for
 * wlcore_probe(). On failure it unwinds through a goto ladder (labels
 * elided in this listing) and returns ERR_PTR(ret); the caller owns the
 * returned hw and releases it via wlcore_free_hw().
 */
6217 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6220 struct ieee80211_hw *hw;
6225 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6227 wl1271_error("could not alloc ieee80211_hw");
6233 memset(wl, 0, sizeof(*wl));
/* Chip-specific (wl12xx/wl18xx) private state hangs off the core */
6235 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6237 wl1271_error("could not alloc wl priv");
6239 goto err_priv_alloc;
6242 INIT_LIST_HEAD(&wl->wlvif_list);
6247 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6248 * we don't allocate any additional resource here, so that's fine.
6250 for (i = 0; i < NUM_TX_QUEUES; i++)
6251 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6252 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6254 skb_queue_head_init(&wl->deferred_rx_queue);
6255 skb_queue_head_init(&wl->deferred_tx_queue);
6257 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6258 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6259 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6260 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6261 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6262 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6263 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* Freezable so work is parked across system suspend */
6265 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6266 if (!wl->freezable_wq) {
6273 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6274 wl->band = IEEE80211_BAND_2GHZ;
6275 wl->channel_type = NL80211_CHAN_NO_HT;
6277 wl->sg_enabled = true;
6278 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6279 wl->recovery_count = 0;
6282 wl->ap_fw_ps_map = 0;
6284 wl->system_hlid = WL12XX_SYSTEM_HLID;
6285 wl->active_sta_count = 0;
6286 wl->active_link_count = 0;
6288 init_waitqueue_head(&wl->fwlog_waitq);
6290 /* The system link is always allocated */
6291 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6293 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6294 for (i = 0; i < wl->num_tx_desc; i++)
6295 wl->tx_frames[i] = NULL;
6297 spin_lock_init(&wl->wl_lock);
6299 wl->state = WLCORE_STATE_OFF;
6300 wl->fw_type = WL12XX_FW_TYPE_NONE;
6301 mutex_init(&wl->mutex);
6302 mutex_init(&wl->flush_mutex);
6303 init_completion(&wl->nvs_loading_complete);
/* Aggregation buffer must be physically contiguous pages */
6305 order = get_order(aggr_buf_size);
6306 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6307 if (!wl->aggr_buf) {
6311 wl->aggr_buf_size = aggr_buf_size;
6313 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6314 if (!wl->dummy_packet) {
6319 /* Allocate one page for the FW log */
6320 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6323 goto err_dummy_packet;
6326 wl->mbox_size = mbox_size;
/* GFP_DMA: the mailbox is read by the chip via DMA-capable memory */
6327 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6333 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6334 if (!wl->buffer_32) {
/* Error unwind: free in reverse order of allocation */
6345 free_page((unsigned long)wl->fwlog);
6348 dev_kfree_skb(wl->dummy_packet);
6351 free_pages((unsigned long)wl->aggr_buf, order);
6354 destroy_workqueue(wl->freezable_wq);
6357 wl1271_debugfs_exit(wl);
6361 ieee80211_free_hw(hw);
6365 return ERR_PTR(ret);
/*
 * wlcore_free_hw - release everything wlcore_alloc_hw() created.
 * First unblocks any sleeping fwlog readers (size -1 signals EOF-style
 * wakeup), then frees buffers, firmware-status structures, the work
 * queue and finally the ieee80211_hw itself. Returns 0 (return elided
 * in this listing).
 */
6369 int wlcore_free_hw(struct wl1271 *wl)
6371 /* Unblock any fwlog readers */
6372 mutex_lock(&wl->mutex);
6373 wl->fwlog_size = -1;
6374 wake_up_interruptible_all(&wl->fwlog_waitq);
6375 mutex_unlock(&wl->mutex);
6377 wlcore_sysfs_free(wl);
6379 kfree(wl->buffer_32);
6381 free_page((unsigned long)wl->fwlog);
6382 dev_kfree_skb(wl->dummy_packet);
6383 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6385 wl1271_debugfs_exit(wl);
/* Drop the cached firmware reference; a new one loads on next start */
6389 wl->fw_type = WL12XX_FW_TYPE_NONE;
6393 kfree(wl->raw_fw_status);
6394 kfree(wl->fw_status);
6395 kfree(wl->tx_res_if);
6396 destroy_workqueue(wl->freezable_wq);
6399 ieee80211_free_hw(wl->hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps the chip powered in suspend (see wlcore_nvs_cb): wake on any
 * packet, with up to WL1271_MAX_RX_FILTERS pattern filters.
 */
6406 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6407 .flags = WIPHY_WOWLAN_ANY,
6408 .n_patterns = WL1271_MAX_RX_FILTERS,
6409 .pattern_min_len = 1,
6410 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Minimal hard-IRQ handler used for edge-triggered interrupt lines:
 * defers all work to the threaded handler (wlcore_irq).
 */
6414 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6416 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - asynchronous firmware-loader callback that completes
 * device probing once the NVS calibration file is available (or its
 * absence has been established). Copies the NVS, runs the chip setup
 * op, reads hardware info under a temporary power-on, installs the
 * (optionally wake-capable) threaded IRQ handler, then registers the
 * mac80211 hw and sysfs entries. All error paths (elided goto labels)
 * unwind and finally signal nvs_loading_complete so wlcore_remove()
 * never blocks forever.
 */
6419 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6421 struct wl1271 *wl = context;
6422 struct platform_device *pdev = wl->pdev;
6423 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6424 struct resource *res;
6427 irq_handler_t hardirq_fn = NULL;
/* NVS is optional: keep a private copy if the file was found */
6430 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6432 wl1271_error("Could not allocate nvs data");
6435 wl->nvs_len = fw->size;
6437 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6443 ret = wl->ops->setup(wl);
6447 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6449 /* adjust some runtime configuration parameters */
6450 wlcore_adjust_conf(wl);
6452 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6454 wl1271_error("Could not get IRQ resource");
6458 wl->irq = res->start;
6459 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6460 wl->if_ops = pdev_data->if_ops;
/* Edge-triggered lines need a hardirq trampoline to avoid lost edges */
6462 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6463 hardirq_fn = wlcore_hardirq;
6465 wl->irq_flags |= IRQF_ONESHOT;
/* Power the chip briefly just to identify it */
6467 ret = wl12xx_set_power_on(wl);
6471 ret = wl12xx_get_hw_info(wl);
6473 wl1271_error("couldn't get hw info");
6474 wl1271_power_off(wl);
6478 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6479 wl->irq_flags, pdev->name, wl);
6481 wl1271_error("interrupt configuration failed");
6482 wl1271_power_off(wl);
/* If the IRQ can wake the system, advertise WoWLAN support */
6487 ret = enable_irq_wake(wl->irq);
6489 wl->irq_wake_enabled = true;
6490 device_init_wakeup(wl->dev, 1);
6491 if (pdev_data->pwr_in_suspend)
6492 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6495 disable_irq(wl->irq);
6496 wl1271_power_off(wl);
6498 ret = wl->ops->identify_chip(wl);
6502 ret = wl1271_init_ieee80211(wl);
6506 ret = wl1271_register_hw(wl);
6510 ret = wlcore_sysfs_init(wl);
6514 wl->initialized = true;
/* Error unwind targets (labels elided): undo registration and IRQ */
6518 wl1271_unregister_hw(wl);
6521 free_irq(wl->irq, wl);
6527 release_firmware(fw);
6528 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - platform-driver probe entry used by the chip-family
 * modules. Validates that the chip module supplied its ops/partition
 * table, then kicks off an asynchronous NVS load; actual initialization
 * continues in wlcore_nvs_cb(). On request failure it completes
 * nvs_loading_complete so a later remove cannot deadlock.
 * NOTE(review): "reject_firmware_nowait" appears to be a deblobbed
 * (linux-libre) rename of request_firmware_nowait — the error string on
 * the next line still says "request_firmware_nowait"; confirm intent.
 */
6531 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6535 if (!wl->ops || !wl->ptable)
6538 wl->dev = &pdev->dev;
6540 platform_set_drvdata(pdev, wl);
6542 ret = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6543 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6546 wl1271_error("request_firmware_nowait failed: %d", ret);
6547 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_remove - platform-driver remove entry. Waits for the async NVS
 * callback to finish (it always completes nvs_loading_complete), skips
 * teardown if probing never reached wl->initialized, then disables
 * wake-IRQ state and unwinds the mac80211/IRQ registration done in
 * wlcore_nvs_cb(). Returns 0 (return elided in this listing).
 */
6554 int wlcore_remove(struct platform_device *pdev)
6556 struct wl1271 *wl = platform_get_drvdata(pdev);
6558 wait_for_completion(&wl->nvs_loading_complete);
6559 if (!wl->initialized)
6562 if (wl->irq_wake_enabled) {
6563 device_init_wakeup(wl->dev, 0);
6564 disable_irq_wake(wl->irq);
6566 wl1271_unregister_hw(wl);
6567 free_irq(wl->irq, wl);
/*
 * Module parameters and metadata. The debug level is exported so the
 * chip-family modules share one knob; fwlog/bug_on_recovery/no_recovery
 * back the static variables declared near the top of this file (the
 * int params default to -1, meaning "not set by the user").
 */
6574 u32 wl12xx_debug_level = DEBUG_NONE;
6575 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6576 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6577 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6579 module_param_named(fwlog, fwlog_param, charp, 0);
6580 MODULE_PARM_DESC(fwlog,
6581 "FW logger options: continuous, ondemand, dbgpins or disable");
6583 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6584 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6586 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6587 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6589 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6590 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.")
6592 MODULE_LICENSE("GPL");
6593 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6594 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");