/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>

#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
26 static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
30 skb = alloc_skb(len, GFP_KERNEL);
33 memcpy(skb_put(skb, len), data, len);
38 static struct sk_buff *
39 mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
41 unsigned long timeout;
43 if (!time_is_after_jiffies(expires))
46 timeout = expires - jiffies;
47 wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
49 return skb_dequeue(&dev->mcu.res_q);
53 mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
56 unsigned long expires = jiffies + HZ;
63 mutex_lock(&dev->mcu.mutex);
65 seq = ++dev->mcu.msg_seq & 0xf;
67 seq = ++dev->mcu.msg_seq & 0xf;
69 ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
75 bool check_seq = false;
77 skb = mt76x2_mcu_get_response(dev, expires);
79 dev_err(dev->mt76.dev,
80 "MCU message %d (seq %d) timed out\n", cmd,
86 rxfce = (u32 *) skb->cb;
88 if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
97 mutex_unlock(&dev->mcu.mutex);
103 mt76pci_load_rom_patch(struct mt76x2_dev *dev)
105 const struct firmware *fw = NULL;
106 struct mt76x2_patch_header *hdr;
107 bool rom_protect = !is_mt7612(dev);
110 u32 patch_mask, patch_reg;
112 if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
113 dev_err(dev->mt76.dev,
114 "Could not get hardware semaphore for ROM PATCH\n");
118 if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
120 patch_reg = MT_MCU_CLOCK_CTL;
123 patch_reg = MT_MCU_COM_REG0;
126 if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
127 dev_info(dev->mt76.dev, "ROM patch already applied\n");
131 ret = reject_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
135 if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
137 dev_err(dev->mt76.dev, "Failed to load firmware\n");
141 hdr = (struct mt76x2_patch_header *) fw->data;
142 dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
144 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
146 cur = (__le32 *) (fw->data + sizeof(*hdr));
147 len = fw->size - sizeof(*hdr);
148 mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
150 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
153 mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
155 if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
156 dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
161 /* release semaphore */
163 mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
164 release_firmware(fw);
169 mt76pci_load_firmware(struct mt76x2_dev *dev)
171 const struct firmware *fw;
172 const struct mt76x2_fw_header *hdr;
177 ret = reject_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
181 if (!fw || !fw->data || fw->size < sizeof(*hdr))
184 hdr = (const struct mt76x2_fw_header *) fw->data;
187 len += le32_to_cpu(hdr->ilm_len);
188 len += le32_to_cpu(hdr->dlm_len);
193 val = le16_to_cpu(hdr->fw_ver);
194 dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
195 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
197 val = le16_to_cpu(hdr->build_ver);
198 dev_info(dev->mt76.dev, "Build: %x\n", val);
199 dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
201 cur = (__le32 *) (fw->data + sizeof(*hdr));
202 len = le32_to_cpu(hdr->ilm_len);
204 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
205 mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
207 cur += len / sizeof(*cur);
208 len = le32_to_cpu(hdr->dlm_len);
210 if (mt76xx_rev(dev) >= MT76XX_REV_E3)
211 offset = MT_MCU_DLM_ADDR_E3;
213 offset = MT_MCU_DLM_ADDR;
215 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
216 mt76_wr_copy(dev, offset, cur, len);
218 mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
220 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
221 if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
222 mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
224 /* trigger firmware */
225 mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
226 if (!mt76_poll_msec(dev, MT_MCU_COM_REG0, 1, 1, 200)) {
227 dev_err(dev->mt76.dev, "Firmware failed to start\n");
228 release_firmware(fw);
232 dev_info(dev->mt76.dev, "Firmware running!\n");
234 release_firmware(fw);
239 dev_err(dev->mt76.dev, "Invalid firmware\n");
240 release_firmware(fw);
245 mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
252 } __packed __aligned(4) msg = {
253 .id = cpu_to_le32(func),
254 .value = cpu_to_le32(val),
257 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
258 return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
261 int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
272 } __packed __aligned(4) msg = {
280 val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
281 val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
282 msg.cfg = cpu_to_le32(val);
284 /* first set the channel without the extension channel info */
285 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
286 return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
289 int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
290 u8 bw_index, bool scan)
303 } __packed __aligned(4) msg = {
307 .chainmask = cpu_to_le16(dev->chainmask),
310 /* first set the channel without the extension channel info */
311 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
312 mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
314 usleep_range(5000, 10000);
316 msg.ext_chan = 0xe0 + bw_index;
317 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
318 return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
321 int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
327 } __packed __aligned(4) msg = {
328 .mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
329 .level = cpu_to_le32(0),
332 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
333 return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
336 int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
343 } __packed __aligned(4) msg = {
344 .id = cpu_to_le32(type),
345 .value = cpu_to_le32(param),
349 mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
351 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
352 ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
356 if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
357 BIT(31), BIT(31), 100)))
363 int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
364 struct mt76x2_tssi_comp *tssi_data)
369 struct mt76x2_tssi_comp data;
370 } __packed __aligned(4) msg = {
371 .id = cpu_to_le32(MCU_CAL_TSSI_COMP),
375 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
376 return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
379 int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
386 } __packed __aligned(4) msg = {
387 .channel = cpu_to_le32(channel),
388 .gain_val = cpu_to_le32(gain),
392 msg.channel |= cpu_to_le32(BIT(31));
394 skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
395 return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
398 int mt76x2_mcu_init(struct mt76x2_dev *dev)
402 mutex_init(&dev->mcu.mutex);
404 ret = mt76pci_load_rom_patch(dev);
408 ret = mt76pci_load_firmware(dev);
412 mt76x2_mcu_function_select(dev, Q_SELECT, 1);
416 int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
420 mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
421 usleep_range(20000, 30000);
423 while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)