/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
26 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
27 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
30 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_scan_ev_arg *arg);
32 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_mgmt_rx_ev_arg *arg);
34 int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
36 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_ch_info_ev_arg *arg);
38 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_vdev_start_ev_arg *arg);
40 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_peer_kick_ev_arg *arg);
42 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
43 struct wmi_swba_ev_arg *arg);
44 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_phyerr_hdr_arg *arg);
46 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
47 int left_len, struct wmi_phyerr_ev_arg *arg);
48 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
49 struct wmi_svc_rdy_ev_arg *arg);
50 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_rdy_ev_arg *arg);
52 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
53 struct ath10k_fw_stats *stats);
54 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
55 struct wmi_roam_ev_arg *arg);
56 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
57 struct wmi_wow_ev_arg *arg);
58 int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
59 struct wmi_echo_ev_arg *arg);
60 int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
61 struct wmi_dfs_status_ev_arg *arg);
62 int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
63 struct wmi_svc_avail_ev_arg *arg);
65 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
67 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
68 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
69 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
70 u16 rd5g, u16 ctl2g, u16 ctl5g,
71 enum wmi_dfs_region dfs_reg);
72 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
74 struct sk_buff *(*gen_init)(struct ath10k *ar);
75 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
76 const struct wmi_start_scan_arg *arg);
77 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
78 const struct wmi_stop_scan_arg *arg);
79 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
80 enum wmi_vdev_type type,
81 enum wmi_vdev_subtype subtype,
82 const u8 macaddr[ETH_ALEN]);
83 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
84 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
85 const struct wmi_vdev_start_request_arg *arg,
87 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
88 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
90 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
91 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
92 u32 param_id, u32 param_value);
93 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
94 const struct wmi_vdev_install_key_arg *arg);
95 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
96 const struct wmi_vdev_spectral_conf_arg *arg);
97 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
98 u32 trigger, u32 enable);
99 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
100 const struct wmi_wmm_params_all_arg *arg);
101 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
102 const u8 peer_addr[ETH_ALEN],
103 enum wmi_peer_type peer_type);
104 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
105 const u8 peer_addr[ETH_ALEN]);
106 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
107 const u8 peer_addr[ETH_ALEN],
109 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
111 enum wmi_peer_param param_id,
113 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
114 const struct wmi_peer_assoc_complete_arg *arg);
115 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
116 enum wmi_sta_ps_mode psmode);
117 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
118 enum wmi_sta_powersave_param param_id,
120 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
122 enum wmi_ap_ps_peer_param param_id,
124 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
125 const struct wmi_scan_chan_list_arg *arg);
126 struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
128 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
129 const void *bcn, size_t bcn_len,
130 u32 bcn_paddr, bool dtim_zero,
132 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
133 const struct wmi_wmm_params_all_arg *arg);
134 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
135 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
136 enum wmi_force_fw_hang_type type,
138 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
139 struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
142 int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
143 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
145 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
146 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
147 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
148 u32 period, u32 duration,
151 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
152 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
154 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
155 const u8 *mac, u32 tid, u32 buf_size);
156 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
157 const u8 *mac, u32 tid,
159 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
160 const u8 *mac, u32 tid, u32 initiator,
162 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
163 u32 tim_ie_offset, struct sk_buff *bcn,
164 u32 prb_caps, u32 prb_erp,
165 void *prb_ies, size_t prb_ies_len);
166 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
167 struct sk_buff *bcn);
168 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
170 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
171 const u8 peer_addr[ETH_ALEN],
172 const struct wmi_sta_uapsd_auto_trig_arg *args,
174 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
175 const struct wmi_sta_keepalive_arg *arg);
176 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
177 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
178 enum wmi_wow_wakeup_event event,
180 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
181 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
187 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
189 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
191 enum wmi_tdls_state state);
192 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
193 const struct wmi_tdls_peer_update_cmd_arg *arg,
194 const struct wmi_tdls_peer_capab_arg *cap,
195 const struct wmi_channel_arg *chan);
196 struct sk_buff *(*gen_radar_found)
198 const struct ath10k_radar_found_info *arg);
199 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
200 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
202 void (*fw_stats_fill)(struct ath10k *ar,
203 struct ath10k_fw_stats *fw_stats,
205 struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
209 struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
210 enum wmi_host_platform_type type,
211 u32 fw_feature_bitmap);
212 int (*get_vdev_subtype)(struct ath10k *ar,
213 enum wmi_vdev_subtype subtype);
214 struct sk_buff *(*gen_pdev_bss_chan_info_req)
216 enum wmi_bss_survey_req_type type);
217 struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
218 struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
223 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
226 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
228 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
231 ar->wmi.ops->rx(ar, skb);
236 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
239 if (!ar->wmi.ops->map_svc)
242 ar->wmi.ops->map_svc(in, out, len);
247 ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
250 if (!ar->wmi.ops->map_svc_ext)
253 ar->wmi.ops->map_svc_ext(in, out, len);
258 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
259 struct wmi_scan_ev_arg *arg)
261 if (!ar->wmi.ops->pull_scan)
264 return ar->wmi.ops->pull_scan(ar, skb, arg);
268 ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
269 struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
271 if (!ar->wmi.ops->pull_mgmt_tx_compl)
274 return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
278 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
279 struct wmi_mgmt_rx_ev_arg *arg)
281 if (!ar->wmi.ops->pull_mgmt_rx)
284 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
288 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
289 struct wmi_ch_info_ev_arg *arg)
291 if (!ar->wmi.ops->pull_ch_info)
294 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
298 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
299 struct wmi_vdev_start_ev_arg *arg)
301 if (!ar->wmi.ops->pull_vdev_start)
304 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
308 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
309 struct wmi_peer_kick_ev_arg *arg)
311 if (!ar->wmi.ops->pull_peer_kick)
314 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
318 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
319 struct wmi_swba_ev_arg *arg)
321 if (!ar->wmi.ops->pull_swba)
324 return ar->wmi.ops->pull_swba(ar, skb, arg);
328 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
329 struct wmi_phyerr_hdr_arg *arg)
331 if (!ar->wmi.ops->pull_phyerr_hdr)
334 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
338 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
339 int left_len, struct wmi_phyerr_ev_arg *arg)
341 if (!ar->wmi.ops->pull_phyerr)
344 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
348 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
349 struct wmi_svc_rdy_ev_arg *arg)
351 if (!ar->wmi.ops->pull_svc_rdy)
354 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
358 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
359 struct wmi_rdy_ev_arg *arg)
361 if (!ar->wmi.ops->pull_rdy)
364 return ar->wmi.ops->pull_rdy(ar, skb, arg);
368 ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
369 struct wmi_svc_avail_ev_arg *arg)
371 if (!ar->wmi.ops->pull_svc_avail)
373 return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
377 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
378 struct ath10k_fw_stats *stats)
380 if (!ar->wmi.ops->pull_fw_stats)
383 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
387 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
388 struct wmi_roam_ev_arg *arg)
390 if (!ar->wmi.ops->pull_roam_ev)
393 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
397 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
398 struct wmi_wow_ev_arg *arg)
400 if (!ar->wmi.ops->pull_wow_event)
403 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
407 ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
408 struct wmi_echo_ev_arg *arg)
410 if (!ar->wmi.ops->pull_echo_ev)
413 return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
417 ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
418 struct wmi_dfs_status_ev_arg *arg)
420 if (!ar->wmi.ops->pull_dfs_status_ev)
423 return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
426 static inline enum wmi_txbf_conf
427 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
429 if (!ar->wmi.ops->get_txbf_conf_scheme)
430 return WMI_TXBF_CONF_UNSUPPORTED;
432 return ar->wmi.ops->get_txbf_conf_scheme(ar);
436 ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
438 if (!ar->wmi.ops->cleanup_mgmt_tx_send)
441 return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
445 ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
451 if (!ar->wmi.ops->gen_mgmt_tx_send)
454 skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
458 ret = ath10k_wmi_cmd_send(ar, skb,
459 ar->wmi.cmd->mgmt_tx_send_cmdid);
467 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
473 if (!ar->wmi.ops->gen_mgmt_tx)
476 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
480 ret = ath10k_wmi_cmd_send(ar, skb,
481 ar->wmi.cmd->mgmt_tx_cmdid);
485 /* FIXME There's no ACK event for Management Tx. This probably
486 * shouldn't be called here either.
488 info->flags |= IEEE80211_TX_STAT_ACK;
489 ieee80211_tx_status_irqsafe(ar->hw, msdu);
495 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
496 u16 ctl2g, u16 ctl5g,
497 enum wmi_dfs_region dfs_reg)
501 if (!ar->wmi.ops->gen_pdev_set_rd)
504 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
509 return ath10k_wmi_cmd_send(ar, skb,
510 ar->wmi.cmd->pdev_set_regdomain_cmdid);
514 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
518 if (!ar->wmi.ops->gen_pdev_suspend)
521 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
525 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
529 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
533 if (!ar->wmi.ops->gen_pdev_resume)
536 skb = ar->wmi.ops->gen_pdev_resume(ar);
540 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
544 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
548 if (!ar->wmi.ops->gen_pdev_set_param)
551 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
555 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
559 ath10k_wmi_cmd_init(struct ath10k *ar)
563 if (!ar->wmi.ops->gen_init)
566 skb = ar->wmi.ops->gen_init(ar);
570 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
574 ath10k_wmi_start_scan(struct ath10k *ar,
575 const struct wmi_start_scan_arg *arg)
579 if (!ar->wmi.ops->gen_start_scan)
582 skb = ar->wmi.ops->gen_start_scan(ar, arg);
586 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
590 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
594 if (!ar->wmi.ops->gen_stop_scan)
597 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
601 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
605 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
606 enum wmi_vdev_type type,
607 enum wmi_vdev_subtype subtype,
608 const u8 macaddr[ETH_ALEN])
612 if (!ar->wmi.ops->gen_vdev_create)
615 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
619 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
623 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
627 if (!ar->wmi.ops->gen_vdev_delete)
630 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
634 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
638 ath10k_wmi_vdev_start(struct ath10k *ar,
639 const struct wmi_vdev_start_request_arg *arg)
643 if (!ar->wmi.ops->gen_vdev_start)
646 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
650 return ath10k_wmi_cmd_send(ar, skb,
651 ar->wmi.cmd->vdev_start_request_cmdid);
655 ath10k_wmi_vdev_restart(struct ath10k *ar,
656 const struct wmi_vdev_start_request_arg *arg)
660 if (!ar->wmi.ops->gen_vdev_start)
663 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
667 return ath10k_wmi_cmd_send(ar, skb,
668 ar->wmi.cmd->vdev_restart_request_cmdid);
672 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
676 if (!ar->wmi.ops->gen_vdev_stop)
679 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
683 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
687 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
691 if (!ar->wmi.ops->gen_vdev_up)
694 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
698 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
702 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
706 if (!ar->wmi.ops->gen_vdev_down)
709 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
713 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
717 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
722 if (!ar->wmi.ops->gen_vdev_set_param)
725 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
730 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
734 ath10k_wmi_vdev_install_key(struct ath10k *ar,
735 const struct wmi_vdev_install_key_arg *arg)
739 if (!ar->wmi.ops->gen_vdev_install_key)
742 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
746 return ath10k_wmi_cmd_send(ar, skb,
747 ar->wmi.cmd->vdev_install_key_cmdid);
751 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
752 const struct wmi_vdev_spectral_conf_arg *arg)
757 if (!ar->wmi.ops->gen_vdev_spectral_conf)
760 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
764 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
765 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
769 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
775 if (!ar->wmi.ops->gen_vdev_spectral_enable)
778 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
783 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
784 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
788 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
789 const u8 peer_addr[ETH_ALEN],
790 const struct wmi_sta_uapsd_auto_trig_arg *args,
796 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
799 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
804 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
805 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
809 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
810 const struct wmi_wmm_params_all_arg *arg)
815 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
819 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
820 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
824 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
825 const u8 peer_addr[ETH_ALEN],
826 enum wmi_peer_type peer_type)
830 if (!ar->wmi.ops->gen_peer_create)
833 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
837 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
841 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
842 const u8 peer_addr[ETH_ALEN])
846 if (!ar->wmi.ops->gen_peer_delete)
849 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
853 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
857 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
858 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
862 if (!ar->wmi.ops->gen_peer_flush)
865 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
869 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
873 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
874 enum wmi_peer_param param_id, u32 param_value)
878 if (!ar->wmi.ops->gen_peer_set_param)
881 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
886 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
890 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
891 enum wmi_sta_ps_mode psmode)
895 if (!ar->wmi.ops->gen_set_psmode)
898 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
902 return ath10k_wmi_cmd_send(ar, skb,
903 ar->wmi.cmd->sta_powersave_mode_cmdid);
907 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
908 enum wmi_sta_powersave_param param_id, u32 value)
912 if (!ar->wmi.ops->gen_set_sta_ps)
915 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
919 return ath10k_wmi_cmd_send(ar, skb,
920 ar->wmi.cmd->sta_powersave_param_cmdid);
924 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
925 enum wmi_ap_ps_peer_param param_id, u32 value)
929 if (!ar->wmi.ops->gen_set_ap_ps)
932 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
936 return ath10k_wmi_cmd_send(ar, skb,
937 ar->wmi.cmd->ap_ps_peer_param_cmdid);
941 ath10k_wmi_scan_chan_list(struct ath10k *ar,
942 const struct wmi_scan_chan_list_arg *arg)
946 if (!ar->wmi.ops->gen_scan_chan_list)
949 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
953 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
957 ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
962 prob_req_oui = (((u32)mac_addr[0]) << 16) |
963 (((u32)mac_addr[1]) << 8) | mac_addr[2];
965 if (!ar->wmi.ops->gen_scan_prob_req_oui)
968 skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
972 return ath10k_wmi_cmd_send(ar, skb,
973 ar->wmi.cmd->scan_prob_req_oui_cmdid);
977 ath10k_wmi_peer_assoc(struct ath10k *ar,
978 const struct wmi_peer_assoc_complete_arg *arg)
982 if (!ar->wmi.ops->gen_peer_assoc)
985 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
989 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
993 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
994 const void *bcn, size_t bcn_len,
995 u32 bcn_paddr, bool dtim_zero,
1001 if (!ar->wmi.ops->gen_beacon_dma)
1004 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
1005 dtim_zero, deliver_cab);
1007 return PTR_ERR(skb);
1009 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
1010 ar->wmi.cmd->pdev_send_bcn_cmdid);
1020 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
1021 const struct wmi_wmm_params_all_arg *arg)
1023 struct sk_buff *skb;
1025 if (!ar->wmi.ops->gen_pdev_set_wmm)
1028 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
1030 return PTR_ERR(skb);
1032 return ath10k_wmi_cmd_send(ar, skb,
1033 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
1037 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
1039 struct sk_buff *skb;
1041 if (!ar->wmi.ops->gen_request_stats)
1044 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
1046 return PTR_ERR(skb);
1048 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
1052 ath10k_wmi_force_fw_hang(struct ath10k *ar,
1053 enum wmi_force_fw_hang_type type, u32 delay_ms)
1055 struct sk_buff *skb;
1057 if (!ar->wmi.ops->gen_force_fw_hang)
1060 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
1062 return PTR_ERR(skb);
1064 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
1068 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
1070 struct sk_buff *skb;
1072 if (!ar->wmi.ops->gen_dbglog_cfg)
1075 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
1077 return PTR_ERR(skb);
1079 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
1083 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
1085 struct sk_buff *skb;
1087 if (!ar->wmi.ops->gen_pktlog_enable)
1090 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
1092 return PTR_ERR(skb);
1094 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
1098 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
1100 struct sk_buff *skb;
1102 if (!ar->wmi.ops->gen_pktlog_disable)
1105 skb = ar->wmi.ops->gen_pktlog_disable(ar);
1107 return PTR_ERR(skb);
1109 return ath10k_wmi_cmd_send(ar, skb,
1110 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
1114 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
1115 u32 next_offset, u32 enabled)
1117 struct sk_buff *skb;
1119 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
1122 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
1123 next_offset, enabled);
1125 return PTR_ERR(skb);
1127 return ath10k_wmi_cmd_send(ar, skb,
1128 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
1132 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
1134 struct sk_buff *skb;
1136 if (!ar->wmi.ops->gen_pdev_get_temperature)
1139 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
1141 return PTR_ERR(skb);
1143 return ath10k_wmi_cmd_send(ar, skb,
1144 ar->wmi.cmd->pdev_get_temperature_cmdid);
1148 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1150 struct sk_buff *skb;
1152 if (!ar->wmi.ops->gen_addba_clear_resp)
1155 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1157 return PTR_ERR(skb);
1159 return ath10k_wmi_cmd_send(ar, skb,
1160 ar->wmi.cmd->addba_clear_resp_cmdid);
1164 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1165 u32 tid, u32 buf_size)
1167 struct sk_buff *skb;
1169 if (!ar->wmi.ops->gen_addba_send)
1172 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1174 return PTR_ERR(skb);
1176 return ath10k_wmi_cmd_send(ar, skb,
1177 ar->wmi.cmd->addba_send_cmdid);
1181 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1182 u32 tid, u32 status)
1184 struct sk_buff *skb;
1186 if (!ar->wmi.ops->gen_addba_set_resp)
1189 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1191 return PTR_ERR(skb);
1193 return ath10k_wmi_cmd_send(ar, skb,
1194 ar->wmi.cmd->addba_set_resp_cmdid);
1198 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1199 u32 tid, u32 initiator, u32 reason)
1201 struct sk_buff *skb;
1203 if (!ar->wmi.ops->gen_delba_send)
1206 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1209 return PTR_ERR(skb);
1211 return ath10k_wmi_cmd_send(ar, skb,
1212 ar->wmi.cmd->delba_send_cmdid);
1216 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1217 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1218 void *prb_ies, size_t prb_ies_len)
1220 struct sk_buff *skb;
1222 if (!ar->wmi.ops->gen_bcn_tmpl)
1225 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1226 prb_caps, prb_erp, prb_ies,
1229 return PTR_ERR(skb);
1231 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1235 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1237 struct sk_buff *skb;
1239 if (!ar->wmi.ops->gen_prb_tmpl)
1242 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1244 return PTR_ERR(skb);
1246 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1250 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1252 struct sk_buff *skb;
1254 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1257 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1259 return PTR_ERR(skb);
1261 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1265 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1266 const struct wmi_sta_keepalive_arg *arg)
1268 struct sk_buff *skb;
1271 if (!ar->wmi.ops->gen_sta_keepalive)
1274 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1276 return PTR_ERR(skb);
1278 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1279 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1283 ath10k_wmi_wow_enable(struct ath10k *ar)
1285 struct sk_buff *skb;
1288 if (!ar->wmi.ops->gen_wow_enable)
1291 skb = ar->wmi.ops->gen_wow_enable(ar);
1293 return PTR_ERR(skb);
1295 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1296 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1300 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1301 enum wmi_wow_wakeup_event event,
1304 struct sk_buff *skb;
1307 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1310 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1312 return PTR_ERR(skb);
1314 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1315 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1319 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1321 struct sk_buff *skb;
1324 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1327 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1329 return PTR_ERR(skb);
1331 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1332 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1336 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1337 const u8 *pattern, const u8 *mask,
1338 int pattern_len, int pattern_offset)
1340 struct sk_buff *skb;
1343 if (!ar->wmi.ops->gen_wow_add_pattern)
1346 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1347 pattern, mask, pattern_len,
1350 return PTR_ERR(skb);
1352 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1353 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1357 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1359 struct sk_buff *skb;
1362 if (!ar->wmi.ops->gen_wow_del_pattern)
1365 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1367 return PTR_ERR(skb);
1369 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1370 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1374 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1375 enum wmi_tdls_state state)
1377 struct sk_buff *skb;
1379 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1382 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1384 return PTR_ERR(skb);
1386 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1390 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1391 const struct wmi_tdls_peer_update_cmd_arg *arg,
1392 const struct wmi_tdls_peer_capab_arg *cap,
1393 const struct wmi_channel_arg *chan)
1395 struct sk_buff *skb;
1397 if (!ar->wmi.ops->gen_tdls_peer_update)
1400 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1402 return PTR_ERR(skb);
1404 return ath10k_wmi_cmd_send(ar, skb,
1405 ar->wmi.cmd->tdls_peer_update_cmdid);
1409 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1411 struct sk_buff *skb;
1413 if (!ar->wmi.ops->gen_adaptive_qcs)
1416 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1418 return PTR_ERR(skb);
1420 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1424 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1426 struct sk_buff *skb;
1428 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1431 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1434 return PTR_ERR(skb);
1436 return ath10k_wmi_cmd_send(ar, skb,
1437 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1441 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1444 if (!ar->wmi.ops->fw_stats_fill)
1447 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
1452 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
1453 u32 detect_level, u32 detect_margin)
1455 struct sk_buff *skb;
1457 if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
1460 skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
1465 return PTR_ERR(skb);
1467 return ath10k_wmi_cmd_send(ar, skb,
1468 ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
1472 ath10k_wmi_ext_resource_config(struct ath10k *ar,
1473 enum wmi_host_platform_type type,
1474 u32 fw_feature_bitmap)
1476 struct sk_buff *skb;
1478 if (!ar->wmi.ops->ext_resource_config)
1481 skb = ar->wmi.ops->ext_resource_config(ar, type,
1485 return PTR_ERR(skb);
1487 return ath10k_wmi_cmd_send(ar, skb,
1488 ar->wmi.cmd->ext_resource_cfg_cmdid);
1492 ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
1494 if (!ar->wmi.ops->get_vdev_subtype)
1497 return ar->wmi.ops->get_vdev_subtype(ar, subtype);
1501 ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
1502 enum wmi_bss_survey_req_type type)
1504 struct ath10k_wmi *wmi = &ar->wmi;
1505 struct sk_buff *skb;
1507 if (!wmi->ops->gen_pdev_bss_chan_info_req)
1510 skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
1512 return PTR_ERR(skb);
1514 return ath10k_wmi_cmd_send(ar, skb,
1515 wmi->cmd->pdev_bss_chan_info_request_cmdid);
1519 ath10k_wmi_echo(struct ath10k *ar, u32 value)
1521 struct ath10k_wmi *wmi = &ar->wmi;
1522 struct sk_buff *skb;
1524 if (!wmi->ops->gen_echo)
1527 skb = wmi->ops->gen_echo(ar, value);
1529 return PTR_ERR(skb);
1531 return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
1535 ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
1537 struct sk_buff *skb;
1539 if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
1542 skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
1545 return PTR_ERR(skb);
1547 return ath10k_wmi_cmd_send(ar, skb,
1548 ar->wmi.cmd->pdev_get_tpc_table_cmdid);
1552 ath10k_wmi_report_radar_found(struct ath10k *ar,
1553 const struct ath10k_radar_found_info *arg)
1555 struct sk_buff *skb;
1557 if (!ar->wmi.ops->gen_radar_found)
1560 skb = ar->wmi.ops->gen_radar_found(ar, arg);
1562 return PTR_ERR(skb);
1564 return ath10k_wmi_cmd_send(ar, skb,
1565 ar->wmi.cmd->radar_found_cmdid);