/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
25 void (*rx)(struct ath10k *ar, struct sk_buff *skb);
26 void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
28 int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
29 struct wmi_scan_ev_arg *arg);
30 int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
31 struct wmi_mgmt_rx_ev_arg *arg);
32 int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
33 struct wmi_ch_info_ev_arg *arg);
34 int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
35 struct wmi_vdev_start_ev_arg *arg);
36 int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
37 struct wmi_peer_kick_ev_arg *arg);
38 int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
39 struct wmi_swba_ev_arg *arg);
40 int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
41 struct wmi_phyerr_hdr_arg *arg);
42 int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
43 int left_len, struct wmi_phyerr_ev_arg *arg);
44 int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
45 struct wmi_svc_rdy_ev_arg *arg);
46 int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
47 struct wmi_rdy_ev_arg *arg);
48 int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
49 struct ath10k_fw_stats *stats);
50 int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
51 struct wmi_roam_ev_arg *arg);
52 int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
53 struct wmi_wow_ev_arg *arg);
54 enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
56 struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
57 struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
58 struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
59 u16 rd5g, u16 ctl2g, u16 ctl5g,
60 enum wmi_dfs_region dfs_reg);
61 struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
63 struct sk_buff *(*gen_init)(struct ath10k *ar);
64 struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
65 const struct wmi_start_scan_arg *arg);
66 struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
67 const struct wmi_stop_scan_arg *arg);
68 struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
69 enum wmi_vdev_type type,
70 enum wmi_vdev_subtype subtype,
71 const u8 macaddr[ETH_ALEN]);
72 struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
73 struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
74 const struct wmi_vdev_start_request_arg *arg,
76 struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
77 struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
79 struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
80 struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
81 u32 param_id, u32 param_value);
82 struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
83 const struct wmi_vdev_install_key_arg *arg);
84 struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
85 const struct wmi_vdev_spectral_conf_arg *arg);
86 struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
87 u32 trigger, u32 enable);
88 struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
89 const struct wmi_wmm_params_all_arg *arg);
90 struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
91 const u8 peer_addr[ETH_ALEN],
92 enum wmi_peer_type peer_type);
93 struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
94 const u8 peer_addr[ETH_ALEN]);
95 struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
96 const u8 peer_addr[ETH_ALEN],
98 struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
100 enum wmi_peer_param param_id,
102 struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
103 const struct wmi_peer_assoc_complete_arg *arg);
104 struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
105 enum wmi_sta_ps_mode psmode);
106 struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
107 enum wmi_sta_powersave_param param_id,
109 struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
111 enum wmi_ap_ps_peer_param param_id,
113 struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
114 const struct wmi_scan_chan_list_arg *arg);
115 struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
116 const void *bcn, size_t bcn_len,
117 u32 bcn_paddr, bool dtim_zero,
119 struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
120 const struct wmi_wmm_params_all_arg *arg);
121 struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
122 struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
123 enum wmi_force_fw_hang_type type,
125 struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
126 struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
128 struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
129 struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
130 struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
131 u32 period, u32 duration,
134 struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
135 struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
137 struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
138 const u8 *mac, u32 tid, u32 buf_size);
139 struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
140 const u8 *mac, u32 tid,
142 struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
143 const u8 *mac, u32 tid, u32 initiator,
145 struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
146 u32 tim_ie_offset, struct sk_buff *bcn,
147 u32 prb_caps, u32 prb_erp,
148 void *prb_ies, size_t prb_ies_len);
149 struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
150 struct sk_buff *bcn);
151 struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
153 struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
154 const u8 peer_addr[ETH_ALEN],
155 const struct wmi_sta_uapsd_auto_trig_arg *args,
157 struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
158 const struct wmi_sta_keepalive_arg *arg);
159 struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
160 struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
161 enum wmi_wow_wakeup_event event,
163 struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
164 struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
170 struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
172 struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
174 enum wmi_tdls_state state);
175 struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
176 const struct wmi_tdls_peer_update_cmd_arg *arg,
177 const struct wmi_tdls_peer_capab_arg *cap,
178 const struct wmi_channel_arg *chan);
179 struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
180 struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
182 void (*fw_stats_fill)(struct ath10k *ar,
183 struct ath10k_fw_stats *fw_stats,
187 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
190 ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
192 if (WARN_ON_ONCE(!ar->wmi.ops->rx))
195 ar->wmi.ops->rx(ar, skb);
200 ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
203 if (!ar->wmi.ops->map_svc)
206 ar->wmi.ops->map_svc(in, out, len);
211 ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
212 struct wmi_scan_ev_arg *arg)
214 if (!ar->wmi.ops->pull_scan)
217 return ar->wmi.ops->pull_scan(ar, skb, arg);
221 ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
222 struct wmi_mgmt_rx_ev_arg *arg)
224 if (!ar->wmi.ops->pull_mgmt_rx)
227 return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
231 ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
232 struct wmi_ch_info_ev_arg *arg)
234 if (!ar->wmi.ops->pull_ch_info)
237 return ar->wmi.ops->pull_ch_info(ar, skb, arg);
241 ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
242 struct wmi_vdev_start_ev_arg *arg)
244 if (!ar->wmi.ops->pull_vdev_start)
247 return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
251 ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
252 struct wmi_peer_kick_ev_arg *arg)
254 if (!ar->wmi.ops->pull_peer_kick)
257 return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
261 ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
262 struct wmi_swba_ev_arg *arg)
264 if (!ar->wmi.ops->pull_swba)
267 return ar->wmi.ops->pull_swba(ar, skb, arg);
271 ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
272 struct wmi_phyerr_hdr_arg *arg)
274 if (!ar->wmi.ops->pull_phyerr_hdr)
277 return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
281 ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
282 int left_len, struct wmi_phyerr_ev_arg *arg)
284 if (!ar->wmi.ops->pull_phyerr)
287 return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
291 ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
292 struct wmi_svc_rdy_ev_arg *arg)
294 if (!ar->wmi.ops->pull_svc_rdy)
297 return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
301 ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
302 struct wmi_rdy_ev_arg *arg)
304 if (!ar->wmi.ops->pull_rdy)
307 return ar->wmi.ops->pull_rdy(ar, skb, arg);
311 ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
312 struct ath10k_fw_stats *stats)
314 if (!ar->wmi.ops->pull_fw_stats)
317 return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
321 ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
322 struct wmi_roam_ev_arg *arg)
324 if (!ar->wmi.ops->pull_roam_ev)
327 return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
331 ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
332 struct wmi_wow_ev_arg *arg)
334 if (!ar->wmi.ops->pull_wow_event)
337 return ar->wmi.ops->pull_wow_event(ar, skb, arg);
340 static inline enum wmi_txbf_conf
341 ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
343 if (!ar->wmi.ops->get_txbf_conf_scheme)
344 return WMI_TXBF_CONF_UNSUPPORTED;
346 return ar->wmi.ops->get_txbf_conf_scheme(ar);
350 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
352 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
356 if (!ar->wmi.ops->gen_mgmt_tx)
359 skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
363 ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
367 /* FIXME There's no ACK event for Management Tx. This probably
368 * shouldn't be called here either. */
369 info->flags |= IEEE80211_TX_STAT_ACK;
370 ieee80211_tx_status_irqsafe(ar->hw, msdu);
376 ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
377 u16 ctl2g, u16 ctl5g,
378 enum wmi_dfs_region dfs_reg)
382 if (!ar->wmi.ops->gen_pdev_set_rd)
385 skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
390 return ath10k_wmi_cmd_send(ar, skb,
391 ar->wmi.cmd->pdev_set_regdomain_cmdid);
395 ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
399 if (!ar->wmi.ops->gen_pdev_suspend)
402 skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
406 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
410 ath10k_wmi_pdev_resume_target(struct ath10k *ar)
414 if (!ar->wmi.ops->gen_pdev_resume)
417 skb = ar->wmi.ops->gen_pdev_resume(ar);
421 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
425 ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
429 if (!ar->wmi.ops->gen_pdev_set_param)
432 skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
436 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
440 ath10k_wmi_cmd_init(struct ath10k *ar)
444 if (!ar->wmi.ops->gen_init)
447 skb = ar->wmi.ops->gen_init(ar);
451 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
455 ath10k_wmi_start_scan(struct ath10k *ar,
456 const struct wmi_start_scan_arg *arg)
460 if (!ar->wmi.ops->gen_start_scan)
463 skb = ar->wmi.ops->gen_start_scan(ar, arg);
467 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
471 ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
475 if (!ar->wmi.ops->gen_stop_scan)
478 skb = ar->wmi.ops->gen_stop_scan(ar, arg);
482 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
486 ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
487 enum wmi_vdev_type type,
488 enum wmi_vdev_subtype subtype,
489 const u8 macaddr[ETH_ALEN])
493 if (!ar->wmi.ops->gen_vdev_create)
496 skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
500 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
504 ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
508 if (!ar->wmi.ops->gen_vdev_delete)
511 skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
515 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
519 ath10k_wmi_vdev_start(struct ath10k *ar,
520 const struct wmi_vdev_start_request_arg *arg)
524 if (!ar->wmi.ops->gen_vdev_start)
527 skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
531 return ath10k_wmi_cmd_send(ar, skb,
532 ar->wmi.cmd->vdev_start_request_cmdid);
536 ath10k_wmi_vdev_restart(struct ath10k *ar,
537 const struct wmi_vdev_start_request_arg *arg)
541 if (!ar->wmi.ops->gen_vdev_start)
544 skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
548 return ath10k_wmi_cmd_send(ar, skb,
549 ar->wmi.cmd->vdev_restart_request_cmdid);
553 ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
557 if (!ar->wmi.ops->gen_vdev_stop)
560 skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
564 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
568 ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
572 if (!ar->wmi.ops->gen_vdev_up)
575 skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
579 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
583 ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
587 if (!ar->wmi.ops->gen_vdev_down)
590 skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
594 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
598 ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
603 if (!ar->wmi.ops->gen_vdev_set_param)
606 skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
611 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
615 ath10k_wmi_vdev_install_key(struct ath10k *ar,
616 const struct wmi_vdev_install_key_arg *arg)
620 if (!ar->wmi.ops->gen_vdev_install_key)
623 skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
627 return ath10k_wmi_cmd_send(ar, skb,
628 ar->wmi.cmd->vdev_install_key_cmdid);
632 ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
633 const struct wmi_vdev_spectral_conf_arg *arg)
638 skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
642 cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
643 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
647 ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
653 skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
658 cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
659 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
663 ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
664 const u8 peer_addr[ETH_ALEN],
665 const struct wmi_sta_uapsd_auto_trig_arg *args,
671 if (!ar->wmi.ops->gen_vdev_sta_uapsd)
674 skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
679 cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
680 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
684 ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
685 const struct wmi_wmm_params_all_arg *arg)
690 skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
694 cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
695 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
699 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
700 const u8 peer_addr[ETH_ALEN],
701 enum wmi_peer_type peer_type)
705 if (!ar->wmi.ops->gen_peer_create)
708 skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
712 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
716 ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
717 const u8 peer_addr[ETH_ALEN])
721 if (!ar->wmi.ops->gen_peer_delete)
724 skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
728 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
732 ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
733 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
737 if (!ar->wmi.ops->gen_peer_flush)
740 skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
744 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
748 ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
749 enum wmi_peer_param param_id, u32 param_value)
753 if (!ar->wmi.ops->gen_peer_set_param)
756 skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
761 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
765 ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
766 enum wmi_sta_ps_mode psmode)
770 if (!ar->wmi.ops->gen_set_psmode)
773 skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
777 return ath10k_wmi_cmd_send(ar, skb,
778 ar->wmi.cmd->sta_powersave_mode_cmdid);
782 ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
783 enum wmi_sta_powersave_param param_id, u32 value)
787 if (!ar->wmi.ops->gen_set_sta_ps)
790 skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
794 return ath10k_wmi_cmd_send(ar, skb,
795 ar->wmi.cmd->sta_powersave_param_cmdid);
799 ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
800 enum wmi_ap_ps_peer_param param_id, u32 value)
804 if (!ar->wmi.ops->gen_set_ap_ps)
807 skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
811 return ath10k_wmi_cmd_send(ar, skb,
812 ar->wmi.cmd->ap_ps_peer_param_cmdid);
816 ath10k_wmi_scan_chan_list(struct ath10k *ar,
817 const struct wmi_scan_chan_list_arg *arg)
821 if (!ar->wmi.ops->gen_scan_chan_list)
824 skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
828 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
832 ath10k_wmi_peer_assoc(struct ath10k *ar,
833 const struct wmi_peer_assoc_complete_arg *arg)
837 if (!ar->wmi.ops->gen_peer_assoc)
840 skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
844 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
848 ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
849 const void *bcn, size_t bcn_len,
850 u32 bcn_paddr, bool dtim_zero,
856 if (!ar->wmi.ops->gen_beacon_dma)
859 skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
860 dtim_zero, deliver_cab);
864 ret = ath10k_wmi_cmd_send_nowait(ar, skb,
865 ar->wmi.cmd->pdev_send_bcn_cmdid);
875 ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
876 const struct wmi_wmm_params_all_arg *arg)
880 if (!ar->wmi.ops->gen_pdev_set_wmm)
883 skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
887 return ath10k_wmi_cmd_send(ar, skb,
888 ar->wmi.cmd->pdev_set_wmm_params_cmdid);
892 ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
896 if (!ar->wmi.ops->gen_request_stats)
899 skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
903 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
907 ath10k_wmi_force_fw_hang(struct ath10k *ar,
908 enum wmi_force_fw_hang_type type, u32 delay_ms)
912 if (!ar->wmi.ops->gen_force_fw_hang)
915 skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
919 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
923 ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
927 if (!ar->wmi.ops->gen_dbglog_cfg)
930 skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
934 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
938 ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
942 if (!ar->wmi.ops->gen_pktlog_enable)
945 skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
949 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
953 ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
957 if (!ar->wmi.ops->gen_pktlog_disable)
960 skb = ar->wmi.ops->gen_pktlog_disable(ar);
964 return ath10k_wmi_cmd_send(ar, skb,
965 ar->wmi.cmd->pdev_pktlog_disable_cmdid);
969 ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
970 u32 next_offset, u32 enabled)
974 if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
977 skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
978 next_offset, enabled);
982 return ath10k_wmi_cmd_send(ar, skb,
983 ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
987 ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
991 if (!ar->wmi.ops->gen_pdev_get_temperature)
994 skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
998 return ath10k_wmi_cmd_send(ar, skb,
999 ar->wmi.cmd->pdev_get_temperature_cmdid);
1003 ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
1005 struct sk_buff *skb;
1007 if (!ar->wmi.ops->gen_addba_clear_resp)
1010 skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
1012 return PTR_ERR(skb);
1014 return ath10k_wmi_cmd_send(ar, skb,
1015 ar->wmi.cmd->addba_clear_resp_cmdid);
1019 ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1020 u32 tid, u32 buf_size)
1022 struct sk_buff *skb;
1024 if (!ar->wmi.ops->gen_addba_send)
1027 skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
1029 return PTR_ERR(skb);
1031 return ath10k_wmi_cmd_send(ar, skb,
1032 ar->wmi.cmd->addba_send_cmdid);
1036 ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1037 u32 tid, u32 status)
1039 struct sk_buff *skb;
1041 if (!ar->wmi.ops->gen_addba_set_resp)
1044 skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
1046 return PTR_ERR(skb);
1048 return ath10k_wmi_cmd_send(ar, skb,
1049 ar->wmi.cmd->addba_set_resp_cmdid);
1053 ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
1054 u32 tid, u32 initiator, u32 reason)
1056 struct sk_buff *skb;
1058 if (!ar->wmi.ops->gen_delba_send)
1061 skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
1064 return PTR_ERR(skb);
1066 return ath10k_wmi_cmd_send(ar, skb,
1067 ar->wmi.cmd->delba_send_cmdid);
1071 ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
1072 struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
1073 void *prb_ies, size_t prb_ies_len)
1075 struct sk_buff *skb;
1077 if (!ar->wmi.ops->gen_bcn_tmpl)
1080 skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
1081 prb_caps, prb_erp, prb_ies,
1084 return PTR_ERR(skb);
1086 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
1090 ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
1092 struct sk_buff *skb;
1094 if (!ar->wmi.ops->gen_prb_tmpl)
1097 skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
1099 return PTR_ERR(skb);
1101 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
1105 ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
1107 struct sk_buff *skb;
1109 if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
1112 skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
1114 return PTR_ERR(skb);
1116 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
1120 ath10k_wmi_sta_keepalive(struct ath10k *ar,
1121 const struct wmi_sta_keepalive_arg *arg)
1123 struct sk_buff *skb;
1126 if (!ar->wmi.ops->gen_sta_keepalive)
1129 skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
1131 return PTR_ERR(skb);
1133 cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
1134 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1138 ath10k_wmi_wow_enable(struct ath10k *ar)
1140 struct sk_buff *skb;
1143 if (!ar->wmi.ops->gen_wow_enable)
1146 skb = ar->wmi.ops->gen_wow_enable(ar);
1148 return PTR_ERR(skb);
1150 cmd_id = ar->wmi.cmd->wow_enable_cmdid;
1151 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1155 ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
1156 enum wmi_wow_wakeup_event event,
1159 struct sk_buff *skb;
1162 if (!ar->wmi.ops->gen_wow_add_wakeup_event)
1165 skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
1167 return PTR_ERR(skb);
1169 cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
1170 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1174 ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
1176 struct sk_buff *skb;
1179 if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
1182 skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
1184 return PTR_ERR(skb);
1186 cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
1187 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1191 ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
1192 const u8 *pattern, const u8 *mask,
1193 int pattern_len, int pattern_offset)
1195 struct sk_buff *skb;
1198 if (!ar->wmi.ops->gen_wow_add_pattern)
1201 skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
1202 pattern, mask, pattern_len,
1205 return PTR_ERR(skb);
1207 cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
1208 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1212 ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
1214 struct sk_buff *skb;
1217 if (!ar->wmi.ops->gen_wow_del_pattern)
1220 skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
1222 return PTR_ERR(skb);
1224 cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
1225 return ath10k_wmi_cmd_send(ar, skb, cmd_id);
1229 ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
1230 enum wmi_tdls_state state)
1232 struct sk_buff *skb;
1234 if (!ar->wmi.ops->gen_update_fw_tdls_state)
1237 skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
1239 return PTR_ERR(skb);
1241 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
1245 ath10k_wmi_tdls_peer_update(struct ath10k *ar,
1246 const struct wmi_tdls_peer_update_cmd_arg *arg,
1247 const struct wmi_tdls_peer_capab_arg *cap,
1248 const struct wmi_channel_arg *chan)
1250 struct sk_buff *skb;
1252 if (!ar->wmi.ops->gen_tdls_peer_update)
1255 skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
1257 return PTR_ERR(skb);
1259 return ath10k_wmi_cmd_send(ar, skb,
1260 ar->wmi.cmd->tdls_peer_update_cmdid);
1264 ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
1266 struct sk_buff *skb;
1268 if (!ar->wmi.ops->gen_adaptive_qcs)
1271 skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
1273 return PTR_ERR(skb);
1275 return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
1279 ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
1281 struct sk_buff *skb;
1283 if (!ar->wmi.ops->gen_pdev_get_tpc_config)
1286 skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
1289 return PTR_ERR(skb);
1291 return ath10k_wmi_cmd_send(ar, skb,
1292 ar->wmi.cmd->pdev_get_tpc_config_cmdid);
1296 ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
1299 if (!ar->wmi.ops->fw_stats_fill)
1302 ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);