/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

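/* Map a WMI command ID to its name, for use in debug messages. */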
static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
{
	switch (wmi_cmd) {
	case WMI_ECHO_CMDID:
		return "WMI_ECHO_CMDID";
	case WMI_ACCESS_MEMORY_CMDID:
		return "WMI_ACCESS_MEMORY_CMDID";
	case WMI_GET_FW_VERSION:
		return "WMI_GET_FW_VERSION";
	case WMI_DISABLE_INTR_CMDID:
		return "WMI_DISABLE_INTR_CMDID";
	case WMI_ENABLE_INTR_CMDID:
		return "WMI_ENABLE_INTR_CMDID";
	case WMI_ATH_INIT_CMDID:
		return "WMI_ATH_INIT_CMDID";
	case WMI_ABORT_TXQ_CMDID:
		return "WMI_ABORT_TXQ_CMDID";
	case WMI_STOP_TX_DMA_CMDID:
		return "WMI_STOP_TX_DMA_CMDID";
	case WMI_ABORT_TX_DMA_CMDID:
		return "WMI_ABORT_TX_DMA_CMDID";
	case WMI_DRAIN_TXQ_CMDID:
		return "WMI_DRAIN_TXQ_CMDID";
	case WMI_DRAIN_TXQ_ALL_CMDID:
		return "WMI_DRAIN_TXQ_ALL_CMDID";
	case WMI_START_RECV_CMDID:
		return "WMI_START_RECV_CMDID";
	case WMI_STOP_RECV_CMDID:
		return "WMI_STOP_RECV_CMDID";
	case WMI_FLUSH_RECV_CMDID:
		return "WMI_FLUSH_RECV_CMDID";
	case WMI_SET_MODE_CMDID:
		return "WMI_SET_MODE_CMDID";
	case WMI_NODE_CREATE_CMDID:
		return "WMI_NODE_CREATE_CMDID";
	case WMI_NODE_REMOVE_CMDID:
		return "WMI_NODE_REMOVE_CMDID";
	case WMI_VAP_REMOVE_CMDID:
		return "WMI_VAP_REMOVE_CMDID";
	case WMI_VAP_CREATE_CMDID:
		return "WMI_VAP_CREATE_CMDID";
	case WMI_REG_READ_CMDID:
		return "WMI_REG_READ_CMDID";
	case WMI_REG_WRITE_CMDID:
		return "WMI_REG_WRITE_CMDID";
	case WMI_REG_RMW_CMDID:
		return "WMI_REG_RMW_CMDID";
	case WMI_RC_STATE_CHANGE_CMDID:
		return "WMI_RC_STATE_CHANGE_CMDID";
	case WMI_RC_RATE_UPDATE_CMDID:
		return "WMI_RC_RATE_UPDATE_CMDID";
	case WMI_TARGET_IC_UPDATE_CMDID:
		return "WMI_TARGET_IC_UPDATE_CMDID";
	case WMI_TX_AGGR_ENABLE_CMDID:
		return "WMI_TX_AGGR_ENABLE_CMDID";
	case WMI_TGT_DETACH_CMDID:
		return "WMI_TGT_DETACH_CMDID";
	case WMI_NODE_UPDATE_CMDID:
		return "WMI_NODE_UPDATE_CMDID";
	case WMI_INT_STATS_CMDID:
		return "WMI_INT_STATS_CMDID";
	case WMI_TX_STATS_CMDID:
		return "WMI_TX_STATS_CMDID";
	case WMI_RX_STATS_CMDID:
		return "WMI_RX_STATS_CMDID";
	case WMI_BITRATE_MASK_CMDID:
		return "WMI_BITRATE_MASK_CMDID";
	}

	return "Bogus";
}

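/*
 * Allocate and initialize the WMI layer: the locks and mutexes, the
 * command completion, and the event queue/tasklet used to defer WMI
 * events received from the target.
 */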
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi;

	wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
	if (!wmi)
		return NULL;

	wmi->drv_priv = priv;
	wmi->stopped = false;
	skb_queue_head_init(&wmi->wmi_event_queue);
	spin_lock_init(&wmi->wmi_lock);
	spin_lock_init(&wmi->event_lock);
	mutex_init(&wmi->op_mutex);
	mutex_init(&wmi->multi_write_mutex);
	mutex_init(&wmi->multi_rmw_mutex);
	init_completion(&wmi->cmd_wait);
	INIT_LIST_HEAD(&wmi->pending_tx_events);
	tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);

	return wmi;
}

void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi = priv->wmi;

	mutex_lock(&wmi->op_mutex);
	wmi->stopped = true;
	mutex_unlock(&wmi->op_mutex);
}

void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
{
	kfree(priv->wmi);
}

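/*
 * Kill the event tasklet and drop any WMI events that are still queued,
 * so that stale events are not processed after a stop/reset.
 */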
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
{
	unsigned long flags;

	tasklet_kill(&priv->wmi->wmi_event_tasklet);
	spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
	__skb_queue_purge(&priv->wmi->wmi_event_queue);
	spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}

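/*
 * Tasklet that dequeues WMI events pushed by ath9k_wmi_ctrl_rx() and
 * dispatches them: SWBA events go to the beacon path, fatal events
 * schedule a device reset, and TX status events are handed to the TX
 * completion path unless TX is not yet initialized or is being drained.
 */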
void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
{
	struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
	struct ath9k_htc_priv *priv = wmi->drv_priv;
	struct wmi_cmd_hdr *hdr;
	void *wmi_event;
	struct wmi_event_swba *swba;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	u16 cmd_id;

	do {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		skb = __skb_dequeue(&wmi->wmi_event_queue);
		if (!skb) {
			spin_unlock_irqrestore(&wmi->wmi_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);

		hdr = (struct wmi_cmd_hdr *) skb->data;
		cmd_id = be16_to_cpu(hdr->command_id);
		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));

		switch (cmd_id) {
		case WMI_SWBA_EVENTID:
			swba = wmi_event;
			ath9k_htc_swba(priv, swba);
			break;
		case WMI_FATAL_EVENTID:
			ieee80211_queue_work(wmi->drv_priv->hw,
					     &wmi->drv_priv->fatal_work);
			break;
		case WMI_TXSTATUS_EVENTID:
			/* Check if ath9k_tx_init() completed. */
			if (!data_race(priv->tx.initialized))
				break;

			spin_lock_bh(&priv->tx.tx_lock);
			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
				spin_unlock_bh(&priv->tx.tx_lock);
				break;
			}
			spin_unlock_bh(&priv->tx.tx_lock);

			ath9k_htc_txstatus(priv, wmi_event);
			break;
		default:
			break;
		}

		kfree_skb(skb);
	} while (1);
}

void ath9k_fatal_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
						   fatal_work);
	struct ath_common *common = ath9k_hw_common(priv->ah);

	ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
	ath9k_htc_reset(priv);
}

static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
		memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);

	complete(&wmi->cmd_wait);
}

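/*
 * RX callback for the WMI control endpoint. Asynchronous events
 * (command IDs with bit 12 set) are queued for the event tasklet;
 * everything else is treated as the response to the currently
 * outstanding command and is matched against the expected sequence
 * number before the waiter is woken.
 */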
static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid)
{
	struct wmi *wmi = priv;
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;
	u16 cmd_id;

	if (unlikely(wmi->stopped))
		goto free_skb;

	/* Validate the obtained SKB. */
	if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr)))
		goto free_skb;

	hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_id = be16_to_cpu(hdr->command_id);

	if (cmd_id & 0x1000) {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		__skb_queue_tail(&wmi->wmi_event_queue, skb);
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		tasklet_schedule(&wmi->wmi_event_tasklet);
		return;
	}

	/* Check if there has been a timeout. */
	spin_lock_irqsave(&wmi->wmi_lock, flags);
	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		goto free_skb;
	}

	/* WMI command response */
	ath9k_wmi_rsp_callback(wmi, skb);
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

free_skb:
	kfree_skb(skb);
}

static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid, bool txok)
{
	kfree_skb(skb);
}

int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
		      enum htc_endpoint_id *wmi_ctrl_epid)
{
	struct htc_service_connreq connect;
	int ret;

	wmi->htc = htc;

	memset(&connect, 0, sizeof(connect));

	connect.ep_callbacks.priv = wmi;
	connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
	connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
	connect.service_id = WMI_CONTROL_SVC;

	ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
	if (ret)
		return ret;

	*wmi_ctrl_epid = wmi->ctrl_epid;

	return 0;
}

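/*
 * Prepend the WMI command header, record where the response should be
 * copied and which sequence number to expect, then hand the frame to
 * HTC on the control endpoint.
 */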
static int ath9k_wmi_cmd_issue(struct wmi *wmi,
			       struct sk_buff *skb,
			       enum wmi_cmd_id cmd, u16 len,
			       u8 *rsp_buf, u32 rsp_len)
{
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;

	hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
	hdr->command_id = cpu_to_be16(cmd);
	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

	spin_lock_irqsave(&wmi->wmi_lock, flags);

	/* record the rsp buffer and length */
	wmi->cmd_rsp_buf = rsp_buf;
	wmi->cmd_rsp_len = rsp_len;

	wmi->last_seq_id = wmi->tx_seq_id;
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}

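/*
 * Send a WMI command and wait up to 'timeout' jiffies for the target's
 * response. Calls are serialized by op_mutex; on timeout the expected
 * sequence number is cleared so that a late response is dropped by
 * ath9k_wmi_ctrl_rx().
 *
 * Illustrative call (the values here are only an example, not taken
 * from this file):
 *
 *	ret = ath9k_wmi_cmd(priv->wmi, WMI_STOP_RECV_CMDID, NULL, 0,
 *			    (u8 *)&cmd_rsp, sizeof(cmd_rsp), HZ * 2);
 */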
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
		  u8 *cmd_buf, u32 cmd_len,
		  u8 *rsp_buf, u32 rsp_len,
		  u32 timeout)
{
	struct ath_hw *ah = wmi->drv_priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u16 headroom = sizeof(struct htc_frame_hdr) +
		       sizeof(struct wmi_cmd_hdr);
	unsigned long time_left, flags;
	struct sk_buff *skb;
	int ret = 0;

	if (ah->ah_flags & AH_UNPLUGGED)
		return 0;

	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, headroom);

	if (cmd_len != 0 && cmd_buf != NULL) {
		skb_put_data(skb, cmd_buf, cmd_len);
	}

	mutex_lock(&wmi->op_mutex);

	/* check if wmi stopped flag is set */
	if (unlikely(wmi->stopped)) {
		ret = -EPROTO;
		goto out;
	}

	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
	if (ret)
		goto out;

	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
	if (!time_left) {
		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
			wmi_cmd_to_name(cmd_id));
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		wmi->last_seq_id = 0;
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		mutex_unlock(&wmi->op_mutex);
		return -ETIMEDOUT;
	}

	mutex_unlock(&wmi->op_mutex);

	return 0;

out:
	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
	mutex_unlock(&wmi->op_mutex);
	kfree_skb(skb);

	return ret;
}