// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		return ret;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

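/* Map a ring to the ext irq group servicing it. Each hw_params ring mask
 * holds one bitmap per ext irq group; the ring belongs to the group whose
 * bitmap has this ring's bit set. Ring types with no group mask return
 * -ENOENT, so no MSI gets programmed for them.
 */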
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		} else if (ring_num == 3) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			return -ENOENT;
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

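/* Allocate backing memory for an SRNG (coherent DMA, or cacheable memory
 * for REO dst and tx completion rings on hardware that allows it), program
 * the MSI target and the interrupt moderation thresholds for the ring type,
 * and register the ring with HAL. On success ring->ring_id holds the HAL id.
 */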
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* follow through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, i, 0,
					   DP_TCL_DATA_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, i, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   3, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packets is enabled, 32 entries that
	 * map the hash values to the rings will be configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

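/* When the idle link descriptor list does not fit in a single allocation,
 * split it across DMA-coherent scatter buffers: walk every link descriptor
 * bank, write each descriptor's physical address into the current scatter
 * buffer, and hand the resulting scatter list to HAL via
 * ath11k_hal_setup_link_idle_list().
 */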
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	/* Round the total up to the next power of two for the ring size */
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

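/* Allocate the link descriptor banks and make them available to the
 * hardware: either populate the WBM idle scatter list (when the total
 * descriptor memory exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH) or write the
 * descriptor addresses directly into the WBM idle link ring.
 */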
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

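/* NAPI handler shared by all ext irq groups: service every DP ring whose
 * bit is set in this group's masks (tx completions, rx error, wbm error,
 * REO dst, monitor status, rxdma error, REO status and rx refill), stopping
 * early once the budget is exhausted. Returns the total work done.
 */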
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	if (ab->hw_params.ring_mask->tx[grp_id]) {
		i = __fls(ab->hw_params.ring_mask->tx[grp_id]);
		ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index, tcl uses ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

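/* Shadow register head pointer update timer: every tx enqueue bumps tx_num
 * and starts the timer if it is not already running; when the timer fires
 * and no new tx happened during the interval, the shadow head/tail pointers
 * are synced to hardware and the timer stops until the next tx.
 */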
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* when the timer fires, the handler checks whether any new tx
	 * happened. The handler updates the HP only when there were no
	 * tx operations during the timeout interval, and stops the timer.
	 * The timer will be started again when tx happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}