Lines matching refs: q — mt76 DMA queue code
Each entry below is one source line that references the queue pointer q, prefixed with its line number in the file and followed by the function it appears in.

116 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)  in mt76_dma_sync_idx()  argument
118 Q_WRITE(dev, q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
119 Q_WRITE(dev, q, ring_size, q->ndesc); in mt76_dma_sync_idx()
120 q->head = Q_READ(dev, q, dma_idx); in mt76_dma_sync_idx()
121 q->tail = q->head; in mt76_dma_sync_idx()
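Lines 116-121 above are mt76_dma_sync_idx(): it reprograms the descriptor base and ring-size registers through Q_WRITE() and then adopts the hardware's dma_idx as both software head and tail. A minimal user-space sketch of that resynchronisation, with plain structs standing in for the real queue and register block (hypothetical types, not the kernel's):

    #include <stdio.h>

    /* hypothetical stand-ins for the mt76 ring bookkeeping and its registers */
    struct ring { unsigned int head, tail, ndesc; };
    struct hw_regs { unsigned int desc_base, ring_size, dma_idx; };

    /* model of mt76_dma_sync_idx(): program the ring registers, then adopt the
     * hardware index as both head and tail so software and DMA agree again */
    static void sync_idx(struct ring *q, struct hw_regs *regs,
                         unsigned int desc_base)
    {
        regs->desc_base = desc_base;   /* Q_WRITE(dev, q, desc_base, q->desc_dma) */
        regs->ring_size = q->ndesc;    /* Q_WRITE(dev, q, ring_size, q->ndesc)    */
        q->head = regs->dma_idx;       /* q->head = Q_READ(dev, q, dma_idx)       */
        q->tail = q->head;
    }

    int main(void)
    {
        struct ring q = { .ndesc = 256 };
        struct hw_regs regs = { .dma_idx = 17 };

        sync_idx(&q, &regs, 0x1000);
        printf("head=%u tail=%u\n", q.head, q.tail);   /* both 17 */
        return 0;
    }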
125 mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
129 if (!q || !q->ndesc) in mt76_dma_queue_reset()
133 for (i = 0; i < q->ndesc; i++) in mt76_dma_queue_reset()
134 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_queue_reset()
136 Q_WRITE(dev, q, cpu_idx, 0); in mt76_dma_queue_reset()
137 Q_WRITE(dev, q, dma_idx, 0); in mt76_dma_queue_reset()
138 mt76_dma_sync_idx(dev, q); in mt76_dma_queue_reset()
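mt76_dma_queue_reset() (125-138) bails out when the queue has no descriptors, pre-marks every descriptor as DMA-done so no stale completion can be consumed, zeroes both index registers and resynchronises via mt76_dma_sync_idx(). The same sequence as a compilable sketch; the DMA_DONE value and struct layout below are invented stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_DONE (1u << 31)   /* made-up stand-in for MT_DMA_CTL_DMA_DONE */

    struct desc { uint32_t ctrl; };
    struct hw_regs { unsigned int cpu_idx, dma_idx; };
    struct ring { struct desc *desc; unsigned int ndesc, head, tail; };

    /* model of mt76_dma_queue_reset(): mark every slot done, zero both index
     * registers, then resync head/tail from the (now zero) hardware index */
    static void queue_reset(struct ring *q, struct hw_regs *regs)
    {
        if (!q || !q->ndesc)                 /* queue was never allocated */
            return;

        for (unsigned int i = 0; i < q->ndesc; i++)
            q->desc[i].ctrl = DMA_DONE;      /* no stale completions survive */

        regs->cpu_idx = 0;                   /* Q_WRITE(dev, q, cpu_idx, 0) */
        regs->dma_idx = 0;                   /* Q_WRITE(dev, q, dma_idx, 0) */
        q->head = q->tail = regs->dma_idx;   /* mt76_dma_sync_idx(dev, q)   */
    }

    int main(void)
    {
        struct desc d[4] = { 0 };
        struct hw_regs regs = { .cpu_idx = 3, .dma_idx = 3 };
        struct ring q = { .desc = d, .ndesc = 4, .head = 3, .tail = 1 };

        queue_reset(&q, &regs);
        printf("head=%u tail=%u ctrl0=%#x\n", q.head, q.tail,
               (unsigned int)d[0].ctrl);
        return 0;
    }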
142 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
152 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
153 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
159 idx = q->head; in mt76_dma_add_buf()
160 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
162 desc = &q->desc[idx]; in mt76_dma_add_buf()
163 entry = &q->entry[idx]; in mt76_dma_add_buf()
192 q->queued++; in mt76_dma_add_buf()
195 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
196 q->entry[idx].skb = skb; in mt76_dma_add_buf()
197 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
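mt76_dma_add_buf() (142-197) writes one or two buffers into the descriptor at q->head, advances head modulo q->ndesc, bumps q->queued and records txwi/skb/wcid on the software entry for the final slot. The core index arithmetic, as a small self-contained model (simplified entry struct, no DMA mapping):

    #include <stdio.h>

    struct entry { void *skb; unsigned int wcid; };
    struct ring { struct entry *entry; unsigned int head, ndesc, queued; };

    /* model of the index arithmetic in mt76_dma_add_buf(): claim the slot at
     * head, advance head modulo the ring size, count it as queued, and record
     * the buffer on the matching software entry (callers check for free space
     * before getting here, as the later fullness checks in the listing show) */
    static int ring_add(struct ring *q, void *skb)
    {
        unsigned int idx = q->head;

        q->head = (q->head + 1) % q->ndesc;  /* wrap at the end of the ring */
        q->queued++;

        q->entry[idx].skb = skb;
        q->entry[idx].wcid = 0xffff;         /* "no station yet", as in line 197 */
        return idx;                          /* index of the slot just filled */
    }

    int main(void)
    {
        struct entry e[4] = { 0 };
        struct ring q = { .entry = e, .ndesc = 4 };
        char pkt[1] = { 0 };

        for (int i = 0; i < 5; i++)
            printf("slot %d\n", ring_add(&q, pkt));  /* 0 1 2 3 0: head wraps */
        return 0;
    }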
203 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
206 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
227 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
230 Q_WRITE(dev, q, cpu_idx, q->head); in mt76_dma_kick_queue()
234 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
239 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
242 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
246 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
248 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
249 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
250 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
257 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
258 last = Q_READ(dev, q, dma_idx); in mt76_dma_tx_cleanup()
260 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
263 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
264 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
265 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
266 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
269 if (!q->queued) in mt76_dma_tx_cleanup()
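mt76_dma_tx_cleanup() (234-269) snapshots the hardware's dma_idx into `last` and reaps entries from q->tail until it catches up, re-reading dma_idx when it does (257-258); on a flush it then resynchronises and kicks the queue under q->lock (263-266). A reduced model of the reclaim walk, with the completion callback replaced by a printf:

    #include <stdio.h>

    struct ring { unsigned int tail, ndesc, queued; };

    /* model of the reclaim loop in mt76_dma_tx_cleanup(): walk tail forward,
     * completing entries, until it catches up with the hardware's dma_idx */
    static void tx_cleanup(struct ring *q, unsigned int hw_dma_idx)
    {
        unsigned int last = hw_dma_idx;       /* last = Q_READ(dev, q, dma_idx) */

        while (q->queued > 0 && q->tail != last) {
            printf("complete slot %u\n", q->tail);  /* mt76_queue_tx_complete() */
            q->tail = (q->tail + 1) % q->ndesc;
            q->queued--;
            /* the driver re-reads dma_idx when tail catches up (lines 257-258),
             * so frames completed while the loop ran are reaped in this pass */
        }
    }

    int main(void)
    {
        struct ring q = { .tail = 2, .ndesc = 8, .queued = 3 };

        tx_cleanup(&q, 5);                    /* hardware consumed slots 2..4 */
        printf("queued=%u tail=%u\n", q.queued, q.tail);  /* 0 and 5 */
        return 0;
    }

Note the two locks visible in the listing: the walk itself runs under cleanup_lock (242/260), while the flush-path resync and kick take the ordinary queue lock.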
274 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
277 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
278 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
281 int buf_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_get_buf()
300 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
303 int idx = q->tail; in mt76_dma_dequeue()
306 if (!q->queued) in mt76_dma_dequeue()
310 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
311 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
314 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
315 q->queued--; in mt76_dma_dequeue()
317 return mt76_dma_get_buf(dev, q, idx, len, info, more); in mt76_dma_dequeue()
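mt76_dma_dequeue() (300-317) is the consumer side: it refuses to touch the tail descriptor until the hardware has set MT_DMA_CTL_DMA_DONE in its ctrl word, unless it is flushing, and only then advances q->tail and drops q->queued. A stand-alone model of that ownership check (DMA_DONE below is an invented bit value):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_DONE (1u << 31)   /* stand-in for MT_DMA_CTL_DMA_DONE */

    struct desc { uint32_t ctrl; };
    struct ring { struct desc *desc; unsigned int tail, ndesc, queued; };

    /* model of mt76_dma_dequeue(): only consume the tail slot once the DMA
     * engine has set the DONE bit, or unconditionally when flushing */
    static int dequeue(struct ring *q, bool flush)
    {
        unsigned int idx = q->tail;

        if (!q->queued)
            return -1;

        if (flush)
            q->desc[idx].ctrl |= DMA_DONE;          /* line 310 */
        else if (!(q->desc[idx].ctrl & DMA_DONE))   /* line 311 */
            return -1;                              /* hardware still owns it */

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return idx;
    }

    int main(void)
    {
        struct desc d[4] = { 0 };
        struct ring q = { .desc = d, .ndesc = 4, .queued = 2 };

        printf("%d\n", dequeue(&q, false));   /* -1: DONE not set yet */
        d[0].ctrl |= DMA_DONE;
        printf("%d\n", dequeue(&q, false));   /*  0: slot 0 consumed  */
        printf("%d\n", dequeue(&q, true));    /*  1: flush ignores DONE */
        return 0;
    }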
321 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
327 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
338 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
339 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
340 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
341 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
351 mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
402 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
409 ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info); in mt76_dma_tx_queue_skb()
415 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
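Both TX paths keep the ring from filling completely: mt76_dma_tx_queue_skb_raw() rejects a frame once q->queued + 1 >= q->ndesc - 1 (327), and mt76_dma_tx_queue_skb() applies the same bound after converting tx_info.nbuf buffers into (nbuf + 1) / 2 descriptors (402), since buffers are packed two per descriptor. A small model of the two checks (illustrative struct and numbers):

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { unsigned int queued, ndesc; };

    /* models of the two fullness checks: one descriptor per frame (line 327)
     * versus (nbuf + 1) / 2 descriptors for a multi-buffer frame (line 402) */
    static bool ring_full_one(const struct ring *q)
    {
        return q->queued + 1 >= q->ndesc - 1;
    }

    static bool ring_full_nbuf(const struct ring *q, unsigned int nbuf)
    {
        return q->queued + (nbuf + 1) / 2 >= q->ndesc - 1;
    }

    int main(void)
    {
        struct ring q = { .queued = 253, .ndesc = 256 };

        /* one more single-buffer frame still fits, a 4-buffer frame does not */
        printf("%d %d\n", ring_full_one(&q), ring_full_nbuf(&q, 4));
        return 0;
    }

Leaving at least one descriptor unused is the usual ring-buffer trick that keeps the software index from wrapping onto the hardware index, so a full ring can never be mistaken for an empty one.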
445 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_fill() argument
450 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill()
451 int offset = q->buf_offset; in mt76_dma_rx_fill()
453 if (!q->ndesc) in mt76_dma_rx_fill()
456 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
458 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill()
461 buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC); in mt76_dma_rx_fill()
474 mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); in mt76_dma_rx_fill()
479 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
481 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
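mt76_dma_rx_fill() (445-481) tops the RX ring up under the queue lock: while fewer than ndesc - 1 slots are queued it allocates a q->buf_size buffer with page_frag_alloc(), queues it at offset q->buf_offset, and finally kicks the queue. A user-space model with malloc() standing in for the page-fragment allocator and no DMA mapping:

    #include <stdio.h>
    #include <stdlib.h>

    struct ring { void **buf; unsigned int head, ndesc, queued; };

    /* model of the refill loop in mt76_dma_rx_fill(): keep adding receive
     * buffers until only one slot of the ring is left free */
    static int rx_fill(struct ring *q, size_t buf_size)
    {
        int frames = 0;

        while (q->queued < q->ndesc - 1) {
            void *buf = malloc(buf_size);     /* page_frag_alloc() in the driver */
            if (!buf)
                break;                        /* stop refilling on allocation failure */

            q->buf[q->head] = buf;            /* mt76_dma_add_buf(dev, q, &qbuf, 1, ...) */
            q->head = (q->head + 1) % q->ndesc;
            q->queued++;
            frames++;
        }

        /* the driver then kicks the queue, i.e. writes head into the cpu_idx
         * register (line 230), so the hardware sees the new buffers */
        return frames;
    }

    int main(void)
    {
        void *bufs[8] = { 0 };
        struct ring q = { .buf = bufs, .ndesc = 8 };

        printf("filled %d slots\n", rx_fill(&q, 2048));  /* 7: one slot stays free */
        for (unsigned int i = 0; i < q.ndesc; i++)
            free(bufs[i]);
        return 0;
    }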
487 mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_wed_setup() argument
492 u8 flags = q->flags; in mt76_dma_wed_setup()
495 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
497 if (!(q->flags & MT_QFLAG_WED)) in mt76_dma_wed_setup()
500 type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); in mt76_dma_wed_setup()
501 ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); in mt76_dma_wed_setup()
505 ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs); in mt76_dma_wed_setup()
507 q->wed_regs = wed->tx_ring[ring].reg_base; in mt76_dma_wed_setup()
511 q->flags = 0; in mt76_dma_wed_setup()
512 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
513 mt76_dma_rx_fill(dev, q); in mt76_dma_wed_setup()
514 q->flags = flags; in mt76_dma_wed_setup()
516 ret = mtk_wed_device_txfree_ring_setup(wed, q->regs); in mt76_dma_wed_setup()
518 q->wed_regs = wed->txfree_ring.reg_base; in mt76_dma_wed_setup()
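mt76_dma_wed_setup() (487-518) returns early for queues that are not WED-managed; otherwise it decodes the WED type and ring number from q->flags with FIELD_GET() and lets mtk_wed_device_tx_ring_setup() or mtk_wed_device_txfree_ring_setup() hand back the WED register base stored in q->wed_regs. A tiny model of that flag decoding; the mask values below are invented, and FIELD_GET() is re-implemented here rather than taken from <linux/bitfield.h>:

    #include <stdint.h>
    #include <stdio.h>

    /* invented stand-ins for the MT_QFLAG_* masks; the real values live in the
     * mt76 headers and are not part of this listing */
    #define QFLAG_WED_RING 0x0fu   /* which hardware ring */
    #define QFLAG_WED_TYPE 0x30u   /* TX, TXFREE, ... */
    #define QFLAG_WED      0x80u   /* queue is WED-managed at all */

    /* minimal FIELD_GET(): shift the masked bits down to bit zero */
    static unsigned int field_get(uint32_t mask, uint32_t val)
    {
        return (val & mask) / (mask & -mask);
    }

    int main(void)
    {
        uint32_t flags = QFLAG_WED | (1u << 4) | 3u;   /* type 1, ring 3 */

        if (flags & QFLAG_WED)
            printf("wed type %u ring %u\n",
                   field_get(QFLAG_WED_TYPE, flags),
                   field_get(QFLAG_WED_RING, flags));
        return 0;
    }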
531 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
537 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
538 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
540 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
541 q->ndesc = n_desc; in mt76_dma_alloc_queue()
542 q->buf_size = bufsize; in mt76_dma_alloc_queue()
543 q->hw_idx = idx; in mt76_dma_alloc_queue()
545 size = q->ndesc * sizeof(struct mt76_desc); in mt76_dma_alloc_queue()
546 q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
547 if (!q->desc) in mt76_dma_alloc_queue()
550 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
551 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
552 if (!q->entry) in mt76_dma_alloc_queue()
555 ret = mt76_dma_wed_setup(dev, q); in mt76_dma_alloc_queue()
559 if (q->flags != MT_WED_Q_TXFREE) in mt76_dma_alloc_queue()
560 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
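mt76_dma_alloc_queue() (531-560) initialises both locks, points q->regs at ring_base + idx * MT_RING_SIZE, and sizes its two arrays as ndesc * sizeof(struct mt76_desc) (coherent DMA memory) and ndesc * sizeof(*q->entry) (ordinary zeroed memory) before the WED setup and reset. A user-space sketch of the sizing, with calloc() in place of dmam_alloc_coherent()/devm_kzalloc(); the 16-byte descriptor layout is an assumption, not something this listing shows:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* assumed 16-byte descriptor (buf0/ctrl/buf1/info); the real struct
     * mt76_desc is defined in the mt76 headers, not in this listing */
    struct desc { uint32_t buf0, ctrl, buf1, info; };
    struct entry { void *skb; };
    struct ring { struct desc *desc; struct entry *entry; unsigned int ndesc; };

    /* model of the two allocations in mt76_dma_alloc_queue() */
    static int alloc_queue(struct ring *q, unsigned int n_desc)
    {
        q->ndesc = n_desc;

        /* dmam_alloc_coherent(dev->dma_dev, ndesc * sizeof(struct mt76_desc), ...) */
        q->desc = calloc(q->ndesc, sizeof(*q->desc));
        if (!q->desc)
            return -1;

        /* devm_kzalloc(dev->dev, ndesc * sizeof(*q->entry), GFP_KERNEL) */
        q->entry = calloc(q->ndesc, sizeof(*q->entry));
        if (!q->entry) {
            free(q->desc);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct ring q = { 0 };

        if (alloc_queue(&q, 256) == 0) {
            printf("%zu descriptor bytes, %zu entry bytes\n",
                   q.ndesc * sizeof(*q.desc), q.ndesc * sizeof(*q.entry));
            free(q.desc);
            free(q.entry);
        }
        return 0;
    }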
566 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
572 if (!q->ndesc) in mt76_dma_rx_cleanup()
575 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
577 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more); in mt76_dma_rx_cleanup()
583 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
585 if (!q->rx_page.va) in mt76_dma_rx_cleanup()
588 page = virt_to_page(q->rx_page.va); in mt76_dma_rx_cleanup()
589 __page_frag_cache_drain(page, q->rx_page.pagecnt_bias); in mt76_dma_rx_cleanup()
590 memset(&q->rx_page, 0, sizeof(q->rx_page)); in mt76_dma_rx_cleanup()
596 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
599 if (!q->ndesc) in mt76_dma_rx_reset()
602 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
603 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
605 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
606 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
607 mt76_dma_rx_fill(dev, q); in mt76_dma_rx_reset()
609 if (!q->rx_head) in mt76_dma_rx_reset()
612 dev_kfree_skb(q->rx_head); in mt76_dma_rx_reset()
613 q->rx_head = NULL; in mt76_dma_rx_reset()
617 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
620 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
626 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
628 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
636 q->rx_head = NULL; in mt76_add_fragment()
638 dev->drv->rx_skb(dev, q - dev->q_rx, skb); in mt76_add_fragment()
644 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
653 q->flags == MT_WED_Q_TXFREE) { in mt76_dma_rx_process()
654 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
662 if (q->tail == dma_idx) in mt76_dma_rx_process()
663 dma_idx = Q_READ(dev, q, dma_idx); in mt76_dma_rx_process()
665 if (q->tail == dma_idx) in mt76_dma_rx_process()
669 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more); in mt76_dma_rx_process()
673 if (q->rx_head) in mt76_dma_rx_process()
674 data_len = q->buf_size; in mt76_dma_rx_process()
676 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
678 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
679 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
680 q->rx_head = NULL; in mt76_dma_rx_process()
684 if (q->rx_head) { in mt76_dma_rx_process()
685 mt76_add_fragment(dev, q, data, len, more); in mt76_dma_rx_process()
693 skb = build_skb(data, q->buf_size); in mt76_dma_rx_process()
697 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
699 if (q == &dev->q_rx[MT_RXQ_MCU]) { in mt76_dma_rx_process()
708 q->rx_head = skb; in mt76_dma_rx_process()
712 dev->drv->rx_skb(dev, q - dev->q_rx, skb); in mt76_dma_rx_process()
719 mt76_dma_rx_fill(dev, q); in mt76_dma_rx_process()
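mt76_dma_rx_process() (644-719) is the poll loop: it dequeues completed buffers one at a time, either appending them to q->rx_head as fragments or building a fresh skb with build_skb() and skb_reserve(q->buf_offset), hands finished frames to dev->drv->rx_skb(), and refills the ring before returning. A reduced model of the budgeted consume loop, ignoring the fragment handling and the WED-TXFREE index handling at 653-663:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool *done; unsigned int tail, ndesc, queued; };

    /* model of the budgeted poll loop in mt76_dma_rx_process(): consume at
     * most `budget` completed slots, leave the rest for the next pass */
    static int rx_process(struct ring *q, int budget)
    {
        int done = 0;

        while (done < budget && q->queued) {
            unsigned int idx = q->tail;

            if (!q->done[idx])                /* DMA not finished: stop early */
                break;

            q->tail = (q->tail + 1) % q->ndesc;
            q->queued--;
            done++;                           /* dev->drv->rx_skb() would run here */
        }

        /* the driver refills the ring before returning (line 719) */
        return done;
    }

    int main(void)
    {
        bool done[8] = { true, true, true };  /* slots 0..2 completed by "DMA" */
        struct ring q = { .done = done, .ndesc = 8, .queued = 5 };

        printf("processed %d\n", rx_process(&q, 2));  /* 2: budget exhausted */
        printf("processed %d\n", rx_process(&q, 8));  /* 1: slot 3 not done yet */
        return 0;
    }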