Lines matching refs: pq

81 	struct qib_user_sdma_queue *pq;	/* which pq this pkt belongs to */  member
185 struct qib_user_sdma_queue *pq = in qib_user_sdma_queue_create() local
189 if (!pq) in qib_user_sdma_queue_create()
192 pq->counter = 0; in qib_user_sdma_queue_create()
193 pq->sent_counter = 0; in qib_user_sdma_queue_create()
194 pq->num_pending = 0; in qib_user_sdma_queue_create()
195 pq->num_sending = 0; in qib_user_sdma_queue_create()
196 pq->added = 0; in qib_user_sdma_queue_create()
197 pq->sdma_rb_node = NULL; in qib_user_sdma_queue_create()
199 INIT_LIST_HEAD(&pq->sent); in qib_user_sdma_queue_create()
200 spin_lock_init(&pq->sent_lock); in qib_user_sdma_queue_create()
201 mutex_init(&pq->lock); in qib_user_sdma_queue_create()
203 snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name), in qib_user_sdma_queue_create()
205 pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name, in qib_user_sdma_queue_create()
209 if (!pq->pkt_slab) in qib_user_sdma_queue_create()
212 snprintf(pq->header_cache_name, sizeof(pq->header_cache_name), in qib_user_sdma_queue_create()
214 pq->header_cache = dma_pool_create(pq->header_cache_name, in qib_user_sdma_queue_create()
218 if (!pq->header_cache) in qib_user_sdma_queue_create()
221 pq->dma_pages_root = RB_ROOT; in qib_user_sdma_queue_create()
238 pq->sdma_rb_node = sdma_rb_node; in qib_user_sdma_queue_create()
243 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_create()
245 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_create()
247 kfree(pq); in qib_user_sdma_queue_create()
248 pq = NULL; in qib_user_sdma_queue_create()
251 return pq; in qib_user_sdma_queue_create()
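
Read in order, the qib_user_sdma_queue_create() references above trace the standard kernel create/unwind shape: allocate and zero-init the queue state, create a per-queue named kmem_cache for packet structs and a dma_pool for DMA-coherent headers, and on failure tear down only what was already built, in reverse. A minimal reassembly of that shape, assuming the function receives a struct device *dev and a unit number; the name format strings, the pool's block size and alignment, and the shared rb-node setup around line 238 are assumptions or elided:

    struct qib_user_sdma_queue *pq = kmalloc(sizeof(*pq), GFP_KERNEL);

    if (!pq)
        goto done;

    pq->counter = 0;          /* packets submitted by the user (line 192) */
    pq->sent_counter = 0;     /* packets known complete (line 193) */
    pq->num_pending = 0;      /* handed to the hardware queue (line 194) */
    pq->num_sending = 0;      /* on pq->sent awaiting completion (line 195) */
    pq->added = 0;
    pq->sdma_rb_node = NULL;

    INIT_LIST_HEAD(&pq->sent);
    spin_lock_init(&pq->sent_lock);
    mutex_init(&pq->lock);

    snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
             "qib-user-sdma-pkts-%u", unit);          /* format: assumption */
    pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                     sizeof(struct qib_user_sdma_pkt),
                                     0, 0, NULL);
    if (!pq->pkt_slab)
        goto err_kfree;

    snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
             "qib-user-sdma-headers-%u", unit);       /* format: assumption */
    pq->header_cache = dma_pool_create(pq->header_cache_name, dev,
                                       64, 4, 0);     /* size/align: assumptions */
    if (!pq->header_cache)
        goto err_slab;

    pq->dma_pages_root = RB_ROOT;
    /* shared rb-node lookup/refcount elided (line 238); a failure there
     * would also unwind the pool, matching dma_pool_destroy at line 243 */
    return pq;

err_slab:
    kmem_cache_destroy(pq->pkt_slab);
err_kfree:
    kfree(pq);
done:
    return NULL;
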
273 static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq, in qib_user_sdma_alloc_header() argument
279 hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL, in qib_user_sdma_alloc_header()
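
Line 279 shows packet headers coming from the per-queue dma_pool. A plausible shape for the helper, where the fixed-block-size check, the pool block size constant, and the kmalloc fallback are assumptions (the free side at line 643 would then need a way to tell pool headers from kmalloc'd ones, e.g. a zero dma_addr):

    static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
                                            size_t len, dma_addr_t *dma_addr)
    {
        void *hdr;

        if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)  /* pool block size: assumption */
            hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL, dma_addr);
        else
            hdr = NULL;

        if (!hdr) {
            /* fallback for odd sizes: assumption; caller DMA-maps it later */
            hdr = kmalloc(len, GFP_KERNEL);
            if (!hdr)
                return NULL;
            *dma_addr = 0;
        }
        return hdr;
    }
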
296 struct qib_user_sdma_queue *pq, in qib_user_sdma_page_to_frags() argument
432 pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr); in qib_user_sdma_page_to_frags()
557 struct qib_user_sdma_queue *pq, in qib_user_sdma_coalesce() argument
590 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
614 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_frag() argument
643 dma_pool_free(pq->header_cache, in qib_user_sdma_free_pkt_frag()
654 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pages() argument
682 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
711 struct qib_user_sdma_queue *pq, in qib_user_sdma_pin_pkt() argument
723 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
734 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
753 struct qib_user_sdma_queue *pq, in qib_user_sdma_init_payload() argument
762 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
764 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
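
Lines 753-764 show qib_user_sdma_init_payload() dispatching between two payload strategies: coalescing the user iovecs into a driver-owned buffer, or pinning the user pages and mapping them in place. The selection condition is not visible in these references; a plausible shape, with qib_user_sdma_needs_coalesce() as a purely hypothetical predicate:

    /* choose a payload strategy; the predicate is hypothetical */
    if (qib_user_sdma_needs_coalesce(pkt, iov, niov))
        ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);   /* line 762 */
    else
        ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);    /* line 764 */
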
771 struct qib_user_sdma_queue *pq, in qib_user_sdma_free_pkt_list() argument
780 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
785 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
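
Lines 771-785 walk a packet list, release every fragment of each packet, and return the packet struct to the slab. A sketch, where pkt->naddr as the per-packet fragment count is an assumption about a field not shown in the references:

    static void qib_user_sdma_free_pkt_list(struct device *dev,
                                            struct qib_user_sdma_queue *pq,
                                            struct list_head *list)
    {
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
            int i;

            for (i = 0; i < pkt->naddr; i++)   /* naddr: assumed frag count */
                qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

            kmem_cache_free(pq->pkt_slab, pkt);  /* line 785 */
        }
        INIT_LIST_HEAD(list);
    }
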
799 struct qib_user_sdma_queue *pq, in qib_user_sdma_queue_pkts() argument
813 u32 counter = pq->counter; in qib_user_sdma_queue_pkts()
836 pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr); in qib_user_sdma_queue_pkts()
967 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
992 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
1021 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1036 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
1039 dma_pool_free(pq->header_cache, pbc, dma_addr); in qib_user_sdma_queue_pkts()
1043 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list); in qib_user_sdma_queue_pkts()
1048 static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq, in qib_user_sdma_set_complete_counter() argument
1051 pq->sent_counter = c; in qib_user_sdma_set_complete_counter()
1056 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_clean() argument
1065 if (!pq->num_sending) in qib_user_sdma_queue_clean()
1075 spin_lock_irqsave(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1076 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1086 pq->num_sending--; in qib_user_sdma_queue_clean()
1088 spin_unlock_irqrestore(&pq->sent_lock, flags); in qib_user_sdma_queue_clean()
1097 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_clean()
1098 qib_user_sdma_set_complete_counter(pq, counter); in qib_user_sdma_queue_clean()
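
Lines 1056-1098 are the completion reaper. sent_lock must be taken with the irqsave form here because qib_user_sdma_send_desc() moves packets onto pq->sent from interrupt context (lines 1307-1312); retired packets are first moved to a private list under the lock, then freed and accounted outside it. A sketch, assuming each packet carries 'added' (ring position, per line 1307) and 'counter' stamps:

    static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                         struct qib_user_sdma_queue *pq)
    {
        struct qib_user_sdma_pkt *pkt, *pkt_prev;
        struct list_head free_list;
        unsigned long flags;
        int ret = 0;

        if (!pq->num_sending)      /* line 1065: nothing to reap */
            return 0;

        INIT_LIST_HEAD(&free_list);

        spin_lock_irqsave(&pq->sent_lock, flags);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
            /* compare each packet's ring stamp to the hardware tail */
            if ((s64)(ppd->sdma_descq_removed - pkt->added) < 0)
                break;             /* hardware has not consumed this one yet */
            list_move_tail(&pkt->list, &free_list);
            pq->num_sending--;     /* line 1086 */
            ret++;
        }
        spin_unlock_irqrestore(&pq->sent_lock, flags);

        if (!list_empty(&free_list)) {
            u32 counter;

            pkt = list_last_entry(&free_list, struct qib_user_sdma_pkt, list);
            counter = pkt->counter;      /* read before the packet is freed */
            qib_user_sdma_free_pkt_list(&ppd->dd->pcidev->dev, pq, &free_list);
            qib_user_sdma_set_complete_counter(pq, counter);
        }
        return ret;
    }
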
1104 void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_destroy() argument
1106 if (!pq) in qib_user_sdma_queue_destroy()
1109 pq->sdma_rb_node->refcount--; in qib_user_sdma_queue_destroy()
1110 if (pq->sdma_rb_node->refcount == 0) { in qib_user_sdma_queue_destroy()
1111 rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root); in qib_user_sdma_queue_destroy()
1112 kfree(pq->sdma_rb_node); in qib_user_sdma_queue_destroy()
1114 dma_pool_destroy(pq->header_cache); in qib_user_sdma_queue_destroy()
1115 kmem_cache_destroy(pq->pkt_slab); in qib_user_sdma_queue_destroy()
1116 kfree(pq); in qib_user_sdma_queue_destroy()
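
The destroy path reads almost verbatim from the references at lines 1104-1116: drop the shared rb-node's refcount (erasing and freeing it at zero), then tear down in reverse creation order. Reassembled, with only the function braces and early return added:

    void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
    {
        if (!pq)
            return;

        pq->sdma_rb_node->refcount--;
        if (pq->sdma_rb_node->refcount == 0) {
            rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
            kfree(pq->sdma_rb_node);
        }
        dma_pool_destroy(pq->header_cache);
        kmem_cache_destroy(pq->pkt_slab);
        kfree(pq);
    }
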
1134 struct qib_user_sdma_queue *pq) in qib_user_sdma_queue_drain() argument
1140 if (!pq) in qib_user_sdma_queue_drain()
1144 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1145 if (!pq->num_pending && !pq->num_sending) { in qib_user_sdma_queue_drain()
1146 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1150 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_queue_drain()
1151 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
1155 if (pq->num_pending || pq->num_sending) { in qib_user_sdma_queue_drain()
1160 mutex_lock(&pq->lock); in qib_user_sdma_queue_drain()
1165 if (pq->num_pending) { in qib_user_sdma_queue_drain()
1168 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1169 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1170 pq->num_pending--; in qib_user_sdma_queue_drain()
1171 pq->num_sending++; in qib_user_sdma_queue_drain()
1179 list_splice_init(&pq->sent, &free_list); in qib_user_sdma_queue_drain()
1180 pq->num_sending = 0; in qib_user_sdma_queue_drain()
1181 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list); in qib_user_sdma_queue_drain()
1182 mutex_unlock(&pq->lock); in qib_user_sdma_queue_drain()
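
The drain path (lines 1134-1182) first polls politely, cleaning under pq->lock until both counts reach zero, and then, if packets are still stuck, force-reclaims: it pulls this queue's packets off the port's pending list onto pq->sent (the pkt->pq check is line 1168), splices the whole sent list away, and frees it. A compressed sketch; the retry count, sleep interval, and the port-side lock and list names are assumptions:

    void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq)
    {
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int tries;

        if (!pq)
            return;

        /* phase 1: poll until quiescent (bounds are assumptions) */
        for (tries = 0; tries < 100; tries++) {
            mutex_lock(&pq->lock);
            if (!pq->num_pending && !pq->num_sending) {
                mutex_unlock(&pq->lock);
                return;
            }
            qib_user_sdma_queue_clean(ppd, pq);
            mutex_unlock(&pq->lock);
            msleep(10);
        }

        /* phase 2: force-reclaim whatever is still queued against this pq */
        if (pq->num_pending || pq->num_sending) {
            struct qib_user_sdma_pkt *pkt, *pkt_prev;
            LIST_HEAD(free_list);

            mutex_lock(&pq->lock);
            spin_lock_irqsave(&ppd->sdma_lock, flags);   /* lock name: assumption */
            if (pq->num_pending) {
                /* pending list name is an assumption */
                list_for_each_entry_safe(pkt, pkt_prev,
                                         &ppd->sdma_userpending, list) {
                    if (pkt->pq == pq) {
                        list_move_tail(&pkt->list, &pq->sent);
                        pq->num_pending--;
                        pq->num_sending++;
                    }
                }
            }
            spin_unlock_irqrestore(&ppd->sdma_lock, flags);

            list_splice_init(&pq->sent, &free_list);     /* line 1179 */
            pq->num_sending = 0;
            qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
            mutex_unlock(&pq->lock);
        }
    }
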
1307 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1308 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1309 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1310 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1311 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1312 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
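
Lines 1307-1312 are the interrupt-side half of the handoff. Reassembled with comments: this runs from the SDMA interrupt path, which is why a plain spin_lock suffices here while the process-side reaper in qib_user_sdma_queue_clean() must use spin_lock_irqsave (line 1075) to avoid deadlocking against it:

    /* this packet's descriptors are now in the hardware ring */
    pkt->pq->added = pkt->added;          /* publish the newest ring position */
    pkt->pq->num_pending--;
    spin_lock(&pkt->pq->sent_lock);       /* irqs already off in this context */
    pkt->pq->num_sending++;
    list_move_tail(&pkt->list, &pkt->pq->sent);
    spin_unlock(&pkt->pq->sent_lock);
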
1330 struct qib_user_sdma_queue *pq, in qib_user_sdma_push_pkts() argument
1339 if (pq->sdma_rb_node->refcount > 1) { in qib_user_sdma_push_pkts()
1345 pq->num_pending += count; in qib_user_sdma_push_pkts()
1359 pq->num_pending += count; in qib_user_sdma_push_pkts()
1382 struct qib_user_sdma_queue *pq, in qib_user_sdma_writev() argument
1394 mutex_lock(&pq->lock); in qib_user_sdma_writev()
1401 if (pq->added > ppd->sdma_descq_removed) in qib_user_sdma_writev()
1404 if (pq->num_sending) in qib_user_sdma_writev()
1405 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1411 ret = qib_user_sdma_queue_pkts(dd, ppd, pq, in qib_user_sdma_writev()
1427 if (pq->num_sending) in qib_user_sdma_writev()
1428 qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_writev()
1431 ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp); in qib_user_sdma_writev()
1436 pq->counter += mxp; in qib_user_sdma_writev()
1443 qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list); in qib_user_sdma_writev()
1444 mutex_unlock(&pq->lock); in qib_user_sdma_writev()
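
Lines 1382-1444 trace the submission path: under pq->lock, reclaim anything the hardware has finished, then loop building packets from the user iovecs, pushing them to the ring, and ticking pq->counter; on error, any packets still on the local list are freed. A sketch of that flow, where qib_user_sdma_hwqueue_clean(), the qib_user_sdma_queue_pkts() signature, and the context-struct fields are assumptions:

    int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                             struct qib_user_sdma_queue *pq,
                             const struct iovec *iov, unsigned long dim)
    {
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        struct list_head list;
        int npkts = 0, ret = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&pq->lock);

        /* reclaim finished work before queueing more (lines 1401-1405) */
        if (pq->added > ppd->sdma_descq_removed)
            qib_user_sdma_hwqueue_clean(ppd);        /* assumed helper */
        if (pq->num_sending)
            qib_user_sdma_queue_clean(ppd, pq);

        while (dim) {
            int mxp = 1, ndesc = 0;

            ret = qib_user_sdma_queue_pkts(dd, ppd, pq, iov, dim,
                                           &list, &mxp, &ndesc);  /* signature: assumption */
            if (ret <= 0)
                break;
            iov += ret;
            dim -= ret;

            if (pq->num_sending)                     /* free completions as we go (1427) */
                qib_user_sdma_queue_clean(ppd, pq);

            ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
            if (ret < 0)
                break;
            pq->counter += mxp;                      /* inflight tick (line 1436) */
            npkts += mxp;
        }

        if (!list_empty(&list))                      /* error path: drop unbuilt pkts */
            qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);
        return ret < 0 ? ret : npkts;
    }
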
1450 struct qib_user_sdma_queue *pq) in qib_user_sdma_make_progress() argument
1454 mutex_lock(&pq->lock); in qib_user_sdma_make_progress()
1456 ret = qib_user_sdma_queue_clean(ppd, pq); in qib_user_sdma_make_progress()
1457 mutex_unlock(&pq->lock); in qib_user_sdma_make_progress()
1462 u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq) in qib_user_sdma_complete_counter() argument
1464 return pq ? pq->sent_counter : 0; in qib_user_sdma_complete_counter()
1467 u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq) in qib_user_sdma_inflight_counter() argument
1469 return pq ? pq->counter : 0; in qib_user_sdma_inflight_counter()
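
The two accessors at the end are NULL-tolerant so callers can poll before the queue exists. counter advances at submit time (line 1436) and sent_counter chases it from the completion side (line 1051), so outstanding work is their unsigned difference, which wraps safely in u32 arithmetic. A hypothetical polling helper built on them:

    /* hypothetical helper: submissions not yet completed */
    static u32 qib_user_sdma_outstanding(struct qib_user_sdma_queue *pq)
    {
        return qib_user_sdma_inflight_counter(pq) -
               qib_user_sdma_complete_counter(pq);
    }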