Lines Matching refs:pkt

254 static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,  in qib_user_sdma_init_frag()  argument
261 pkt->addr[i].offset = offset; in qib_user_sdma_init_frag()
262 pkt->addr[i].length = len; in qib_user_sdma_init_frag()
263 pkt->addr[i].first_desc = first_desc; in qib_user_sdma_init_frag()
264 pkt->addr[i].last_desc = last_desc; in qib_user_sdma_init_frag()
265 pkt->addr[i].put_page = put_page; in qib_user_sdma_init_frag()
266 pkt->addr[i].dma_mapped = dma_mapped; in qib_user_sdma_init_frag()
267 pkt->addr[i].page = page; in qib_user_sdma_init_frag()
268 pkt->addr[i].kvaddr = kvaddr; in qib_user_sdma_init_frag()
269 pkt->addr[i].addr = dma_addr; in qib_user_sdma_init_frag()
270 pkt->addr[i].dma_length = dma_length; in qib_user_sdma_init_frag()
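
The matches above show qib_user_sdma_init_frag() filling one entry of the packet's pkt->addr[] descriptor array. A minimal userspace sketch of that per-fragment record follows; the field names mirror the assignments at lines 261-270, but the types and field meanings are assumptions, not the driver's actual layout.

/*
 * Sketch only: field names follow the pkt->addr[i] assignments above;
 * types and comments are inferred, not taken from the driver.
 */
#include <stddef.h>
#include <stdint.h>

struct sdma_frag {
        int      offset;        /* offset of this fragment's data        */
        int      length;        /* payload bytes carried by the fragment */
        int      first_desc;    /* fragment opens a descriptor sequence  */
        int      last_desc;     /* fragment closes a descriptor sequence */
        int      put_page;      /* page was pinned and must be released  */
        int      dma_mapped;    /* addr holds a live DMA mapping         */
        void    *page;          /* struct page * in the driver           */
        void    *kvaddr;        /* kernel mapping of the buffer, if any  */
        uint64_t addr;          /* dma_addr_t in the driver              */
        size_t   dma_length;    /* length of the DMA mapping             */
};

static void sdma_init_frag(struct sdma_frag *f, int offset, int len,
                           int first_desc, int last_desc,
                           int put_page, int dma_mapped,
                           void *page, void *kvaddr,
                           uint64_t dma_addr, size_t dma_length)
{
        f->offset     = offset;
        f->length     = len;
        f->first_desc = first_desc;
        f->last_desc  = last_desc;
        f->put_page   = put_page;
        f->dma_mapped = dma_mapped;
        f->page       = page;
        f->kvaddr     = kvaddr;
        f->addr       = dma_addr;
        f->dma_length = dma_length;
}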
297 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_page_to_frags() argument
339 if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
340 newlen = pkt->tidsm[pkt->tidsmidx].length; in qib_user_sdma_page_to_frags()
352 if ((pkt->payload_size + newlen) >= pkt->frag_size) { in qib_user_sdma_page_to_frags()
353 newlen = pkt->frag_size - pkt->payload_size; in qib_user_sdma_page_to_frags()
355 } else if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
356 if (newlen == pkt->tidsm[pkt->tidsmidx].length) in qib_user_sdma_page_to_frags()
359 if (newlen == pkt->bytes_togo) in qib_user_sdma_page_to_frags()
364 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
370 pkt->bytes_togo -= newlen; in qib_user_sdma_page_to_frags()
371 pkt->payload_size += newlen; in qib_user_sdma_page_to_frags()
372 pkt->naddr++; in qib_user_sdma_page_to_frags()
373 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
379 if (pkt->bytes_togo == 0) { in qib_user_sdma_page_to_frags()
382 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
383 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
385 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
386 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
389 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
393 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
400 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
401 pkt->tidsm[pkt->tidsmidx].length -= newlen; in qib_user_sdma_page_to_frags()
402 if (pkt->tidsm[pkt->tidsmidx].length) { in qib_user_sdma_page_to_frags()
403 pkt->tidsm[pkt->tidsmidx].offset += newlen; in qib_user_sdma_page_to_frags()
405 pkt->tidsmidx++; in qib_user_sdma_page_to_frags()
406 if (pkt->tidsmidx == pkt->tidsmcount) { in qib_user_sdma_page_to_frags()
432 pbclen = pkt->addr[pkt->index].length; in qib_user_sdma_page_to_frags()
439 pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr; in qib_user_sdma_page_to_frags()
446 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2)); in qib_user_sdma_page_to_frags()
451 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
473 if (!pkt->addr[pkt->index].addr) { in qib_user_sdma_page_to_frags()
474 pkt->addr[pkt->index].addr = in qib_user_sdma_page_to_frags()
476 pkt->addr[pkt->index].kvaddr, in qib_user_sdma_page_to_frags()
477 pkt->addr[pkt->index].dma_length, in qib_user_sdma_page_to_frags()
480 pkt->addr[pkt->index].addr)) { in qib_user_sdma_page_to_frags()
484 pkt->addr[pkt->index].dma_mapped = 1; in qib_user_sdma_page_to_frags()
492 pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2)); in qib_user_sdma_page_to_frags()
497 if (pkt->tiddma) { in qib_user_sdma_page_to_frags()
501 (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) + in qib_user_sdma_page_to_frags()
502 (pkt->tidsm[pkt->tidsmidx].offset>>2)); in qib_user_sdma_page_to_frags()
505 hdr->uwords[2] += pkt->payload_size; in qib_user_sdma_page_to_frags()
517 if (pkt->tiddma) in qib_user_sdma_page_to_frags()
520 seqnum.pkt++; in qib_user_sdma_page_to_frags()
524 qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */ in qib_user_sdma_page_to_frags()
530 pkt->index = pkt->naddr; in qib_user_sdma_page_to_frags()
531 pkt->payload_size = 0; in qib_user_sdma_page_to_frags()
532 pkt->naddr++; in qib_user_sdma_page_to_frags()
533 if (pkt->naddr == pkt->addrlimit) { in qib_user_sdma_page_to_frags()
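
qib_user_sdma_page_to_frags() decides how much of a user page can go into the current fragment before the header must be fixed up and a new fragment started. A simplified sketch of the length clamping visible at lines 339-359 is below; the frag_done/tidseg_done flag names are illustrative assumptions, not the driver's variables.

/*
 * Illustrative only: mirrors the three limits applied at lines 339-359.
 * newlen is capped by the current TID segment, by the space left in the
 * fragment, and by the bytes still owed to the packet.
 */
#include <stddef.h>

static size_t clamp_frag_len(size_t len, int tiddma, size_t tidseg_len,
                             size_t payload_size, size_t frag_size,
                             size_t bytes_togo,
                             int *frag_done, int *tidseg_done)
{
        size_t newlen = len;

        *frag_done = 0;
        *tidseg_done = 0;

        /* never cross the current TID segment (lines 339-340) */
        if (tiddma && newlen > tidseg_len)
                newlen = tidseg_len;

        if (payload_size + newlen >= frag_size) {
                /* fragment is full (lines 352-353) */
                newlen = frag_size - payload_size;
                *frag_done = 1;
        } else if (tiddma) {
                /* TID segment consumed exactly (line 356) */
                if (newlen == tidseg_len)
                        *tidseg_done = 1;
        } else {
                /* last bytes of the packet (line 359) */
                if (newlen == bytes_togo)
                        *frag_done = 1;
        }

        return newlen;
}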
559 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_coalesce() argument
591 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_coalesce()
617 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_free_pkt_frag() argument
622 if (pkt->addr[i].page) { in qib_user_sdma_free_pkt_frag()
624 if (pkt->addr[i].dma_mapped) in qib_user_sdma_free_pkt_frag()
626 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
627 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
630 if (pkt->addr[i].kvaddr) in qib_user_sdma_free_pkt_frag()
631 kunmap(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
633 if (pkt->addr[i].put_page) in qib_user_sdma_free_pkt_frag()
634 unpin_user_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
636 __free_page(pkt->addr[i].page); in qib_user_sdma_free_pkt_frag()
637 } else if (pkt->addr[i].kvaddr) { in qib_user_sdma_free_pkt_frag()
639 if (pkt->addr[i].dma_mapped) { in qib_user_sdma_free_pkt_frag()
642 pkt->addr[i].addr, in qib_user_sdma_free_pkt_frag()
643 pkt->addr[i].dma_length, in qib_user_sdma_free_pkt_frag()
645 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
646 } else if (pkt->addr[i].addr) { in qib_user_sdma_free_pkt_frag()
649 pkt->addr[i].kvaddr, pkt->addr[i].addr); in qib_user_sdma_free_pkt_frag()
652 kfree(pkt->addr[i].kvaddr); in qib_user_sdma_free_pkt_frag()
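
qib_user_sdma_free_pkt_frag() tears one fragment down roughly in the reverse order it was built: unmap the DMA address if one is live, drop the kernel mapping, then release the page by unpinning or freeing it; fragments backed only by a kernel buffer take a kfree or pool-free path instead. The sketch below reuses struct sdma_frag from above and replaces the kernel APIs (dma_unmap_page, kunmap, unpin_user_page, __free_page, dma_pool_free/dma_free_coherent, kfree) with empty stand-ins; only the branching is meant to match the listing at lines 622-652.

/* Stand-ins for kernel helpers; only the decision tree below matters. */
static void frag_dma_unmap(struct sdma_frag *f)  { /* dma_unmap_*()     */ }
static void frag_kunmap(struct sdma_frag *f)     { /* kunmap()          */ }
static void frag_unpin(struct sdma_frag *f)      { /* unpin_user_page() */ }
static void frag_free_page(struct sdma_frag *f)  { /* __free_page()     */ }
static void frag_kfree(struct sdma_frag *f)      { /* kfree()           */ }
static void frag_dma_free(struct sdma_frag *f)   { /* pool/coherent free (assumed) */ }

static void free_pkt_frag(struct sdma_frag *f)
{
        if (f->page) {
                /* page-backed fragment: unmap, unmap kernel view, drop page */
                if (f->dma_mapped)
                        frag_dma_unmap(f);
                if (f->kvaddr)
                        frag_kunmap(f);
                if (f->put_page)
                        frag_unpin(f);       /* pinned user page */
                else
                        frag_free_page(f);   /* driver-allocated page */
        } else if (f->kvaddr) {
                /* kernel buffer with no page behind it (header buffers) */
                if (f->dma_mapped) {
                        frag_dma_unmap(f);
                        frag_kfree(f);
                } else if (f->addr) {
                        frag_dma_free(f);    /* freed by (kvaddr, addr) pair */
                } else {
                        frag_kfree(f);
                }
        }
}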
660 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pages() argument
687 ret = qib_user_sdma_page_to_frags(dd, pq, pkt, in qib_user_sdma_pin_pages()
717 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_pin_pkt() argument
728 ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr, in qib_user_sdma_pin_pkt()
738 for (idx = 1; idx < pkt->naddr; idx++) in qib_user_sdma_pin_pkt()
739 qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx); in qib_user_sdma_pin_pkt()
744 if (pkt->addr[0].dma_mapped) { in qib_user_sdma_pin_pkt()
746 pkt->addr[0].addr, in qib_user_sdma_pin_pkt()
747 pkt->addr[0].dma_length, in qib_user_sdma_pin_pkt()
749 pkt->addr[0].addr = 0; in qib_user_sdma_pin_pkt()
750 pkt->addr[0].dma_mapped = 0; in qib_user_sdma_pin_pkt()
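
On a pinning failure, qib_user_sdma_pin_pkt() unwinds what it built: every fragment after the header (index 1 onward) is freed, then the header fragment's DMA mapping at index 0 is undone and its bookkeeping cleared. A compact sketch of that unwind order, reusing the helpers above (names are placeholders):

/* Error-path sketch for lines 738-750. */
static void unwind_pin_failure(struct sdma_frag *frags, int naddr)
{
        int idx;

        for (idx = 1; idx < naddr; idx++)
                free_pkt_frag(&frags[idx]);

        if (frags[0].dma_mapped) {
                frag_dma_unmap(&frags[0]);   /* undo the header DMA mapping */
                frags[0].addr = 0;
                frags[0].dma_mapped = 0;
        }
}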
759 struct qib_user_sdma_pkt *pkt, in qib_user_sdma_init_payload() argument
765 if (pkt->frag_size == pkt->bytes_togo && in qib_user_sdma_init_payload()
766 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
767 ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
769 ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov); in qib_user_sdma_init_payload()
779 struct qib_user_sdma_pkt *pkt, *pkt_next; in qib_user_sdma_free_pkt_list() local
781 list_for_each_entry_safe(pkt, pkt_next, list, list) { in qib_user_sdma_free_pkt_list()
784 for (i = 0; i < pkt->naddr; i++) in qib_user_sdma_free_pkt_list()
785 qib_user_sdma_free_pkt_frag(dev, pq, pkt, i); in qib_user_sdma_free_pkt_list()
787 if (pkt->largepkt) in qib_user_sdma_free_pkt_list()
788 kfree(pkt); in qib_user_sdma_free_pkt_list()
790 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_free_pkt_list()
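
qib_user_sdma_free_pkt_list() walks the list, releases every fragment of each packet, and then returns the packet itself the same way it was allocated: kfree() for oversized (largepkt) packets, kmem_cache_free() back to pq->pkt_slab otherwise. A minimal sketch with a stand-in packet type; in the driver, addr is a small fixed-size array and large allocations simply extend past it.

#include <stdlib.h>

struct qib_pkt {
        int naddr;                   /* number of valid addr[] entries   */
        int largepkt;                /* allocated oversize with kmalloc  */
        struct sdma_frag addr[];     /* fixed-size array in the driver   */
};

static void free_pkt(struct qib_pkt *pkt)
{
        int i;

        for (i = 0; i < pkt->naddr; i++)
                free_pkt_frag(&pkt->addr[i]);

        /*
         * The driver distinguishes kfree() (largepkt) from
         * kmem_cache_free(pq->pkt_slab, pkt); plain free() stands in here.
         */
        free(pkt);
}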
815 struct qib_user_sdma_pkt *pkt = NULL; in qib_user_sdma_queue_pkts() local
889 bytes_togo > type_max(typeof(pkt->bytes_togo))) { in qib_user_sdma_queue_pkts()
914 pktsize = struct_size(pkt, addr, n); in qib_user_sdma_queue_pkts()
933 pkt = kmalloc(sz, GFP_KERNEL); in qib_user_sdma_queue_pkts()
934 if (!pkt) { in qib_user_sdma_queue_pkts()
938 pkt->largepkt = 1; in qib_user_sdma_queue_pkts()
939 pkt->frag_size = frag_size; in qib_user_sdma_queue_pkts()
940 if (check_add_overflow(n, ARRAY_SIZE(pkt->addr), in qib_user_sdma_queue_pkts()
942 addrlimit > type_max(typeof(pkt->addrlimit))) { in qib_user_sdma_queue_pkts()
946 pkt->addrlimit = addrlimit; in qib_user_sdma_queue_pkts()
949 char *tidsm = (char *)pkt + pktsize; in qib_user_sdma_queue_pkts()
957 pkt->tidsm = in qib_user_sdma_queue_pkts()
959 pkt->tidsmcount = tidsmsize/ in qib_user_sdma_queue_pkts()
961 pkt->tidsmidx = 0; in qib_user_sdma_queue_pkts()
972 pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL); in qib_user_sdma_queue_pkts()
973 if (!pkt) { in qib_user_sdma_queue_pkts()
977 pkt->largepkt = 0; in qib_user_sdma_queue_pkts()
978 pkt->frag_size = bytes_togo; in qib_user_sdma_queue_pkts()
979 pkt->addrlimit = ARRAY_SIZE(pkt->addr); in qib_user_sdma_queue_pkts()
981 pkt->bytes_togo = bytes_togo; in qib_user_sdma_queue_pkts()
982 pkt->payload_size = 0; in qib_user_sdma_queue_pkts()
983 pkt->counter = counter; in qib_user_sdma_queue_pkts()
984 pkt->tiddma = tiddma; in qib_user_sdma_queue_pkts()
987 qib_user_sdma_init_frag(pkt, 0, /* index */ in qib_user_sdma_queue_pkts()
993 pkt->index = 0; in qib_user_sdma_queue_pkts()
994 pkt->naddr = 1; in qib_user_sdma_queue_pkts()
997 ret = qib_user_sdma_init_payload(dd, pq, pkt, in qib_user_sdma_queue_pkts()
1005 pkt->addr[0].last_desc = 1; in qib_user_sdma_queue_pkts()
1019 pkt->addr[0].addr = dma_addr; in qib_user_sdma_queue_pkts()
1020 pkt->addr[0].dma_mapped = 1; in qib_user_sdma_queue_pkts()
1026 pkt->pq = pq; in qib_user_sdma_queue_pkts()
1027 pkt->index = 0; /* reset index for push on hw */ in qib_user_sdma_queue_pkts()
1028 *ndesc += pkt->naddr; in qib_user_sdma_queue_pkts()
1030 list_add_tail(&pkt->list, list); in qib_user_sdma_queue_pkts()
1038 if (pkt->largepkt) in qib_user_sdma_queue_pkts()
1039 kfree(pkt); in qib_user_sdma_queue_pkts()
1041 kmem_cache_free(pq->pkt_slab, pkt); in qib_user_sdma_queue_pkts()
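
On the allocation side, qib_user_sdma_queue_pkts() sizes large packets with struct_size() and guards the arithmetic with check_add_overflow() and type_max(), so bytes_togo and addrlimit cannot overflow their fields (lines 889, 914, 940-946); the TID scatter map (tidsm) is carved out of the same allocation right after the addr array (lines 949-961). A hedged userspace analogue of that overflow-checked sizing, using the compiler's __builtin_*_overflow in place of the kernel helpers:

/*
 * Sketch of the sizing at lines 889-961. struct_size() and
 * check_add_overflow() are kernel helpers; __builtin_*_overflow is a
 * close userspace stand-in. Limits and names are illustrative.
 */
#include <stdlib.h>
#include <string.h>

static struct qib_pkt *alloc_large_pkt(size_t nfrags, size_t tidsmsize)
{
        size_t pktsize, total;
        struct qib_pkt *pkt;

        /* struct_size(pkt, addr, nfrags): header plus nfrags descriptors */
        if (__builtin_mul_overflow(nfrags, sizeof(struct sdma_frag), &pktsize) ||
            __builtin_add_overflow(pktsize, sizeof(struct qib_pkt), &pktsize))
                return NULL;

        /* the TID scatter map follows the addr array in the same buffer */
        if (__builtin_add_overflow(pktsize, tidsmsize, &total))
                return NULL;

        pkt = malloc(total);                 /* kmalloc() in the driver */
        if (!pkt)
                return NULL;

        memset(pkt, 0, total);
        pkt->largepkt = 1;                   /* freed with kfree() later */
        return pkt;
}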
1065 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_clean() local
1081 list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) { in qib_user_sdma_queue_clean()
1082 s64 descd = ppd->sdma_descq_removed - pkt->added; in qib_user_sdma_queue_clean()
1087 list_move_tail(&pkt->list, &free_list); in qib_user_sdma_queue_clean()
1098 pkt = list_entry(free_list.prev, in qib_user_sdma_queue_clean()
1100 counter = pkt->counter; in qib_user_sdma_queue_clean()
1161 struct qib_user_sdma_pkt *pkt; in qib_user_sdma_queue_drain() local
1171 list_for_each_entry_safe(pkt, pkt_prev, in qib_user_sdma_queue_drain()
1173 if (pkt->pq == pq) { in qib_user_sdma_queue_drain()
1174 list_move_tail(&pkt->list, &pq->sent); in qib_user_sdma_queue_drain()
1222 struct qib_user_sdma_pkt *pkt, int idx, in qib_user_sdma_send_frag() argument
1225 const u64 addr = (u64) pkt->addr[idx].addr + in qib_user_sdma_send_frag()
1226 (u64) pkt->addr[idx].offset; in qib_user_sdma_send_frag()
1227 const u64 dwlen = (u64) pkt->addr[idx].length / 4; in qib_user_sdma_send_frag()
1234 if (pkt->addr[idx].first_desc) in qib_user_sdma_send_frag()
1236 if (pkt->addr[idx].last_desc) { in qib_user_sdma_send_frag()
1265 struct qib_user_sdma_pkt *pkt = in qib_user_sdma_send_desc() local
1272 for (i = pkt->index; i < pkt->naddr && nfree; i++) { in qib_user_sdma_send_desc()
1273 qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen); in qib_user_sdma_send_desc()
1274 ofs += pkt->addr[i].length >> 2; in qib_user_sdma_send_desc()
1284 if (pkt->addr[i].last_desc == 0) in qib_user_sdma_send_desc()
1294 for (j = pkt->index; j <= i; j++) { in qib_user_sdma_send_desc()
1301 c += i + 1 - pkt->index; in qib_user_sdma_send_desc()
1302 pkt->index = i + 1; /* index for next first */ in qib_user_sdma_send_desc()
1310 if (pkt->index == pkt->naddr) { in qib_user_sdma_send_desc()
1311 pkt->added = ppd->sdma_descq_added; in qib_user_sdma_send_desc()
1312 pkt->pq->added = pkt->added; in qib_user_sdma_send_desc()
1313 pkt->pq->num_pending--; in qib_user_sdma_send_desc()
1314 spin_lock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
1315 pkt->pq->num_sending++; in qib_user_sdma_send_desc()
1316 list_move_tail(&pkt->list, &pkt->pq->sent); in qib_user_sdma_send_desc()
1317 spin_unlock(&pkt->pq->sent_lock); in qib_user_sdma_send_desc()
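
The tail of qib_user_sdma_send_desc() does the completion bookkeeping: once every fragment of a packet has been pushed (pkt->index == pkt->naddr), the packet records the descriptor-queue position it was added at, the queue's pending count drops, and the packet moves to pq->sent under sent_lock; qib_user_sdma_queue_clean() later compares that position with sdma_descq_removed to decide when the packet is finished (line 1082). A loose sketch of that accounting, with a pthread mutex standing in for the spinlock and plain counters standing in for the list moves:

/* Sketch of lines 1310-1317 and 1081-1087; names and types assumed. */
#include <pthread.h>
#include <stdint.h>

struct pkt_state {
        int      index, naddr;   /* next frag to push / total frags      */
        uint64_t added;          /* descq position after our last frag   */
};

static pthread_mutex_t sent_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_pending;     /* bumped when the packet was queued */
static unsigned int num_sending;

static void note_descriptors_pushed(struct pkt_state *pkt,
                                    uint64_t descq_added)
{
        if (pkt->index == pkt->naddr) {      /* whole packet is on the ring */
                pkt->added = descq_added;
                num_pending--;
                pthread_mutex_lock(&sent_lock);
                num_sending++;               /* list_move_tail() to pq->sent
                                                in the driver */
                pthread_mutex_unlock(&sent_lock);
        }
}

/* Clean path: done once the hardware's removed count has caught up. */
static int pkt_is_done(const struct pkt_state *pkt, uint64_t descq_removed)
{
        return (int64_t)(descq_removed - pkt->added) >= 0;
}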