Lines Matching refs:tx_cb
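For context, every reference below touches the per-transmit control block declared in qla3xxx.h. A minimal sketch of its likely layout (field types and the MAX_SKB_FRAGS bound are per mainline and may differ by kernel version):

	/* Per-transmit bookkeeping: one entry per request-queue slot.
	 * map[] stashes DMA address/length pairs for later unmapping. */
	struct map_list {
		DEFINE_DMA_UNMAP_ADDR(mapaddr);
		DEFINE_DMA_UNMAP_LEN(maplen);
	};

	struct ql_tx_buf_cb {
		struct sk_buff *skb;                 /* owned until TX completion */
		struct ob_mac_iocb_req *queue_entry; /* paired request-queue IOCB */
		u16 seg_count;                       /* mapped segments, incl. skb->data */
		struct oal *oal;                     /* outbound address list overflow */
		struct map_list map[MAX_SKB_FRAGS + 1];
	};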

1919 struct ql_tx_buf_cb *tx_cb; in ql_process_mac_tx_intr() local
1927 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; in ql_process_mac_tx_intr()
1938 if (tx_cb->seg_count == 0) { in ql_process_mac_tx_intr()
1947 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_process_mac_tx_intr()
1948 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE); in ql_process_mac_tx_intr()
1949 tx_cb->seg_count--; in ql_process_mac_tx_intr()
1950 if (tx_cb->seg_count) { in ql_process_mac_tx_intr()
1951 for (i = 1; i < tx_cb->seg_count; i++) { in ql_process_mac_tx_intr()
1953 dma_unmap_addr(&tx_cb->map[i], mapaddr), in ql_process_mac_tx_intr()
1954 dma_unmap_len(&tx_cb->map[i], maplen), in ql_process_mac_tx_intr()
1959 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; in ql_process_mac_tx_intr()
1962 dev_kfree_skb_irq(tx_cb->skb); in ql_process_mac_tx_intr()
1963 tx_cb->skb = NULL; in ql_process_mac_tx_intr()
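Lines 1947-1963 are the TX-completion unwind in ql_process_mac_tx_intr(): unmap the linear buffer, then the page fragments, bump the stats, and release the skb from interrupt context. Stitched together from the matches above (mainline sketch; the short-frame and seg_count error checks around 1938 are omitted):

	dma_unmap_single(&qdev->pdev->dev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_cb->map[i], mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       DMA_TO_DEVICE);
		}
	}
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
	dev_kfree_skb_irq(tx_cb->skb);	/* completion runs from the ISR */
	tx_cb->skb = NULL;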
2302 struct ql_tx_buf_cb *tx_cb, in ql_send_map() argument
2314 seg_cnt = tx_cb->seg_count; in ql_send_map()
2332 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2333 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); in ql_send_map()
2341 oal = tx_cb->oal; in ql_send_map()
2372 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2373 dma_unmap_len_set(&tx_cb->map[seg], maplen, in ql_send_map()
2394 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); in ql_send_map()
2395 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); in ql_send_map()
2409 oal = tx_cb->oal; in ql_send_map()
2423 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2424 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2431 dma_unmap_addr(&tx_cb->map[seg], mapaddr), in ql_send_map()
2432 dma_unmap_len(&tx_cb->map[seg], maplen), in ql_send_map()
2437 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_send_map()
2438 dma_unmap_len(&tx_cb->map[0], maplen), in ql_send_map()
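The ql_send_map() matches at 2332-2395 all follow one idiom: map a segment, program the IOCB/OAL entry, then stash the bus address and length in tx_cb->map[] so the completion path (1947-1954) and the error unwind at 2423-2438 can unmap with identical parameters. A hedged sketch of one such site ('len', 'seg', and 'oal_entry' come from the surrounding function):

	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, map))
		goto map_error;		/* unwind unmaps the completed segments */
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);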
2462 struct ql_tx_buf_cb *tx_cb; in ql3xxx_send() local
2469 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; in ql3xxx_send()
2470 tx_cb->seg_count = ql_get_seg_count(qdev, in ql3xxx_send()
2472 if (tx_cb->seg_count == -1) { in ql3xxx_send()
2478 mac_iocb_ptr = tx_cb->queue_entry; in ql3xxx_send()
2485 tx_cb->skb = skb; in ql3xxx_send()
2490 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { in ql3xxx_send()
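Lines 2462-2490 are the send side: ql3xxx_send() claims the tx_cb at the producer index, computes the segment count, attaches the skb, and defers the DMA work to ql_send_map(). Condensed sketch (checksum setup, IOCB header fill, and producer-index advance omitted):

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	tx_cb->seg_count = ql_get_seg_count(qdev, skb_shinfo(skb)->nr_frags);
	if (tx_cb->seg_count == -1) {
		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	mac_iocb_ptr = tx_cb->queue_entry;	/* reuse the paired IOCB slot */
	tx_cb->skb = skb;			/* held until completion or reset */
	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK)
		return NETDEV_TX_BUSY;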
2798 struct ql_tx_buf_cb *tx_cb; in ql_free_send_free_list() local
2801 tx_cb = &qdev->tx_buf[0]; in ql_free_send_free_list()
2803 kfree(tx_cb->oal); in ql_free_send_free_list()
2804 tx_cb->oal = NULL; in ql_free_send_free_list()
2805 tx_cb++; in ql_free_send_free_list()
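Teardown at 2798-2805 only has to free the OAL allocations; skbs are reclaimed by the completion and reset paths. The enclosing function most likely reads (loop bound NUM_REQ_Q_ENTRIES assumed from the allocation side):

	static void ql_free_send_free_list(struct ql3_adapter *qdev)
	{
		struct ql_tx_buf_cb *tx_cb = &qdev->tx_buf[0];
		int i;

		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			kfree(tx_cb->oal);	/* kfree(NULL) is safe */
			tx_cb->oal = NULL;
			tx_cb++;
		}
	}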
2811 struct ql_tx_buf_cb *tx_cb; in ql_create_send_free_list() local
2818 tx_cb = &qdev->tx_buf[i]; in ql_create_send_free_list()
2819 tx_cb->skb = NULL; in ql_create_send_free_list()
2820 tx_cb->queue_entry = req_q_curr; in ql_create_send_free_list()
2822 tx_cb->oal = kmalloc(512, GFP_KERNEL); in ql_create_send_free_list()
2823 if (tx_cb->oal == NULL) in ql_create_send_free_list()
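Setup at 2811-2823 pairs each tx_buf entry with a request-queue IOCB and preallocates a 512-byte OAL for fragment overflow. Probable shape (error value and req_q_curr advance are assumptions):

	static int ql_create_send_free_list(struct ql3_adapter *qdev)
	{
		struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
		struct ql_tx_buf_cb *tx_cb;
		int i;

		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			tx_cb = &qdev->tx_buf[i];
			tx_cb->skb = NULL;
			tx_cb->queue_entry = req_q_curr++;
			tx_cb->oal = kmalloc(512, GFP_KERNEL);
			if (!tx_cb->oal)
				return -ENOMEM;	/* partial allocs freed by ql_free_send_free_list() */
		}
		return 0;
	}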
3612 struct ql_tx_buf_cb *tx_cb; in ql_reset_work() local
3627 tx_cb = &qdev->tx_buf[i]; in ql_reset_work()
3628 if (tx_cb->skb) { in ql_reset_work()
3632 dma_unmap_addr(&tx_cb->map[0], mapaddr), in ql_reset_work()
3633 dma_unmap_len(&tx_cb->map[0], maplen), in ql_reset_work()
3635 for (j = 1; j < tx_cb->seg_count; j++) { in ql_reset_work()
3637 dma_unmap_addr(&tx_cb->map[j], mapaddr), in ql_reset_work()
3638 dma_unmap_len(&tx_cb->map[j], maplen), in ql_reset_work()
3641 dev_kfree_skb(tx_cb->skb); in ql_reset_work()
3642 tx_cb->skb = NULL; in ql_reset_work()
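Finally, 3612-3642 in ql_reset_work() scavenge any skb still attached to a tx_cb when the adapter is reset: the same unmap sequence as the completion path, but using dev_kfree_skb() since this runs in process context. Sketch assembled from the matches (loop bound assumed):

	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		int j;

		tx_cb = &qdev->tx_buf[i];
		if (!tx_cb->skb)
			continue;
		dma_unmap_single(&qdev->pdev->dev,
				 dma_unmap_addr(&tx_cb->map[0], mapaddr),
				 dma_unmap_len(&tx_cb->map[0], maplen),
				 DMA_TO_DEVICE);
		for (j = 1; j < tx_cb->seg_count; j++)
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_cb->map[j], mapaddr),
				       dma_unmap_len(&tx_cb->map[j], maplen),
				       DMA_TO_DEVICE);
		dev_kfree_skb(tx_cb->skb);
		tx_cb->skb = NULL;
	}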