Lines Matching refs:gpd (GPD, the general purpose descriptor used by the MTU3 QMU queue code)
34 #define GET_GPD_HWO(gpd) (le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO) argument
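For context, GET_GPD_HWO() above tests the hardware-own (HWO) bit in the descriptor's first dword: while it is set the QMU controller owns the GPD and software must not touch it. The 16-byte descriptor all the lines below keep poking at looks roughly like this; the field names are taken from the accesses in this listing, but the exact layout and flag bit positions are a sketch of the driver's header, not quoted from it:

	struct qmu_gpd {
		__le32 dw0_info;	/* HWO/IOC/ZLP flags plus extension bits */
		__le32 next_gpd;	/* lower 32 bits of the next GPD's DMA address */
		__le32 buffer;		/* lower 32 bits of the data buffer's DMA address */
		__le32 dw3_info;	/* data length plus extension bits */
	} __packed;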
133 struct qmu_gpd *gpd) in gpd_virt_to_dma() argument
139 offset = gpd - gpd_head; in gpd_virt_to_dma()
143 return dma_base + (offset * sizeof(*gpd)); in gpd_virt_to_dma()
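Pieced together from the fragments above, gpd_virt_to_dma() turns a GPD's CPU pointer into the bus address the QMU hardware expects by indexing from the ring's base. A hedged reconstruction; the bounds check is an assumption about the elided lines:

	static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
					  struct qmu_gpd *gpd)
	{
		dma_addr_t dma_base = ring->dma;	/* bus address of ring->start */
		struct qmu_gpd *gpd_head = ring->start;
		u32 offset;

		offset = gpd - gpd_head;		/* index of this GPD within the ring */
		if (offset >= MAX_GPD_NUM)		/* assumed sanity check */
			return 0;

		return dma_base + (offset * sizeof(*gpd));
	}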
146 static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd) in gpd_ring_init() argument
148 ring->start = gpd; in gpd_ring_init()
149 ring->enqueue = gpd; in gpd_ring_init()
150 ring->dequeue = gpd; in gpd_ring_init()
151 ring->end = gpd + MAX_GPD_NUM - 1; in gpd_ring_init()
157 struct qmu_gpd *gpd = ring->start; in reset_gpd_list() local
159 if (gpd) { in reset_gpd_list()
160 gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO); in reset_gpd_list()
161 gpd_ring_init(ring, gpd); in reset_gpd_list()
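reset_gpd_list() above hands the whole queue back to software: it clears the hardware-own bit on the head GPD and rewinds the ring cursors through gpd_ring_init(), which the earlier lines show setting start, enqueue and dequeue to the first slot and end to the last of the MAX_GPD_NUM slots. A reconstruction in which only the function signature and the gpd_ring lookup are assumptions filled in around the listed lines:

	static void reset_gpd_list(struct mtu3_ep *mep)	/* parameter name assumed */
	{
		struct mtu3_gpd_ring *ring = &mep->gpd_ring;	/* assumed lookup */
		struct qmu_gpd *gpd = ring->start;

		if (gpd) {
			/* give the head GPD back to software ... */
			gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
			/* ... and rewind start/enqueue/dequeue/end to a fresh ring */
			gpd_ring_init(ring, gpd);
		}
	}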
167 struct qmu_gpd *gpd; in mtu3_gpd_ring_alloc() local
171 gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); in mtu3_gpd_ring_alloc()
172 if (gpd == NULL) in mtu3_gpd_ring_alloc()
175 gpd_ring_init(ring, gpd); in mtu3_gpd_ring_alloc()
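Each endpoint's ring is carved out of a shared DMA pool: dma_pool_zalloc() returns a coherent, zeroed block and writes its bus address into ring->dma, so every GPD starts out software-owned (HWO clear). A sketch around the listed lines; the gpd_ring lookup and the error return value are assumptions:

	int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
	{
		struct mtu3_gpd_ring *ring = &mep->gpd_ring;	/* assumed lookup */
		struct qmu_gpd *gpd;

		/* one zeroed, DMA-coherent block holding the whole GPD ring */
		gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
		if (gpd == NULL)
			return -ENOMEM;		/* assumed error return */

		gpd_ring_init(ring, gpd);
		return 0;
	}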
248 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_tx_gpd() local
254 gpd->dw0_info = 0; /* SW own it */ in mtu3_prepare_tx_gpd()
255 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); in mtu3_prepare_tx_gpd()
257 gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length)); in mtu3_prepare_tx_gpd()
263 mep->epnum, gpd, enq, &enq_dma); in mtu3_prepare_tx_gpd()
266 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); in mtu3_prepare_tx_gpd()
268 gpd->dw0_info = cpu_to_le32(ext_addr); in mtu3_prepare_tx_gpd()
272 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP); in mtu3_prepare_tx_gpd()
274 gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP); in mtu3_prepare_tx_gpd()
279 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); in mtu3_prepare_tx_gpd()
281 mreq->gpd = gpd; in mtu3_prepare_tx_gpd()
282 trace_mtu3_prepare_gpd(mep, gpd); in mtu3_prepare_tx_gpd()
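The TX prepare path fills the current enqueue slot in a deliberate order: buffer address and length first, then the link to the next (still software-owned) GPD, optionally the zero-length-packet request, and only at the very end IOC | HWO, so the controller never sees a half-built descriptor. The listing shows two ZLP variants (GPD_FLAGS_ZLP in dw0_info versus GPD_EXT_FLAG_ZLP in dw3_info), apparently selected per controller generation; the sketch keeps only the dw0 form. advance_enq_gpd(), the mreq->request back-pointer and the write barrier are assumptions, and the high-address ext_addr handling visible in the listing is elided:

	static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
	{
		struct mtu3_gpd_ring *ring = &mep->gpd_ring;
		struct qmu_gpd *gpd = ring->enqueue;
		struct usb_request *req = &mreq->request;	/* assumed back-pointer */
		struct mtu3 *mtu = mep->mtu;
		struct qmu_gpd *enq;
		dma_addr_t enq_dma;

		gpd->dw0_info = 0;	/* SW own it */
		gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
		gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

		/* link this GPD to the next slot, which stays software-owned */
		enq = advance_enq_gpd(ring);			/* assumed helper */
		enq_dma = gpd_virt_to_dma(ring, enq);
		gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));

		if (req->zero)					/* caller asked for a trailing ZLP */
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);

		/* HWO must be written last; assume an explicit barrier enforces that */
		wmb();
		gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

		mreq->gpd = gpd;
		trace_mtu3_prepare_gpd(mep, gpd);
		return 0;
	}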
291 struct qmu_gpd *gpd = ring->enqueue; in mtu3_prepare_rx_gpd() local
297 gpd->dw0_info = 0; /* SW own it */ in mtu3_prepare_rx_gpd()
298 gpd->buffer = cpu_to_le32(lower_32_bits(req->dma)); in mtu3_prepare_rx_gpd()
300 gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length)); in mtu3_prepare_rx_gpd()
306 mep->epnum, gpd, enq, &enq_dma); in mtu3_prepare_rx_gpd()
309 gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma)); in mtu3_prepare_rx_gpd()
311 gpd->dw3_info = cpu_to_le32(ext_addr); in mtu3_prepare_rx_gpd()
314 gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO); in mtu3_prepare_rx_gpd()
316 mreq->gpd = gpd; in mtu3_prepare_rx_gpd()
317 trace_mtu3_prepare_gpd(mep, gpd); in mtu3_prepare_rx_gpd()
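RX prepare mirrors the TX path but swaps the roles of the two info dwords: the allocated buffer size goes into dw0_info up front (GPD_RX_BUF_LEN), the extension-address bits land in dw3_info, and the controller later reports the actual received length back in dw3_info (read with GPD_DATA_LEN() in qmu_done_rx() below). Only the lines that differ from the TX sketch are excerpted here; the surrounding locals and the ext_addr computation are assumed to follow the same pattern:

		gpd->dw0_info = 0;	/* SW own it */
		gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
		/* advertise how much the controller may receive into this buffer */
		gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

		gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
		gpd->dw3_info = cpu_to_le32(ext_addr);	/* high address bits, per the listing */

		gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);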
486 if (!mreq || mreq->gpd != gpd_current) { in qmu_error_rx()
513 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_tx() local
524 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_tx()
526 while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) { in qmu_done_tx()
530 if (mreq == NULL || mreq->gpd != gpd) { in qmu_done_tx()
536 request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info)); in qmu_done_tx()
537 trace_mtu3_complete_gpd(mep, gpd); in qmu_done_tx()
540 gpd = advance_deq_gpd(ring); in qmu_done_tx()
553 struct qmu_gpd *gpd = ring->dequeue; in qmu_done_rx() local
563 __func__, epnum, gpd, gpd_current, ring->enqueue); in qmu_done_rx()
565 while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) { in qmu_done_rx()
569 if (mreq == NULL || mreq->gpd != gpd) { in qmu_done_rx()
575 req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info)); in qmu_done_rx()
576 trace_mtu3_complete_gpd(mep, gpd); in qmu_done_rx()
579 gpd = advance_deq_gpd(ring); in qmu_done_rx()
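Both completion handlers walk from the ring's dequeue cursor toward gpd_current, the descriptor the QMU's current-pointer register is sitting on, and stop early if they reach a GPD the hardware still owns (HWO set). Each completed GPD must match the oldest outstanding request, its dw3_info reports the bytes actually transferred, and the dequeue cursor then advances. A hedged reconstruction of the TX loop; gpd_dma_to_virt(), read_txq_cur_addr(), next_request() and mtu3_req_complete() are names assumed from context, and the RX variant differs mainly in using the OUT endpoint, the RX queue's current pointer and req->actual:

	static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
	{
		struct mtu3_ep *mep = mtu->in_eps + epnum;	/* assumed IN-endpoint lookup */
		struct mtu3_gpd_ring *ring = &mep->gpd_ring;
		struct qmu_gpd *gpd = ring->dequeue;
		struct qmu_gpd *gpd_current;
		struct usb_request *request;
		struct mtu3_request *mreq;

		/* where the hardware is now: QMU register -> DMA address -> CPU pointer */
		gpd_current = gpd_dma_to_virt(ring, read_txq_cur_addr(mtu->mac_base, epnum));

		dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
			__func__, epnum, gpd, gpd_current, ring->enqueue);

		while (gpd && gpd != gpd_current && !GET_GPD_HWO(gpd)) {
			mreq = next_request(mep);		/* oldest queued request */
			if (mreq == NULL || mreq->gpd != gpd) {
				dev_err(mtu->dev, "no correct TX req is found\n");
				break;
			}

			request = &mreq->request;
			request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
			trace_mtu3_complete_gpd(mep, gpd);
			mtu3_req_complete(mep, request, 0);	/* assumed giveback helper */

			gpd = advance_deq_gpd(ring);
		}
	}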