Lines Matching refs:dqo

Cross-reference hits for "dqo" in the gve driver's DQO receive path. Each entry shows the source line number, the matching statement, and the enclosing function.

38 buffer_id = rx->dqo.free_buf_states; in gve_alloc_buf_state()
42 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
45 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
56 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
64 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
66 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
67 rx->dqo.free_buf_states = buffer_id; in gve_free_buf_state()
80 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
97 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
107 rx->dqo.buf_states[tail].next = buffer_id; in gve_enqueue_buf_state()
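The gve_alloc_buf_state(), gve_free_buf_state(), gve_dequeue_buf_state() and gve_enqueue_buf_state() hits above outline two small structures inside rx->dqo: a free list of buffer states threaded through s16 next indices (head in free_buf_states, -1 meaning empty), and head/tail index lists used for the recycled and used queues. Below is a minimal userspace model of that pattern; the names (buf_state_t, index_list_t, alloc_state, ...) are illustrative rather than the driver's, and the empty-list handling in enqueue() is the obvious completion of what line 107 shows.

#include <assert.h>
#include <stdio.h>

typedef short s16;

/* One buffer-state slot; only the link matters for this model. */
typedef struct { s16 next; } buf_state_t;

/* A head/tail list of state indices (models recycled/used_buf_states). */
typedef struct { s16 head, tail; } index_list_t;

#define NUM_STATES 8

static buf_state_t states[NUM_STATES];
static s16 free_head = -1;              /* models rx->dqo.free_buf_states */

static s16 alloc_state(void)            /* cf. gve_alloc_buf_state() */
{
    s16 id = free_head;

    if (id == -1)                       /* pool exhausted */
        return -1;
    free_head = states[id].next;
    return id;
}

static void free_state(s16 id)          /* cf. gve_free_buf_state() */
{
    states[id].next = free_head;
    free_head = id;
}

static void enqueue(index_list_t *list, s16 id)  /* cf. gve_enqueue_buf_state() */
{
    states[id].next = -1;
    if (list->head == -1)
        list->head = id;
    else
        states[list->tail].next = id;   /* line 107: link behind the tail */
    list->tail = id;
}

static s16 dequeue(index_list_t *list)  /* cf. gve_dequeue_buf_state() */
{
    s16 id = list->head;

    if (id == -1)
        return -1;
    list->head = states[id].next;
    if (list->head == -1)
        list->tail = -1;                /* list became empty */
    return id;
}

int main(void)
{
    index_list_t recycled = { -1, -1 };
    s16 i, id;

    /* Chain every slot onto the free list, -1 terminating it, just as
     * lines 254-257 of the allocation path do.
     */
    for (i = 0; i < NUM_STATES - 1; i++)
        states[i].next = i + 1;
    states[NUM_STATES - 1].next = -1;
    free_head = 0;

    id = alloc_state();
    enqueue(&recycled, id);
    assert(dequeue(&recycled) == id);
    free_state(id);
    printf("free list head: %d\n", free_head);
    return 0;
}

Using s16 indices instead of pointers keeps the lists compact, and capping the pool at S16_MAX (line 246) lets the same index travel to the device as the 16-bit buf_id that line 364 writes into each descriptor.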
119 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
123 if (unlikely(rx->dqo.used_buf_states.head == -1)) in gve_get_recycled_buf_state()
132 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
136 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
142 if (unlikely(rx->dqo.free_buf_states == -1)) { in gve_get_recycled_buf_state()
143 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
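The gve_get_recycled_buf_state() hits lay out the reclaim order: the recycled list is tried first (line 119), the function gives up if the used list is empty (line 123), otherwise used entries are probed and re-queued while still referenced (lines 132/136), and only when the free list itself is exhausted is a used entry taken off regardless (lines 142-143). The fragment below sketches just that ordering; it reuses the illustrative helpers from the model above, still_in_flight() is an invented stand-in for the driver's reference-count check, and the real function also bounds how many used entries it probes.

typedef short s16;
typedef struct { s16 head, tail; } index_list_t;

/* Illustrative state and helpers from the previous model. */
extern index_list_t recycled, used;
extern s16 free_head;
extern s16 dequeue(index_list_t *list);
extern void enqueue(index_list_t *list, s16 id);
extern int still_in_flight(s16 id);     /* invented: page still referenced? */

static s16 get_recycled_state(void)
{
    s16 id;

    /* 1. A recycled state is immediately usable. */
    id = dequeue(&recycled);
    if (id != -1)
        return id;

    /* 2. Nothing recycled and nothing in use either: give up. */
    if (used.head == -1)
        return -1;

    /* 3. Probe the oldest used state; if it is still referenced,
     * put it back at the tail.
     */
    id = dequeue(&used);
    if (!still_in_flight(id))
        return id;
    enqueue(&used, id);

    /* 4. With the free list empty too (free_buf_states == -1 in the
     * driver), one more entry is pulled from the used list; its further
     * handling is not visible in these hits.
     */
    if (free_head == -1)
        return dequeue(&used);
    return -1;
}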
185 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
186 buffer_queue_slots = rx->dqo.bufq.mask + 1; in gve_rx_free_ring_dqo()
196 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_free_ring_dqo()
197 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_free_ring_dqo()
203 if (rx->dqo.bufq.desc_ring) { in gve_rx_free_ring_dqo()
204 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_free_ring_dqo()
205 dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, in gve_rx_free_ring_dqo()
206 rx->dqo.bufq.bus); in gve_rx_free_ring_dqo()
207 rx->dqo.bufq.desc_ring = NULL; in gve_rx_free_ring_dqo()
210 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
211 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
213 dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, in gve_rx_free_ring_dqo()
214 rx->dqo.complq.bus); in gve_rx_free_ring_dqo()
215 rx->dqo.complq.desc_ring = NULL; in gve_rx_free_ring_dqo()
218 kvfree(rx->dqo.buf_states); in gve_rx_free_ring_dqo()
219 rx->dqo.buf_states = NULL; in gve_rx_free_ring_dqo()
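The gve_rx_free_ring_dqo() hits show the teardown order: ring sizes are recovered from mask + 1 (lines 185-186), every buffer state is visited so its page can be released (lines 196-197), each descriptor ring goes back to the DMA API and its pointer is cleared (lines 203-215), and the kvcalloc()'d buf_states array is dropped with kvfree() (lines 218-219). A kernel-style sketch of the per-ring step, assuming the standard DMA API; toy_ring is an invented stand-in for rx->dqo.bufq/complq and desc_size for sizeof(desc_ring[0]).

#include <linux/dma-mapping.h>

/* Invented, trimmed-down stand-in for the ring bookkeeping the hits show. */
struct toy_ring {
    void *desc_ring;        /* CPU address of the descriptor ring */
    dma_addr_t bus;         /* matching DMA address */
    u32 mask;               /* slots - 1; rings are power-of-two sized */
};

/* Free one descriptor ring, sizing it from mask + 1 as lines 185/186 and
 * 204/211 do; called once for the buffer queue and once for the completion
 * queue, each with its own descriptor size.
 */
static void toy_free_ring(struct device *hdev, struct toy_ring *q,
                          size_t desc_size)
{
    if (!q->desc_ring)
        return;
    dma_free_coherent(hdev, desc_size * (q->mask + 1),
                      q->desc_ring, q->bus);
    q->desc_ring = NULL;
}

The buf_states pool itself is plain kernel memory, not DMA memory, which is why lines 218-219 release it with kvfree() rather than dma_free_coherent().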
240 rx->dqo.bufq.mask = buffer_queue_slots - 1; in gve_rx_alloc_ring_dqo()
241 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_alloc_ring_dqo()
242 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_alloc_ring_dqo()
246 rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4); in gve_rx_alloc_ring_dqo()
247 rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, in gve_rx_alloc_ring_dqo()
248 sizeof(rx->dqo.buf_states[0]), in gve_rx_alloc_ring_dqo()
250 if (!rx->dqo.buf_states) in gve_rx_alloc_ring_dqo()
254 for (i = 0; i < rx->dqo.num_buf_states - 1; i++) in gve_rx_alloc_ring_dqo()
255 rx->dqo.buf_states[i].next = i + 1; in gve_rx_alloc_ring_dqo()
257 rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; in gve_rx_alloc_ring_dqo()
258 rx->dqo.recycled_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
259 rx->dqo.recycled_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
260 rx->dqo.used_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
261 rx->dqo.used_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
264 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_alloc_ring_dqo()
266 rx->dqo.complq.desc_ring = in gve_rx_alloc_ring_dqo()
267 dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
268 if (!rx->dqo.complq.desc_ring) in gve_rx_alloc_ring_dqo()
272 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_alloc_ring_dqo()
273 rx->dqo.bufq.desc_ring = in gve_rx_alloc_ring_dqo()
274 dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
275 if (!rx->dqo.bufq.desc_ring) in gve_rx_alloc_ring_dqo()
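The gve_rx_alloc_ring_dqo() hits show the matching setup: power-of-two masks (lines 240-242), a pool of min_t(s16, S16_MAX, buffer_queue_slots * 4) buffer states from kvcalloc() (lines 246-250), the free list chained through .next and terminated with -1 (lines 254-257), the recycled/used lists initialised empty (lines 258-261), and coherent DMA memory for both descriptor rings (lines 264-275). A condensed kernel-style sketch of that sequence; the toy_* types are invented and trimmed down, the per-ring desc_size parameters stand in for the real descriptor types, and the local error unwinding replaces whatever cleanup path the driver actually uses.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>       /* min_t() */
#include <linux/limits.h>       /* S16_MAX */
#include <linux/mm.h>           /* kvcalloc()/kvfree() */

struct toy_buf_state { s16 next; };
struct toy_list { s16 head, tail; };
struct toy_ring {
    void *desc_ring;
    dma_addr_t bus;
    u32 mask;
    u32 num_free_slots;
};
struct toy_rx_dqo {
    struct toy_ring bufq, complq;
    struct toy_buf_state *buf_states;
    s16 num_buf_states;
    s16 free_buf_states;
    struct toy_list recycled_buf_states, used_buf_states;
};

static int toy_rx_alloc_ring_dqo(struct device *hdev, struct toy_rx_dqo *dqo,
                                 u32 buffer_queue_slots,
                                 u32 completion_queue_slots,
                                 size_t bufq_desc_size,
                                 size_t complq_desc_size)
{
    int i;

    /* Ring sizes are powers of two, so the masks are slots - 1. */
    dqo->bufq.mask = buffer_queue_slots - 1;
    dqo->complq.num_free_slots = completion_queue_slots;
    dqo->complq.mask = completion_queue_slots - 1;

    /* Four buffer states per buffer-queue slot, capped to fit in an s16. */
    dqo->num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
    dqo->buf_states = kvcalloc(dqo->num_buf_states,
                               sizeof(dqo->buf_states[0]), GFP_KERNEL);
    if (!dqo->buf_states)
        return -ENOMEM;

    /* Chain every state onto the free list; -1 terminates it. */
    for (i = 0; i < dqo->num_buf_states - 1; i++)
        dqo->buf_states[i].next = i + 1;
    dqo->buf_states[dqo->num_buf_states - 1].next = -1;
    dqo->free_buf_states = 0;   /* assumed; this assignment is not in the hits */
    dqo->recycled_buf_states.head = -1;
    dqo->recycled_buf_states.tail = -1;
    dqo->used_buf_states.head = -1;
    dqo->used_buf_states.tail = -1;

    /* Coherent DMA memory for the completion and buffer queues. */
    dqo->complq.desc_ring = dma_alloc_coherent(hdev,
            complq_desc_size * completion_queue_slots,
            &dqo->complq.bus, GFP_KERNEL);
    if (!dqo->complq.desc_ring)
        goto err_free_states;

    dqo->bufq.desc_ring = dma_alloc_coherent(hdev,
            bufq_desc_size * buffer_queue_slots,
            &dqo->bufq.bus, GFP_KERNEL);
    if (!dqo->bufq.desc_ring)
        goto err_free_complq;

    return 0;

err_free_complq:
    dma_free_coherent(hdev, complq_desc_size * completion_queue_slots,
                      dqo->complq.desc_ring, dqo->complq.bus);
err_free_states:
    kvfree(dqo->buf_states);
    return -ENOMEM;
}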
297 iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); in gve_rx_write_doorbell_dqo()
334 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_post_buffers_dqo()
335 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_post_buffers_dqo()
364 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_rx_post_buffers_dqo()
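The gve_rx_write_doorbell_dqo() and gve_rx_post_buffers_dqo() hits cover the posting side: each buffer-queue descriptor carries the array index of its buffer state as a little-endian buf_id, computed as the pointer difference against the pool base (line 364), and the updated bufq tail is made visible to the device by an iowrite32() into the doorbell BAR (line 297). A small sketch of those two steps; toy_rx_desc is an invented minimal descriptor, since the listing does not show the real layout.

#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/types.h>

/* Invented minimal descriptor: only the field the hits show. */
struct toy_rx_desc {
    __le16 buf_id;
};

struct toy_buf_state { s16 next; };

/* Record which buffer state backs this descriptor, using the same pointer
 * arithmetic as line 364: index = buf_state - pool base.
 */
static void toy_fill_desc(struct toy_rx_desc *desc,
                          struct toy_buf_state *buf_state,
                          struct toy_buf_state *buf_states)
{
    desc->buf_id = cpu_to_le16(buf_state - buf_states);
}

/* Ring the doorbell: publish the new buffer-queue tail so the NIC sees the
 * freshly posted buffers (line 297).
 */
static void toy_write_doorbell(u32 bufq_tail, u32 __iomem *db)
{
    iowrite32(bufq_tail, db);
}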
413 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
417 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
528 if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { in gve_rx_dqo()
533 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
541 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
581 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
600 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_dqo()
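The gve_try_recycle_buf() and gve_rx_dqo() hits cover the completion side: the buf_id reported by the device is bounds-checked against num_buf_states before it indexes buf_states (lines 528/533), a consumed or dropped buffer state is pushed back onto recycled_buf_states (lines 541/581/600), and gve_try_recycle_buf() sends states to either the recycled or the used list depending on whether their page can be reused yet (lines 413/417). A tiny runnable model of just the bounds check, since an id from the device must never index the pool unchecked; the names are illustrative.

#include <stdio.h>

typedef short s16;
typedef unsigned short u16;

struct toy_buf_state { s16 next; };

#define NUM_STATES 8
static struct toy_buf_state buf_states[NUM_STATES];

/* Reject out-of-range ids before touching the pool, mirroring lines 528/533;
 * a NULL return models the driver's error path.
 */
static struct toy_buf_state *lookup_buf_state(u16 buffer_id)
{
    if (buffer_id >= NUM_STATES)
        return NULL;
    return &buf_states[buffer_id];
}

int main(void)
{
    printf("id 3  -> %p\n", (void *)lookup_buf_state(3));
    printf("id 99 -> %p (rejected)\n", (void *)lookup_buf_state(99));
    return 0;
}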
669 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_poll_dqo()
712 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_poll_dqo()