Lines matching refs: buf_state
35 struct gve_rx_buf_state_dqo *buf_state; in gve_alloc_buf_state() local
42 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
45 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
48 buf_state->next = buffer_id; in gve_alloc_buf_state()
50 return buf_state; in gve_alloc_buf_state()
54 struct gve_rx_buf_state_dqo *buf_state) in gve_buf_state_is_allocated() argument
56 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
58 return buf_state->next == buffer_id; in gve_buf_state_is_allocated()
62 struct gve_rx_buf_state_dqo *buf_state) in gve_free_buf_state() argument
64 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
66 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
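The lines above (35-66) collect the buf_state bookkeeping helpers from the gve driver's DQO receive path: buffer states live in a flat array, the free list is threaded through the s16 next field, and an allocated state points next at its own index, so gve_buf_state_is_allocated() is a single comparison. Below is a minimal userspace sketch of that scheme; the pool size, the init helper and the exact struct layout are assumptions made for the sketch, and only the index arithmetic mirrors the listing.

#include <stdint.h>
#include <stddef.h>

#define NUM_BUF_STATES 128              /* assumed pool size for the sketch */

struct buf_state {
        int16_t next;   /* free-list link, or the state's own index once allocated */
};

struct rx_ring {
        struct buf_state buf_states[NUM_BUF_STATES];
        int16_t free_buf_states;        /* head of the free list, -1 when empty */
};

static void init_buf_states(struct rx_ring *rx)
{
        int i;

        /* Thread every state onto the free list by index. */
        for (i = 0; i < NUM_BUF_STATES; i++)
                rx->buf_states[i].next = (int16_t)(i + 1);
        rx->buf_states[NUM_BUF_STATES - 1].next = -1;
        rx->free_buf_states = 0;
}

static struct buf_state *alloc_buf_state(struct rx_ring *rx)
{
        int16_t id = rx->free_buf_states;
        struct buf_state *bs;

        if (id == -1)                   /* pool exhausted */
                return NULL;

        bs = &rx->buf_states[id];
        rx->free_buf_states = bs->next; /* pop the head of the free list */
        bs->next = id;                  /* self-reference marks "allocated" */
        return bs;
}

static int buf_state_is_allocated(const struct rx_ring *rx,
                                  const struct buf_state *bs)
{
        int16_t id = (int16_t)(bs - rx->buf_states);

        return bs->next == id;
}

static void free_buf_state(struct rx_ring *rx, struct buf_state *bs)
{
        int16_t id = (int16_t)(bs - rx->buf_states);

        bs->next = rx->free_buf_states; /* push back onto the free list */
        rx->free_buf_states = id;
}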
73 struct gve_rx_buf_state_dqo *buf_state; in gve_dequeue_buf_state() local
80 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
83 list->head = buf_state->next; in gve_dequeue_buf_state()
84 if (buf_state->next == -1) in gve_dequeue_buf_state()
88 buf_state->next = buffer_id; in gve_dequeue_buf_state()
90 return buf_state; in gve_dequeue_buf_state()
95 struct gve_rx_buf_state_dqo *buf_state) in gve_enqueue_buf_state() argument
97 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
99 buf_state->next = -1; in gve_enqueue_buf_state()
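gve_dequeue_buf_state() and gve_enqueue_buf_state() (lines 73-99) manage FIFO lists of those same states using indices rather than pointers: head is an s16 index, -1 terminates the list, and a dequeued state again points next at its own index while it is off-list. A sketch under those assumptions follows; the tail bookkeeping is not visible in the excerpt and is assumed here so the enqueue side is complete.

#include <stdint.h>
#include <stddef.h>

#define NUM_BUF_STATES 128

struct buf_state {
        int16_t next;           /* next index in the list, -1 at the end */
};

struct index_list {
        int16_t head;           /* -1 when the list is empty */
        int16_t tail;           /* assumed; not shown in the listing */
};

struct rx_ring {
        struct buf_state buf_states[NUM_BUF_STATES];
};

static struct buf_state *dequeue_buf_state(struct rx_ring *rx,
                                           struct index_list *list)
{
        int16_t id = list->head;
        struct buf_state *bs;

        if (id == -1)
                return NULL;

        bs = &rx->buf_states[id];
        list->head = bs->next;
        if (bs->next == -1)
                list->tail = -1;        /* list is now empty */
        bs->next = id;                  /* off-list states self-reference */
        return bs;
}

static void enqueue_buf_state(struct rx_ring *rx, struct index_list *list,
                              struct buf_state *bs)
{
        int16_t id = (int16_t)(bs - rx->buf_states);

        bs->next = -1;                  /* the new tail terminates the list */
        if (list->head == -1) {
                list->head = id;
                list->tail = id;
        } else {
                rx->buf_states[list->tail].next = id;
                list->tail = id;
        }
}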
115 struct gve_rx_buf_state_dqo *buf_state; in gve_get_recycled_buf_state() local
119 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
120 if (likely(buf_state)) in gve_get_recycled_buf_state()
121 return buf_state; in gve_get_recycled_buf_state()
132 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
133 if (gve_buf_ref_cnt(buf_state) == 0) in gve_get_recycled_buf_state()
134 return buf_state; in gve_get_recycled_buf_state()
136 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
143 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
144 if (gve_buf_ref_cnt(buf_state) == 0) in gve_get_recycled_buf_state()
145 return buf_state; in gve_get_recycled_buf_state()
147 gve_free_page_dqo(rx->gve, buf_state); in gve_get_recycled_buf_state()
148 gve_free_buf_state(rx, buf_state); in gve_get_recycled_buf_state()
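gve_get_recycled_buf_state() (lines 115-148) picks the buffer to repost in three steps: take one from the recycled list if possible, otherwise scan the used list for a state whose page the stack has fully released (gve_buf_ref_cnt() == 0), and as a last resort give one page up entirely so its slot can be refilled. The sketch below models that policy only; the list helpers, the scan bound and the plain outstanding_refs counter are stand-ins, since the driver derives the count by comparing the page refcount against pagecnt_bias.

#include <stddef.h>

struct buf_state {
        int outstanding_refs;           /* stands in for gve_buf_ref_cnt() */
};

/* Hypothetical helpers standing in for gve_{de,en}queue_buf_state() and
 * the gve_free_page_dqo()/gve_free_buf_state() teardown pair. */
struct buf_list;
struct buf_state *list_pop(struct buf_list *list);
void list_push(struct buf_list *list, struct buf_state *bs);
void drop_buffer(struct buf_state *bs);

struct rx_ring {
        struct buf_list *recycled;      /* known-idle buffers */
        struct buf_list *used;          /* handed out, possibly still referenced */
};

#define USED_SCAN_LIMIT 5               /* assumed bound, not shown in the listing */

static struct buf_state *get_recycled_buf_state(struct rx_ring *rx)
{
        struct buf_state *bs;
        int i;

        /* Fast path: a buffer already known to be idle. */
        bs = list_pop(rx->recycled);
        if (bs)
                return bs;

        /* Scan a few used buffers; anything still referenced goes back on
         * the tail to be retried later. */
        for (i = 0; i < USED_SCAN_LIMIT; i++) {
                bs = list_pop(rx->used);
                if (!bs)
                        return NULL;
                if (bs->outstanding_refs == 0)
                        return bs;
                list_push(rx->used, bs);
        }

        /* Last resort: sacrifice one in-flight buffer so its slot can be
         * refilled with a freshly allocated page. */
        bs = list_pop(rx->used);
        if (!bs)
                return NULL;
        if (bs->outstanding_refs == 0)
                return bs;

        drop_buffer(bs);
        return NULL;
}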
155 struct gve_rx_buf_state_dqo *buf_state) in gve_alloc_page_dqo() argument
159 err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page, in gve_alloc_page_dqo()
160 &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC); in gve_alloc_page_dqo()
164 buf_state->page_info.page_offset = 0; in gve_alloc_page_dqo()
165 buf_state->page_info.page_address = in gve_alloc_page_dqo()
166 page_address(buf_state->page_info.page); in gve_alloc_page_dqo()
167 buf_state->last_single_ref_offset = 0; in gve_alloc_page_dqo()
170 page_ref_add(buf_state->page_info.page, INT_MAX - 1); in gve_alloc_page_dqo()
171 buf_state->page_info.pagecnt_bias = INT_MAX; in gve_alloc_page_dqo()
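Lines 170-171 show the refcount-bias trick used by gve_alloc_page_dqo(): the driver takes INT_MAX - 1 page references once at allocation time and records INT_MAX as a local bias, so handing a fragment to the stack later costs only a local decrement instead of an atomic page reference per buffer, and "no outstanding users" shows up as the page refcount falling back to the bias. What follows is a userspace model of that accounting, not kernel code; the fake_page type and the helper names are invented for the illustration.

#include <assert.h>
#include <limits.h>

struct fake_page {
        int refcount;                   /* stands in for page_count() */
};

struct page_info {
        struct fake_page *page;
        int pagecnt_bias;               /* references the driver still owns */
};

static void bias_page(struct page_info *pi)
{
        /* One reference comes from the allocation itself; pre-charge the
         * rest in a single bulk add (page_ref_add() in the driver). */
        pi->page->refcount += INT_MAX - 1;
        pi->pagecnt_bias = INT_MAX;
}

/* Called when a fragment backed by this page is handed to the stack: one of
 * the pre-charged references is, in effect, transferred to the skb. */
static void dec_pagecnt_bias(struct page_info *pi)
{
        pi->pagecnt_bias--;
}

/* References held outside the driver; zero means the page can be reused. */
static int buf_ref_cnt(const struct page_info *pi)
{
        return pi->page->refcount - pi->pagecnt_bias;
}

int main(void)
{
        struct fake_page page = { .refcount = 1 };   /* freshly allocated */
        struct page_info pi = { .page = &page };

        bias_page(&pi);
        assert(buf_ref_cnt(&pi) == 0);  /* nothing handed out yet */

        dec_pagecnt_bias(&pi);          /* one frag given to the stack */
        assert(buf_ref_cnt(&pi) == 1);  /* that frag is still outstanding */

        page.refcount--;                /* the stack eventually releases it */
        assert(buf_ref_cnt(&pi) == 0);  /* page is reusable again */
        return 0;
}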
347 struct gve_rx_buf_state_dqo *buf_state; in gve_rx_post_buffers_dqo() local
349 buf_state = gve_get_recycled_buf_state(rx); in gve_rx_post_buffers_dqo()
350 if (unlikely(!buf_state)) { in gve_rx_post_buffers_dqo()
351 buf_state = gve_alloc_buf_state(rx); in gve_rx_post_buffers_dqo()
352 if (unlikely(!buf_state)) in gve_rx_post_buffers_dqo()
355 if (unlikely(gve_alloc_page_dqo(priv, buf_state))) { in gve_rx_post_buffers_dqo()
359 gve_free_buf_state(rx, buf_state); in gve_rx_post_buffers_dqo()
364 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_rx_post_buffers_dqo()
365 desc->buf_addr = cpu_to_le64(buf_state->addr + in gve_rx_post_buffers_dqo()
366 buf_state->page_info.page_offset); in gve_rx_post_buffers_dqo()
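In gve_rx_post_buffers_dqo() (lines 347-366), each posted descriptor records the index of its buffer state, so the completion can map straight back to it, together with the DMA address of the page at the state's current offset; a fresh state and page are only allocated when nothing can be recycled, and a failed page allocation returns the state to the free list. The refill loop below is a sketch of that flow; the ring/descriptor layout and every helper name not visible in the listing are assumptions.

#include <stdint.h>
#include <stddef.h>
#include <endian.h>

struct buf_state {
        uint64_t dma_addr;              /* buf_state->addr in the listing */
        uint32_t page_offset;           /* current slice within the page */
};

struct rx_desc {
        uint16_t buf_id;                /* little-endian index into buf_states[] */
        uint64_t buf_addr;              /* little-endian DMA address of the slice */
};

struct rx_ring {
        struct buf_state *buf_states;
        struct rx_desc *desc_ring;
        uint32_t fill_cnt;
        uint32_t mask;                  /* assumed: ring size - 1 */
};

/* Hypothetical stand-ins for the driver helpers named in the listing. */
struct buf_state *get_recycled_buf_state(struct rx_ring *rx);
struct buf_state *alloc_buf_state(struct rx_ring *rx);
int alloc_and_map_page(struct buf_state *bs);   /* 0 on success */
void free_buf_state(struct rx_ring *rx, struct buf_state *bs);

static void post_buffers(struct rx_ring *rx, unsigned int budget)
{
        while (budget--) {
                struct rx_desc *desc = &rx->desc_ring[rx->fill_cnt & rx->mask];
                struct buf_state *bs;

                /* Reuse an idle page when possible, otherwise take a fresh
                 * state and back it with a newly mapped page. */
                bs = get_recycled_buf_state(rx);
                if (!bs) {
                        bs = alloc_buf_state(rx);
                        if (!bs)
                                break;
                        if (alloc_and_map_page(bs)) {
                                free_buf_state(rx, bs);
                                break;
                        }
                }

                desc->buf_id = htole16((uint16_t)(bs - rx->buf_states));
                desc->buf_addr = htole64(bs->dma_addr + bs->page_offset);
                rx->fill_cnt++;
        }
}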
380 struct gve_rx_buf_state_dqo *buf_state) in gve_try_recycle_buf() argument
389 pagecount = gve_buf_ref_cnt(buf_state); in gve_try_recycle_buf()
397 buf_state->last_single_ref_offset = in gve_try_recycle_buf()
398 buf_state->page_info.page_offset; in gve_try_recycle_buf()
402 buf_state->page_info.page_offset += data_buffer_size; in gve_try_recycle_buf()
403 buf_state->page_info.page_offset &= (PAGE_SIZE - 1); in gve_try_recycle_buf()
408 if (buf_state->page_info.page_offset == in gve_try_recycle_buf()
409 buf_state->last_single_ref_offset) { in gve_try_recycle_buf()
413 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
417 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
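gve_try_recycle_buf() (lines 380-417) decides whether a completed buffer can be reposted immediately: the page is carved into buffer-sized slices, page_offset advances by one slice modulo PAGE_SIZE, and last_single_ref_offset remembers where the page was last seen with a single outstanding reference (presumably when gve_buf_ref_cnt() reports 1; the condition itself is not in the excerpt). If the offset wraps all the way back to that remembered point, the slice about to be handed out may still be held by the stack, so the state goes to the used list rather than the recycled list. The sketch below returns a bool in place of the two gve_enqueue_buf_state() calls and treats the single-slice guard as an assumption.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SZ 4096u                   /* stands in for PAGE_SIZE */

struct buf_state {
        uint32_t page_offset;
        uint32_t last_single_ref_offset;
};

/* Returns true if the buffer may be reposted right away (recycled list),
 * false if it must wait on the used list. outstanding_refs stands in for
 * gve_buf_ref_cnt() and slice_size for the device buffer size. */
static bool try_recycle(struct buf_state *bs, int outstanding_refs,
                        uint32_t slice_size)
{
        /* Assumed guard: a page that only fits one slice can't be shared. */
        if (slice_size * 2 > PAGE_SZ)
                return false;

        /* With one reference left we know every other slice is free, so
         * remember where that happened. */
        if (outstanding_refs == 1)
                bs->last_single_ref_offset = bs->page_offset;

        /* Move on to the next slice, wrapping within the page. */
        bs->page_offset += slice_size;
        bs->page_offset &= PAGE_SZ - 1;

        /* Wrapped back without ever hitting a single reference: the next
         * slice may still be in use, so don't recycle. */
        if (bs->page_offset == bs->last_single_ref_offset)
                return false;

        return true;
}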
482 struct gve_rx_buf_state_dqo *buf_state, in gve_rx_append_frags() argument
506 buf_state->page_info.page, in gve_rx_append_frags()
507 buf_state->page_info.page_offset, in gve_rx_append_frags()
509 gve_dec_pagecnt_bias(&buf_state->page_info); in gve_rx_append_frags()
524 struct gve_rx_buf_state_dqo *buf_state; in gve_rx_dqo() local
533 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
534 if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { in gve_rx_dqo()
542 buf_state); in gve_rx_dqo()
551 prefetch(buf_state->page_info.page); in gve_rx_dqo()
554 dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr, in gve_rx_dqo()
555 buf_state->page_info.page_offset, in gve_rx_dqo()
560 if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, in gve_rx_dqo()
565 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_dqo()
571 &buf_state->page_info, buf_len, 0, NULL); in gve_rx_dqo()
582 buf_state); in gve_rx_dqo()
591 skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, in gve_rx_dqo()
592 buf_state->page_info.page_offset, buf_len, in gve_rx_dqo()
594 gve_dec_pagecnt_bias(&buf_state->page_info); in gve_rx_dqo()
596 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_dqo()
600 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_dqo()
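The completion path in gve_rx_append_frags() and gve_rx_dqo() (lines 482-600) ties the pieces together: the completion's buffer id indexes straight back into buf_states[], a completion for a state that is not marked allocated is treated as an error, the received slice is DMA-synced for the CPU, and the payload is either copied into a fresh skb (after which the page is idle and goes straight to the recycled list, line 600) or attached as a page fragment, with gve_dec_pagecnt_bias() accounting for the reference the stack now holds, followed by gve_try_recycle_buf(). The sketch below compresses that flow; every helper it declares is a hypothetical stand-in for the calls visible in the listing, and the small-packet copy condition is a simplification of the driver's actual criteria.

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct buf_state {
        void *page_address;
        uint32_t page_offset;
};

struct rx_ring {
        struct buf_state *buf_states;
        int num_buf_states;
};

/* Hypothetical stand-ins for the primitives visible in the listing. */
bool buf_state_is_allocated(struct rx_ring *rx, struct buf_state *bs);
void dma_sync_slice_for_cpu(struct buf_state *bs, uint32_t len);
bool copy_to_new_skb(struct buf_state *bs, uint32_t len);       /* small packets */
void add_page_frag_to_skb(struct buf_state *bs, uint32_t len);  /* decrements bias */
void try_recycle(struct rx_ring *rx, struct buf_state *bs);
void mark_recycled(struct rx_ring *rx, struct buf_state *bs);
void drop_packet(struct rx_ring *rx);

static int handle_rx_completion(struct rx_ring *rx, uint16_t buffer_id,
                                uint32_t buf_len, bool small_packet)
{
        struct buf_state *bs;

        if (buffer_id >= rx->num_buf_states)
                return -1;
        bs = &rx->buf_states[buffer_id];

        /* A completion for a buffer we never posted indicates a bug or a
         * misbehaving device. */
        if (!buf_state_is_allocated(rx, bs)) {
                drop_packet(rx);
                return -1;
        }

        /* Make the slice visible to the CPU before touching the payload. */
        dma_sync_slice_for_cpu(bs, buf_len);

        if (small_packet && copy_to_new_skb(bs, buf_len)) {
                /* Payload was copied out, so the page is idle again. */
                mark_recycled(rx, bs);
                return 0;
        }

        /* Hand the page slice to the stack; this consumes one unit of the
         * page's pre-charged refcount bias. */
        add_page_frag_to_skb(bs, buf_len);
        try_recycle(rx, bs);
        return 0;
}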