Lines matching refs: rx

33 static struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)  in gve_alloc_buf_state()  argument
38 buffer_id = rx->dqo.free_buf_states; in gve_alloc_buf_state()
42 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_alloc_buf_state()
45 rx->dqo.free_buf_states = buf_state->next; in gve_alloc_buf_state()
53 static bool gve_buf_state_is_allocated(struct gve_rx_ring *rx, in gve_buf_state_is_allocated() argument
56 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_buf_state_is_allocated()
61 static void gve_free_buf_state(struct gve_rx_ring *rx, in gve_free_buf_state() argument
64 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_free_buf_state()
66 buf_state->next = rx->dqo.free_buf_states; in gve_free_buf_state()
67 rx->dqo.free_buf_states = buffer_id; in gve_free_buf_state()
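
The three helpers above (source lines 33-67) manage buffer-state slots through an index-based free list: rx->dqo.free_buf_states holds the index of the first free slot, each slot's next field links to the following one, and -1 terminates the chain; a state's ID is recovered by pointer arithmetic against the buf_states array. Below is a minimal user-space sketch of that free-list pattern, with invented names (buf_state, ring, alloc_state, free_state) standing in for the driver's structures and helpers:

#include <stdio.h>
#include <stdint.h>

#define NUM_STATES 8

struct buf_state {
	int16_t next;           /* index of the next free slot, or -1 */
	/* per-buffer bookkeeping would live here */
};

struct ring {
	struct buf_state states[NUM_STATES];
	int16_t free_head;      /* plays the role of rx->dqo.free_buf_states */
};

/* Pop the first free slot, as gve_alloc_buf_state() does. */
static int16_t alloc_state(struct ring *r)
{
	int16_t id = r->free_head;

	if (id == -1)
		return -1;                      /* free list exhausted */
	r->free_head = r->states[id].next;
	r->states[id].next = -1;
	return id;
}

/* Push a slot back onto the free list, as gve_free_buf_state() does. */
static void free_state(struct ring *r, int16_t id)
{
	r->states[id].next = r->free_head;
	r->free_head = id;
}

int main(void)
{
	struct ring r;
	int16_t i;

	/* Chain every slot onto the free list; the last slot ends with -1. */
	for (i = 0; i < NUM_STATES - 1; i++)
		r.states[i].next = i + 1;
	r.states[NUM_STATES - 1].next = -1;
	r.free_head = 0;

	i = alloc_state(&r);
	printf("allocated slot %d\n", i);       /* prints 0 */
	free_state(&r, i);
	return 0;
}
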
71 gve_dequeue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list) in gve_dequeue_buf_state() argument
80 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_dequeue_buf_state()
93 static void gve_enqueue_buf_state(struct gve_rx_ring *rx, in gve_enqueue_buf_state() argument
97 s16 buffer_id = buf_state - rx->dqo.buf_states; in gve_enqueue_buf_state()
107 rx->dqo.buf_states[tail].next = buffer_id; in gve_enqueue_buf_state()
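
gve_dequeue_buf_state() and gve_enqueue_buf_state() (source lines 71-107) build a FIFO over the same slot array: a gve_index_list keeps head and tail slot IDs, -1 means empty, and the slots' next fields again supply the links. A hedged sketch of that FIFO, reusing the hypothetical buf_state layout from the previous example:

#include <stdint.h>

#define NUM_STATES 8

struct buf_state {
	int16_t next;
};

struct index_list {                     /* mirrors struct gve_index_list */
	int16_t head;
	int16_t tail;
};

/* Append slot `id` at the tail, as gve_enqueue_buf_state() does. */
static void enqueue(struct buf_state *states, struct index_list *list,
		    int16_t id)
{
	states[id].next = -1;
	if (list->head == -1) {             /* list was empty */
		list->head = id;
		list->tail = id;
	} else {
		states[list->tail].next = id;
		list->tail = id;
	}
}

/* Pop the head slot, or -1 when empty, like gve_dequeue_buf_state(). */
static int16_t dequeue(struct buf_state *states, struct index_list *list)
{
	int16_t id = list->head;

	if (id == -1)
		return -1;
	list->head = states[id].next;
	if (list->head == -1)               /* list just became empty */
		list->tail = -1;
	return id;
}

int main(void)
{
	struct buf_state states[NUM_STATES];
	struct index_list used = { .head = -1, .tail = -1 };

	enqueue(states, &used, 3);
	enqueue(states, &used, 5);
	return dequeue(states, &used) == 3 ? 0 : 1;
}
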
113 gve_get_recycled_buf_state(struct gve_rx_ring *rx) in gve_get_recycled_buf_state() argument
119 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.recycled_buf_states); in gve_get_recycled_buf_state()
123 if (unlikely(rx->dqo.used_buf_states.head == -1)) in gve_get_recycled_buf_state()
132 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
136 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_get_recycled_buf_state()
142 if (unlikely(rx->dqo.free_buf_states == -1)) { in gve_get_recycled_buf_state()
143 buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states); in gve_get_recycled_buf_state()
147 gve_free_page_dqo(rx->gve, buf_state); in gve_get_recycled_buf_state()
148 gve_free_buf_state(rx, buf_state); in gve_get_recycled_buf_state()
178 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_free_ring_dqo() local
185 completion_queue_slots = rx->dqo.complq.mask + 1; in gve_rx_free_ring_dqo()
186 buffer_queue_slots = rx->dqo.bufq.mask + 1; in gve_rx_free_ring_dqo()
190 if (rx->q_resources) { in gve_rx_free_ring_dqo()
191 dma_free_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_free_ring_dqo()
192 rx->q_resources, rx->q_resources_bus); in gve_rx_free_ring_dqo()
193 rx->q_resources = NULL; in gve_rx_free_ring_dqo()
196 for (i = 0; i < rx->dqo.num_buf_states; i++) { in gve_rx_free_ring_dqo()
197 struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i]; in gve_rx_free_ring_dqo()
203 if (rx->dqo.bufq.desc_ring) { in gve_rx_free_ring_dqo()
204 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_free_ring_dqo()
205 dma_free_coherent(hdev, size, rx->dqo.bufq.desc_ring, in gve_rx_free_ring_dqo()
206 rx->dqo.bufq.bus); in gve_rx_free_ring_dqo()
207 rx->dqo.bufq.desc_ring = NULL; in gve_rx_free_ring_dqo()
210 if (rx->dqo.complq.desc_ring) { in gve_rx_free_ring_dqo()
211 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_free_ring_dqo()
213 dma_free_coherent(hdev, size, rx->dqo.complq.desc_ring, in gve_rx_free_ring_dqo()
214 rx->dqo.complq.bus); in gve_rx_free_ring_dqo()
215 rx->dqo.complq.desc_ring = NULL; in gve_rx_free_ring_dqo()
218 kvfree(rx->dqo.buf_states); in gve_rx_free_ring_dqo()
219 rx->dqo.buf_states = NULL; in gve_rx_free_ring_dqo()
226 struct gve_rx_ring *rx = &priv->rx[idx]; in gve_rx_alloc_ring_dqo() local
237 memset(rx, 0, sizeof(*rx)); in gve_rx_alloc_ring_dqo()
238 rx->gve = priv; in gve_rx_alloc_ring_dqo()
239 rx->q_num = idx; in gve_rx_alloc_ring_dqo()
240 rx->dqo.bufq.mask = buffer_queue_slots - 1; in gve_rx_alloc_ring_dqo()
241 rx->dqo.complq.num_free_slots = completion_queue_slots; in gve_rx_alloc_ring_dqo()
242 rx->dqo.complq.mask = completion_queue_slots - 1; in gve_rx_alloc_ring_dqo()
243 rx->ctx.skb_head = NULL; in gve_rx_alloc_ring_dqo()
244 rx->ctx.skb_tail = NULL; in gve_rx_alloc_ring_dqo()
246 rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4); in gve_rx_alloc_ring_dqo()
247 rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, in gve_rx_alloc_ring_dqo()
248 sizeof(rx->dqo.buf_states[0]), in gve_rx_alloc_ring_dqo()
250 if (!rx->dqo.buf_states) in gve_rx_alloc_ring_dqo()
254 for (i = 0; i < rx->dqo.num_buf_states - 1; i++) in gve_rx_alloc_ring_dqo()
255 rx->dqo.buf_states[i].next = i + 1; in gve_rx_alloc_ring_dqo()
257 rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1; in gve_rx_alloc_ring_dqo()
258 rx->dqo.recycled_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
259 rx->dqo.recycled_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
260 rx->dqo.used_buf_states.head = -1; in gve_rx_alloc_ring_dqo()
261 rx->dqo.used_buf_states.tail = -1; in gve_rx_alloc_ring_dqo()
264 size = sizeof(rx->dqo.complq.desc_ring[0]) * in gve_rx_alloc_ring_dqo()
266 rx->dqo.complq.desc_ring = in gve_rx_alloc_ring_dqo()
267 dma_alloc_coherent(hdev, size, &rx->dqo.complq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
268 if (!rx->dqo.complq.desc_ring) in gve_rx_alloc_ring_dqo()
272 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots; in gve_rx_alloc_ring_dqo()
273 rx->dqo.bufq.desc_ring = in gve_rx_alloc_ring_dqo()
274 dma_alloc_coherent(hdev, size, &rx->dqo.bufq.bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
275 if (!rx->dqo.bufq.desc_ring) in gve_rx_alloc_ring_dqo()
278 rx->q_resources = dma_alloc_coherent(hdev, sizeof(*rx->q_resources), in gve_rx_alloc_ring_dqo()
279 &rx->q_resources_bus, GFP_KERNEL); in gve_rx_alloc_ring_dqo()
280 if (!rx->q_resources) in gve_rx_alloc_ring_dqo()
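
The allocation path (source lines 226-280) sizes both queues as powers of two so that mask = slots - 1 can wrap ring indices and, in the free path above, give the slot count back as mask + 1. It also provisions four buffer states per buffer-queue slot, capped at S16_MAX so the s16 links used by the lists stay representable, before chaining them onto the free list exactly as in the first sketch. A small worked example of that arithmetic with made-up slot counts:

#include <stdio.h>
#include <stdint.h>

#define S16_MAX 32767

int main(void)
{
	/* Hypothetical queue sizes; the real ones come from device/driver
	 * configuration and are required to be powers of two.
	 */
	unsigned int buffer_queue_slots = 256;
	unsigned int completion_queue_slots = 512;

	unsigned int bufq_mask = buffer_queue_slots - 1;        /* 0xff  */
	unsigned int complq_mask = completion_queue_slots - 1;  /* 0x1ff */

	/* The mask wraps ring indices cheaply, and the free path recovers
	 * the slot count later as mask + 1 (as gve_rx_free_ring_dqo() does).
	 */
	unsigned int wrapped_tail = (255 + 1) & bufq_mask;      /* 0   */
	unsigned int recovered_slots = complq_mask + 1;         /* 512 */

	/* Four buffer states per buffer-queue slot, capped at S16_MAX so the
	 * s16 next/head/tail indices of the free and index lists stay valid.
	 */
	unsigned int want = buffer_queue_slots * 4;
	int16_t num_buf_states = (int16_t)(want > S16_MAX ? S16_MAX : want);

	printf("bufq mask=%#x wrapped tail=%u complq slots=%u buf states=%d\n",
	       bufq_mask, wrapped_tail, recovered_slots, num_buf_states);
	return 0;
}
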
294 const struct gve_rx_ring *rx = &priv->rx[queue_idx]; in gve_rx_write_doorbell_dqo() local
295 u64 index = be32_to_cpu(rx->q_resources->db_index); in gve_rx_write_doorbell_dqo()
297 iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); in gve_rx_write_doorbell_dqo()
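
gve_rx_write_doorbell_dqo() (source lines 294-297) publishes the buffer-queue tail to the device by writing it into the queue's doorbell register, whose offset is the big-endian db_index returned in q_resources. A tiny sketch of the same idea against a hypothetical in-memory doorbell array (db_bar and ring_rx_doorbell are invented stand-ins for priv->db_bar2 and the driver helper):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* ntohl()/htonl(), in place of be32_to_cpu() */

/* Write the buffer-queue tail into the queue's doorbell slot.  db_bar stands
 * in for the memory-mapped doorbell BAR and db_index_be for the big-endian
 * index the device reported back in q_resources.
 */
static void ring_rx_doorbell(volatile uint32_t *db_bar, uint32_t db_index_be,
			     uint16_t bufq_tail)
{
	uint32_t index = ntohl(db_index_be);    /* be32_to_cpu() equivalent */

	db_bar[index] = bufq_tail;              /* iowrite32() analogue */
}

int main(void)
{
	uint32_t fake_bar[8] = { 0 };

	ring_rx_doorbell(fake_bar, htonl(5), 42);
	printf("doorbell[5]=%u\n", fake_bar[5]);
	return 0;
}
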
332 void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) in gve_rx_post_buffers_dqo() argument
334 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_post_buffers_dqo()
335 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_post_buffers_dqo()
336 struct gve_priv *priv = rx->gve; in gve_rx_post_buffers_dqo()
349 buf_state = gve_get_recycled_buf_state(rx); in gve_rx_post_buffers_dqo()
351 buf_state = gve_alloc_buf_state(rx); in gve_rx_post_buffers_dqo()
356 u64_stats_update_begin(&rx->statss); in gve_rx_post_buffers_dqo()
357 rx->rx_buf_alloc_fail++; in gve_rx_post_buffers_dqo()
358 u64_stats_update_end(&rx->statss); in gve_rx_post_buffers_dqo()
359 gve_free_buf_state(rx, buf_state); in gve_rx_post_buffers_dqo()
364 desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); in gve_rx_post_buffers_dqo()
373 gve_rx_write_doorbell_dqo(priv, rx->q_num); in gve_rx_post_buffers_dqo()
376 rx->fill_cnt += num_posted; in gve_rx_post_buffers_dqo()
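
gve_rx_post_buffers_dqo() (source lines 332-376) refills the buffer queue: each free slot gets a recycled buffer state if one is available, otherwise a freshly allocated one (a failure bumps rx_buf_alloc_fail and ends the loop), the descriptor at the tail is stamped with the state's index as buf_id, the tail advances under the mask, the doorbell is rung as batches complete, and fill_cnt grows by the number posted. A simplified, self-contained model of that loop; get_buffer() and DB_BATCH are assumptions for the sketch, not the driver's real helper or threshold:

#include <stdio.h>
#include <stdint.h>

#define RING_SLOTS 8                    /* power of two, hypothetical */
#define DB_BATCH   4                    /* hypothetical doorbell batch size */

struct rx_desc {                        /* stands in for the DQO buffer descriptor */
	uint16_t buf_id;
};

/* Hypothetical buffer source: recycled first, then fresh, -1 on failure. */
static int16_t get_buffer(void)
{
	static int16_t next_id;
	return next_id < 6 ? next_id++ : -1;    /* pretend we run out after 6 */
}

int main(void)
{
	struct rx_desc ring[RING_SLOTS];
	unsigned int mask = RING_SLOTS - 1;
	unsigned int tail = 0;
	unsigned int fill_cnt = 0, posted = 0, alloc_fail = 0;
	int free_slots = RING_SLOTS;

	while (free_slots--) {
		int16_t id = get_buffer();

		if (id == -1) {
			alloc_fail++;           /* rx_buf_alloc_fail analogue */
			break;
		}

		ring[tail].buf_id = (uint16_t)id;   /* index back into buf_states */
		tail = (tail + 1) & mask;
		posted++;

		if (posted % DB_BATCH == 0)
			printf("doorbell: tail=%u\n", tail);
	}

	if (posted % DB_BATCH)                  /* flush the final partial batch */
		printf("doorbell: tail=%u\n", tail);

	fill_cnt += posted;
	printf("posted=%u fill_cnt=%u alloc_fail=%u\n", posted, fill_cnt, alloc_fail);
	return 0;
}
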
379 static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, in gve_try_recycle_buf() argument
413 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_try_recycle_buf()
417 gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state); in gve_try_recycle_buf()
468 static void gve_rx_free_skb(struct gve_rx_ring *rx) in gve_rx_free_skb() argument
470 if (!rx->ctx.skb_head) in gve_rx_free_skb()
473 dev_kfree_skb_any(rx->ctx.skb_head); in gve_rx_free_skb()
474 rx->ctx.skb_head = NULL; in gve_rx_free_skb()
475 rx->ctx.skb_tail = NULL; in gve_rx_free_skb()
483 u16 buf_len, struct gve_rx_ring *rx, in gve_rx_append_frags() argument
486 int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags; in gve_rx_append_frags()
495 skb_shinfo(rx->ctx.skb_tail)->frag_list = skb; in gve_rx_append_frags()
496 rx->ctx.skb_tail = skb; in gve_rx_append_frags()
499 if (rx->ctx.skb_tail != rx->ctx.skb_head) { in gve_rx_append_frags()
500 rx->ctx.skb_head->len += buf_len; in gve_rx_append_frags()
501 rx->ctx.skb_head->data_len += buf_len; in gve_rx_append_frags()
502 rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo; in gve_rx_append_frags()
505 skb_add_rx_frag(rx->ctx.skb_tail, num_frags, in gve_rx_append_frags()
518 static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, in gve_rx_dqo() argument
525 struct gve_priv *priv = rx->gve; in gve_rx_dqo()
528 if (unlikely(buffer_id >= rx->dqo.num_buf_states)) { in gve_rx_dqo()
533 buf_state = &rx->dqo.buf_states[buffer_id]; in gve_rx_dqo()
534 if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) { in gve_rx_dqo()
541 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
559 if (rx->ctx.skb_head) { in gve_rx_dqo()
560 if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx, in gve_rx_dqo()
565 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_dqo()
570 rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, in gve_rx_dqo()
572 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
574 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
576 u64_stats_update_begin(&rx->statss); in gve_rx_dqo()
577 rx->rx_copied_pkt++; in gve_rx_dqo()
578 rx->rx_copybreak_pkt++; in gve_rx_dqo()
579 u64_stats_update_end(&rx->statss); in gve_rx_dqo()
581 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, in gve_rx_dqo()
586 rx->ctx.skb_head = napi_get_frags(napi); in gve_rx_dqo()
587 if (unlikely(!rx->ctx.skb_head)) in gve_rx_dqo()
589 rx->ctx.skb_tail = rx->ctx.skb_head; in gve_rx_dqo()
591 skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, in gve_rx_dqo()
596 gve_try_recycle_buf(priv, rx, buf_state); in gve_rx_dqo()
600 gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state); in gve_rx_dqo()
630 static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, in gve_rx_complete_skb() argument
635 rx->gve->ptype_lut_dqo->ptypes[desc->packet_type]; in gve_rx_complete_skb()
638 skb_record_rx_queue(rx->ctx.skb_head, rx->q_num); in gve_rx_complete_skb()
641 gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
644 gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
650 err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype); in gve_rx_complete_skb()
655 if (skb_headlen(rx->ctx.skb_head) == 0) in gve_rx_complete_skb()
658 napi_gro_receive(napi, rx->ctx.skb_head); in gve_rx_complete_skb()
668 struct gve_rx_ring *rx = block->rx; in gve_rx_poll_dqo() local
669 struct gve_rx_compl_queue_dqo *complq = &rx->dqo.complq; in gve_rx_poll_dqo()
691 err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num); in gve_rx_poll_dqo()
693 gve_rx_free_skb(rx); in gve_rx_poll_dqo()
694 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
696 rx->rx_skb_alloc_fail++; in gve_rx_poll_dqo()
698 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
699 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
712 struct gve_rx_buf_queue_dqo *bufq = &rx->dqo.bufq; in gve_rx_poll_dqo()
718 rx->cnt++; in gve_rx_poll_dqo()
720 if (!rx->ctx.skb_head) in gve_rx_poll_dqo()
727 pkt_bytes = rx->ctx.skb_head->len; in gve_rx_poll_dqo()
731 if (skb_headlen(rx->ctx.skb_head)) in gve_rx_poll_dqo()
735 if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) { in gve_rx_poll_dqo()
736 gve_rx_free_skb(rx); in gve_rx_poll_dqo()
737 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
738 rx->rx_desc_err_dropped_pkt++; in gve_rx_poll_dqo()
739 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
744 rx->ctx.skb_head = NULL; in gve_rx_poll_dqo()
745 rx->ctx.skb_tail = NULL; in gve_rx_poll_dqo()
748 gve_rx_post_buffers_dqo(rx); in gve_rx_poll_dqo()
750 u64_stats_update_begin(&rx->statss); in gve_rx_poll_dqo()
751 rx->rpackets += work_done; in gve_rx_poll_dqo()
752 rx->rbytes += bytes; in gve_rx_poll_dqo()
753 u64_stats_update_end(&rx->statss); in gve_rx_poll_dqo()
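
gve_rx_poll_dqo() (source lines 668-753) is the NAPI poll body: it consumes completion descriptors up to the budget, frees and counts packets that fail mid-assembly (rx_skb_alloc_fail or rx_desc_err_dropped_pkt), clears ctx.skb_head/skb_tail once a completed packet has gone to napi_gro_receive(), reposts buffers via gve_rx_post_buffers_dqo(), and finally folds the totals into rpackets/rbytes inside a u64_stats update section. A stripped-down model of that control flow; process_one() and its return codes are invented for the sketch:

#include <stdio.h>

enum { PKT_DONE, PKT_PARTIAL, PKT_ERROR };

/* Hypothetical per-descriptor handler: returns one of the codes above and,
 * for PKT_DONE, the completed packet's length through *len.
 */
static int process_one(int i, unsigned int *len)
{
	if (i == 3)
		return PKT_ERROR;               /* pretend one descriptor is bad */
	*len = 1500;
	return PKT_DONE;
}

int main(void)
{
	const int budget = 64;
	unsigned int work_done = 0, bytes = 0, dropped = 0;
	int i;

	for (i = 0; i < budget; i++) {
		unsigned int len = 0;
		int ret = process_one(i, &len);

		if (ret == PKT_ERROR) {
			dropped++;              /* rx_desc_err_dropped_pkt analogue */
			continue;               /* the partial skb would be freed here */
		}
		if (ret == PKT_PARTIAL)
			continue;               /* wait for the rest of a multi-buffer packet */

		work_done++;                    /* completed packet handed to the stack */
		bytes += len;
		if (i == 9)
			break;                  /* pretend the completion queue ran dry */
	}

	/* After the loop: refill the buffer queue, then publish the totals
	 * (rpackets/rbytes) in one consistent update, as the driver does
	 * between u64_stats_update_begin() and u64_stats_update_end().
	 */
	printf("work_done=%u bytes=%u dropped=%u\n", work_done, bytes, dropped);
	return 0;
}
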