Searched refs:cached_cons (Results 1 – 4 of 4) sorted by relevance
net/xdp/xsk_queue.h:
  44   u32 cached_cons;                                            member
  114  static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)  in __xskq_cons_read_addr_unchecked() argument
  117  u32 idx = cached_cons & q->ring_mask;                       in __xskq_cons_read_addr_unchecked()
  124  if (q->cached_cons != q->cached_prod) {                     in xskq_cons_read_addr_unchecked()
  125  __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);   in xskq_cons_read_addr_unchecked()
  194  while (q->cached_cons != q->cached_prod) {                  in xskq_cons_read_desc()
  196  u32 idx = q->cached_cons & q->ring_mask;                    in xskq_cons_read_desc()
  202  q->cached_cons++;                                           in xskq_cons_read_desc()
  211  u32 cached_cons = q->cached_cons, nb_entries = 0;           in xskq_cons_read_desc_batch() local
  214  while (cached_cons != q->cached_prod && nb_entries < max) { in xskq_cons_read_desc_batch()
  [all …]
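Note: these hits are the consumer side of AF_XDP's single-producer/single-consumer rings. The consumer keeps a private snapshot of the peer's index (cached_prod) plus a private running cursor (cached_cons), so the shared counters are only read or written when the cached view runs out. A minimal standalone sketch of that pattern, with invented names rather than the kernel's types:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE 256              /* power of two, like the xsk rings */
    #define RING_MASK (RING_SIZE - 1)

    /* Shared ring: written by one producer core, read by one consumer core. */
    struct shared_ring {
        _Atomic uint32_t producer;
        _Atomic uint32_t consumer;
        uint64_t entries[RING_SIZE];
    };

    /* Consumer-private state, playing the role of struct xsk_queue's
     * cached_prod/cached_cons fields in the hits above. */
    struct cons_view {
        struct shared_ring *r;
        uint32_t cached_prod;          /* last observed r->producer */
        uint32_t cached_cons;          /* local cursor, published lazily */
    };

    /* Shaped like xskq_cons_read_desc(): consume from the cached window,
     * refreshing from the shared producer index only when it looks empty. */
    static bool cons_pop(struct cons_view *q, uint64_t *val)
    {
        if (q->cached_cons == q->cached_prod) {
            q->cached_prod = atomic_load_explicit(&q->r->producer,
                                                  memory_order_acquire);
            if (q->cached_cons == q->cached_prod)
                return false;          /* genuinely empty */
        }
        *val = q->r->entries[q->cached_cons & RING_MASK];
        q->cached_cons++;
        return true;
    }

    /* Publish consumed entries, the counterpart of xskq_cons_release(). */
    static void cons_release(struct cons_view *q)
    {
        atomic_store_explicit(&q->r->consumer, q->cached_cons,
                              memory_order_release);
    }

The point of the caching is that the hot path touches only consumer-private memory; the shared producer/consumer cache lines bounce between cores only on refresh and release, which is why cons_release() is worth batching.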
net/xdp/xsk_buff_pool.c:
  541  u32 i, cached_cons, nb_entries;                             in xp_alloc_new_from_fq() local
  547  cached_cons = pool->fq->cached_cons;                        in xp_alloc_new_from_fq()
  555  __xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);  in xp_alloc_new_from_fq()
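Note: xp_alloc_new_from_fq() shows the batched form of the same idea: it snapshots cached_cons into a local (line 547), advances the local per entry (line 555), and writes the cursor back once. Continuing the sketch above, a hedged version of that shape:

    /* Batched consume in the style of xskq_cons_read_desc_batch() and
     * xp_alloc_new_from_fq(): snapshot the cursor, advance a local copy
     * per entry, write it back once (types from the sketch above). */
    static uint32_t cons_pop_batch(struct cons_view *q, uint64_t *vals,
                                   uint32_t max)
    {
        uint32_t cached_cons = q->cached_cons, nb_entries = 0;

        while (cached_cons != q->cached_prod && nb_entries < max)
            vals[nb_entries++] = q->r->entries[cached_cons++ & RING_MASK];

        q->cached_cons = cached_cons;  /* single write-back of the cursor */
        return nb_entries;
    }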
tools/lib/bpf/xsk.h:
  101  __u32 cached_cons; \
  159  __u32 free_entries = r->cached_cons - r->cached_prod;       in xsk_prod_nb_free()
  171  r->cached_cons = libbpf_smp_load_acquire(r->consumer);      in xsk_prod_nb_free()
  172  r->cached_cons += r->size;                                  in xsk_prod_nb_free()
  174  return r->cached_cons - r->cached_prod;                     in xsk_prod_nb_free()
  179  __u32 entries = r->cached_prod - r->cached_cons;            in xsk_cons_nb_avail()
  183  entries = r->cached_prod - r->cached_cons;                  in xsk_cons_nb_avail()
  213  *idx = cons->cached_cons;                                   in xsk_ring_cons__peek()
  214  cons->cached_cons += entries;                               in xsk_ring_cons__peek()
  222  cons->cached_cons -= nb;                                    in xsk_ring_cons__cancel()
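Note: these are the userspace mirrors in libbpf's xsk.h. The producer-side trick is on lines 171-172: for a producer ring, cached_cons deliberately stores consumer + size, so xsk_prod_nb_free() (line 174) is a single __u32 subtraction that stays correct across wraparound. On the consumer side, peek/cancel/release move cached_cons exactly like the kernel code above. A hedged sketch of a typical RX drain against these helpers, where handle_frame() and the setup are assumptions, not part of the API:

    #include <bpf/xsk.h>   /* pre-1.0 libbpf; libxdp ships the same helpers */

    /* handle_frame() is an application placeholder; umem_area is the
     * mmapped UMEM (aligned mode assumed, so desc->addr is a plain offset). */
    extern void handle_frame(void *pkt, __u32 len);

    static void drain_rx(struct xsk_ring_cons *rx, void *umem_area)
    {
        __u32 idx, i;
        /* peek() reserves entries by bumping rx->cached_cons (lines 213-214). */
        __u32 rcvd = xsk_ring_cons__peek(rx, 64, &idx);

        for (i = 0; i < rcvd; i++) {
            const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + i);
            handle_frame((char *)umem_area + desc->addr, desc->len);
        }

        /* release() publishes cached_cons to the shared consumer pointer;
         * xsk_ring_cons__cancel(rx, n) (line 222) would instead hand back
         * entries that were peeked but not processed. */
        if (rcvd)
            xsk_ring_cons__release(rx, rcvd);
    }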
tools/lib/bpf/xsk.c:
  265   fill->cached_cons = umem->config.fill_size;                in xsk_create_umem_rings()
  1106  rx->cached_cons = *rx->consumer;                           in xsk_socket__create_shared()
  1130  tx->cached_cons = *tx->consumer + xsk->config.tx_size;     in xsk_socket__create_shared()
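Note: these hits seed the caches at setup time, consistent with the arithmetic above: the fill ring is a producer ring whose shared consumer starts at 0, so its cached_cons begins at fill_size (line 265); RX is a consumer ring, so its cached_cons starts equal to *consumer (line 1106); TX is a producer ring, seeded at *consumer + tx_size (line 1130). The wraparound-safe free-entry computation this enables can be sanity-checked in isolation; a small self-contained check with invented values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invariant behind xsk_prod_nb_free() (lines 159/174) and the TX ring
     * seeding on line 1130: with cached_cons = consumer + size, the free
     * count is one subtraction, valid even after the u32 counters wrap. */
    int main(void)
    {
        const uint32_t size = 2048;
        uint32_t consumer = UINT32_MAX - 100;      /* about to wrap */
        uint32_t producer = consumer + 1500;       /* 1500 entries in flight */

        uint32_t cached_cons = consumer + size;    /* seeded as in xsk.c */
        uint32_t cached_prod = producer;

        uint32_t free_entries = cached_cons - cached_prod;
        assert(free_entries == size - 1500);       /* 548 free slots */
        printf("free_entries = %u\n", free_entries);
        return 0;
    }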