Searched refs:nb_entries (Results 1 – 4 of 4) sorted by relevance
/linux-5.19.10/net/xdp/
xsk_queue.h
  in xskq_cons_read_desc_batch():
    211    u32 cached_cons = q->cached_cons, nb_entries = 0;   (local)
    214    while (cached_cons != q->cached_prod && nb_entries < max) {
    218    descs[nb_entries] = ring->desc[idx];
    219    if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
    225    nb_entries++;
    229    return nb_entries;
  in xskq_prod_reserve_addr_batch():
    357    u32 nb_entries, i, cached_prod;   (local)
    359    nb_entries = xskq_prod_nb_free(q, max);
    363    for (i = 0; i < nb_entries; i++)
    367    return nb_entries;
  [all …]
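
Taken together, these hits show the cached-index batch pattern used by the XDP socket rings: the consumer snapshots its position, walks toward the cached producer index copying out at most max descriptors, skips entries that fail validation, and returns the count. Below is a minimal, self-contained sketch of that pattern; struct ring and desc_ok() are invented stand-ins (the real code masks with q->ring_mask and validates with xskq_cons_is_valid_desc()), so treat this as a model, not the kernel source.

    #include <stdint.h>
    #include <stdbool.h>

    #define RING_SIZE 256                 /* power of two: size - 1 masks the index */

    struct ring {
        uint32_t cached_prod;             /* snapshot of the producer index */
        uint32_t cached_cons;             /* consumer's local position */
        uint64_t desc[RING_SIZE];
    };

    static bool desc_ok(uint64_t d)       /* stand-in validity check */
    {
        return d != 0;
    }

    /* Copy out up to max descriptors, skipping invalid slots the way the
     * kernel loop does: bump cached_cons past the bad entry and continue. */
    static uint32_t cons_read_batch(struct ring *q, uint64_t *descs, uint32_t max)
    {
        uint32_t cached_cons = q->cached_cons, nb_entries = 0;

        while (cached_cons != q->cached_prod && nb_entries < max) {
            uint32_t idx = cached_cons & (RING_SIZE - 1);

            descs[nb_entries] = q->desc[idx];
            if (!desc_ok(descs[nb_entries])) {
                cached_cons++;            /* drop the bad slot, keep scanning */
                continue;
            }
            cached_cons++;
            nb_entries++;
        }

        q->cached_cons = cached_cons;     /* simplification: the kernel defers
                                           * the release to the caller */
        return nb_entries;
    }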
xsk_buff_pool.c
  in xp_alloc_new_from_fq():
    541    u32 i, cached_cons, nb_entries;   (local)
    548    nb_entries = max;
    561    nb_entries--;
    579    return nb_entries;
  in xp_alloc_reused():
    582    static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)   (argument)
    587    nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);
    589    i = nb_entries;
    597    pool->free_list_cnt -= nb_entries;
    599    return nb_entries;
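
These hits are the two halves of buffer allocation: xp_alloc_new_from_fq() pulls fresh addresses from the fill queue, decrementing nb_entries when a slot has to be skipped, while xp_alloc_reused() recycles buffers from the pool's free list after clamping the request to free_list_cnt. A simplified sketch of the reuse half follows; the singly linked list here is an invented stand-in for the kernel's list_head free list, so only the clamp-detach-adjust shape is meant to match.

    #include <stdint.h>

    struct buf {
        struct buf *next;
        /* payload elided */
    };

    struct pool {
        struct buf *free_list;            /* buffers returned earlier */
        uint32_t free_list_cnt;
    };

    static uint32_t alloc_reused(struct pool *pool, struct buf **out, uint32_t nb_entries)
    {
        uint32_t i;

        /* min_t(u32, nb_entries, pool->free_list_cnt) in the kernel */
        if (nb_entries > pool->free_list_cnt)
            nb_entries = pool->free_list_cnt;

        for (i = 0; i < nb_entries; i++) {
            out[i] = pool->free_list;     /* detach the head buffer */
            pool->free_list = out[i]->next;
        }

        pool->free_list_cnt -= nb_entries;
        return nb_entries;
    }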
xsk.c
  in xsk_tx_completed():
    296    void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)   (argument)
    298    xskq_prod_submit_n(pool->cq, nb_entries);
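
The xsk.c hit is the whole body of xsk_tx_completed(): it forwards to xskq_prod_submit_n() on the pool's completion queue, publishing nb_entries finished TX frames back to user space. Drivers call it from their TX-clean path; a hypothetical example is below. The driver struct and counting helper are invented for illustration, and only the xsk_tx_completed() call matches the real signature shown above.

    #include <net/xdp_sock_drv.h>

    struct mydrv_ring {                   /* invented driver state */
        struct xsk_buff_pool *xsk_pool;
        u32 next_to_clean, hw_head;
    };

    static u32 mydrv_count_completed_descs(struct mydrv_ring *ring)
    {
        return ring->hw_head - ring->next_to_clean;   /* invented accounting */
    }

    static void mydrv_clean_xsk_tx(struct mydrv_ring *ring)
    {
        u32 done = mydrv_count_completed_descs(ring);

        if (done)
            /* forwards to xskq_prod_submit_n(pool->cq, done), so user space
             * sees the frames as completed and can reuse them */
            xsk_tx_completed(ring->xsk_pool, done);
    }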
/linux-5.19.10/include/net/ |
xdp_sock_drv.h
    14     void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
  in xsk_tx_completed():
    142    static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)   (argument)
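
The header shows two hits: the declaration of the real function (line 14) and, judging by the second hit, a static inline stub so drivers still build when AF_XDP support is compiled out. A sketch of that presumed declaration/stub split, following the usual CONFIG_XDP_SOCKETS guard pattern rather than quoting the header verbatim:

    #ifdef CONFIG_XDP_SOCKETS
    void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
    #else
    static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
    {
        /* no-op when AF_XDP is not configured */
    }
    #endif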