/linux-6.1.9/net/core/

  page_pool.c
      32  #define alloc_stat_inc(pool, __stat)  (pool->alloc_stats.__stat++)
      34  #define recycle_stat_inc(pool, __stat) \
      36          struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
      40  #define recycle_stat_add(pool, __stat, val) \
      42          struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
      60  bool page_pool_get_stats(struct page_pool *pool,
      69          stats->alloc_stats.fast += pool->alloc_stats.fast;
      70          stats->alloc_stats.slow += pool->alloc_stats.slow;
      71          stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
      72          stats->alloc_stats.empty += pool->alloc_stats.empty;
      [all …]
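The counters aggregated above are exposed to drivers through page_pool_get_stats(). A minimal sketch of a caller, assuming CONFIG_PAGE_POOL_STATS=y (the API is compiled out otherwise); my_report_pool_stats() and the pr_info() reporting are illustrative:

    #include <linux/printk.h>
    #include <net/page_pool.h>

    static void my_report_pool_stats(struct page_pool *pool)
    {
            struct page_pool_stats stats = { };

            /* Copies the alloc counters and sums the per-CPU recycle stats. */
            if (!page_pool_get_stats(pool, &stats))
                    return;

            pr_info("alloc fast=%llu slow=%llu empty=%llu\n",
                    (unsigned long long)stats.alloc_stats.fast,
                    (unsigned long long)stats.alloc_stats.slow,
                    (unsigned long long)stats.alloc_stats.empty);
    }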
/linux-6.1.9/net/xdp/

  xsk_buff_pool.c
      11  void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
      18          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
      19          list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
      20          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
      23  void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
      30          spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
      32          spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
      35  void xp_destroy(struct xsk_buff_pool *pool)
      37          if (!pool)
      40          kvfree(pool->tx_descs);
      [all …]
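xp_add_xsk() and xp_del_xsk() above use a common kernel idiom: writers to an RCU-protected list serialize on an IRQ-safe spinlock, while readers traverse with list_for_each_entry_rcu() under rcu_read_lock(). The same pattern in isolation (all names hypothetical):

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct my_pool {
            struct list_head items;  /* traversed under rcu_read_lock() */
            spinlock_t lock;         /* serializes writers only */
    };

    static void my_pool_add(struct my_pool *pool, struct list_head *node)
    {
            unsigned long flags;

            spin_lock_irqsave(&pool->lock, flags);
            list_add_rcu(node, &pool->items);
            spin_unlock_irqrestore(&pool->lock, flags);
    }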
/linux-6.1.9/mm/

  mempool.c
      24  static void poison_error(mempool_t *pool, void *element, size_t size,
      27          const int nr = pool->curr_nr;
      33          pr_err("Mempool %p size %zu\n", pool, size);
      41  static void __check_element(mempool_t *pool, void *element, size_t size)
      50          poison_error(pool, element, size, i);
      57  static void check_element(mempool_t *pool, void *element)
      60          if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
      61                  __check_element(pool, element, ksize(element));
      62          } else if (pool->free == mempool_free_pages) {
      64                  int order = (int)(long)pool->pool_data;
      [all …]
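check_element() keys its poisoning checks off the pool's free callback, so a pool built over a slab cache (free callback mempool_free_slab) is one of the shapes it recognizes. A sketch of constructing such a pool; the cache name, sizes, and my_* identifiers are illustrative:

    #include <linux/init.h>
    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *my_cache;
    static mempool_t *my_pool;

    static int __init my_pool_setup(void)
    {
            my_cache = kmem_cache_create("my_obj", 128, 0, 0, NULL);
            if (!my_cache)
                    return -ENOMEM;

            /* Reserve 16 elements so allocation can still make
             * progress under memory pressure. */
            my_pool = mempool_create_slab_pool(16, my_cache);
            if (!my_pool) {
                    kmem_cache_destroy(my_cache);
                    return -ENOMEM;
            }
            return 0;
    }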
  zbud.c
      78          int (*evict)(struct zbud_pool *pool, unsigned long handle);
      222         struct zbud_pool *pool;                 (in zbud_create_pool())
      225         pool = kzalloc(sizeof(struct zbud_pool), gfp);
      226         if (!pool)
      228         spin_lock_init(&pool->lock);
      230                 INIT_LIST_HEAD(&pool->unbuddied[i]);
      231         INIT_LIST_HEAD(&pool->buddied);
      232         INIT_LIST_HEAD(&pool->lru);
      233         pool->pages_nr = 0;
      234         pool->ops = ops;
      [all …]
  dmapool.c
      71          struct dma_pool *pool;                  (in pools_show())
      81          list_for_each_entry(pool, &dev->dma_pools, pools) {
      85                  spin_lock_irq(&pool->lock);
      86                  list_for_each_entry(page, &pool->page_list, page_list) {
      90                  spin_unlock_irq(&pool->lock);
      94                          pool->name, blocks,
      95                          pages * (pool->allocation / pool->size),
      96                          pool->size, pages);
      203 static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
      206         unsigned int next_boundary = pool->boundary;
      [all …]
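pools_show() only reports on pools that drivers created elsewhere; the underlying API is small. A sketch of the usual lifecycle for fixed-size coherent DMA blocks (block size, alignment, and names are illustrative):

    #include <linux/dmapool.h>
    #include <linux/errno.h>

    static int my_dma_pool_demo(struct device *dev)
    {
            struct dma_pool *pool;
            dma_addr_t dma;
            void *desc;

            /* 64-byte blocks, 8-byte aligned, no boundary constraint */
            pool = dma_pool_create("my-desc", dev, 64, 8, 0);
            if (!pool)
                    return -ENOMEM;

            desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
            if (desc)
                    dma_pool_free(pool, desc, dma);

            dma_pool_destroy(pool);
            return 0;
    }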
  z3fold.c
      72          int (*evict)(struct z3fold_pool *pool, unsigned long handle);
      89          unsigned long pool;     /* back link */
      117         struct z3fold_pool *pool;
      207 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
      210         struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
      216         slots->pool = (unsigned long)pool;
      225         return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);     (in slots_to_pool())
      305         if (test_bit(HANDLES_NOFREE, &slots->pool)) {                   (in free_handle())
      323         struct z3fold_pool *pool = slots_to_pool(slots);
      327         kmem_cache_free(pool->c_handle, slots);
      [all …]
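slots_to_pool() above recovers the owning pool from a back link whose low bits double as flags (HANDLES_NOFREE and friends); this works because the slots allocation is aligned strongly enough that those bits are always zero in the raw pointer. The trick in isolation, with a hypothetical two-bit mask:

    /* Valid only if struct my_pool allocations are at least 4-byte aligned. */
    #define MY_FLAG_MASK    0x3UL

    struct my_pool;

    static inline unsigned long my_encode_link(struct my_pool *pool,
                                               unsigned long flags)
    {
            return (unsigned long)pool | (flags & MY_FLAG_MASK);
    }

    static inline struct my_pool *my_decode_link(unsigned long link)
    {
            return (struct my_pool *)(link & ~MY_FLAG_MASK);
    }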
/linux-6.1.9/drivers/net/ethernet/ti/

  k3-cppi-desc-pool.c
      27  void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
      29          if (!pool)
      32          WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
      34               gen_pool_size(pool->gen_pool),
      35               gen_pool_avail(pool->gen_pool));
      36          if (pool->cpumem)
      37                  dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
      38                                    pool->dma_addr);
      40          gen_pool_destroy(pool->gen_pool);       /* frees pool->name */
      48          struct k3_cppi_desc_pool *pool;         (in k3_cppi_desc_pool_create_name())
      [all …]
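The WARN() above is a leak check: a gen_pool tracks both its total capacity and the amount currently free, and the two must match once every descriptor has been returned. The check in isolation:

    #include <linux/genalloc.h>

    /* True when every allocation has been handed back to the pool. */
    static bool my_pool_drained(struct gen_pool *gp)
    {
            return gen_pool_avail(gp) == gen_pool_size(gp);
    }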
/linux-6.1.9/drivers/md/

  dm-thin.c
      229 struct pool {
      287 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
      289 static enum pool_mode get_pool_mode(struct pool *pool)
      291         return pool->pf.mode;
      294 static void notify_of_pool_mode_change(struct pool *pool)
      304         enum pool_mode mode = get_pool_mode(pool);
      307         if (!pool->pf.error_if_no_space)
      313         dm_table_event(pool->ti->table);
      315                 dm_device_name(pool->pool_md),
      324         struct pool *pool;      (struct member)
      [all …]
/linux-6.1.9/sound/core/seq/

  seq_memory.c
      22  static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
      24          return pool->total_elements - atomic_read(&pool->counter);
      27  static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
      29          return snd_seq_pool_available(pool) >= pool->room;
      168 static inline void free_cell(struct snd_seq_pool *pool,
      171         cell->next = pool->free;
      172         pool->free = cell;
      173         atomic_dec(&pool->counter);
      179         struct snd_seq_pool *pool;      (in snd_seq_cell_free())
      183         pool = cell->pool;
      [all …]
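free_cell() pushes a cell onto an intrusive singly-linked free list and decrements the outstanding-cell counter; in the real code the pool's lock is held around this. A stripped-down version of the same structure (names illustrative):

    #include <linux/atomic.h>

    struct my_cell {
            struct my_cell *next;
            /* payload follows */
    };

    struct my_cell_pool {
            struct my_cell *free;   /* head of the free list */
            atomic_t counter;       /* cells currently handed out */
    };

    /* Caller must hold the pool's lock. */
    static void my_free_cell(struct my_cell_pool *pool, struct my_cell *cell)
    {
            cell->next = pool->free;
            pool->free = cell;
            atomic_dec(&pool->counter);
    }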
/linux-6.1.9/include/net/

  xdp_sock_drv.h
      17  void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
      18  bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
      19  u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
      20  void xsk_tx_release(struct xsk_buff_pool *pool);
      23  void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
      24  void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
      25  void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
      26  void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
      27  bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
      29  static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
      [all …]
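Taken together, these hooks are the driver half of AF_XDP zero-copy transmit: peek descriptors from the pool, post them to hardware, release, and later acknowledge completions with xsk_tx_completed(). A condensed sketch of the transmit loop (ring programming elided; my_xsk_xmit() is hypothetical):

    #include <net/xdp_sock_drv.h>

    static u32 my_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
    {
            struct xdp_desc desc;
            u32 sent = 0;

            while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
                    /* post desc.addr / desc.len to the HW TX ring here */
                    sent++;
            }
            if (sent)
                    xsk_tx_release(pool);   /* publish the consumed entries */
            return sent;
    }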
  xsk_buff_pool.h
      26          struct xsk_buff_pool *pool;     (struct member)
      96  int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
      98  int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
      100 int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
      101 void xp_destroy(struct xsk_buff_pool *pool);
      102 void xp_get_pool(struct xsk_buff_pool *pool);
      103 bool xp_put_pool(struct xsk_buff_pool *pool);
      104 void xp_clear_dev(struct xsk_buff_pool *pool);
      105 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
      106 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
      [all …]
  page_pool.h
      129 bool page_pool_get_stats(struct page_pool *pool,
      212 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
      214 static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
      218         return page_pool_alloc_pages(pool, gfp);
      221 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
      224 static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
      230         return page_pool_alloc_frag(pool, offset, size, gfp);
      237 inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
      239         return pool->p.dma_dir;
      249 void page_pool_destroy(struct page_pool *pool);
      [all …]
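The *_dev_* wrappers above just fix the GFP flags to what is safe in softirq context. A sketch of creating a pool and allocating on the RX path (parameter values illustrative):

    #include <net/page_pool.h>

    static struct page_pool *my_create_rx_pool(void)
    {
            struct page_pool_params params = {
                    .order          = 0,            /* single pages */
                    .pool_size      = 256,
                    .nid            = NUMA_NO_NODE,
            };

            return page_pool_create(&params);       /* ERR_PTR() on failure */
    }

    static struct page *my_rx_alloc(struct page_pool *pool)
    {
            /* uses GFP_ATOMIC | __GFP_NOWARN internally */
            return page_pool_dev_alloc_pages(pool);
    }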
/linux-6.1.9/net/ceph/

  msgpool.c
      14          struct ceph_msgpool *pool = arg;        (in msgpool_alloc())
      17          msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items,
      20                  dout("msgpool_alloc %s failed\n", pool->name);
      22                  dout("msgpool_alloc %s %p\n", pool->name, msg);
      23                  msg->pool = pool;
      30          struct ceph_msgpool *pool = arg;        (in msgpool_free())
      33          dout("msgpool_release %s %p\n", pool->name, msg);
      34          msg->pool = NULL;
      38  int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
      43          pool->type = type;
      [all …]
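ceph_msgpool_init() builds on mempool_create() with custom element callbacks, passing the msgpool itself as the opaque pool_data argument that msgpool_alloc()/msgpool_free() receive as arg. The callback shape, reduced to essentials (my_* names and the element size are hypothetical):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static void *my_elem_alloc(gfp_t gfp, void *pool_data)
    {
            return kzalloc(64, gfp);        /* pool_data could carry sizing */
    }

    static void my_elem_free(void *element, void *pool_data)
    {
            kfree(element);
    }

    static mempool_t *my_make_pool(void)
    {
            /* min_nr = 4 preallocated elements */
            return mempool_create(4, my_elem_alloc, my_elem_free, NULL);
    }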
/linux-6.1.9/drivers/staging/media/atomisp/pci/runtime/rmgr/src/

  rmgr_vbuf.c
      128 int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
      134         assert(pool);
      135         if (!pool)
      138         if (pool->recycle && pool->size) {
      142                         pool->size;
      143                 pool->handles = kvmalloc(bytes_needed, GFP_KERNEL);
      144                 if (pool->handles)
      145                         memset(pool->handles, 0, bytes_needed);
      150                 pool->size = 0;
      151                 pool->handles = NULL;
      [all …]
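The kvmalloc()-then-memset() pair above is equivalent to one kvzalloc() call; a sketch of the shorter idiom (an observation, not the driver's actual code):

    #include <linux/slab.h>         /* kvzalloc()/kvfree() */

    static void *my_alloc_handles(size_t bytes_needed)
    {
            return kvzalloc(bytes_needed, GFP_KERNEL);      /* zeroed */
    }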
/linux-6.1.9/drivers/gpu/drm/amd/display/dc/dce80/

  dce80_resource.c
      800 static void dce80_resource_destruct(struct dce110_resource_pool *pool)
      804         for (i = 0; i < pool->base.pipe_count; i++) {
      805                 if (pool->base.opps[i] != NULL)
      806                         dce110_opp_destroy(&pool->base.opps[i]);
      808                 if (pool->base.transforms[i] != NULL)
      809                         dce80_transform_destroy(&pool->base.transforms[i]);
      811                 if (pool->base.ipps[i] != NULL)
      812                         dce_ipp_destroy(&pool->base.ipps[i]);
      814                 if (pool->base.mis[i] != NULL) {
      815                         kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
      [all …]
/linux-6.1.9/drivers/gpu/drm/amd/display/dc/dce60/

  dce60_resource.c
      797 static void dce60_resource_destruct(struct dce110_resource_pool *pool)
      801         for (i = 0; i < pool->base.pipe_count; i++) {
      802                 if (pool->base.opps[i] != NULL)
      803                         dce110_opp_destroy(&pool->base.opps[i]);
      805                 if (pool->base.transforms[i] != NULL)
      806                         dce60_transform_destroy(&pool->base.transforms[i]);
      808                 if (pool->base.ipps[i] != NULL)
      809                         dce_ipp_destroy(&pool->base.ipps[i]);
      811                 if (pool->base.mis[i] != NULL) {
      812                         kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
      [all …]
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/

  irq_affinity.c
      8   static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
      10          pool->irqs_per_cpu[cpu]--;
      13  static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
      15          pool->irqs_per_cpu[cpu]++;
      19  static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
      27                  if (!pool->irqs_per_cpu[cpu]) {
      33                  if (pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
      38          mlx5_core_err(pool->dev, "NO online CPUs in req_mask (%*pbl)\n",
      42          pool->irqs_per_cpu[best_cpu]++;
      48  irq_pool_request_irq(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
      [all …]
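cpu_get_least_loaded() above scans the request mask for an idle CPU first and otherwise takes the one carrying the fewest pool IRQs. The selection logic in isolation (the load array stands in for pool->irqs_per_cpu):

    #include <linux/cpumask.h>
    #include <linux/types.h>

    /* Returns an idle CPU from the mask if any, else the least loaded;
     * -1 if the mask contains no online CPU. */
    static int my_least_loaded_cpu(const u16 *load, const struct cpumask *mask)
    {
            int cpu, best = -1;

            for_each_cpu_and(cpu, mask, cpu_online_mask) {
                    if (!load[cpu])
                            return cpu;
                    if (best < 0 || load[cpu] < load[best])
                            best = cpu;
            }
            return best;
    }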
/linux-6.1.9/drivers/infiniband/sw/rxe/

  rxe_pool.c
      92  void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
      97          memset(pool, 0, sizeof(*pool));
      99          pool->rxe = rxe;
      100         pool->name = info->name;
      101         pool->type = type;
      102         pool->max_elem = info->max_elem;
      103         pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
      104         pool->elem_offset = info->elem_offset;
      105         pool->cleanup = info->cleanup;
      107         atomic_set(&pool->num_elem, 0);
      [all …]
/linux-6.1.9/net/rds/

  ib_rdma.c
      198 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
      204         spin_lock_irqsave(&pool->clean_lock, flags);
      205         ret = llist_del_first(&pool->clean_list);
      206         spin_unlock_irqrestore(&pool->clean_lock, flags);
      209         if (pool->pool_type == RDS_IB_MR_8K_POOL)
      275         struct rds_ib_mr_pool *pool = ibmr->pool;       (in rds_ib_teardown_mr())
      277         atomic_sub(pinned, &pool->free_pinned);
      281 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
      285         item_count = atomic_read(&pool->item_count);
      342 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
      [all …]
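rds_ib_reuse_mr() takes clean_lock around llist_del_first() because llist is only lock-free on the producer side: llist_add() may race freely, but concurrent deleters must be serialized. A sketch of that producer/consumer split (names hypothetical):

    #include <linux/llist.h>
    #include <linux/spinlock.h>

    static LLIST_HEAD(my_clean_list);
    static DEFINE_SPINLOCK(my_clean_lock);

    /* Producers: safe from any context, no lock needed. */
    static void my_push(struct llist_node *node)
    {
            llist_add(node, &my_clean_list);
    }

    /* Consumers: llist_del_first() requires a single deleter at a time. */
    static struct llist_node *my_pop(void)
    {
            struct llist_node *node;
            unsigned long flags;

            spin_lock_irqsave(&my_clean_lock, flags);
            node = llist_del_first(&my_clean_list);
            spin_unlock_irqrestore(&my_clean_lock, flags);
            return node;
    }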
/linux-6.1.9/drivers/net/ethernet/mellanox/mlxsw/

  spectrum_cnt.c
      54          struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;    (in mlxsw_sp_counter_sub_pools_init())
      62          for (i = 0; i < pool->sub_pools_count; i++) {
      63                  sub_pool = &pool->sub_pools[i];
      89                  sub_pool = &pool->sub_pools[i];
      99          struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;    (in mlxsw_sp_counter_sub_pools_fini())
      104         for (i = 0; i < pool->sub_pools_count; i++) {
      105                 sub_pool = &pool->sub_pools[i];
      115         const struct mlxsw_sp_counter_pool *pool = priv;        (in mlxsw_sp_counter_pool_occ_get())
      117         return atomic_read(&pool->active_entries_count);
      124         struct mlxsw_sp_counter_pool *pool;     (in mlxsw_sp_counter_pool_init())
      [all …]
/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/

  page_alloc.c
      33  static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
      45          if (addr < pool->range_start || addr >= pool->range_end)
      52  static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
      56          struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
      93  static void __hyp_attach_page(struct hyp_pool *pool,
      108         for (; (order + 1) < pool->max_order; order++) {
      109                 buddy = __find_buddy_avail(pool, p, order);
      121         page_add_to_list(p, &pool->free_area[order]);
      124 static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
      139         buddy = __find_buddy_nocheck(pool, p, p->order);
      [all …]
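__find_buddy_nocheck() rests on the standard buddy-allocator identity: at order n, a block's buddy is its own address with bit (PAGE_SHIFT + n) flipped, so splits and merges need no search. The core calculation as a sketch (the hyp allocator additionally bounds-checks the result against range_start/range_end, as line 45 shows):

    #include <linux/mm.h>   /* PAGE_SIZE */

    static unsigned long my_buddy_addr(unsigned long addr, unsigned int order)
    {
            return addr ^ (PAGE_SIZE << order);
    }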
/linux-6.1.9/include/linux/

  genalloc.h
      52                  void *data, struct gen_pool *pool,
      97  extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
      101 static inline int gen_pool_add_virt(struct gen_pool *pool, unsigned long addr,
      104         return gen_pool_add_owner(pool, addr, phys, size, nid, NULL);
      119 static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
      122         return gen_pool_add_virt(pool, addr, -1, size, nid);
      125 unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
      128 static inline unsigned long gen_pool_alloc_owner(struct gen_pool *pool,
      131         return gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
      135 static inline unsigned long gen_pool_alloc_algo(struct gen_pool *pool,
      [all …]
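The inline wrappers above all funnel into the *_owner primitives with a NULL owner. The typical lifecycle those calls add up to (region address and sizes illustrative):

    #include <linux/genalloc.h>
    #include <linux/log2.h>

    static int my_gen_pool_demo(void *vaddr)
    {
            struct gen_pool *gp;
            unsigned long chunk;

            gp = gen_pool_create(ilog2(32), -1);    /* 32-byte granules, any node */
            if (!gp)
                    return -ENOMEM;

            if (gen_pool_add(gp, (unsigned long)vaddr, 4096, -1)) {
                    gen_pool_destroy(gp);
                    return -ENOMEM;
            }

            chunk = gen_pool_alloc(gp, 128);
            if (chunk)
                    gen_pool_free(gp, chunk, 128);

            gen_pool_destroy(gp);   /* all allocations must be freed first */
            return 0;
    }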
/linux-6.1.9/drivers/gpu/drm/i915/gt/

  intel_gt_buffer_pool.c
      14  bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
      24          if (n >= ARRAY_SIZE(pool->cache_list))
      25                  n = ARRAY_SIZE(pool->cache_list) - 1;
      27          return &pool->cache_list[n];
      37  static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
      44          for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
      45                  struct list_head *list = &pool->cache_list[n];
      50                  if (spin_trylock_irq(&pool->lock)) {
      73          spin_unlock_irq(&pool->lock);
      89          struct intel_gt_buffer_pool *pool =     (in pool_free_work())
      [all …]
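bucket_for_size() maps a request size to one of a small set of power-of-two cache buckets, clamping anything oversized into the last one. A sketch assuming fls()-based bucketing (only the clamp is visible above; the exact i915 index computation may differ):

    #include <linux/bitops.h>
    #include <linux/mm.h>   /* PAGE_SHIFT */

    #define MY_NR_BUCKETS   4

    static unsigned int my_bucket_for_size(size_t sz)
    {
            unsigned int n = fls(sz >> PAGE_SHIFT);

            if (n >= MY_NR_BUCKETS)         /* oversized: catch-all bucket */
                    n = MY_NR_BUCKETS - 1;
            return n;
    }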
/linux-6.1.9/lib/

  genalloc.c
      155         struct gen_pool *pool;          (in gen_pool_create())
      157         pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
      158         if (pool != NULL) {
      159                 spin_lock_init(&pool->lock);
      160                 INIT_LIST_HEAD(&pool->chunks);
      161                 pool->min_alloc_order = min_alloc_order;
      162                 pool->algo = gen_pool_first_fit;
      163                 pool->data = NULL;
      164                 pool->name = NULL;
      166         return pool;
      [all …]
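gen_pool_create() defaults the pool to first-fit placement, as line 162 shows; callers that prefer another policy can swap the algorithm after creation:

    #include <linux/genalloc.h>

    static void my_tune_pool(struct gen_pool *gp)
    {
            /* best-fit trades search time for less fragmentation */
            gen_pool_set_algo(gp, gen_pool_best_fit, NULL);
    }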
/linux-6.1.9/include/trace/events/

  page_pool.h
      16          TP_PROTO(const struct page_pool *pool,
      19          TP_ARGS(pool, inflight, hold, release),
      22                  __field(const struct page_pool *, pool)
      30                  __entry->pool = pool;
      34                  __entry->cnt = pool->destroy_cnt;
      38                  __entry->pool, __entry->inflight, __entry->hold,
      44          TP_PROTO(const struct page_pool *pool,
      47          TP_ARGS(pool, page, release),
      50                  __field(const struct page_pool *, pool)
      57                  __entry->pool = pool;
      [all …]
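Each block above is one TRACE_EVENT() definition: a prototype (TP_PROTO), the call arguments (TP_ARGS), the fields recorded in the ring buffer, their assignment, and a format string. The anatomy reduced to a hypothetical two-field event (a real trace header also needs the surrounding TRACE_SYSTEM/define_trace boilerplate):

    #include <linux/tracepoint.h>

    TRACE_EVENT(my_pool_event,

            TP_PROTO(const void *pool, s32 inflight),

            TP_ARGS(pool, inflight),

            TP_STRUCT__entry(
                    __field(const void *, pool)
                    __field(s32, inflight)
            ),

            TP_fast_assign(
                    __entry->pool = pool;
                    __entry->inflight = inflight;
            ),

            TP_printk("pool=%p inflight=%d",
                      __entry->pool, __entry->inflight)
    );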