/linux-5.19.10/arch/m68k/atari/
  stram.c
    48    static unsigned long pool_size = 1024*1024;  variable
    57    pool_size = memparse(arg, NULL);  in atari_stram_setup()
    97    stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,  in atari_stram_reserve_pages()
    101   __func__, pool_size, PAGE_SIZE);  in atari_stram_reserve_pages()
    103   stram_pool.end = stram_pool.start + pool_size - 1;  in atari_stram_reserve_pages()
    107   pool_size, &stram_pool);  in atari_stram_reserve_pages()
    126   stram_pool.end = stram_pool.start + pool_size - 1;  in atari_stram_map_pages()
    131   pool_size, &stram_pool);  in atari_stram_map_pages()
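The stram.c hits above follow a common boot-time pattern: a default pool_size that can be overridden from the kernel command line via memparse() and is then reserved early with memblock_alloc_low(). Below is a minimal sketch of that pattern only; the parameter name and all symbols are illustrative, not the actual Atari code.

/* Sketch (not the real stram.c): a boot-time pool whose size comes from a
 * command-line option and is reserved from low memory during early boot.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>

static unsigned long example_pool_size __initdata = 1024 * 1024;  /* 1 MiB default */
static void *example_pool_base __initdata;

static int __init example_pool_setup(char *arg)
{
	if (arg)
		example_pool_size = memparse(arg, NULL);  /* accepts "512k", "2M", ... */
	return 0;
}
early_param("examplepool", example_pool_setup);

void __init example_pool_reserve(void)
{
	example_pool_base = memblock_alloc_low(example_pool_size, PAGE_SIZE);
	if (!example_pool_base)
		panic("%s: failed to reserve %lu bytes\n", __func__, example_pool_size);
}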
/linux-5.19.10/drivers/net/ethernet/mellanox/mlxsw/
  spectrum_cnt.c
    22    u64 pool_size;  member
    139   &pool->pool_size);  in mlxsw_sp_counter_pool_init()
    145   pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);  in mlxsw_sp_counter_pool_init()
    173   WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=  in mlxsw_sp_counter_pool_fini()
    174   pool->pool_size);  in mlxsw_sp_counter_pool_fini()
    231   if (WARN_ON(counter_index >= pool->pool_size))  in mlxsw_sp_counter_free()
    250   u64 pool_size;  in mlxsw_sp_counter_resources_register() local
    259   pool_size = MLXSW_CORE_RES_GET(mlxsw_core, COUNTER_POOL_SIZE);  in mlxsw_sp_counter_resources_register()
    262   devlink_resource_size_params_init(&size_params, pool_size,  in mlxsw_sp_counter_resources_register()
    263   pool_size, bank_size,  in mlxsw_sp_counter_resources_register()
    [all …]
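The spectrum_cnt.c lines show a counter pool whose pool_size indices are tracked in a bitmap obtained from bitmap_zalloc(). The sketch below captures only that bitmap-allocator idea under assumed names; the real driver additionally splits the pool into sub-pools and registers it as a devlink resource.

/* Sketch only: an index allocator over a fixed pool_size, tracked in a
 * bitmap, in the spirit of the mlxsw counter pool. Not the driver's code.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_idx_pool {
	unsigned long *usage;		/* one bit per index */
	unsigned int pool_size;
};

static int example_idx_pool_init(struct example_idx_pool *pool, unsigned int pool_size)
{
	pool->pool_size = pool_size;
	pool->usage = bitmap_zalloc(pool_size, GFP_KERNEL);
	return pool->usage ? 0 : -ENOMEM;
}

static void example_idx_pool_fini(struct example_idx_pool *pool)
{
	/* All indices should have been freed by now. */
	WARN_ON(find_first_bit(pool->usage, pool->pool_size) != pool->pool_size);
	bitmap_free(pool->usage);
}

static int example_idx_alloc(struct example_idx_pool *pool, unsigned int *index)
{
	unsigned int bit = find_first_zero_bit(pool->usage, pool->pool_size);

	if (bit >= pool->pool_size)
		return -ENOBUFS;
	__set_bit(bit, pool->usage);
	*index = bit;
	return 0;
}

static void example_idx_free(struct example_idx_pool *pool, unsigned int index)
{
	if (WARN_ON(index >= pool->pool_size))
		return;
	__clear_bit(index, pool->usage);
}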
/linux-5.19.10/kernel/dma/
  pool.c
    79    static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,  in atomic_pool_expand() argument
    88    order = min(get_order(pool_size), MAX_ORDER-1);  in atomic_pool_expand()
    91    pool_size = 1 << (PAGE_SHIFT + order);  in atomic_pool_expand()
    101   arch_dma_prep_coherent(page, pool_size);  in atomic_pool_expand()
    104   addr = dma_common_contiguous_remap(page, pool_size,  in atomic_pool_expand()
    121   pool_size, NUMA_NO_NODE);  in atomic_pool_expand()
    125   dma_atomic_pool_size_add(gfp, pool_size);  in atomic_pool_expand()
    137   dma_common_free_remap(addr, pool_size);  in atomic_pool_expand()
    162   static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,  in __dma_atomic_pool_init() argument
    174   ret = atomic_pool_expand(pool, pool_size, gfp);  in __dma_atomic_pool_init()
    [all …]
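In pool.c the requested pool_size is first clamped to a whole power-of-two block of pages before anything is allocated. Here is a sketch of just that sizing step, assuming nothing beyond get_order() and alloc_pages(); the DMA preparation, remapping and gen_pool bookkeeping visible in the lines above are deliberately omitted.

/* Sketch of the sizing step only: round a requested pool size up to a
 * power-of-two number of pages, capped at MAX_ORDER, then allocate it.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/minmax.h>

static struct page *example_pool_alloc_pages(size_t *pool_size, gfp_t gfp)
{
	unsigned int order;

	/* get_order() rounds up to the next power-of-two page count. */
	order = min(get_order(*pool_size), MAX_ORDER - 1);
	*pool_size = 1 << (PAGE_SHIFT + order);	/* bytes actually allocated */

	/* Caller frees with __free_pages(page, order). */
	return alloc_pages(gfp, order);
}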
/linux-5.19.10/drivers/staging/media/atomisp/pci/hmm/
  hmm_reserved_pool.c
    86    unsigned int pool_size)  in hmm_reserved_pool_setup() argument
    95    pool_info->pages = kmalloc(sizeof(struct page *) * pool_size,  in hmm_reserved_pool_setup()
    112   static int hmm_reserved_pool_init(void **pool, unsigned int pool_size)  in hmm_reserved_pool_init() argument
    116   unsigned int pgnr = pool_size;  in hmm_reserved_pool_init()
    124   if (pool_size == 0)  in hmm_reserved_pool_init()
    127   ret = hmm_reserved_pool_setup(&repool_info, pool_size);  in hmm_reserved_pool_init()
    133   pgnr = pool_size;  in hmm_reserved_pool_init()
  hmm_dynamic_pool.c
    94    if (dypool_info->pgnr >= dypool_info->pool_size) {  in free_pages_to_dynamic_pool()
    140   static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)  in hmm_dynamic_pool_init() argument
    144   if (pool_size == 0)  in hmm_dynamic_pool_init()
    163   dypool_info->pool_size = pool_size;  in hmm_dynamic_pool_init()
  hmm.c
    153   pinfo->pgnr, pinfo->pool_size);  in dynamic_pool_show()
    675   int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)  in hmm_pool_register() argument
    682   pool_size);  in hmm_pool_register()
    686   pool_size);  in hmm_pool_register()
/linux-5.19.10/drivers/net/ethernet/mscc/
  ocelot_devlink.c
    359   buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -  in ocelot_setup_sharing_watermarks()
    361   buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -  in ocelot_setup_sharing_watermarks()
    363   ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -  in ocelot_setup_sharing_watermarks()
    365   ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -  in ocelot_setup_sharing_watermarks()
    391   if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {  in ocelot_watermark_validate()
    396   if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {  in ocelot_watermark_validate()
    401   if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {  in ocelot_watermark_validate()
    406   if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {  in ocelot_watermark_validate()
    515   pool_info->size = ocelot->pool_size[sb_index][pool_index];  in ocelot_sb_pool_get()
    554   old_pool_size = ocelot->pool_size[sb_index][pool_index];  in ocelot_sb_pool_set()
    [all …]
/linux-5.19.10/arch/mips/cavium-octeon/executive/
  cvmx-cmd-queue.c
    101   int pool_size)  in cvmx_cmd_queue_initialize() argument
    124   if ((pool_size < 128) || (pool_size > 65536))  in cvmx_cmd_queue_initialize()
    143   if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {  in cvmx_cmd_queue_initialize()
    172   qstate->pool_size_m1 = (pool_size >> 3) - 1;  in cvmx_cmd_queue_initialize()
/linux-5.19.10/arch/csky/mm/
  tcm.c
    133   u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)  local
    139   u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
    149   ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
    156   __func__, pool_size, tcm_pool_start);
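tcm.c hands a fixed region of pool_size bytes to a gen_pool and then carves allocations out of it. A minimal genalloc sketch of that idea follows, with a placeholder region address and names.

/* Minimal genalloc sketch in the spirit of arch/csky/mm/tcm.c. */
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/init.h>

static struct gen_pool *example_tcm_pool;

static int __init example_pool_init(unsigned long region_start, size_t pool_size)
{
	int ret;

	/* 2^2 = 4-byte minimum allocation granularity, no NUMA node. */
	example_tcm_pool = gen_pool_create(2, -1);
	if (!example_tcm_pool)
		return -ENOMEM;

	ret = gen_pool_add(example_tcm_pool, region_start, pool_size, -1);
	if (ret) {
		gen_pool_destroy(example_tcm_pool);
		return ret;
	}
	return 0;
}

/* Usage: addr = gen_pool_alloc(example_tcm_pool, len);
 *        gen_pool_free(example_tcm_pool, addr, len);
 */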
/linux-5.19.10/arch/arm/mach-iop32x/
  adma.c
    100   .pool_size = PAGE_SIZE,
    105   .pool_size = PAGE_SIZE,
    110   .pool_size = 3 * PAGE_SIZE,
/linux-5.19.10/drivers/staging/media/atomisp/include/hmm/
  hmm_pool.h
    49    int (*pool_init)(void **pool, unsigned int pool_size);
    104   unsigned int pool_size;  member
  hmm.h
    35    int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type);
/linux-5.19.10/block/
  bio-integrity.c
    425   int bioset_integrity_create(struct bio_set *bs, int pool_size)  in bioset_integrity_create() argument
    431   pool_size, bip_slab))  in bioset_integrity_create()
    434   if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {  in bioset_integrity_create()
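bioset_integrity_create() sizes the integrity mempools from the same pool_size as the owning bio_set. A hedged sketch of the typical caller-side pairing is below; the reserve count and names are illustrative only.

/* Sketch: create a bio_set and give it integrity pools of the same size. */
#include <linux/bio.h>

static struct bio_set example_bio_set;

static int example_bioset_setup(void)
{
	int pool_size = 4;	/* minimum bios/bvecs kept in reserve */
	int ret;

	ret = bioset_init(&example_bio_set, pool_size, 0, BIOSET_NEED_BVECS);
	if (ret)
		return ret;

	/* Real pools only with CONFIG_BLK_DEV_INTEGRITY; otherwise a stub. */
	ret = bioset_integrity_create(&example_bio_set, pool_size);
	if (ret)
		bioset_exit(&example_bio_set);
	return ret;
}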
/linux-5.19.10/include/linux/platform_data/
  dma-iop32x.h
    103   size_t pool_size;  member
/linux-5.19.10/drivers/mtd/ubi/
  fastmap.c
    401   __be32 *pebs, int pool_size, unsigned long long *max_sqnum,  in scan_pool() argument
    422   dbg_bld("scanning fastmap pool: size = %i", pool_size);  in scan_pool()
    428   for (i = 0; i < pool_size; i++) {  in scan_pool()
    561   int ret, i, j, pool_size, wl_pool_size;  in ubi_attach_fastmap() local
    607   pool_size = be16_to_cpu(fmpl->size);  in ubi_attach_fastmap()
    612   if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {  in ubi_attach_fastmap()
    613   ubi_err(ubi, "bad pool size: %i", pool_size);  in ubi_attach_fastmap()
    765   ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);  in ubi_attach_fastmap()
/linux-5.19.10/drivers/dma/ppc4xx/
  adma.h
    70    size_t pool_size;  member
/linux-5.19.10/drivers/misc/
  ibmvmc.h
    111   __be16 pool_size; /* Maximum number of buffers supported per HMC  member
/linux-5.19.10/drivers/net/ethernet/intel/i40e/
  i40e_dcb.c
    1633  u32 pool_size[I40E_MAX_TRAFFIC_CLASS];  in i40e_dcb_hw_calculate_pool_sizes() local
    1665  pool_size[i] = high_wm[i];  in i40e_dcb_hw_calculate_pool_sizes()
    1666  pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max,  in i40e_dcb_hw_calculate_pool_sizes()
    1670  pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR *  in i40e_dcb_hw_calculate_pool_sizes()
    1672  high_wm[i] = pool_size[i];  in i40e_dcb_hw_calculate_pool_sizes()
    1674  total_pool_size += pool_size[i];  in i40e_dcb_hw_calculate_pool_sizes()
    1685  pb_cfg->tc_pool_size[i] = pool_size[i];  in i40e_dcb_hw_calculate_pool_sizes()
/linux-5.19.10/drivers/net/ethernet/ibm/
  ibmveth.h
    102   static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };  variable
/linux-5.19.10/drivers/dma/
  mv_xor.h
    117   size_t pool_size;  member
/linux-5.19.10/drivers/net/ethernet/neterion/vxge/
  vxge-config.c
    1119  blockpool->pool_size--;  in __vxge_hw_blockpool_destroy()
    1136  u32 pool_size,  in __vxge_hw_blockpool_create() argument
    1154  blockpool->pool_size = 0;  in __vxge_hw_blockpool_create()
    1161  for (i = 0; i < pool_size + pool_max; i++) {  in __vxge_hw_blockpool_create()
    1172  for (i = 0; i < pool_size; i++) {  in __vxge_hw_blockpool_create()
    1213  blockpool->pool_size++;  in __vxge_hw_blockpool_create()
    2297  blockpool->pool_size++;  in vxge_hw_blockpool_block_add()
    2323  if ((blockpool->pool_size + blockpool->req_out) <  in __vxge_hw_blockpool_blocks_add()
    2385  blockpool->pool_size--;  in __vxge_hw_blockpool_malloc()
    2405  if (blockpool->pool_size < blockpool->pool_max)  in __vxge_hw_blockpool_blocks_remove()
    [all …]
/linux-5.19.10/Documentation/networking/
  page_pool.rst
    70    * pool_size: size of the ptr_ring
    165   pp_params.pool_size = DESC_NUM;
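page_pool.rst documents pool_size as the size of the pool's internal ptr_ring. The sketch below fills struct page_pool_params along the lines of that documentation; the descriptor count, flags, and device pointer are placeholders for whatever a real driver would use.

/* Sketch: create a page_pool whose ptr_ring holds EXAMPLE_DESC_NUM pages. */
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/numa.h>
#include <net/page_pool.h>

#define EXAMPLE_DESC_NUM 256

static struct page_pool *example_page_pool_create(struct device *dev)
{
	struct page_pool_params pp_params = {
		.order		= 0,			/* single pages */
		.flags		= 0,			/* no DMA mapping by the pool */
		.pool_size	= EXAMPLE_DESC_NUM,	/* size of the ptr_ring */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}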
/linux-5.19.10/drivers/ufs/core/
  ufshpb.c
    2451  int pool_size;  in ufshpb_hpb_lu_prepared() local
    2463  pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;  in ufshpb_hpb_lu_prepared()
    2464  if (pool_size > tot_active_srgn_pages) {  in ufshpb_hpb_lu_prepared()
    2519  unsigned int pool_size;  in ufshpb_init_mem_wq() local
    2529  pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE;  in ufshpb_init_mem_wq()
    2531  __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);  in ufshpb_init_mem_wq()
    2533  ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,  in ufshpb_init_mem_wq()
    2541  ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);  in ufshpb_init_mem_wq()
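ufshpb.c derives pool_size (in pages) from a kilobyte count and then backs both a slab mempool and a page mempool with it. Here is a sketch of that mempool pattern under assumed names and a placeholder slab cache, not the driver's own code.

/* Sketch: size two mempools from a kilobyte budget converted to pages. */
#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;
static mempool_t *example_ctx_pool;
static mempool_t *example_page_pool;

static int example_mempools_init(unsigned int map_kbytes)
{
	unsigned int pool_size = PAGE_ALIGN(map_kbytes * 1024) / PAGE_SIZE;

	example_cache = kmem_cache_create("example_ctx", 64, 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* Guarantees at least pool_size objects even under memory pressure. */
	example_ctx_pool = mempool_create_slab_pool(pool_size, example_cache);
	if (!example_ctx_pool)
		goto free_cache;

	/* Order-0 pages, same minimum count. */
	example_page_pool = mempool_create_page_pool(pool_size, 0);
	if (!example_page_pool)
		goto free_ctx_pool;

	return 0;

free_ctx_pool:
	mempool_destroy(example_ctx_pool);
free_cache:
	kmem_cache_destroy(example_cache);
	return -ENOMEM;
}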
/linux-5.19.10/include/net/
  page_pool.h
    77    unsigned int pool_size;  member
/linux-5.19.10/arch/powerpc/platforms/pseries/
  lparcfg.c
    279   if (mpp_data.pool_size != -1)  in parse_mpp_data()
    281   mpp_data.pool_size);  in parse_mpp_data()