/linux-5.19.10/tools/testing/selftests/drivers/net/mlxsw/ |
D | sharedbuffer_configuration.py |
    28 def __init__(self, pools): argument
    30 for pool in pools:
    187 pools = PoolList()
    190 pools.append(Pool(pooldict))
    191 return pools
    194 def do_check_pools(dlname, pools, vp): argument
    195 for pool in pools:
    219 def check_pools(dlname, pools): argument
    221 record_vp = RecordValuePicker(pools)
    224 do_check_pools(dlname, pools, RandomValuePicker(pools))
    [all …]
|
/linux-5.19.10/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | pool.c |
    25 if (!xsk->pools) { in mlx5e_xsk_get_pools()
    26 xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS, in mlx5e_xsk_get_pools()
    27 sizeof(*xsk->pools), GFP_KERNEL); in mlx5e_xsk_get_pools()
    28 if (unlikely(!xsk->pools)) in mlx5e_xsk_get_pools()
    41 kfree(xsk->pools); in mlx5e_xsk_put_pools()
    42 xsk->pools = NULL; in mlx5e_xsk_put_pools()
    54 xsk->pools[ix] = pool; in mlx5e_xsk_add_pool()
    60 xsk->pools[ix] = NULL; in mlx5e_xsk_remove_pool()
|
D | pool.h |
    12 if (!xsk || !xsk->pools) in mlx5e_xsk_get_pool()
    18 return xsk->pools[ix]; in mlx5e_xsk_get_pool()
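
The mlx5 XSK hits above show one recurring pattern: the driver keeps a lazily allocated array of buffer-pool pointers, one slot per channel, created with kcalloc() on first use, looked up by channel index, and cleared again on teardown. A minimal sketch of that pattern follows, assuming nothing beyond what the snippets show; the names my_xsk, MY_MAX_NUM_CHANNELS and the refcounting detail are illustrative guesses, not the driver's real API.

    /* Hypothetical reduction of the mlx5e_xsk_{get,put,add,remove}_pool pattern. */
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct xsk_buff_pool;                   /* opaque here */

    #define MY_MAX_NUM_CHANNELS 128         /* stand-in for MLX5E_MAX_NUM_CHANNELS */

    struct my_xsk {
            struct xsk_buff_pool **pools;   /* one slot per channel, allocated on demand */
            u16 refcnt;                     /* assumed from the get/put naming */
    };

    static int my_xsk_get_pools(struct my_xsk *xsk)
    {
            if (!xsk->pools) {
                    xsk->pools = kcalloc(MY_MAX_NUM_CHANNELS,
                                         sizeof(*xsk->pools), GFP_KERNEL);
                    if (unlikely(!xsk->pools))
                            return -ENOMEM;
            }
            xsk->refcnt++;
            return 0;
    }

    static void my_xsk_put_pools(struct my_xsk *xsk)
    {
            if (--xsk->refcnt)
                    return;
            kfree(xsk->pools);
            xsk->pools = NULL;
    }

    static void my_xsk_add_pool(struct my_xsk *xsk, u16 ix, struct xsk_buff_pool *pool)
    {
            xsk->pools[ix] = pool;          /* slot owned by channel ix */
    }

    static void my_xsk_remove_pool(struct my_xsk *xsk, u16 ix)
    {
            xsk->pools[ix] = NULL;
    }

The pool.h lookup is then just a bounds-and-NULL check before returning xsk->pools[ix].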
|
/linux-5.19.10/arch/sparc/kernel/ |
D | iommu-common.c |
    82 spin_lock_init(&(iommu->pools[i].lock)); in iommu_tbl_pool_init()
    83 iommu->pools[i].start = start; in iommu_tbl_pool_init()
    84 iommu->pools[i].hint = start; in iommu_tbl_pool_init()
    86 iommu->pools[i].end = start - 1; in iommu_tbl_pool_init()
    131 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
    161 pool = &(iommu->pools[0]); in iommu_tbl_range_alloc()
    193 pool = &(iommu->pools[pool_nr]); in iommu_tbl_range_alloc()
    237 p = &tbl->pools[pool_nr]; in get_pool()
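
Here the IOMMU arena is split into several pools, each with its own spinlock, start/end bounds and allocation hint, so concurrent mappings do not all serialize on one lock; allocation picks a pool by number and falls back to pool 0. A rough sketch of that initialisation and pool selection, assuming invented names (my_pool, MY_NR_POOLS, table_size) and a simple pick-by-CPU policy where the snippet does not show the real one:

    #include <linux/smp.h>
    #include <linux/spinlock.h>

    #define MY_NR_POOLS 16                  /* stand-in for IOMMU_NR_POOLS */

    struct my_pool {
            spinlock_t    lock;
            unsigned long start;
            unsigned long end;
            unsigned long hint;             /* where the next search begins */
    };

    struct my_iommu_map_table {
            unsigned long  table_size;
            struct my_pool pools[MY_NR_POOLS];
    };

    /* Carve the arena into equal pools, as iommu_tbl_pool_init() does. */
    static void my_tbl_pool_init(struct my_iommu_map_table *iommu)
    {
            unsigned long start = 0;
            unsigned long span = iommu->table_size / MY_NR_POOLS;
            int i;

            for (i = 0; i < MY_NR_POOLS; i++) {
                    spin_lock_init(&iommu->pools[i].lock);
                    iommu->pools[i].start = start;
                    iommu->pools[i].hint = start;
                    start += span;
                    iommu->pools[i].end = start - 1;
            }
    }

    /* Spread allocations across pools (here: by CPU) to reduce lock contention. */
    static struct my_pool *my_pick_pool(struct my_iommu_map_table *iommu)
    {
            unsigned int pool_nr = raw_smp_processor_id() % MY_NR_POOLS;

            return &iommu->pools[pool_nr];
    }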
|
/linux-5.19.10/mm/ |
D | dmapool.c |
    51 struct list_head pools; member
    81 list_for_each_entry(pool, &dev->dma_pools, pools) { in pools_show()
    105 static DEVICE_ATTR_RO(pools);
    169 INIT_LIST_HEAD(&retval->pools); in dma_pool_create()
    183 list_add(&retval->pools, &dev->dma_pools); in dma_pool_create()
    191 list_del(&retval->pools); in dma_pool_create()
    277 list_del(&pool->pools); in dma_pool_destroy()
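
In mm/dmapool.c the "pools" hits are the list plumbing that ties each dma_pool to its owning device: the pool embeds a list_head named pools, dma_pool_create() links it onto dev->dma_pools, dma_pool_destroy() unlinks it, and the read-only "pools" sysfs attribute walks the list. A stripped-down sketch of that membership pattern; the struct is simplified and the pools_lock serialization and real pool internals are omitted:

    #include <linux/device.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct my_dma_pool {
            char             name[32];
            struct device    *dev;
            struct list_head pools;         /* link in dev->dma_pools */
    };

    static struct my_dma_pool *my_pool_create(const char *name, struct device *dev)
    {
            struct my_dma_pool *retval = kzalloc(sizeof(*retval), GFP_KERNEL);

            if (!retval)
                    return NULL;

            strscpy(retval->name, name, sizeof(retval->name));
            retval->dev = dev;
            INIT_LIST_HEAD(&retval->pools);

            /* Publish the pool on the device's list so sysfs can enumerate it. */
            list_add(&retval->pools, &dev->dma_pools);
            return retval;
    }

    static void my_pool_destroy(struct my_dma_pool *pool)
    {
            list_del(&pool->pools);
            kfree(pool);
    }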
|
/linux-5.19.10/drivers/net/ethernet/chelsio/libcxgb/ |
D | libcxgb_ppm.c |
    348 struct cxgbi_ppm_pool *pools; in ppm_alloc_cpu_pool() local
    350 unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; in ppm_alloc_cpu_pool()
    367 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; in ppm_alloc_cpu_pool()
    368 pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); in ppm_alloc_cpu_pool()
    370 if (!pools) in ppm_alloc_cpu_pool()
    374 struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); in ppm_alloc_cpu_pool()
    384 return pools; in ppm_alloc_cpu_pool()
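
ppm_alloc_cpu_pool() allocates one pool per CPU, each being a small header followed immediately by a bitmap, and the whole variably-sized object comes from __alloc_percpu(); the size computation sizeof(*pools) + sizeof(unsigned long) * bmap is the key idea. A hedged sketch of the same allocation shape, with invented names (my_cpu_pool, entries_per_cpu) and simplified per-pool setup:

    #include <linux/bitmap.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct my_cpu_pool {
            spinlock_t    lock;
            unsigned int  base;             /* first index owned by this CPU */
            unsigned long bmap[];           /* trailing allocation bitmap */
    };

    static struct my_cpu_pool __percpu *my_alloc_cpu_pool(unsigned int entries_per_cpu)
    {
            struct my_cpu_pool __percpu *pools;
            struct my_cpu_pool *ppool;
            unsigned int bmap_longs = BITS_TO_LONGS(entries_per_cpu);
            size_t alloc_sz = sizeof(*ppool) + sizeof(unsigned long) * bmap_longs;
            int cpu;

            /* One variably-sized pool per CPU, aligned like the struct itself. */
            pools = __alloc_percpu(alloc_sz, __alignof__(struct my_cpu_pool));
            if (!pools)
                    return NULL;

            for_each_possible_cpu(cpu) {
                    ppool = per_cpu_ptr(pools, cpu);
                    spin_lock_init(&ppool->lock);
                    ppool->base = cpu * entries_per_cpu;
                    bitmap_zero(ppool->bmap, entries_per_cpu);
            }

            return pools;
    }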
|
/linux-5.19.10/Documentation/core-api/ |
D | workqueue.rst |
    60 * Use per-CPU unified worker pools shared by all wq to provide
    83 called worker-pools.
    87 which manages worker-pools and processes the queued work items.
    89 There are two worker-pools, one for normal work items and the other
    91 worker-pools to serve work items queued on unbound workqueues - the
    92 number of these backing pools is dynamic.
    132 For unbound workqueues, the number of backing pools is dynamic.
    135 backing worker pools matching the attributes. The responsibility of
    169 worker-pools which host workers which are not bound to any
    172 worker-pools try to start execution of work items as soon as
    [all …]
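
These workqueue.rst lines describe the concurrency-managed workqueue design: each CPU has two worker-pools (normal and high-priority), and a dynamic set of unbound pools backs WQ_UNBOUND workqueues whose attributes they match. From a driver's perspective the pools are invisible; the choice is made through the flags passed when creating the workqueue. A small usage sketch using only the standard workqueue API, with hypothetical names (my_wq, my_work_fn):

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* runs in process context on a worker from some backing pool */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static struct workqueue_struct *my_wq;

    static int my_init(void)
    {
            /*
             * WQ_UNBOUND: serviced by the dynamic unbound worker-pools rather
             * than the per-CPU ones; WQ_HIGHPRI instead selects the
             * high-priority per-CPU pool.
             */
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
            if (!my_wq)
                    return -ENOMEM;

            queue_work(my_wq, &my_work);
            return 0;
    }

    static void my_exit(void)
    {
            flush_workqueue(my_wq);
            destroy_workqueue(my_wq);
    }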
|
D | mm-api.rst |
    91 Memory pools
    97 DMA pools
|
/linux-5.19.10/drivers/soc/ti/ |
D | knav_qmss.h |
    203 struct list_head pools; member
    304 struct list_head pools; member
    363 list_for_each_entry(pool, &kdev->pools, list)
|
D | knav_qmss_queue.c |
    818 node = &region->pools; in knav_pool_create()
    819 list_for_each_entry(iter, &region->pools, region_inst) { in knav_pool_create()
    833 list_add_tail(&pool->list, &kdev->pools); in knav_pool_create()
    1035 list_add(&pool->region_inst, &region->pools); in knav_queue_setup_region()
    1119 INIT_LIST_HEAD(&region->pools); in knav_queue_setup_regions()
    1357 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) in knav_queue_free_regions()
    1781 INIT_LIST_HEAD(&kdev->pools); in knav_queue_probe()
|
/linux-5.19.10/arch/powerpc/kernel/ |
D | iommu.c |
    242 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
    270 pool = &(tbl->pools[0]); in iommu_range_alloc()
    292 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
    419 p = &tbl->pools[pool_nr]; in get_pool()
    745 p = &tbl->pools[i]; in iommu_init_table()
    1108 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
    1118 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
    1131 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_release_ownership()
    1139 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
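
The powerpc iommu.c hits mirror the sparc pattern above (pick tbl->pools[pool_nr], fall back to pool 0) and add one detail: when taking or releasing ownership of a table, every small-pool lock is acquired nested under the large-pool lock via spin_lock_nest_lock(), so lockdep accepts holding several locks of the same class at once. A sketch of that locking idiom with simplified structures and placeholder names:

    #include <linux/spinlock.h>

    #define MY_NR_POOLS 4

    struct my_pool {
            spinlock_t lock;
    };

    struct my_iommu_table {
            struct my_pool large_pool;              /* for large allocations */
            struct my_pool pools[MY_NR_POOLS];      /* small-allocation pools */
    };

    /* Quiesce all allocators: hold every pool lock at once. */
    static void my_take_ownership(struct my_iommu_table *tbl)
    {
            int i;

            spin_lock_irq(&tbl->large_pool.lock);
            for (i = 0; i < MY_NR_POOLS; i++) {
                    /* same lock class; tell lockdep they nest under large_pool */
                    spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
            }

            /* ... flip the table over to the new owner here ... */

            for (i = 0; i < MY_NR_POOLS; i++)
                    spin_unlock(&tbl->pools[i].lock);
            spin_unlock_irq(&tbl->large_pool.lock);
    }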
|
/linux-5.19.10/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt |
    6 processors(PDSP), linking RAM, descriptor pools and infrastructure
    49 - queue-pools : child node classifying the queue ranges into pools.
    50 Queue ranges are grouped into 3 type of pools:
    151 queue-pools {
|
/linux-5.19.10/arch/sparc/include/asm/ |
D | iommu-common.h | 26 struct iommu_pool pools[IOMMU_NR_POOLS]; member
|
/linux-5.19.10/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ |
D | ethernet-driver.rst |
    26 - buffer pools
    69 DPBPs represent hardware buffer pools. Packet I/O is performed in the context
    124 The role of hardware buffer pools is storage of ingress frame data. Each network
|
D | overview.rst |
    25 The MC uses DPAA2 hardware resources such as queues, buffer pools, and
    56 | -buffer pools -DPMCP |
    360 - DPBPs for network buffer pools
|
/linux-5.19.10/Documentation/arm/keystone/ |
D | knav-qmss.rst |
    12 processors(PDSP), linking RAM, descriptor pools and infrastructure
    25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
|
/linux-5.19.10/drivers/md/ |
D | dm.c |
    2989 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools() local
    2994 if (!pools) in dm_alloc_md_mempools()
    3003 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0); in dm_alloc_md_mempools()
    3006 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) in dm_alloc_md_mempools()
    3018 ret = bioset_init(&pools->bs, pool_size, front_pad, 0); in dm_alloc_md_mempools()
    3022 if (integrity && bioset_integrity_create(&pools->bs, pool_size)) in dm_alloc_md_mempools()
    3025 return pools; in dm_alloc_md_mempools()
    3028 dm_free_md_mempools(pools); in dm_alloc_md_mempools()
    3033 void dm_free_md_mempools(struct dm_md_mempools *pools) in dm_free_md_mempools() argument
    3035 if (!pools) in dm_free_md_mempools()
    [all …]
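
dm_alloc_md_mempools() allocates a struct holding two bio_sets (io_bs and bs), initialises each with bioset_init(), optionally attaches integrity payloads, and on any failure unwinds through dm_free_md_mempools(). A reduced sketch of that allocate/init/error-unwind shape; the struct name, parameters and the omission of the integrity and percpu-cache options are simplifications of the real code:

    #include <linux/bio.h>
    #include <linux/slab.h>

    struct my_md_mempools {
            struct bio_set bs;      /* pool for cloned bios */
            struct bio_set io_bs;   /* pool for top-level io structures */
    };

    static void my_free_md_mempools(struct my_md_mempools *pools)
    {
            if (!pools)
                    return;
            bioset_exit(&pools->bs);
            bioset_exit(&pools->io_bs);
            kfree(pools);
    }

    static struct my_md_mempools *my_alloc_md_mempools(unsigned int pool_size,
                                                       unsigned int front_pad,
                                                       unsigned int io_front_pad)
    {
            struct my_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);

            if (!pools)
                    return NULL;

            if (bioset_init(&pools->io_bs, pool_size, io_front_pad, 0))
                    goto out_free;
            if (bioset_init(&pools->bs, pool_size, front_pad, 0))
                    goto out_free;

            return pools;

    out_free:
            /* bioset_exit() is safe on a still-zeroed bio_set */
            my_free_md_mempools(pools);
            return NULL;
    }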
|
D | dm.h | 224 void dm_free_md_mempools(struct dm_md_mempools *pools);
|
/linux-5.19.10/drivers/soc/fsl/qbman/ |
D | qman_priv.h | 177 u32 pools; member
|
D | qman_portal.c | 248 pcfg->pools = qm_get_pools_sdqcr(); in qman_portal_probe()
|
/linux-5.19.10/arch/arm/boot/dts/ |
D | keystone-k2g-netcp.dtsi | 36 queue-pools {
|
D | keystone-k2l-netcp.dtsi | 35 queue-pools {
|
D | keystone-k2e-netcp.dtsi | 35 queue-pools {
|
/linux-5.19.10/Documentation/devicetree/bindings/misc/ |
D | fsl,qoriq-mc.txt |
    6 block is enabled, pools of hardware resources are available, such as
    7 queues, buffer pools, I/O interfaces. These resources are building
|
/linux-5.19.10/arch/powerpc/include/asm/ |
D | iommu.h | 105 struct iommu_pool pools[IOMMU_NR_POOLS]; member
|