/linux-5.19.10/tools/testing/memblock/tests/
D | basic_api.c
    34  struct memblock_region *rgn;  in memblock_add_simple_check() local
    36  rgn = &memblock.memory.regions[0];  in memblock_add_simple_check()
    46  assert(rgn->base == r.base);  in memblock_add_simple_check()
    47  assert(rgn->size == r.size);  in memblock_add_simple_check()
    63  struct memblock_region *rgn;  in memblock_add_node_simple_check() local
    65  rgn = &memblock.memory.regions[0];  in memblock_add_node_simple_check()
    75  assert(rgn->base == r.base);  in memblock_add_node_simple_check()
    76  assert(rgn->size == r.size);  in memblock_add_node_simple_check()
    78  assert(rgn->nid == 1);  in memblock_add_node_simple_check()
    80  assert(rgn->flags == MEMBLOCK_HOTPLUG);  in memblock_add_node_simple_check()
    [all …]
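The basic_api.c checks above all follow the same shape: register memory with memblock_add() and then assert on the first entry of memblock.memory.regions. A minimal sketch of that pattern, assuming the userspace harness helpers reset_memblock_regions() and struct region from tools/testing/memblock/tests/common.h (the function name here is illustrative, not one of the listed tests):

static int memblock_add_sketch_check(void)
{
	struct memblock_region *rgn = &memblock.memory.regions[0];
	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	/* The new region should be the first (and only) memory region. */
	assert(memblock.memory.cnt == 1);
	assert(rgn->base == r.base);
	assert(rgn->size == r.size);

	return 0;
}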
D | alloc_helpers_api.c
    20   struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_from_simple_generic_check() local
    37   assert(rgn->size == size);  in alloc_from_simple_generic_check()
    38   assert(rgn->base == min_addr);  in alloc_from_simple_generic_check()
    63   struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_from_misaligned_generic_check() local
    81   assert(rgn->size == size);  in alloc_from_misaligned_generic_check()
    82   assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);  in alloc_from_misaligned_generic_check()
    110  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_from_top_down_high_addr_check() local
    124  assert(rgn->size == size);  in alloc_from_top_down_high_addr_check()
    125  assert(rgn->base == memblock_end_of_DRAM() - SMP_CACHE_BYTES);  in alloc_from_top_down_high_addr_check()
    151  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_from_top_down_no_space_above_check() local
    [all …]
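alloc_from_simple_generic_check() and the other checks above exercise memblock_alloc_from(), which bounds the allocation from below. A hedged sketch of the idea, assuming the harness's setup_memblock() helper and the default top-down allocation policy: with min_addr one cache line below the end of DRAM, the only aligned slot is at min_addr itself.

static int alloc_from_sketch_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_16;
	phys_addr_t min_addr;
	void *ptr;

	setup_memblock();
	min_addr = memblock_end_of_DRAM() - SMP_CACHE_BYTES;

	ptr = memblock_alloc_from(size, SMP_CACHE_BYTES, min_addr);

	/* Only one aligned slot exists above min_addr, so base == min_addr. */
	assert(ptr);
	assert(rgn->size == size);
	assert(rgn->base == min_addr);

	return 0;
}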
D | alloc_nid_api.c
    20   struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_try_nid_top_down_simple_check() local
    37   rgn_end = rgn->base + rgn->size;  in alloc_try_nid_top_down_simple_check()
    42   assert(rgn->size == size);  in alloc_try_nid_top_down_simple_check()
    43   assert(rgn->base == max_addr - size);  in alloc_try_nid_top_down_simple_check()
    71   struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_try_nid_top_down_end_misaligned_check() local
    89   rgn_end = rgn->base + rgn->size;  in alloc_try_nid_top_down_end_misaligned_check()
    94   assert(rgn->size == size);  in alloc_try_nid_top_down_end_misaligned_check()
    95   assert(rgn->base == max_addr - size - misalign);  in alloc_try_nid_top_down_end_misaligned_check()
    121  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_try_nid_exact_address_generic_check() local
    138  rgn_end = rgn->base + rgn->size;  in alloc_try_nid_exact_address_generic_check()
    [all …]
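The alloc_nid_api.c checks target memblock_alloc_try_nid(), which takes both a lower and an upper bound plus a NUMA node. A sketch of the top-down case, again assuming setup_memblock() and cache-line-aligned bounds: the block ends exactly at max_addr, so its base is max_addr - size.

static int alloc_try_nid_sketch_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr, max_addr;
	void *ptr;

	setup_memblock();
	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + SZ_512;

	ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, min_addr,
				     max_addr, NUMA_NO_NODE);

	/* Top-down within [min_addr, max_addr]: the block abuts max_addr. */
	assert(ptr);
	assert(rgn->size == size);
	assert(rgn->base == max_addr - size);

	return 0;
}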
D | alloc_api.c
    10   struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_simple_check() local
    23   assert(rgn->size == size);  in alloc_top_down_simple_check()
    24   assert(rgn->base == expected_start);  in alloc_top_down_simple_check()
    101  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_before_check() local
    118  assert(rgn->size == total_size);  in alloc_top_down_before_check()
    119  assert(rgn->base == memblock_end_of_DRAM() - total_size);  in alloc_top_down_before_check()
    140  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_after_check() local
    162  assert(rgn->size == total_size);  in alloc_top_down_after_check()
    163  assert(rgn->base == r1.base - r2_size);  in alloc_top_down_after_check()
    185  struct memblock_region *rgn = &memblock.reserved.regions[0];  in alloc_top_down_second_fit_check() local
    [all …]
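alloc_api.c covers plain memblock_alloc(). With no explicit bounds and the default top-down policy, the first allocation is carved from the very end of DRAM; a minimal sketch, once more assuming the harness's setup_memblock():

static int alloc_top_down_sketch_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	phys_addr_t size = SMP_CACHE_BYTES;
	void *ptr;

	setup_memblock();
	ptr = memblock_alloc(size, SMP_CACHE_BYTES);

	/* Default policy is top-down: the block ends at memblock_end_of_DRAM(). */
	assert(ptr);
	assert(rgn->size == size);
	assert(rgn->base == memblock_end_of_DRAM() - size);

	return 0;
}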
/linux-5.19.10/drivers/ufs/core/
D | ufshpb.c
    67   static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,  in ufshpb_is_valid_srgn() argument
    70   return rgn->rgn_state != HPB_RGN_INACTIVE &&  in ufshpb_is_valid_srgn()
    149  struct ufshpb_region *rgn;  in ufshpb_iterate_rgn() local
    156  rgn = hpb->rgn_tbl + rgn_idx;  in ufshpb_iterate_rgn()
    157  srgn = rgn->srgn_tbl + srgn_idx;  in ufshpb_iterate_rgn()
    170  if (rgn->rgn_state != HPB_RGN_INACTIVE) {  in ufshpb_iterate_rgn()
    177  rgn->read_timeout = ktime_add_ms(ktime_get(),  in ufshpb_iterate_rgn()
    178  rgn->hpb->params.read_timeout_ms);  in ufshpb_iterate_rgn()
    179  rgn->read_timeout_expiries =  in ufshpb_iterate_rgn()
    180  rgn->hpb->params.read_timeout_expiries;  in ufshpb_iterate_rgn()
    [all …]
D | ufshpb.h
    141  #define for_each_sub_region(rgn, i, srgn) \  argument
    143  ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
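for_each_sub_region() walks the srgn_tbl[] of one HPB region. A small usage sketch; ufshpb_count_valid_srgns() is a hypothetical helper, not part of the driver, and it assumes the HPB_SRGN_VALID state that ufshpb_is_valid_srgn() above checks for:

static int ufshpb_count_valid_srgns(struct ufshpb_region *rgn)
{
	struct ufshpb_subregion *srgn;
	int srgn_idx, cnt = 0;

	/* Visit every sub-region belonging to this region. */
	for_each_sub_region(rgn, srgn_idx, srgn)
		if (srgn->srgn_state == HPB_SRGN_VALID)
			cnt++;

	return cnt;
}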
/linux-5.19.10/arch/powerpc/platforms/powernv/
D | opal-fadump.c
    104  fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);  in opal_fadump_update_config()
    130  base = be64_to_cpu(fdm->rgn[i].src);  in opal_fadump_get_config()
    131  size = be64_to_cpu(fdm->rgn[i].size);  in opal_fadump_get_config()
    146  fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);  in opal_fadump_get_config()
    164  i, be64_to_cpu(fdm->rgn[i].src),  in opal_fadump_get_config()
    165  be64_to_cpu(fdm->rgn[i].size));  in opal_fadump_get_config()
    199  opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);  in opal_fadump_init_mem_struct()
    200  opal_fdm->rgn[i].dest = cpu_to_be64(addr);  in opal_fadump_init_mem_struct()
    201  opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);  in opal_fadump_init_mem_struct()
    212  opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +  in opal_fadump_init_mem_struct()
    [all …]
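The fadump region table is kept in firmware byte order, so every read goes through be64_to_cpu() and every write through cpu_to_be64(). A hedged sketch of a dump helper in that style; the function is hypothetical, and the caller is assumed to pass the entry count already converted to CPU endianness:

static void opal_fadump_dump_regions(const struct opal_mpipl_region *rgn,
				     int count)
{
	int i;

	/* Each entry stores src/dest/size as big-endian 64-bit values. */
	for (i = 0; i < count; i++)
		pr_debug("rgn[%d]: src 0x%llx, dest 0x%llx, size 0x%llx\n",
			 i, be64_to_cpu(rgn[i].src),
			 be64_to_cpu(rgn[i].dest),
			 be64_to_cpu(rgn[i].size));
}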
D | opal-fadump.h
    42   struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];  member
/linux-5.19.10/mm/
D | memblock.c
    143  #define for_each_memblock_type(i, memblock_type, rgn) \  argument
    144  for (i = 0, rgn = &memblock_type->regions[0]; \
    146  i++, rgn = &memblock_type->regions[i])
    545  struct memblock_region *rgn = &type->regions[idx];  in memblock_insert_region() local
    548  memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));  in memblock_insert_region()
    549  rgn->base = base;  in memblock_insert_region()
    550  rgn->size = size;  in memblock_insert_region()
    551  rgn->flags = flags;  in memblock_insert_region()
    552  memblock_set_region_node(rgn, nid);  in memblock_insert_region()
    581  struct memblock_region *rgn;  in memblock_add_range() local
    [all …]
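for_each_memblock_type() is the file-local iterator mm/memblock.c uses to walk one region array; memblock_dump() in the same file is built on it. A sketch along those lines, assuming it lives in memblock.c where the macro is visible (the helper itself is hypothetical):

static void memblock_dump_type_sketch(struct memblock_type *type)
{
	struct memblock_region *rgn;
	phys_addr_t end;
	int idx;

	/* Walk every region of this type and print its extent and flags. */
	for_each_memblock_type(idx, type, rgn) {
		end = rgn->base + rgn->size - 1;
		pr_info(" %s[%d]\t[%pa-%pa], flags: %#x\n",
			type->name, idx, &rgn->base, &end, rgn->flags);
	}
}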
/linux-5.19.10/drivers/mtd/nand/onenand/
D | onenand_bbt.c
    65   int rgn;  in create_bbt() local
    113  rgn = flexonenand_region(mtd, from);  in create_bbt()
    114  from += mtd->eraseregions[rgn].erasesize;  in create_bbt()
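create_bbt() steps through a Flex-OneNAND device block by block; because erase sizes differ between regions, the stride is looked up for each offset. A sketch of just that stepping, assuming a Flex-OneNAND mtd with eraseregions[] populated (the per-block scan work itself is elided):

static void scan_blocks_sketch(struct mtd_info *mtd)
{
	loff_t from = 0;
	int rgn;

	while (from < (loff_t)mtd->size) {
		/* ... examine the block starting at 'from' here ... */

		/* Advance by the erase size of the region 'from' falls in. */
		rgn = flexonenand_region(mtd, from);
		from += mtd->eraseregions[rgn].erasesize;
	}
}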
/linux-5.19.10/drivers/of/
D | fdt.c
    991   struct memblock_region rgn[MAX_USABLE_RANGES] = {0};  in early_init_dt_check_for_usable_mem_range() local
    1007  rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);  in early_init_dt_check_for_usable_mem_range()
    1008  rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);  in early_init_dt_check_for_usable_mem_range()
    1011  i, &rgn[i].base, &rgn[i].size);  in early_init_dt_check_for_usable_mem_range()
    1014  memblock_cap_memory_range(rgn[0].base, rgn[0].size);  in early_init_dt_check_for_usable_mem_range()
    1015  for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)  in early_init_dt_check_for_usable_mem_range()
    1016  memblock_add(rgn[i].base, rgn[i].size);  in early_init_dt_check_for_usable_mem_range()
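early_init_dt_check_for_usable_mem_range() parses the "linux,usable-memory-range" property into rgn[] and then restricts memblock to it. The order matters: the first range caps (truncates) the memory map, and any further ranges are added back afterwards, so only the listed ranges survive. A sketch of that last step, assuming the ranges were already parsed; the helper is hypothetical:

static void __init apply_usable_ranges_sketch(struct memblock_region *rgn,
					      int nr)
{
	int i;

	/* Drop all memory outside the first usable range... */
	memblock_cap_memory_range(rgn[0].base, rgn[0].size);

	/* ...then re-add the remaining usable ranges individually. */
	for (i = 1; i < nr && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}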
/linux-5.19.10/drivers/iommu/
D | mtk_iommu.c
    501  const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;  in mtk_iommu_get_iova_region_id() local
    510  for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {  in mtk_iommu_get_iova_region_id()
    512  if (dma_rgn->dma_start == rgn->iova_base &&  in mtk_iommu_get_iova_region_id()
    513  dma_end == rgn->iova_base + rgn->size - 1)  in mtk_iommu_get_iova_region_id()
    516  if (dma_rgn->dma_start >= rgn->iova_base &&  in mtk_iommu_get_iova_region_id()
    517  dma_end < rgn->iova_base + rgn->size)  in mtk_iommu_get_iova_region_id()
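mtk_iommu_get_iova_region_id() matches a device's DMA window against the platform's IOVA region table, preferring an exact match of both ends over mere containment. A generic sketch of that rule; the struct and helper below are illustrative stand-ins, not the driver's own types:

#include <linux/types.h>

struct iova_rgn_sketch {
	dma_addr_t		iova_base;
	unsigned long long	size;
};

static int find_iova_region_sketch(const struct iova_rgn_sketch *rgn, int nr,
				   dma_addr_t dma_start, dma_addr_t dma_end)
{
	int i, candidate = -1;

	for (i = 0; i < nr; i++, rgn++) {
		/* An exact match of both ends wins immediately. */
		if (dma_start == rgn->iova_base &&
		    dma_end == rgn->iova_base + rgn->size - 1)
			return i;

		/* Otherwise remember the first region that fully contains the window. */
		if (candidate < 0 && dma_start >= rgn->iova_base &&
		    dma_end < rgn->iova_base + rgn->size)
			candidate = i;
	}

	return candidate;
}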