/linux-6.6.21/arch/powerpc/mm/nohash/
D | kaslr_booke.c |
      23  struct regions {   [struct]
      38  struct regions __initdata regions;   [argument]
     113  if (regions.reserved_mem < 0)   in overlaps_reserved_region()
     117  for (subnode = fdt_first_subnode(fdt, regions.reserved_mem);   in overlaps_reserved_region()
     125  while (len >= (regions.reserved_mem_addr_cells +   in overlaps_reserved_region()
     126  regions.reserved_mem_size_cells)) {   in overlaps_reserved_region()
     128  if (regions.reserved_mem_addr_cells == 2)   in overlaps_reserved_region()
     131  reg += regions.reserved_mem_addr_cells;   in overlaps_reserved_region()
     132  len -= 4 * regions.reserved_mem_addr_cells;   in overlaps_reserved_region()
     135  if (regions.reserved_mem_size_cells == 2)   in overlaps_reserved_region()
     [all …]
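The kaslr_booke.c hits show the flattened-devicetree pattern of decoding (address, size) pairs whose width depends on the reserved-memory node's #address-cells and #size-cells. Below is a minimal standalone sketch of that decoding; the helper names are hypothetical, and the kernel itself uses of_read_number()/fdt32_to_cpu() rather than the compiler builtin used here as a userspace stand-in.

    #include <stdint.h>

    /* One FDT cell is a big-endian u32; two cells form a 64-bit value. */
    static uint64_t read_cells(const uint32_t *reg, int cells)
    {
            uint64_t val = 0;

            for (int i = 0; i < cells; i++)
                    val = (val << 32) | __builtin_bswap32(reg[i]);
            return val;
    }

    /* Walk the (addr, size) pairs of a "reg" property; len is in bytes and
     * each cell is 4 bytes, matching the "len -= 4 * ..." hits above. */
    static void walk_reserved(const uint32_t *reg, int len,
                              int addr_cells, int size_cells)
    {
            while (len >= 4 * (addr_cells + size_cells)) {
                    uint64_t addr = read_cells(reg, addr_cells);
                    uint64_t size = read_cells(reg + addr_cells, size_cells);

                    /* ... compare [addr, addr + size) with the KASLR candidate ... */
                    (void)addr; (void)size;
                    reg += addr_cells + size_cells;
                    len -= 4 * (addr_cells + size_cells);
            }
    }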
/linux-6.6.21/drivers/mtd/chips/
D | jedec_probe.c |
     275  const uint32_t regions[6];   [member]
     307  .regions = {
     319  .regions = {
     334  .regions = {
     349  .regions = {
     364  .regions = {
     379  .regions = {
     395  .regions = {
     412  .regions = {
     429  .regions = {
     [all …]
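The jedec_probe.c hits are a chip-description table: each supported flash part fills a fixed regions[6] array of erase-region descriptors through designated initializers. A hedged miniature of that table style follows; the struct, field names, and packed encoding are illustrative stand-ins, loosely modeled on the driver's ERASEINFO() idiom.

    #include <stdint.h>

    /* Packed erase-region descriptor: block size in the high bits,
     * block count minus one in the low byte. Encoding is illustrative. */
    #define ERASE_RGN(size, blocks)  (((uint32_t)(size) << 8) | ((blocks) - 1))

    struct flash_chip {
            const char *name;
            unsigned int nr_regions;
            const uint32_t regions[6];      /* cf. the 'member' hit at line 275 */
    };

    static const struct flash_chip chips[] = {
            {
                    .name = "example-part",
                    .nr_regions = 2,
                    .regions = {
                            ERASE_RGN(0x10000, 15), /* 15 x 64 KiB main blocks */
                            ERASE_RGN(0x02000, 8),  /*  8 x  8 KiB boot sectors */
                    },
            },
    };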
/linux-6.6.21/mm/damon/
D | vaddr-test.h |
      69  struct damon_addr_range regions[3] = {0,};   in damon_test_three_regions_in_vmas() [local]
      84  __damon_va_three_regions(&mm, regions);   in damon_test_three_regions_in_vmas()
      86  KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);   in damon_test_three_regions_in_vmas()
      87  KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);   in damon_test_three_regions_in_vmas()
      88  KUNIT_EXPECT_EQ(test, 200ul, regions[1].start);   in damon_test_three_regions_in_vmas()
      89  KUNIT_EXPECT_EQ(test, 220ul, regions[1].end);   in damon_test_three_regions_in_vmas()
      90  KUNIT_EXPECT_EQ(test, 300ul, regions[2].start);   in damon_test_three_regions_in_vmas()
      91  KUNIT_EXPECT_EQ(test, 330ul, regions[2].end);   in damon_test_three_regions_in_vmas()
     130  unsigned long *regions, int nr_regions,   in damon_do_test_apply_three_regions() [argument]
     140  r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);   in damon_do_test_apply_three_regions()
     [all …]
D | vaddr.c |
     117  struct damon_addr_range regions[3])   in __damon_va_three_regions()
     158  regions[0].start = ALIGN(start, DAMON_MIN_REGION);   in __damon_va_three_regions()
     159  regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);   in __damon_va_three_regions()
     160  regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);   in __damon_va_three_regions()
     161  regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);   in __damon_va_three_regions()
     162  regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);   in __damon_va_three_regions()
     163  regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);   in __damon_va_three_regions()
     174  struct damon_addr_range regions[3])   in damon_va_three_regions()
     184  rc = __damon_va_three_regions(mm, regions);   in damon_va_three_regions()
     238  struct damon_addr_range regions[3];   in __damon_va_init_regions() [local]
     [all …]
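__damon_va_three_regions() turns the two biggest unmapped gaps of an address space into three monitoring ranges, aligning every boundary to DAMON_MIN_REGION; the vaddr-test.h entry above asserts the resulting start/end values. A standalone sketch of that conversion, assuming gap1 precedes gap2 and both lie inside the mapped span; ALIGN_UP mirrors the kernel's round-up ALIGN(), and the DAMON_MIN_REGION value is illustrative.

    #define DAMON_MIN_REGION  4096UL                     /* illustrative value */
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((a) - 1))

    struct range { unsigned long start, end; };

    /* Derive three regions from a mapped span with two interior gaps. */
    static void three_regions(struct range span, struct range gap1,
                              struct range gap2, struct range out[3])
    {
            out[0].start = ALIGN_UP(span.start, DAMON_MIN_REGION);
            out[0].end   = ALIGN_UP(gap1.start, DAMON_MIN_REGION);
            out[1].start = ALIGN_UP(gap1.end, DAMON_MIN_REGION);
            out[1].end   = ALIGN_UP(gap2.start, DAMON_MIN_REGION);
            out[2].start = ALIGN_UP(gap2.end, DAMON_MIN_REGION);
            out[2].end   = ALIGN_UP(span.end, DAMON_MIN_REGION);
    }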
/linux-6.6.21/drivers/vfio/cdx/
D | main.c |
      19  vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),   in vfio_cdx_open_device()
      21  if (!vdev->regions)   in vfio_cdx_open_device()
      27  vdev->regions[i].addr = res->start;   in vfio_cdx_open_device()
      28  vdev->regions[i].size = resource_size(res);   in vfio_cdx_open_device()
      29  vdev->regions[i].type = res->flags;   in vfio_cdx_open_device()
      34  if (!(vdev->regions[i].addr & ~PAGE_MASK) &&   in vfio_cdx_open_device()
      35  !(vdev->regions[i].size & ~PAGE_MASK))   in vfio_cdx_open_device()
      36  vdev->regions[i].flags |=   in vfio_cdx_open_device()
      38  vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;   in vfio_cdx_open_device()
      40  vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;   in vfio_cdx_open_device()
      [all …]
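All three VFIO bus drivers in this listing (cdx here, platform and fsl-mc below) gate the mmap capability on the same test: a region is advertised as mappable only when both its base address and its size are page aligned. Since PAGE_MASK clears the offset-within-page bits, (x & ~PAGE_MASK) is exactly that offset, so zero for both fields means the region tiles whole pages. A small self-contained sketch, assuming 4 KiB pages:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Region can be handed to userspace via mmap only if base and size
     * are both whole-page multiples. */
    static bool region_mmappable(uint64_t addr, uint64_t size)
    {
            return !(addr & ~PAGE_MASK) && !(size & ~PAGE_MASK);
    }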
/linux-6.6.21/drivers/gpu/drm/nouveau/nvkm/nvfw/
D | acr.c |
     130  hdr->regions.no_regions);   in flcn_acr_desc_dump()
     132  for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {   in flcn_acr_desc_dump()
     135  hdr->regions.region_props[i].start_addr);   in flcn_acr_desc_dump()
     137  hdr->regions.region_props[i].end_addr);   in flcn_acr_desc_dump()
     139  hdr->regions.region_props[i].region_id);   in flcn_acr_desc_dump()
     141  hdr->regions.region_props[i].read_mask);   in flcn_acr_desc_dump()
     143  hdr->regions.region_props[i].write_mask);   in flcn_acr_desc_dump()
     145  hdr->regions.region_props[i].client_mask);   in flcn_acr_desc_dump()
     173  hdr->regions.no_regions);   in flcn_acr_desc_v1_dump()
     175  for (i = 0; i < ARRAY_SIZE(hdr->regions.region_props); i++) {   in flcn_acr_desc_v1_dump()
     [all …]
/linux-6.6.21/drivers/vfio/platform/
D | vfio_platform_common.c |
     144  vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),   in vfio_platform_regions_init()
     146  if (!vdev->regions)   in vfio_platform_regions_init()
     153  vdev->regions[i].addr = res->start;   in vfio_platform_regions_init()
     154  vdev->regions[i].size = resource_size(res);   in vfio_platform_regions_init()
     155  vdev->regions[i].flags = 0;   in vfio_platform_regions_init()
     159  vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;   in vfio_platform_regions_init()
     160  vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;   in vfio_platform_regions_init()
     162  vdev->regions[i].flags |=   in vfio_platform_regions_init()
     169  if (!(vdev->regions[i].addr & ~PAGE_MASK) &&   in vfio_platform_regions_init()
     170  !(vdev->regions[i].size & ~PAGE_MASK))   in vfio_platform_regions_init()
     [all …]
/linux-6.6.21/tools/testing/memblock/tests/
D | alloc_exact_nid_api.c |
      30  struct memblock_region *new_rgn = &memblock.reserved.regions[0];   in alloc_exact_nid_top_down_numa_simple_check()
      31  struct memblock_region *req_node = &memblock.memory.regions[nid_req];   in alloc_exact_nid_top_down_numa_simple_check()
      82  struct memblock_region *new_rgn = &memblock.reserved.regions[1];   in alloc_exact_nid_top_down_numa_part_reserved_check()
      83  struct memblock_region *req_node = &memblock.memory.regions[nid_req];   in alloc_exact_nid_top_down_numa_part_reserved_check()
     143  struct memblock_region *new_rgn = &memblock.reserved.regions[0];   in alloc_exact_nid_top_down_numa_split_range_low_check()
     144  struct memblock_region *req_node = &memblock.memory.regions[nid_req];   in alloc_exact_nid_top_down_numa_split_range_low_check()
     200  struct memblock_region *new_rgn = &memblock.reserved.regions[0];   in alloc_exact_nid_top_down_numa_no_overlap_split_check()
     201  struct memblock_region *req_node = &memblock.memory.regions[nid_req];   in alloc_exact_nid_top_down_numa_no_overlap_split_check()
     202  struct memblock_region *node2 = &memblock.memory.regions[6];   in alloc_exact_nid_top_down_numa_no_overlap_split_check()
     258  struct memblock_region *new_rgn = &memblock.reserved.regions[0];   in alloc_exact_nid_top_down_numa_no_overlap_low_check()
     [all …]
D | alloc_nid_api.c |
      66  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_top_down_simple_check()
     118  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_top_down_end_misaligned_check()
     169  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_exact_address_generic_check()
     221  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_top_down_narrow_range_check()
     311  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_min_reserved_generic_check()
     363  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_max_reserved_generic_check()
     416  struct memblock_region *rgn1 = &memblock.reserved.regions[1];   in alloc_nid_top_down_reserved_with_space_check()
     417  struct memblock_region *rgn2 = &memblock.reserved.regions[0];   in alloc_nid_top_down_reserved_with_space_check()
     481  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_nid_reserved_full_merge_generic_check()
     543  struct memblock_region *rgn1 = &memblock.reserved.regions[1];   in alloc_nid_top_down_reserved_no_space_check()
     [all …]
D | basic_api.c |
      17  ASSERT_NE(memblock.memory.regions, NULL);   in memblock_initialization_check()
      22  ASSERT_NE(memblock.reserved.regions, NULL);   in memblock_initialization_check()
      45  rgn = &memblock.memory.regions[0];   in memblock_add_simple_check()
      78  rgn = &memblock.memory.regions[0];   in memblock_add_node_simple_check()
     121  rgn1 = &memblock.memory.regions[0];   in memblock_add_disjoint_check()
     122  rgn2 = &memblock.memory.regions[1];   in memblock_add_disjoint_check()
     175  rgn = &memblock.memory.regions[0];   in memblock_add_overlap_top_check()
     227  rgn = &memblock.memory.regions[0];   in memblock_add_overlap_bottom_check()
     276  rgn = &memblock.memory.regions[0];   in memblock_add_within_check()
     347  rgn = &memblock.memory.regions[0];   in memblock_add_between_check()
     [all …]
D | alloc_api.c |
      26  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_top_down_simple_check()
      73  struct memblock_region *rgn1 = &memblock.reserved.regions[1];   in alloc_top_down_disjoint_check()
      74  struct memblock_region *rgn2 = &memblock.reserved.regions[0];   in alloc_top_down_disjoint_check()
     125  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_top_down_before_check()
     168  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_top_down_after_check()
     217  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_top_down_second_fit_check()
     266  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_in_between_generic_check()
     416  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_limited_space_generic_check()
     450  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_no_memory_generic_check()
     484  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_too_large_generic_check()
     [all …]
D | alloc_helpers_api.c |
      20  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_simple_generic_check()
      63  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_misaligned_generic_check()
     110  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_top_down_high_addr_check()
     153  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_top_down_no_space_above_check()
     190  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_top_down_min_addr_cap_check()
     236  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_bottom_up_high_addr_check()
     278  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_bottom_up_no_space_above_check()
     314  struct memblock_region *rgn = &memblock.reserved.regions[0];   in alloc_from_bottom_up_min_addr_cap_check()
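The five memblock test files above all follow one pattern: perform an operation, then take a pointer straight into memblock.reserved.regions[] (or memblock.memory.regions[]) and assert the recorded base, size, and count. A runnable miniature of that shape with a stand-in array; the real harness in tools/testing/memblock provides the actual memblock instance, setup helpers, and ASSERT_* macros.

    #include <assert.h>

    struct region { unsigned long base, size; };

    /* Stand-in for memblock.reserved: a packed array plus a count. */
    static struct { struct region regions[4]; unsigned long cnt; } reserved;

    static void reserve(unsigned long base, unsigned long size)
    {
            reserved.regions[reserved.cnt].base = base;
            reserved.regions[reserved.cnt].size = size;
            reserved.cnt++;
    }

    int main(void)
    {
            struct region *rgn = &reserved.regions[0];  /* as in the tests */

            reserve(0x1000, 0x200);

            assert(reserved.cnt == 1);       /* exactly one reserved region */
            assert(rgn->base == 0x1000);     /* recorded where requested */
            assert(rgn->size == 0x200);
            return 0;
    }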
/linux-6.6.21/mm/
D | memblock.c |
     116  .memory.regions = memblock_memory_init_regions,
     121  .reserved.regions = memblock_reserved_init_regions,
     132  .regions = memblock_physmem_init_regions,
     148  for (i = 0, rgn = &memblock_type->regions[0]; \
     150  i++, rgn = &memblock_type->regions[i])
     198  if (memblock_addrs_overlap(base, size, type->regions[i].base,   in memblock_overlaps_region()
     199  type->regions[i].size))   in memblock_overlaps_region()
     351  type->total_size -= type->regions[r].size;   in memblock_remove_region()
     352  memmove(&type->regions[r], &type->regions[r + 1],   in memblock_remove_region()
     353  (type->cnt - (r + 1)) * sizeof(type->regions[r]));   in memblock_remove_region()
     [all …]
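The memblock.c hits around line 351 show how a region is deleted from the packed array: subtract its size from the running total, memmove the tail down one slot, and (in the real function) decrement the count. A standalone sketch of that compaction idiom:

    #include <string.h>

    struct region { unsigned long base, size; };

    struct region_array {
            struct region *regions;
            unsigned long cnt;
            unsigned long total_size;
    };

    /* Delete entry r by sliding the tail down one slot, then shrink. */
    static void remove_region(struct region_array *type, unsigned long r)
    {
            type->total_size -= type->regions[r].size;
            memmove(&type->regions[r], &type->regions[r + 1],
                    (type->cnt - (r + 1)) * sizeof(type->regions[r]));
            type->cnt--;    /* follows the memmove in the real function */
    }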
/linux-6.6.21/drivers/md/
D | dm-bio-prison-v1.c |
      29  struct prison_region regions[];   [member]
      47  prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL);   in dm_bio_prison_create()
      53  spin_lock_init(&prison->regions[i].lock);   in dm_bio_prison_create()
      54  prison->regions[i].cell = RB_ROOT;   in dm_bio_prison_create()
     184  spin_lock_irq(&prison->regions[l].lock);   in bio_detain()
     185  r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result);   in bio_detain()
     186  spin_unlock_irq(&prison->regions[l].lock);   in bio_detain()
     232  spin_lock_irq(&prison->regions[l].lock);   in dm_cell_release()
     233  __cell_release(&prison->regions[l].cell, cell, bios);   in dm_cell_release()
     234  spin_unlock_irq(&prison->regions[l].lock);   in dm_cell_release()
     [all …]
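dm-bio-prison allocates the prison header and its per-region lock array in a single allocation via struct_size(), then initializes each region's spinlock; detain and release paths lock only the one region a key maps to, sharding lock contention. A userspace sketch of the flexible-array allocation, with pthread mutexes standing in for spinlocks; note that struct_size() additionally guards the multiplication against overflow, which the plain expression below does not.

    #include <stdlib.h>
    #include <pthread.h>

    struct prison_region {
            pthread_mutex_t lock;   /* spinlock_t in the kernel */
            void *cell_root;        /* RB_ROOT rb-tree root in the kernel */
    };

    struct bio_prison {
            unsigned int num_locks;
            struct prison_region regions[];  /* flexible array member */
    };

    static struct bio_prison *prison_create(unsigned int num_locks)
    {
            /* kernel: kzalloc(struct_size(prison, regions, num_locks), ...) */
            struct bio_prison *prison = calloc(1, sizeof(*prison) +
                            num_locks * sizeof(struct prison_region));

            if (!prison)
                    return NULL;
            prison->num_locks = num_locks;
            for (unsigned int i = 0; i < num_locks; i++) {
                    pthread_mutex_init(&prison->regions[i].lock, NULL);
                    prison->regions[i].cell_root = NULL;
            }
            return prison;
    }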
/linux-6.6.21/drivers/vfio/fsl-mc/
D | vfio_fsl_mc.c |
      30  vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),   in vfio_fsl_mc_open_device()
      32  if (!vdev->regions)   in vfio_fsl_mc_open_device()
      36  struct resource *res = &mc_dev->regions[i];   in vfio_fsl_mc_open_device()
      39  vdev->regions[i].addr = res->start;   in vfio_fsl_mc_open_device()
      40  vdev->regions[i].size = resource_size(res);   in vfio_fsl_mc_open_device()
      41  vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;   in vfio_fsl_mc_open_device()
      46  if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&   in vfio_fsl_mc_open_device()
      47  !(vdev->regions[i].size & ~PAGE_MASK))   in vfio_fsl_mc_open_device()
      48  vdev->regions[i].flags |=   in vfio_fsl_mc_open_device()
      50  vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;   in vfio_fsl_mc_open_device()
      [all …]
/linux-6.6.21/drivers/virt/nitro_enclaves/
D | ne_misc_dev_test.c |
     117  phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,   in ne_misc_dev_test_merge_phys_contig_memory_regions()
     118  sizeof(*phys_contig_mem_regions.regions),   in ne_misc_dev_test_merge_phys_contig_memory_regions()
     120  KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);   in ne_misc_dev_test_merge_phys_contig_memory_regions()
     135  KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start,   in ne_misc_dev_test_merge_phys_contig_memory_regions()
     137  KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]),   in ne_misc_dev_test_merge_phys_contig_memory_regions()
     141  kunit_kfree(test, phys_contig_mem_regions.regions);   in ne_misc_dev_test_merge_phys_contig_memory_regions()
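The nitro_enclaves test uses KUnit's test-managed allocator, so the buffer is released automatically when the case ends; the explicit kunit_kfree() at line 141 just frees it early. A minimal KUnit case using the same real APIs (kunit_kcalloc, kunit_kfree, KUNIT_ASSERT_*/EXPECT_*); the suite name and test body here are invented for illustration.

    #include <kunit/test.h>

    static void regions_alloc_test(struct kunit *test)
    {
            unsigned long *regions;

            /* test-managed: freed automatically when the case finishes */
            regions = kunit_kcalloc(test, 8, sizeof(*regions), GFP_KERNEL);
            KUNIT_ASSERT_NOT_ERR_OR_NULL(test, regions);

            regions[0] = 42;
            KUNIT_EXPECT_EQ(test, regions[0], 42UL);

            kunit_kfree(test, regions);     /* optional early release */
    }

    static struct kunit_case regions_test_cases[] = {
            KUNIT_CASE(regions_alloc_test),
            {}
    };

    static struct kunit_suite regions_test_suite = {
            .name = "regions-sketch",
            .test_cases = regions_test_cases,
    };
    kunit_test_suite(regions_test_suite);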
/linux-6.6.21/drivers/net/dsa/sja1105/
D | sja1105_devlink.c |
      85  priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),   in sja1105_setup_devlink_regions()
      87  if (!priv->regions)   in sja1105_setup_devlink_regions()
      97  dsa_devlink_region_destroy(priv->regions[i]);   in sja1105_setup_devlink_regions()
      99  kfree(priv->regions);   in sja1105_setup_devlink_regions()
     103  priv->regions[i] = region;   in sja1105_setup_devlink_regions()
     115  dsa_devlink_region_destroy(priv->regions[i]);   in sja1105_teardown_devlink_regions()
     117  kfree(priv->regions);   in sja1105_teardown_devlink_regions()
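sja1105_setup_devlink_regions() shows the canonical partial-failure unwind: if creating region i fails, destroy regions 0..i-1 in reverse order and free the pointer array. A self-contained sketch of that control flow; region_create()/region_destroy() are hypothetical stand-ins for the dsa_devlink_region_*() calls.

    #include <stdlib.h>

    struct region;                                  /* opaque handle */
    struct region *region_create(int i);            /* hypothetical */
    void region_destroy(struct region *r);          /* hypothetical */

    static struct region **setup_regions(int num)
    {
            struct region **regions = calloc(num, sizeof(*regions));

            if (!regions)
                    return NULL;
            for (int i = 0; i < num; i++) {
                    regions[i] = region_create(i);
                    if (!regions[i]) {
                            while (--i >= 0)        /* unwind in reverse */
                                    region_destroy(regions[i]);
                            free(regions);
                            return NULL;
                    }
            }
            return regions;
    }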
/linux-6.6.21/Documentation/admin-guide/device-mapper/
D | dm-clone.rst |
      58  3. A small metadata device - it records which regions are already valid in the
      59  destination device, i.e., which regions have already been hydrated, or have
      68  dm-clone divides the source and destination devices in fixed sized regions.
      77  Reads and writes from/to hydrated regions are serviced from the destination
      93  as a hint to skip hydration of the regions covered by the request, i.e., it
     111  A message `hydration_threshold <#regions>` can be used to set the maximum number
     112  of regions being copied, the default being 1 region.
     116  region size. A message `hydration_batch_size <#regions>` can be used to tune the
     118  dm-clone trying to batch together contiguous regions, so we copy the data in
     119  batches of this many regions.
     [all …]
/linux-6.6.21/drivers/soc/qcom/
D | smem.c |
     288  struct smem_region regions[];   [member]
     442  header = smem->regions[0].virt_base;   in qcom_smem_alloc_global()
     529  header = smem->regions[0].virt_base;   in qcom_smem_get_global()
     537  region = &smem->regions[i];   in qcom_smem_get_global()
     725  header = __smem->regions[0].virt_base;   in qcom_smem_get_free_space()
     728  if (ret > __smem->regions[0].size)   in qcom_smem_get_free_space()
     774  area = &__smem->regions[i];   in qcom_smem_virt_to_phys()
     814  header = smem->regions[0].virt_base;   in qcom_smem_get_sbl_version()
     867  phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset);   in qcom_smem_partition_header()
     949  smem->global_partition.phys_base = smem->regions[0].aux_base +   in qcom_smem_set_global_partition()
     [all …]
/linux-6.6.21/drivers/perf/
D | marvell_cn10k_tad_pmu.c |
      32  struct tad_region *regions;   [member]
      53  new += readq(tad_pmu->regions[i].base +   in tad_pmu_event_counter_read()
      71  writeq_relaxed(0, tad_pmu->regions[i].base +   in tad_pmu_event_counter_stop()
      92  writeq_relaxed(0, tad_pmu->regions[i].base +   in tad_pmu_event_counter_start()
     100  writeq_relaxed(reg_val, tad_pmu->regions[i].base +   in tad_pmu_event_counter_start()
     258  struct tad_region *regions;   in tad_pmu_probe() [local]
     299  regions = devm_kcalloc(&pdev->dev, tad_cnt,   in tad_pmu_probe()
     300  sizeof(*regions), GFP_KERNEL);   in tad_pmu_probe()
     301  if (!regions)   in tad_pmu_probe()
     306  regions[i].base = devm_ioremap(&pdev->dev,   in tad_pmu_probe()
     [all …]
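In this PMU one logical event is physically split across per-slice register blocks, so tad_pmu_event_counter_read() sums a readq() from every region; probe() sets that up by devm_kcalloc'ing the descriptor array and devm_ioremap'ing each slice. A sketch of the summing read, with a plain volatile load standing in for readq():

    #include <stdint.h>

    struct tad_region {
            volatile uint64_t *base;        /* ioremapped slice in the driver */
    };

    /* One logical event is the sum of the per-slice hardware counters. */
    static uint64_t counter_read(const struct tad_region *regions,
                                 int nr_regions, unsigned int reg_idx)
    {
            uint64_t sum = 0;

            for (int i = 0; i < nr_regions; i++)
                    sum += regions[i].base[reg_idx];   /* readq() in the driver */
            return sum;
    }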
/linux-6.6.21/Documentation/admin-guide/mm/damon/
D | lru_sort.rst |
      31  DAMON_LRU_SORT finds hot pages (pages of memory regions that showing access
      33  memory regions that showing no access for a time that longer than a
      85  Access frequency threshold for hot memory regions identification in permil.
      94  Time threshold for cold memory regions identification in microseconds.
     179  Minimum number of monitoring regions.
     181  The minimal number of monitoring regions of DAMON for the cold memory
     190  Maximum number of monitoring regions.
     192  The maximum number of monitoring regions of DAMON for the cold memory
     225  Number of hot memory regions that tried to be LRU-sorted.
     230  Total bytes of hot memory regions that tried to be LRU-sorted.
     [all …]
/linux-6.6.21/drivers/gpu/drm/nouveau/nvkm/subdev/acr/
D | gp102.c |
     203  desc->regions.no_regions = 2;   in gp102_acr_load_setup()
     204  desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;   in gp102_acr_load_setup()
     205  desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;   in gp102_acr_load_setup()
     206  desc->regions.region_props[0].region_id = 1;   in gp102_acr_load_setup()
     207  desc->regions.region_props[0].read_mask = 0xf;   in gp102_acr_load_setup()
     208  desc->regions.region_props[0].write_mask = 0xc;   in gp102_acr_load_setup()
     209  desc->regions.region_props[0].client_mask = 0x2;   in gp102_acr_load_setup()
     210  desc->regions.region_props[0].shadow_mem_start_addr = acr->shadow_start >> 8;   in gp102_acr_load_setup()
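gp102_acr_load_setup() programs the write-protected-region descriptor consumed by the ACR firmware; note that addresses are stored shifted right by 8, i.e. in 256-byte units. A hedged sketch of filling one such descriptor; the field widths are assumptions (see the nvfw headers for the real layout) and the mask values are copied from the hits without interpreting their per-falcon meaning.

    /* Field widths are illustrative, not the firmware ABI. */
    struct region_prop {
            unsigned int start_addr;     /* byte address >> 8 */
            unsigned int end_addr;       /* byte address >> 8 */
            int region_id;
            unsigned int read_mask, write_mask, client_mask;
    };

    static void fill_wpr_prop(struct region_prop *p,
                              unsigned long long wpr_start,
                              unsigned long long wpr_end)
    {
            p->start_addr  = wpr_start >> 8;   /* 256-byte units */
            p->end_addr    = wpr_end >> 8;
            p->region_id   = 1;
            p->read_mask   = 0xf;              /* values as in gp102.c */
            p->write_mask  = 0xc;
            p->client_mask = 0x2;
    }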
/linux-6.6.21/Documentation/networking/devlink/
D | devlink-region.rst |
       7  ``devlink`` regions enable access to driver defined address regions using
      10  Each device can create and register its own supported address regions. The
      22  address regions that are otherwise inaccessible to the user.
      54  # Show all of the exposed regions with region sizes:
      81  As regions are likely very device or driver specific, no generic regions are
      83  specific regions a driver supports.
/linux-6.6.21/drivers/virt/acrn/
D | mm.c |
      20  struct vm_memory_region_batch *regions;   in modify_region() [local]
      23  regions = kzalloc(sizeof(*regions), GFP_KERNEL);   in modify_region()
      24  if (!regions)   in modify_region()
      27  regions->vmid = vm->vmid;   in modify_region()
      28  regions->regions_num = 1;   in modify_region()
      29  regions->regions_gpa = virt_to_phys(region);   in modify_region()
      31  ret = hcall_set_memory_regions(virt_to_phys(regions));   in modify_region()
      36  kfree(regions);   in modify_region()
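acrn's modify_region() marshals a one-entry batch for the hypervisor: fill a descriptor, translate pointers with virt_to_phys(), and pass the descriptor's own physical address to the hypercall. A runnable sketch of that shape with the kernel pieces stubbed out; the field widths and the fake_* helpers are illustrative only.

    #include <stdint.h>
    #include <stdlib.h>

    struct vm_memory_region_batch {
            uint16_t vmid;
            uint32_t regions_num;
            uint64_t regions_gpa;   /* physical address of the entries */
    };

    /* Userspace stubs for virt_to_phys() and the hypercall. */
    static uint64_t fake_virt_to_phys(void *p) { return (uint64_t)(uintptr_t)p; }
    static int fake_hcall_set_memory_regions(uint64_t pa) { (void)pa; return 0; }

    static int modify_region(uint16_t vmid, void *region_entry)
    {
            struct vm_memory_region_batch *regions;
            int ret;

            regions = calloc(1, sizeof(*regions));  /* kzalloc() in the driver */
            if (!regions)
                    return -1;                      /* -ENOMEM in the driver */

            regions->vmid = vmid;
            regions->regions_num = 1;
            regions->regions_gpa = fake_virt_to_phys(region_entry);

            ret = fake_hcall_set_memory_regions(fake_virt_to_phys(regions));

            free(regions);                          /* kfree() in the driver */
            return ret;
    }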
/linux-6.6.21/drivers/gpu/drm/i915/gem/selftests/
D | i915_gem_dmabuf.c |
      95  struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];   in igt_dmabuf_import_same_driver_lmem()
     217  struct intel_memory_region **regions,   in igt_dmabuf_import_same_driver() [argument]
     231  regions, num_regions);   in igt_dmabuf_import_same_driver()
     275  if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {   in igt_dmabuf_import_same_driver()
     323  struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];   in igt_dmabuf_import_same_driver_smem()
     331  struct intel_memory_region *regions[2];   in igt_dmabuf_import_same_driver_lmem_smem() [local]
     333  if (!i915->mm.regions[INTEL_REGION_LMEM_0])   in igt_dmabuf_import_same_driver_lmem_smem()
     336  regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];   in igt_dmabuf_import_same_driver_lmem_smem()
     337  regions[1] = i915->mm.regions[INTEL_REGION_SMEM];   in igt_dmabuf_import_same_driver_lmem_smem()
     338  return igt_dmabuf_import_same_driver(i915, regions, 2);   in igt_dmabuf_import_same_driver_lmem_smem()