/linux-6.1.9/tools/testing/selftests/dma/ |
D | dma_map_benchmark.c |
     33  int granule = 1;  in main() local
     59  granule = atoi(optarg);  in main()
     96  if (granule < 1 || granule > 1024) {  in main()
    114  map.granule = granule;  in main()
    122  threads, seconds, node, dir[directions], granule);  in main()
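The selftest above treats granule as the number of PAGE_SIZE units mapped and unmapped per iteration, defaulting to 1 and rejecting anything outside 1..1024 before handing it to the kernel. A reduced, compilable sketch of that flow; the -g option letter and the trimmed one-field struct are assumptions here, not the full selftest:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct map_benchmark {
	unsigned int granule;	/* PAGE_SIZE units per map/unmap */
	/* ... the real struct carries many more parameters ... */
};

int main(int argc, char **argv)
{
	struct map_benchmark map = { 0 };
	int granule = 1;	/* default: one page per iteration */
	int opt;

	while ((opt = getopt(argc, argv, "g:")) != -1) {
		if (opt == 'g')
			granule = atoi(optarg);
	}

	if (granule < 1 || granule > 1024) {
		fprintf(stderr, "invalid granule size\n");
		exit(1);
	}
	map.granule = granule;
	/* the real test now hands &map to the kernel via ioctl() */
	printf("granule = %u pages\n", map.granule);
	return 0;
}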
|
/linux-6.1.9/include/linux/ |
D | iova.h |
     33  unsigned long granule; /* pfn granularity for this domain */  member
     50  return __ffs(iovad->granule);  in iova_shift()
     55  return iovad->granule - 1;  in iova_mask()
     65  return ALIGN(size, iovad->granule);  in iova_align()
     95  void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
    148  unsigned long granule,  in init_iova_domain() argument
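The helpers quoted above all lean on the granule being a power of two: iova_shift() is its log2, iova_mask() its low-bit mask, and iova_align() rounds a size up to it. A userspace re-expression under that assumption, with __builtin_ctzl() standing in for the kernel's __ffs():

#include <assert.h>
#include <stddef.h>

static unsigned long iova_shift(unsigned long granule)
{
	return (unsigned long)__builtin_ctzl(granule);	/* index of lowest set bit */
}

static unsigned long iova_mask(unsigned long granule)
{
	return granule - 1;	/* valid because granule is a power of two */
}

static size_t iova_align(size_t size, unsigned long granule)
{
	return (size + granule - 1) & ~((size_t)granule - 1);	/* ALIGN() */
}

int main(void)
{
	assert(iova_shift(4096) == 12);
	assert(iova_mask(4096) == 0xfff);
	assert(iova_align(5000, 4096) == 8192);
	return 0;
}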
|
D | io-pgtable.h |
     43  void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
     46  unsigned long iova, size_t granule, void *cookie);
    231  size_t size, size_t granule)  in io_pgtable_tlb_flush_walk() argument
    234  iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);  in io_pgtable_tlb_flush_walk()
    240  size_t granule)  in io_pgtable_tlb_add_page() argument
    243  iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);  in io_pgtable_tlb_add_page()
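These snippets show io-pgtable's dispatch pattern: inline wrappers forward (iova, size, granule) plus an opaque cookie to TLB callbacks supplied by each IOMMU driver. A minimal sketch of that indirection; the structs are trimmed (the kernel reaches the ops through iop->cfg.tlb) and demo_flush() is a stand-in driver callback:

#include <stddef.h>
#include <stdio.h>

struct iommu_flush_ops {
	void (*tlb_flush_walk)(unsigned long iova, size_t size,
			       size_t granule, void *cookie);
};

struct io_pgtable {
	const struct iommu_flush_ops *tlb;	/* kernel: iop->cfg.tlb */
	void *cookie;
};

static inline void io_pgtable_tlb_flush_walk(struct io_pgtable *iop,
					     unsigned long iova,
					     size_t size, size_t granule)
{
	iop->tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
}

static void demo_flush(unsigned long iova, size_t size, size_t granule,
		       void *cookie)
{
	(void)cookie;
	printf("flush [%#lx, +%zu) granule %zu\n", iova, size, granule);
}

int main(void)
{
	const struct iommu_flush_ops ops = { .tlb_flush_walk = demo_flush };
	struct io_pgtable iop = { .tlb = &ops, .cookie = NULL };

	io_pgtable_tlb_flush_walk(&iop, 0x1000, 0x2000, 0x1000);
	return 0;
}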
|
D | map_benchmark.h | 29 __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ member
|
/linux-6.1.9/arch/arm64/kvm/hyp/ |
D | pgtable.c |
     69  u64 granule = kvm_granule_size(level);  in kvm_block_mapping_supported() local
     74  if (granule > (end - addr))  in kvm_block_mapping_supported()
     77  if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))  in kvm_block_mapping_supported()
     80  return IS_ALIGNED(addr, granule);  in kvm_block_mapping_supported()
    390  u64 granule = kvm_granule_size(level), phys = data->phys;  in hyp_map_walker_try_leaf() local
    395  data->phys += granule;  in hyp_map_walker_try_leaf()
    463  u64 granule = kvm_granule_size(level);  in hyp_unmap_walker() local
    480  if (end - addr < granule)  in hyp_unmap_walker()
    486  data->unmapped += granule;  in hyp_unmap_walker()
    725  u64 granule = kvm_granule_size(level), phys = data->phys;  in stage2_map_walker_try_leaf() local
    [all …]
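kvm_block_mapping_supported() reads as: a block mapping of size granule is usable only if the remaining [addr, end) range covers a whole granule and both the virtual and physical addresses are granule-aligned. A condensed sketch with the kvm_phys_is_valid() special case dropped, so the PA is always checked:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static bool block_mapping_supported(uint64_t addr, uint64_t end,
				    uint64_t phys, uint64_t granule)
{
	if (granule > end - addr)
		return false;
	if (!IS_ALIGNED(phys, granule))
		return false;
	return IS_ALIGNED(addr, granule);
}

int main(void)
{
	uint64_t g = 2ULL << 20;	/* 2 MiB block at this level */

	assert(block_mapping_supported(0x40000000, 0x40400000, 0x80000000, g));
	assert(!block_mapping_supported(0x40001000, 0x40400000, 0x80000000, g));
	return 0;
}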
|
/linux-6.1.9/drivers/iommu/arm/arm-smmu/ |
D | qcom_iommu.c |
    158  size_t granule, bool leaf, void *cookie)  in qcom_iommu_tlb_inv_range_nosync() argument
    174  iova += granule;  in qcom_iommu_tlb_inv_range_nosync()
    175  } while (s -= granule);  in qcom_iommu_tlb_inv_range_nosync()
    180  size_t granule, void *cookie)  in qcom_iommu_tlb_flush_walk() argument
    182  qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);  in qcom_iommu_tlb_flush_walk()
    187  unsigned long iova, size_t granule,  in qcom_iommu_tlb_add_page() argument
    190  qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);  in qcom_iommu_tlb_add_page()
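The loop in qcom_iommu_tlb_inv_range_nosync() is the common by-granule invalidation walk: issue one invalidation, advance iova by granule, stop when the running size reaches zero; tlb_add_page goes through the same routine with size == granule. A sketch of the idiom, where write_tlbi_va() is a hypothetical stand-in for the MMIO register write, and size is assumed to be a nonzero multiple of granule as the do/while form requires:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the SMMU invalidate-by-VA register write. */
static void write_tlbi_va(unsigned long iova)
{
	printf("TLBIVA %#lx\n", iova);
}

static void tlb_inv_range(unsigned long iova, size_t size, size_t granule)
{
	size_t s = size;

	do {
		write_tlbi_va(iova);
		iova += granule;
	} while (s -= granule);
}

int main(void)
{
	tlb_inv_range(0x10000, 0x4000, 0x1000);	/* four invalidations */
	return 0;
}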
|
D | arm-smmu.c |
    263  size_t granule, void *cookie, int reg)  in arm_smmu_tlb_inv_range_s1() argument
    278  iova += granule;  in arm_smmu_tlb_inv_range_s1()
    279  } while (size -= granule);  in arm_smmu_tlb_inv_range_s1()
    285  iova += granule >> 12;  in arm_smmu_tlb_inv_range_s1()
    286  } while (size -= granule);  in arm_smmu_tlb_inv_range_s1()
    291  size_t granule, void *cookie, int reg)  in arm_smmu_tlb_inv_range_s2() argument
    306  iova += granule >> 12;  in arm_smmu_tlb_inv_range_s2()
    307  } while (size -= granule);  in arm_smmu_tlb_inv_range_s2()
    311  size_t granule, void *cookie)  in arm_smmu_tlb_inv_walk_s1() argument
    319  arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,  in arm_smmu_tlb_inv_walk_s1()
    [all …]
|
/linux-6.1.9/drivers/iommu/ |
D | msm_iommu.c |
    139  size_t granule, bool leaf, void *cookie)  in __flush_iotlb_range() argument
    159  iova += granule;  in __flush_iotlb_range()
    160  } while (temp_size -= granule);  in __flush_iotlb_range()
    171  size_t granule, void *cookie)  in __flush_iotlb_walk() argument
    173  __flush_iotlb_range(iova, size, granule, false, cookie);  in __flush_iotlb_walk()
    177  unsigned long iova, size_t granule, void *cookie)  in __flush_iotlb_page() argument
    179  __flush_iotlb_range(iova, granule, granule, true, cookie);  in __flush_iotlb_page()
|
D | io-pgtable-arm.c |
    729  unsigned long granule, page_sizes;  in arm_lpae_restrict_pgsizes() local
    740  granule = PAGE_SIZE;  in arm_lpae_restrict_pgsizes()
    742  granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);  in arm_lpae_restrict_pgsizes()
    744  granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);  in arm_lpae_restrict_pgsizes()
    746  granule = 0;  in arm_lpae_restrict_pgsizes()
    748  switch (granule) {  in arm_lpae_restrict_pgsizes()
   1132  size_t granule, void *cookie)  in dummy_tlb_flush() argument
   1139  unsigned long iova, size_t granule,  in dummy_tlb_add_page() argument
   1142  dummy_tlb_flush(iova, granule, granule, cookie);  in dummy_tlb_add_page()
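arm_lpae_restrict_pgsizes() picks its granule in priority order: the CPU PAGE_SIZE if the hardware supports it, else the largest supported size below PAGE_SIZE, else the smallest one above it. A standalone sketch of that selection, using GCC/Clang builtins in place of the kernel's __fls()/__ffs() and assuming a 4 KiB PAGE_SIZE and 64-bit unsigned long:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long pick_granule(unsigned long pgsize_bitmap)
{
	if (pgsize_bitmap & PAGE_SIZE)
		return PAGE_SIZE;
	if (pgsize_bitmap & ~PAGE_MASK)	/* sizes below PAGE_SIZE */
		return 1UL << (63 - __builtin_clzl(pgsize_bitmap & ~PAGE_MASK));
	if (pgsize_bitmap & PAGE_MASK)	/* sizes above PAGE_SIZE */
		return 1UL << __builtin_ctzl(pgsize_bitmap & PAGE_MASK);
	return 0;
}

int main(void)
{
	/* SZ_4K | SZ_2M | SZ_1G */
	printf("%lu\n", pick_granule(0x40201000UL));	/* prints 4096 */
	return 0;
}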
|
D | iova.c |
     52  init_iova_domain(struct iova_domain *iovad, unsigned long granule,  in init_iova_domain() argument
     60  BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));  in init_iova_domain()
     66  iovad->granule = granule;  in init_iova_domain()
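init_iova_domain() pins down the invariant the iova.h helpers above rely on: the granule must be a power of two no larger than PAGE_SIZE. Restated as a small check, with assert() standing in for BUG_ON() and an assumed 4 KiB PAGE_SIZE:

#include <assert.h>
#include <stdbool.h>

#define PAGE_SIZE	4096UL	/* assumed, matches most configs */

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned long granule = 4096;

	assert(granule <= PAGE_SIZE && is_power_of_2(granule));
	return 0;
}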
|
D | virtio-iommu.c |
    389  unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);  in viommu_domain_map_identity() local
    391  iova = ALIGN(iova, granule);  in viommu_domain_map_identity()
    392  limit = ALIGN_DOWN(limit + 1, granule) - 1;  in viommu_domain_map_identity()
    395  u64 resv_start = ALIGN_DOWN(resv->start, granule);  in viommu_domain_map_identity()
    396  u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;  in viommu_domain_map_identity()
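The alignment discipline here shrinks the identity-mapped window inward to granule boundaries while each reserved region is grown outward, so no mapping granule ever straddles a reserved byte. A sketch of the window clamping, with ALIGN()/ALIGN_DOWN() expanded to their usual power-of-two forms:

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long long)(a) - 1))

static void clamp_window(unsigned long long *iova, unsigned long long *limit,
			 unsigned long granule)
{
	*iova = ALIGN(*iova, granule);			/* round start up */
	*limit = ALIGN_DOWN(*limit + 1, granule) - 1;	/* round end down */
}

int main(void)
{
	unsigned long long start = 0x1234, end = 0xffffff;

	clamp_window(&start, &end, 0x1000);
	printf("[%#llx, %#llx]\n", start, end);	/* [0x2000, 0xffffff] */
	return 0;
}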
|
D | io-pgtable-arm-v7s.c |
    919  size_t granule, void *cookie)  in dummy_tlb_flush() argument
    926  unsigned long iova, size_t granule,  in dummy_tlb_add_page() argument
    929  dummy_tlb_flush(iova, granule, granule, cookie);  in dummy_tlb_add_page()
|
D | dma-iommu.c |
    287  return cookie->iovad.granule;  in cookie_msi_granule()
    365  if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {  in iommu_put_dma_cookie()
    416  start += iovad->granule;  in cookie_init_hw_msi_region()
    569  if (1UL << order != iovad->granule ||  in iommu_dma_init_domain()
|
/linux-6.1.9/Documentation/ia64/ |
D | aliasing.rst |
     72  identity mapping only when the entire granule supports cacheable
     75  Therefore, kern_memmap contains only full granule-sized regions that
    162  If the EFI memory map reports that the entire granule supports
    166  If the granule contains non-WB memory, but we can cover the
    200  which uses a granule-sized UC mapping. This granule will cover some
    236  at 0xA0000 prevents use of a WB granule. The UC mapping causes
|
/linux-6.1.9/arch/ia64/ |
D | Kconfig.debug |
     12  Select "16MB" for a small granule size.
     13  Select "64MB" for a large granule size. This is the current default.
|
/linux-6.1.9/kernel/dma/ |
D | map_benchmark.c |
     39  int npages = map->bparam.granule;  in map_benchmark_thread()
    216  if (map->bparam.granule < 1 || map->bparam.granule > 1024) {  in map_benchmark_ioctl()
|
/linux-6.1.9/drivers/iommu/amd/ |
D | io_pgtable_v2.c |
    346  size_t granule, void *cookie)  in v2_tlb_flush_walk() argument
    351  unsigned long iova, size_t granule,  in v2_tlb_add_page() argument
|
D | io_pgtable.c |
     31  size_t granule, void *cookie)  in v1_tlb_flush_walk() argument
     36  unsigned long iova, size_t granule,  in v1_tlb_add_page() argument
|
/linux-6.1.9/drivers/gpu/drm/msm/ |
D | msm_iommu.c |
    200  size_t granule, void *cookie)  in msm_iommu_tlb_flush_walk() argument
    205  unsigned long iova, size_t granule, void *cookie)  in msm_iommu_tlb_add_page() argument
|
/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/ |
D | mem_protect.c |
    301  u64 granule = kvm_granule_size(level);  in host_stage2_adjust_range() local
    302  cur.start = ALIGN_DOWN(addr, granule);  in host_stage2_adjust_range()
    303  cur.end = cur.start + granule;  in host_stage2_adjust_range()
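host_stage2_adjust_range() snaps a faulting address to the granule-sized block that contains it. The same computation in isolation:

#include <stdio.h>

struct range {
	unsigned long long start, end;
};

static struct range enclosing_block(unsigned long long addr,
				    unsigned long long granule)
{
	struct range cur;

	cur.start = addr & ~(granule - 1);	/* ALIGN_DOWN(addr, granule) */
	cur.end = cur.start + granule;
	return cur;
}

int main(void)
{
	struct range r = enclosing_block(0x40123456ULL, 2ULL << 20);

	printf("[%#llx, %#llx)\n", r.start, r.end);	/* [0x40000000, 0x40200000) */
	return 0;
}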
|
/linux-6.1.9/arch/powerpc/boot/dts/ |
D | microwatt.dts | 70 reservation-granule-size = <64>;
|
/linux-6.1.9/Documentation/arm64/ |
D | memory-tagging-extension.rst |
     19  allocation tag for each 16-byte granule in the physical address space.
    197  4-bit tag per byte and correspond to a 16-byte MTE tag granule in the
    200  **Note**: If ``addr`` is not aligned to a 16-byte granule, the kernel
    247  in a byte. With the tag granule of 16 bytes, a 4K page requires 128
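The arithmetic behind the last fragment: one 4-bit tag per 16-byte granule gives 4096 / 16 = 256 tags per 4K page, i.e. 256 * 4 bits = 128 bytes of tag storage. As a check:

#include <stdio.h>

int main(void)
{
	unsigned int page = 4096, granule = 16, tag_bits = 4;
	unsigned int granules = page / granule;		/* 256 */
	unsigned int tag_bytes = granules * tag_bits / 8;	/* 128 */

	printf("%u granules -> %u tag bytes per page\n", granules, tag_bytes);
	return 0;
}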
|
/linux-6.1.9/drivers/iommu/arm/arm-smmu-v3/ |
D | arm-smmu-v3.c |
   1868  size_t granule,  in __arm_smmu_tlb_inv_range() argument
   1873  size_t inv_range = granule;  in __arm_smmu_tlb_inv_range()
   1887  cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));  in __arm_smmu_tlb_inv_range()
   1928  size_t granule, bool leaf,  in arm_smmu_tlb_inv_range_domain() argument
   1945  __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);  in arm_smmu_tlb_inv_range_domain()
   1955  size_t granule, bool leaf,  in arm_smmu_tlb_inv_range_asid() argument
   1967  __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);  in arm_smmu_tlb_inv_range_asid()
   1971  unsigned long iova, size_t granule,  in arm_smmu_tlb_inv_page_nosync() argument
   1977  iommu_iotlb_gather_add_page(domain, gather, iova, granule);  in arm_smmu_tlb_inv_page_nosync()
   1981  size_t granule, void *cookie)  in arm_smmu_tlb_inv_walk()
   [all …]
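The ttl assignment quoted above derives a translation-table-level hint from the invalidation granule, where tg is the log2 of the leaf page size. Worked through for 4 KiB base pages (tg = 12): a 2 MiB granule resolves to a level-2 block and a 4 KiB granule to a level-3 page. A sketch of just that arithmetic:

#include <assert.h>

static unsigned int ttl_hint(unsigned int ilog2_granule, unsigned int tg)
{
	return 4 - ((ilog2_granule - 3) / (tg - 3));
}

int main(void)
{
	assert(ttl_hint(21, 12) == 2);	/* 2 MiB block with 4 KiB pages */
	assert(ttl_hint(12, 12) == 3);	/* a single 4 KiB page */
	return 0;
}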
|
D | arm-smmu-v3.h | 744 size_t granule, bool leaf,
|
/linux-6.1.9/Documentation/dev-tools/ |
D | kasan.rst |
    227  Internally, KASAN tracks memory state separately for each memory granule, which
    232  For Generic KASAN, the size of each memory granule is 8. The state of each
    233  granule is encoded in one shadow byte. Those 8 bytes can be accessible,
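Generic KASAN's 8-byte granule means one shadow byte describes one granule, so a shadow lookup is the access address scaled down by 8 plus a fixed offset. A sketch of that mapping; the offset value in the example is illustrative only, not the kernel's actual shadow offset:

#include <stdint.h>
#include <stdio.h>

#define KASAN_GRANULE_SHIFT	3	/* 8-byte granules in Generic KASAN */

static uint64_t shadow_addr_for(uint64_t addr, uint64_t shadow_offset)
{
	return (addr >> KASAN_GRANULE_SHIFT) + shadow_offset;
}

int main(void)
{
	/* the offset below is arbitrary, for illustration */
	uint64_t shadow = shadow_addr_for(0xffff000012345678ULL,
					  0xdffffc0000000000ULL);

	printf("shadow byte at %#llx\n", (unsigned long long)shadow);
	return 0;
}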
|