
Searched refs: granule (Results 1 – 25 of 29), sorted by relevance

/linux-6.6.21/tools/testing/selftests/dma/
dma_map_benchmark.c
33 int granule = 1; in main() local
59 granule = atoi(optarg); in main()
96 if (granule < 1 || granule > 1024) { in main()
114 map.granule = granule; in main()
122 threads, seconds, node, dir[directions], granule); in main()
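For orientation, the selftest's granule option above sets how many PAGE_SIZE units are mapped and unmapped per benchmark operation, and it is rejected outside 1..1024. A minimal user-space sketch of that bounds check and of the bytes-per-operation arithmetic; the helper name granule_valid() is illustrative, not part of the selftest:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Mirror of the selftest's bounds check: 1..1024 pages per map/unmap. */
static int granule_valid(int granule)
{
        return granule >= 1 && granule <= 1024;
}

int main(int argc, char **argv)
{
        int granule = argc > 1 ? atoi(argv[1]) : 1;
        long page_size = sysconf(_SC_PAGESIZE);

        if (!granule_valid(granule)) {
                fprintf(stderr, "granule must be within [1, 1024]\n");
                return 1;
        }

        /* granule pages are mapped/unmapped in one benchmark iteration. */
        printf("each map/unmap covers %ld bytes (%d page(s))\n",
               granule * page_size, granule);
        return 0;
}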
/linux-6.6.21/include/linux/
iova.h
33 unsigned long granule; /* pfn granularity for this domain */ member
50 return __ffs(iovad->granule); in iova_shift()
55 return iovad->granule - 1; in iova_mask()
65 return ALIGN(size, iovad->granule); in iova_align()
95 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
148 unsigned long granule, in init_iova_domain() argument
io-pgtable.h
43 void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
46 unsigned long iova, size_t granule, void *cookie);
225 size_t size, size_t granule) in io_pgtable_tlb_flush_walk() argument
228 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); in io_pgtable_tlb_flush_walk()
234 size_t granule) in io_pgtable_tlb_add_page() argument
237 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); in io_pgtable_tlb_add_page()
map_benchmark.h
29 __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */ member
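The iova.h helpers in this group derive everything from the domain's power-of-two granule: the shift is its log2, the mask is granule - 1, and sizes are rounded up to a granule multiple. A user-space sketch of the same arithmetic, assuming a 4 KiB granule for the worked values:

#include <assert.h>
#include <stdio.h>

/* Round size up to the next multiple of the (power-of-two) granule,
 * as iova_align() does with ALIGN(). */
static unsigned long align_up(unsigned long size, unsigned long granule)
{
        return (size + granule - 1) & ~(granule - 1);
}

int main(void)
{
        unsigned long granule = 4096;   /* pfn granularity of the domain */
        unsigned long shift = __builtin_ctzl(granule);  /* __ffs() equivalent */
        unsigned long mask = granule - 1;

        assert((granule & (granule - 1)) == 0); /* must be a power of two */
        printf("shift=%lu mask=%#lx\n", shift, mask);           /* 12, 0xfff */
        printf("align(5000)=%lu\n", align_up(5000, granule));   /* 8192 */
        return 0;
}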
/linux-6.6.21/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
158 size_t granule, bool leaf, void *cookie) in qcom_iommu_tlb_inv_range_nosync() argument
174 iova += granule; in qcom_iommu_tlb_inv_range_nosync()
175 } while (s -= granule); in qcom_iommu_tlb_inv_range_nosync()
180 size_t granule, void *cookie) in qcom_iommu_tlb_flush_walk() argument
182 qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); in qcom_iommu_tlb_flush_walk()
187 unsigned long iova, size_t granule, in qcom_iommu_tlb_add_page() argument
190 qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); in qcom_iommu_tlb_add_page()
arm-smmu.c
262 size_t granule, void *cookie, int reg) in arm_smmu_tlb_inv_range_s1() argument
277 iova += granule; in arm_smmu_tlb_inv_range_s1()
278 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
284 iova += granule >> 12; in arm_smmu_tlb_inv_range_s1()
285 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
290 size_t granule, void *cookie, int reg) in arm_smmu_tlb_inv_range_s2() argument
305 iova += granule >> 12; in arm_smmu_tlb_inv_range_s2()
306 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
310 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk_s1() argument
318 arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, in arm_smmu_tlb_inv_walk_s1()
[all …]
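Both qcom_iommu.c and arm-smmu.c above walk the invalidation range one granule at a time with the same do/while idiom. A user-space sketch of that loop shape, with a stub callback standing in for the per-granule MMIO write (the names invalidate_one and tlb_inv_range are illustrative):

#include <stdio.h>

/* Stand-in for the per-granule TLB invalidation register write. */
static void invalidate_one(unsigned long iova, size_t granule)
{
        printf("invalidate iova=%#lx size=%#zx\n", iova, granule);
}

/* Same shape as qcom_iommu_tlb_inv_range_nosync()/arm_smmu_tlb_inv_range_s1():
 * size is assumed to be a non-zero multiple of granule. */
static void tlb_inv_range(unsigned long iova, size_t size, size_t granule)
{
        do {
                invalidate_one(iova, granule);
                iova += granule;
        } while (size -= granule);
}

int main(void)
{
        tlb_inv_range(0x10000, 0x4000, 0x1000); /* four 4 KiB invalidations */
        return 0;
}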
/linux-6.6.21/drivers/iommu/
msm_iommu.c
139 size_t granule, bool leaf, void *cookie) in __flush_iotlb_range() argument
159 iova += granule; in __flush_iotlb_range()
160 } while (temp_size -= granule); in __flush_iotlb_range()
171 size_t granule, void *cookie) in __flush_iotlb_walk() argument
173 __flush_iotlb_range(iova, size, granule, false, cookie); in __flush_iotlb_walk()
177 unsigned long iova, size_t granule, void *cookie) in __flush_iotlb_page() argument
179 __flush_iotlb_range(iova, granule, granule, true, cookie); in __flush_iotlb_page()
io-pgtable-arm.c
716 unsigned long granule, page_sizes; in arm_lpae_restrict_pgsizes() local
727 granule = PAGE_SIZE; in arm_lpae_restrict_pgsizes()
729 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); in arm_lpae_restrict_pgsizes()
731 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); in arm_lpae_restrict_pgsizes()
733 granule = 0; in arm_lpae_restrict_pgsizes()
735 switch (granule) { in arm_lpae_restrict_pgsizes()
1117 size_t granule, void *cookie) in dummy_tlb_flush() argument
1124 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
1127 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
iova.c
52 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
60 BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); in init_iova_domain()
66 iovad->granule = granule; in init_iova_domain()
virtio-iommu.c
389 unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap); in viommu_domain_map_identity() local
391 iova = ALIGN(iova, granule); in viommu_domain_map_identity()
392 limit = ALIGN_DOWN(limit + 1, granule) - 1; in viommu_domain_map_identity()
395 u64 resv_start = ALIGN_DOWN(resv->start, granule); in viommu_domain_map_identity()
396 u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1; in viommu_domain_map_identity()
io-pgtable-arm-v7s.c
904 size_t granule, void *cookie) in dummy_tlb_flush() argument
911 unsigned long iova, size_t granule, in dummy_tlb_add_page() argument
914 dummy_tlb_flush(iova, granule, granule, cookie); in dummy_tlb_add_page()
dma-iommu.c
289 return cookie->iovad.granule; in cookie_msi_granule()
367 if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) { in iommu_put_dma_cookie()
420 start += iovad->granule; in cookie_init_hw_msi_region()
602 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
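In the virtio-iommu hunk above, the mapping granule is the smallest supported page size (lowest set bit of pgsize_bitmap); the identity-map window is shrunk inward to granule boundaries while reserved regions are grown outward so they stay fully excluded. A small sketch of that alignment arithmetic with made-up example values:

#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long g)
{
        return (x + g - 1) & ~(g - 1);
}

static unsigned long align_down(unsigned long x, unsigned long g)
{
        return x & ~(g - 1);
}

int main(void)
{
        unsigned long pgsize_bitmap = 0x40201000;       /* 4K | 2M | 1G supported */
        unsigned long granule = 1UL << __builtin_ctzl(pgsize_bitmap);   /* 4K */

        /* Shrink the window to be mapped inward to granule boundaries. */
        unsigned long iova = align_up(0x1234, granule);                 /* 0x2000 */
        unsigned long limit = align_down(0xfffff + 1, granule) - 1;     /* 0xfffff */

        /* Grow a reserved hole outward so it is fully excluded. */
        unsigned long resv_start = align_down(0x8100, granule);                 /* 0x8000 */
        unsigned long resv_end = align_up(0x8100 + 0x200, granule) - 1;         /* 0x8fff */

        printf("granule=%#lx map [%#lx, %#lx] minus [%#lx, %#lx]\n",
               granule, iova, limit, resv_start, resv_end);
        return 0;
}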
/linux-6.6.21/Documentation/arch/ia64/
aliasing.rst
72 identity mapping only when the entire granule supports cacheable
75 Therefore, kern_memmap contains only full granule-sized regions that
162 If the EFI memory map reports that the entire granule supports
166 If the granule contains non-WB memory, but we can cover the
200 which uses a granule-sized UC mapping. This granule will cover some
236 at 0xA0000 prevents use of a WB granule. The UC mapping causes
/linux-6.6.21/arch/ia64/
Kconfig.debug
12 Select "16MB" for a small granule size.
13 Select "64MB" for a large granule size. This is the current default.
/linux-6.6.21/kernel/dma/
map_benchmark.c
39 int npages = map->bparam.granule; in map_benchmark_thread()
216 if (map->bparam.granule < 1 || map->bparam.granule > 1024) { in map_benchmark_ioctl()
/linux-6.6.21/arch/arm64/kvm/hyp/
pgtable.c
87 u64 granule = kvm_granule_size(ctx->level); in kvm_block_mapping_supported() local
92 if (granule > (ctx->end - ctx->addr)) in kvm_block_mapping_supported()
95 if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule)) in kvm_block_mapping_supported()
98 return IS_ALIGNED(ctx->addr, granule); in kvm_block_mapping_supported()
511 u64 granule = kvm_granule_size(ctx->level); in hyp_unmap_walker() local
528 if (ctx->end - ctx->addr < granule) in hyp_unmap_walker()
534 *unmapped += granule; in hyp_unmap_walker()
916 u64 granule = kvm_granule_size(ctx->level); in stage2_map_walker_try_leaf() local
944 granule); in stage2_map_walker_try_leaf()
948 mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule); in stage2_map_walker_try_leaf()
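In the pgtable.c hunk above, kvm_granule_size() is the block size covered by one entry at a given page-table level, and a block mapping is only attempted when the block fits in the remaining range and both addresses sit on a block boundary. A user-space sketch of that check, assuming 4 KiB base pages with four levels (level 3 = 4 KiB, level 2 = 2 MiB, level 1 = 1 GiB); the level-to-size formula below is an assumption for illustration, not the kernel macro itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12      /* assumed 4 KiB base pages */

/* Assumed level-to-block-size mapping: each level up multiplies by 512. */
static uint64_t granule_size(int level)
{
        return 1ULL << (PAGE_SHIFT + (3 - level) * (PAGE_SHIFT - 3));
}

static bool is_aligned(uint64_t x, uint64_t granule)
{
        return (x & (granule - 1)) == 0;
}

/* Same shape as kvm_block_mapping_supported(): the block must fit in the
 * remaining range and both addr and phys must sit on a block boundary. */
static bool block_mapping_supported(uint64_t addr, uint64_t end,
                                    uint64_t phys, int level)
{
        uint64_t granule = granule_size(level);

        if (granule > end - addr)
                return false;
        if (!is_aligned(phys, granule))
                return false;
        return is_aligned(addr, granule);
}

int main(void)
{
        printf("level 2 granule = %#llx\n",
               (unsigned long long)granule_size(2));    /* 0x200000 (2 MiB) */
        printf("2M block at 0x40200000? %d\n",
               block_mapping_supported(0x40200000, 0x40600000, 0x80200000, 2));
        return 0;
}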
/linux-6.6.21/drivers/gpu/drm/msm/
msm_iommu.c
220 size_t granule, void *cookie) in msm_iommu_tlb_flush_walk() argument
230 pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); in msm_iommu_tlb_flush_walk()
236 unsigned long iova, size_t granule, void *cookie) in msm_iommu_tlb_add_page() argument
/linux-6.6.21/drivers/iommu/amd/
io_pgtable_v2.c
341 size_t granule, void *cookie) in v2_tlb_flush_walk() argument
346 unsigned long iova, size_t granule, in v2_tlb_add_page() argument
io_pgtable.c
31 size_t granule, void *cookie) in v1_tlb_flush_walk() argument
36 unsigned long iova, size_t granule, in v1_tlb_add_page() argument
/linux-6.6.21/Documentation/arch/arm64/
memory-tagging-extension.rst
19 allocation tag for each 16-byte granule in the physical address space.
197 4-bit tag per byte and correspond to a 16-byte MTE tag granule in the
200 **Note**: If ``addr`` is not aligned to a 16-byte granule, the kernel
247 in a byte. With the tag granule of 16 bytes, a 4K page requires 128
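The MTE excerpt above says each 16-byte granule of memory carries a 4-bit allocation tag, so a 4 KiB page needs 128 bytes of tag storage. That arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
        unsigned int page_size = 4096;          /* one 4K page */
        unsigned int granule = 16;              /* MTE tag granule in bytes */
        unsigned int tag_bits = 4;              /* one tag per granule */

        unsigned int granules = page_size / granule;            /* 256 */
        unsigned int tag_bytes = granules * tag_bits / 8;       /* 128 */

        printf("%u granules per page, %u bytes of tag storage\n",
               granules, tag_bytes);
        return 0;
}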
/linux-6.6.21/arch/powerpc/boot/dts/
microwatt.dts
78 reservation-granule-size = <64>;
/linux-6.6.21/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
1883 size_t granule, in __arm_smmu_tlb_inv_range() argument
1888 size_t inv_range = granule; in __arm_smmu_tlb_inv_range()
1912 cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); in __arm_smmu_tlb_inv_range()
1953 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_domain() argument
1970 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_domain()
1980 size_t granule, bool leaf, in arm_smmu_tlb_inv_range_asid() argument
1992 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_asid()
1996 unsigned long iova, size_t granule, in arm_smmu_tlb_inv_page_nosync() argument
2002 iommu_iotlb_gather_add_page(domain, gather, iova, granule); in arm_smmu_tlb_inv_page_nosync()
2006 size_t granule, void *cookie) in arm_smmu_tlb_inv_walk() argument
[all …]
arm-smmu-v3.h
752 size_t granule, bool leaf,
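The ttl computation in the __arm_smmu_tlb_inv_range() hunk above encodes the page-table level of the leaf being invalidated. A worked sketch of that expression, assuming tg is the log2 of a 4 KiB translation granule (12), which maps a 4 KiB leaf to TTL 3, a 2 MiB block to TTL 2, and a 1 GiB block to TTL 1:

#include <stdio.h>

/* Integer log2 for power-of-two values, like the kernel's ilog2(). */
static unsigned int ilog2u(unsigned long long x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int tg = 12;   /* assumed: log2 of a 4 KiB translation granule */
        unsigned long long leaves[] = { 1ULL << 12, 1ULL << 21, 1ULL << 30 };

        for (int i = 0; i < 3; i++) {
                unsigned long long granule = leaves[i];
                unsigned int ttl = 4 - ((ilog2u(granule) - 3) / (tg - 3));

                /* 4K -> TTL 3, 2M -> TTL 2, 1G -> TTL 1 */
                printf("granule %#llx -> ttl %u\n", granule, ttl);
        }
        return 0;
}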
/linux-6.6.21/arch/arm64/kvm/hyp/nvhe/
mem_protect.c
461 u64 granule = kvm_granule_size(level); in host_stage2_adjust_range() local
462 cur.start = ALIGN_DOWN(addr, granule); in host_stage2_adjust_range()
463 cur.end = cur.start + granule; in host_stage2_adjust_range()
/linux-6.6.21/Documentation/dev-tools/
kasan.rst
247 Internally, KASAN tracks memory state separately for each memory granule, which
252 For Generic KASAN, the size of each memory granule is 8. The state of each
253 granule is encoded in one shadow byte. Those 8 bytes can be accessible,
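Per the KASAN excerpt above, Generic KASAN tracks state in 8-byte granules with one shadow byte per granule, so shadow storage is one eighth of the covered range. A quick sketch of that bookkeeping; only the granule arithmetic is shown, the shadow offset itself is omitted:

#include <stdio.h>

#define KASAN_GRANULE_SIZE      8       /* Generic KASAN: 8 bytes per granule */

int main(void)
{
        unsigned long region = 4096;    /* bytes of ordinary memory */

        /* One shadow byte encodes the state of one granule. */
        unsigned long granules = region / KASAN_GRANULE_SIZE;   /* 512 */
        unsigned long shadow_bytes = granules;                  /* 512 */

        /* A 61-byte object spans 8 granules; the last one is partial, so its
         * shadow byte records how many leading bytes (5) are accessible. */
        unsigned long object = 61;
        unsigned long full = object / KASAN_GRANULE_SIZE;       /* 7 */
        unsigned long partial = object % KASAN_GRANULE_SIZE;    /* 5 */

        printf("%lu shadow bytes cover %lu bytes\n", shadow_bytes, region);
        printf("object of %lu bytes: %lu full granules + %lu accessible bytes\n",
               object, full, partial);
        return 0;
}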