
Searched refs: granule (Results 1 – 25 of 29), sorted by relevance


/linux-6.1.9/tools/testing/selftests/dma/
  dma_map_benchmark.c
    33: int granule = 1;  [in main(), local]
    59: granule = atoi(optarg);  [in main()]
    96: if (granule < 1 || granule > 1024) {  [in main()]
    114: map.granule = granule;  [in main()]
    122: threads, seconds, node, dir[directions], granule);  [in main()]
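Across the selftest above, the UAPI header (map_benchmark.h below) and the kernel side in kernel/dma/map_benchmark.c, the granule is the number of PAGE_SIZE pages mapped and unmapped per DMA operation, and both sides reject values outside 1..1024. A minimal user-space sketch of that validation; parse_granule() and the hard-coded 4096-byte page size are illustrative assumptions, not part of the benchmark:

    /*
     * Hedged sketch: same 1..1024 bounds check the hits above show;
     * parse_granule() and the 4096-byte page size are illustrative.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_granule(const char *arg)
    {
        int granule = atoi(arg);            /* pages per map/unmap */

        if (granule < 1 || granule > 1024) {
            fprintf(stderr, "invalid granule: %d\n", granule);
            exit(EXIT_FAILURE);
        }
        return granule;
    }

    int main(int argc, char **argv)
    {
        int granule = parse_granule(argc > 1 ? argv[1] : "1");

        /* bytes touched per map/unmap, assuming a 4096-byte page */
        printf("%d page(s), %d bytes per map\n", granule, granule * 4096);
        return 0;
    }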
/linux-6.1.9/include/linux/
  iova.h
    33: unsigned long granule; /* pfn granularity for this domain */  [member]
    50: return __ffs(iovad->granule);  [in iova_shift()]
    55: return iovad->granule - 1;  [in iova_mask()]
    65: return ALIGN(size, iovad->granule);  [in iova_align()]
    95: void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
    148: unsigned long granule,  [in init_iova_domain(), argument]
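The iova.h hits show that everything in an IOVA domain derives from a single power-of-two granule (iova.c, further down, enforces is_power_of_2() and granule <= PAGE_SIZE): the shift is __ffs(granule), the mask is granule - 1, and sizes are rounded up with ALIGN(). A small user-space sketch of the same derivations; the struct and helper names are illustrative, not the kernel's:

    /* Hedged sketch of the granule-derived helpers seen in iova.h. */
    #include <assert.h>
    #include <stdio.h>
    #include <strings.h>        /* ffs() stands in for the kernel's __ffs() */

    struct iova_dom {
        unsigned long granule;  /* pfn granularity, power of two */
    };

    static unsigned long shift_of(const struct iova_dom *d)
    {
        return ffs(d->granule) - 1;                 /* iova_shift() */
    }

    static unsigned long mask_of(const struct iova_dom *d)
    {
        return d->granule - 1;                      /* iova_mask() */
    }

    static unsigned long align_up(const struct iova_dom *d, unsigned long size)
    {
        return (size + mask_of(d)) & ~mask_of(d);   /* ALIGN(size, granule) */
    }

    int main(void)
    {
        struct iova_dom d = { .granule = 4096 };

        /* power of two, as the BUG_ON in iova.c checks */
        assert((d.granule & (d.granule - 1)) == 0);
        printf("shift=%lu mask=%#lx align(5000)=%lu\n",
               shift_of(&d), mask_of(&d), align_up(&d, 5000));
        return 0;
    }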
  io-pgtable.h
    43: void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
    46: unsigned long iova, size_t granule, void *cookie);
    231: size_t size, size_t granule)  [in io_pgtable_tlb_flush_walk(), argument]
    234: iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);  [in io_pgtable_tlb_flush_walk()]
    240: size_t granule)  [in io_pgtable_tlb_add_page(), argument]
    243: iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);  [in io_pgtable_tlb_add_page()]
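These io-pgtable.h hits are the TLB maintenance callbacks an io-pgtable user supplies: tlb_flush_walk() invalidates an (iova, size) range at a given granule, while tlb_add_page() queues a single leaf entry of one granule. The no-op test implementations in io-pgtable-arm.c and io-pgtable-arm-v7s.c (further down) simply forward tlb_add_page to a flush of (iova, granule, granule). A standalone sketch of that callback shape, with simplified types rather than the kernel's callback struct:

    /*
     * Hedged sketch: a simplified callback table mirroring the
     * tlb_flush_walk / tlb_add_page pattern; types and names are illustrative.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct flush_ops {
        void (*tlb_flush_walk)(unsigned long iova, size_t size,
                               size_t granule, void *cookie);
        void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);
    };

    static void dummy_flush(unsigned long iova, size_t size,
                            size_t granule, void *cookie)
    {
        printf("flush iova=%#lx size=%zu granule=%zu\n", iova, size, granule);
    }

    /* A single-page invalidation is just a flush of one granule. */
    static void dummy_add_page(unsigned long iova, size_t granule, void *cookie)
    {
        dummy_flush(iova, granule, granule, cookie);
    }

    static const struct flush_ops dummy_ops = {
        .tlb_flush_walk = dummy_flush,
        .tlb_add_page   = dummy_add_page,
    };

    int main(void)
    {
        dummy_ops.tlb_add_page(0x1000, 4096, NULL);
        return 0;
    }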
  map_benchmark.h
    29: __u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */  [member]
/linux-6.1.9/arch/arm64/kvm/hyp/
  pgtable.c
    69: u64 granule = kvm_granule_size(level);  [in kvm_block_mapping_supported(), local]
    74: if (granule > (end - addr))  [in kvm_block_mapping_supported()]
    77: if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))  [in kvm_block_mapping_supported()]
    80: return IS_ALIGNED(addr, granule);  [in kvm_block_mapping_supported()]
    390: u64 granule = kvm_granule_size(level), phys = data->phys;  [in hyp_map_walker_try_leaf(), local]
    395: data->phys += granule;  [in hyp_map_walker_try_leaf()]
    463: u64 granule = kvm_granule_size(level);  [in hyp_unmap_walker(), local]
    480: if (end - addr < granule)  [in hyp_unmap_walker()]
    486: data->unmapped += granule;  [in hyp_unmap_walker()]
    725: u64 granule = kvm_granule_size(level), phys = data->phys;  [in stage2_map_walker_try_leaf(), local]
    [all …]
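Here the KVM page-table walkers ask for the granule (block size) of the current table level and only install a block mapping when the remaining range covers a full block and both the input and output addresses are granule-aligned. A hedged user-space sketch of that check; the level-to-size formula assumes 4 KiB pages with 9 bits per level (level 3 = 4 KiB, level 2 = 2 MiB, level 1 = 1 GiB) and is an illustration, not the kernel's kvm_granule_size():

    /* Hedged sketch in the spirit of kvm_block_mapping_supported(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t granule_size(int level)
    {
        /* assumed: 4 KiB pages, 9 address bits resolved per level */
        return 1ULL << (12 + 9 * (3 - level));
    }

    static bool block_mapping_ok(uint64_t addr, uint64_t end,
                                 uint64_t phys, int level)
    {
        uint64_t granule = granule_size(level);

        if (granule > end - addr)            /* range must cover a full block */
            return false;
        if (phys & (granule - 1))            /* output address must be aligned */
            return false;
        return (addr & (granule - 1)) == 0;  /* input address must be aligned */
    }

    int main(void)
    {
        /* 2 MiB block at level 2: aligned and large enough, so it qualifies */
        printf("%d\n", block_mapping_ok(0x40200000, 0x40600000, 0x80200000, 2));
        return 0;
    }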
/linux-6.1.9/drivers/iommu/arm/arm-smmu/
  qcom_iommu.c
    158: size_t granule, bool leaf, void *cookie)  [in qcom_iommu_tlb_inv_range_nosync(), argument]
    174: iova += granule;  [in qcom_iommu_tlb_inv_range_nosync()]
    175: } while (s -= granule);  [in qcom_iommu_tlb_inv_range_nosync()]
    180: size_t granule, void *cookie)  [in qcom_iommu_tlb_flush_walk(), argument]
    182: qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);  [in qcom_iommu_tlb_flush_walk()]
    187: unsigned long iova, size_t granule,  [in qcom_iommu_tlb_add_page(), argument]
    190: qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);  [in qcom_iommu_tlb_add_page()]
  arm-smmu.c
    263: size_t granule, void *cookie, int reg)  [in arm_smmu_tlb_inv_range_s1(), argument]
    278: iova += granule;  [in arm_smmu_tlb_inv_range_s1()]
    279: } while (size -= granule);  [in arm_smmu_tlb_inv_range_s1()]
    285: iova += granule >> 12;  [in arm_smmu_tlb_inv_range_s1()]
    286: } while (size -= granule);  [in arm_smmu_tlb_inv_range_s1()]
    291: size_t granule, void *cookie, int reg)  [in arm_smmu_tlb_inv_range_s2(), argument]
    306: iova += granule >> 12;  [in arm_smmu_tlb_inv_range_s2()]
    307: } while (size -= granule);  [in arm_smmu_tlb_inv_range_s2()]
    311: size_t granule, void *cookie)  [in arm_smmu_tlb_inv_walk_s1(), argument]
    319: arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,  [in arm_smmu_tlb_inv_walk_s1()]
    [all …]
/linux-6.1.9/drivers/iommu/
  msm_iommu.c
    139: size_t granule, bool leaf, void *cookie)  [in __flush_iotlb_range(), argument]
    159: iova += granule;  [in __flush_iotlb_range()]
    160: } while (temp_size -= granule);  [in __flush_iotlb_range()]
    171: size_t granule, void *cookie)  [in __flush_iotlb_walk(), argument]
    173: __flush_iotlb_range(iova, size, granule, false, cookie);  [in __flush_iotlb_walk()]
    177: unsigned long iova, size_t granule, void *cookie)  [in __flush_iotlb_page(), argument]
    179: __flush_iotlb_range(iova, granule, granule, true, cookie);  [in __flush_iotlb_page()]
  io-pgtable-arm.c
    729: unsigned long granule, page_sizes;  [in arm_lpae_restrict_pgsizes(), local]
    740: granule = PAGE_SIZE;  [in arm_lpae_restrict_pgsizes()]
    742: granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);  [in arm_lpae_restrict_pgsizes()]
    744: granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);  [in arm_lpae_restrict_pgsizes()]
    746: granule = 0;  [in arm_lpae_restrict_pgsizes()]
    748: switch (granule) {  [in arm_lpae_restrict_pgsizes()]
    1132: size_t granule, void *cookie)  [in dummy_tlb_flush(), argument]
    1139: unsigned long iova, size_t granule,  [in dummy_tlb_add_page(), argument]
    1142: dummy_tlb_flush(iova, granule, granule, cookie);  [in dummy_tlb_add_page()]
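The arm_lpae_restrict_pgsizes() hits show a translation granule being picked out of the hardware's pgsize_bitmap; the guarding conditions are not visible in the hits, so the policy sketched below (prefer the CPU page size, else the largest supported size below it, else the smallest above it) is an interpretation rather than a copy of the kernel logic:

    /*
     * Hedged sketch of granule selection from a page-size bitmap, assuming a
     * 4 KiB PAGE_SIZE; the branch conditions are assumptions.
     */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static unsigned long pick_granule(unsigned long pgsize_bitmap)
    {
        if (pgsize_bitmap & PAGE_SIZE)          /* CPU page size supported */
            return PAGE_SIZE;
        if (pgsize_bitmap & ~PAGE_MASK)         /* largest size below PAGE_SIZE */
            return 1UL << (63 - __builtin_clzl(pgsize_bitmap & ~PAGE_MASK));
        if (pgsize_bitmap & PAGE_MASK)          /* smallest size above PAGE_SIZE */
            return 1UL << __builtin_ctzl(pgsize_bitmap & PAGE_MASK);
        return 0;                               /* nothing usable */
    }

    int main(void)
    {
        /* e.g. 4K | 2M | 1G supported -> picks 4K */
        printf("%#lx\n", pick_granule(0x40201000UL));
        return 0;
    }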
  iova.c
    52: init_iova_domain(struct iova_domain *iovad, unsigned long granule,  [in init_iova_domain(), argument]
    60: BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));  [in init_iova_domain()]
    66: iovad->granule = granule;  [in init_iova_domain()]
  virtio-iommu.c
    389: unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);  [in viommu_domain_map_identity(), local]
    391: iova = ALIGN(iova, granule);  [in viommu_domain_map_identity()]
    392: limit = ALIGN_DOWN(limit + 1, granule) - 1;  [in viommu_domain_map_identity()]
    395: u64 resv_start = ALIGN_DOWN(resv->start, granule);  [in viommu_domain_map_identity()]
    396: u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;  [in viommu_domain_map_identity()]
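viommu_domain_map_identity() takes its granule from the lowest set bit of the domain's pgsize_bitmap and snaps both the identity range and each reserved region to granule boundaries before mapping around the holes. The alignment arithmetic alone, as a hedged sketch with re-implemented ALIGN/ALIGN_DOWN macros and made-up example addresses:

    /* Hedged sketch of the granule alignment seen above (power-of-two granule). */
    #include <stdio.h>

    #define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
    #define ALIGN(x, a)       ALIGN_DOWN((x) + (a) - 1, a)

    int main(void)
    {
        unsigned long granule = 1UL << __builtin_ctzl(0x40201000UL); /* __ffs(pgsize_bitmap) */
        unsigned long resv_start = ALIGN_DOWN(0x8001200UL, granule);
        unsigned long resv_end   = ALIGN(0x8003400UL, granule) - 1;

        /* the reserved hole grows to [0x8001000, 0x8003fff] with a 4 KiB granule */
        printf("granule=%#lx hole=[%#lx, %#lx]\n", granule, resv_start, resv_end);
        return 0;
    }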
  io-pgtable-arm-v7s.c
    919: size_t granule, void *cookie)  [in dummy_tlb_flush(), argument]
    926: unsigned long iova, size_t granule,  [in dummy_tlb_add_page(), argument]
    929: dummy_tlb_flush(iova, granule, granule, cookie);  [in dummy_tlb_add_page()]
  dma-iommu.c
    287: return cookie->iovad.granule;  [in cookie_msi_granule()]
    365: if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {  [in iommu_put_dma_cookie()]
    416: start += iovad->granule;  [in cookie_init_hw_msi_region()]
    569: if (1UL << order != iovad->granule ||  [in iommu_dma_init_domain()]
/linux-6.1.9/Documentation/ia64/
  aliasing.rst
    72: identity mapping only when the entire granule supports cacheable
    75: Therefore, kern_memmap contains only full granule-sized regions that
    162: If the EFI memory map reports that the entire granule supports
    166: If the granule contains non-WB memory, but we can cover the
    200: which uses a granule-sized UC mapping. This granule will cover some
    236: at 0xA0000 prevents use of a WB granule. The UC mapping causes
/linux-6.1.9/arch/ia64/
  Kconfig.debug
    12: Select "16MB" for a small granule size.
    13: Select "64MB" for a large granule size. This is the current default.
/linux-6.1.9/kernel/dma/
  map_benchmark.c
    39: int npages = map->bparam.granule;  [in map_benchmark_thread()]
    216: if (map->bparam.granule < 1 || map->bparam.granule > 1024) {  [in map_benchmark_ioctl()]
/linux-6.1.9/drivers/iommu/amd/
  io_pgtable_v2.c
    346: size_t granule, void *cookie)  [in v2_tlb_flush_walk(), argument]
    351: unsigned long iova, size_t granule,  [in v2_tlb_add_page(), argument]
  io_pgtable.c
    31: size_t granule, void *cookie)  [in v1_tlb_flush_walk(), argument]
    36: unsigned long iova, size_t granule,  [in v1_tlb_add_page(), argument]
/linux-6.1.9/drivers/gpu/drm/msm/
  msm_iommu.c
    200: size_t granule, void *cookie)  [in msm_iommu_tlb_flush_walk(), argument]
    205: unsigned long iova, size_t granule, void *cookie)  [in msm_iommu_tlb_add_page(), argument]
/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/
  mem_protect.c
    301: u64 granule = kvm_granule_size(level);  [in host_stage2_adjust_range(), local]
    302: cur.start = ALIGN_DOWN(addr, granule);  [in host_stage2_adjust_range()]
    303: cur.end = cur.start + granule;  [in host_stage2_adjust_range()]
/linux-6.1.9/arch/powerpc/boot/dts/
  microwatt.dts
    70: reservation-granule-size = <64>;
/linux-6.1.9/Documentation/arm64/
  memory-tagging-extension.rst
    19: allocation tag for each 16-byte granule in the physical address space.
    197: 4-bit tag per byte and correspond to a 16-byte MTE tag granule in the
    200: **Note**: If ``addr`` is not aligned to a 16-byte granule, the kernel
    247: in a byte. With the tag granule of 16 bytes, a 4K page requires 128
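The arithmetic behind the last hit: with one 4-bit allocation tag per 16-byte granule, a 4 KiB page holds 4096 / 16 = 256 granules, and packing two tags per byte gives 256 × 4 bits = 128 bytes of tag storage per page, which matches the figure quoted at line 247.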
/linux-6.1.9/drivers/iommu/arm/arm-smmu-v3/
  arm-smmu-v3.c
    1868: size_t granule,  [in __arm_smmu_tlb_inv_range(), argument]
    1873: size_t inv_range = granule;  [in __arm_smmu_tlb_inv_range()]
    1887: cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));  [in __arm_smmu_tlb_inv_range()]
    1928: size_t granule, bool leaf,  [in arm_smmu_tlb_inv_range_domain(), argument]
    1945: __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);  [in arm_smmu_tlb_inv_range_domain()]
    1955: size_t granule, bool leaf,  [in arm_smmu_tlb_inv_range_asid(), argument]
    1967: __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);  [in arm_smmu_tlb_inv_range_asid()]
    1971: unsigned long iova, size_t granule,  [in arm_smmu_tlb_inv_page_nosync(), argument]
    1977: iommu_iotlb_gather_add_page(domain, gather, iova, granule);  [in arm_smmu_tlb_inv_page_nosync()]
    1981: size_t granule, void *cookie)  [in arm_smmu_tlb_inv_walk(), argument]
    [all …]
  arm-smmu-v3.h
    744: size_t granule, bool leaf,
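The ttl computation at arm-smmu-v3.c:1887 is worth unpacking: assuming tg is log2 of the leaf page size (12 for 4 KiB pages), ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)) maps a 4 KiB granule to 4 - (12 - 3) / 9 = 3, a 2 MiB granule to 4 - (21 - 3) / 9 = 2, and a 1 GiB granule to 1; in other words, the range-invalidate command encodes the page-table level that holds the leaf entries being invalidated.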
/linux-6.1.9/Documentation/dev-tools/
  kasan.rst
    227: Internally, KASAN tracks memory state separately for each memory granule, which
    232: For Generic KASAN, the size of each memory granule is 8. The state of each
    233: granule is encoded in one shadow byte. Those 8 bytes can be accessible,
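Put differently, every 8-byte granule of kernel memory is described by one shadow byte, so shadow memory is an 8:1 compression of the address space; under the documented Generic KASAN encoding (not visible in the hits above), a shadow value of 0 means the whole granule is accessible, a value from 1 to 7 means only the first N bytes are, and other values mark the granule as poisoned (redzone, freed, and so on).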
