Searched for refs:pages_mapped (results 1 – 4 of 4), sorted by relevance

/linux-5.19.10/arch/parisc/kernel/pci-dma.c
309 unsigned int pages_mapped = size >> PAGE_SHIFT; in pcxl_free_range() (local)
312 mask >>= BITS_PER_LONG - pages_mapped; in pcxl_free_range()
315 res_idx, size, pages_mapped, mask); in pcxl_free_range()
319 if(pages_mapped <= 8) { in pcxl_free_range()
321 } else if(pages_mapped <= 16) { in pcxl_free_range()
323 } else if(pages_mapped <= 32) { in pcxl_free_range()
330 pcxl_used_pages -= (pages_mapped ? pages_mapped : 1); in pcxl_free_range()
331 pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1); in pcxl_free_range()
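The first hit is the PCX-L DMA bitmap allocator freeing a range. As a rough illustration of the pattern those lines show, here is a minimal user-space C sketch, assuming 4 KiB pages, a single-word resource map, and standalone usage counters (none of which are the kernel's real structures): the byte size is converted to a page count, an all-ones word is shifted so only that many bits remain set, those bits are cleared, and the counters are decremented the same way lines 330-331 of the hit do.

/*
 * Sketch only: the single-word resource_map, PAGE_SHIFT value and counters
 * are illustrative assumptions, not the kernel's data structures.
 */
#include <stdio.h>

#define PAGE_SHIFT    12                          /* assume 4 KiB pages */
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long resource_map = ~0UL;         /* 1 bit per allocated page */
static unsigned long used_pages   = BITS_PER_LONG;
static unsigned long used_bytes   = BITS_PER_LONG / 8;

static void free_range(unsigned long size)
{
        unsigned int pages_mapped = size >> PAGE_SHIFT;   /* bytes -> pages */
        unsigned long mask = ~0UL;

        /* keep only the low 'pages_mapped' bits set;
         * assumes 0 < pages_mapped <= BITS_PER_LONG */
        mask >>= BITS_PER_LONG - pages_mapped;

        resource_map &= ~mask;                    /* mark those pages free */

        /* usage accounting mirrors lines 330-331 of the hit above */
        used_pages -= pages_mapped ? pages_mapped : 1;
        used_bytes -= (pages_mapped >> 3) ? (pages_mapped >> 3) : 1;

        printf("freed %u pages, mask=%#lx, map=%#lx, used_pages=%lu, used_bytes=%lu\n",
               pages_mapped, mask, resource_map, used_pages, used_bytes);
}

int main(void)
{
        free_range(8 * 4096UL);                   /* free an 8-page range */
        return 0;
}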
/linux-5.19.10/drivers/parisc/ccio-dma.c
431 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) in ccio_free_range() (argument)
436 BUG_ON(pages_mapped == 0); in ccio_free_range()
437 BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE); in ccio_free_range()
438 BUG_ON(pages_mapped > BITS_PER_LONG); in ccio_free_range()
441 __func__, res_idx, pages_mapped); in ccio_free_range()
444 ioc->used_pages -= pages_mapped; in ccio_free_range()
447 if(pages_mapped <= 8) { in ccio_free_range()
450 unsigned long mask = ~(~0UL >> pages_mapped); in ccio_free_range()
455 } else if(pages_mapped <= 16) { in ccio_free_range()
457 } else if(pages_mapped <= 32) { in ccio_free_range()
[all …]
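The CCIO IOMMU's ccio_free_range() uses the complementary trick: after sanity checks that the count is non-zero and fits in a long, ~(~0UL >> pages_mapped) builds a mask whose top pages_mapped bits are set, and the <= 8 / <= 16 / <= 32 branches pick an 8-, 16- or 32-bit slot of the resource map to clear. The sketch below only reproduces that shape; the slot variables are assumptions standing in for the kernel's res_map entries.

/*
 * Sketch of the high-bit mask construction and width-based branching seen
 * in ccio_free_range(); the 'slot' values are illustrative, not the real
 * resource map.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
        unsigned long pages_mapped = 8;

        /* the same kind of sanity checks the kernel applies before freeing */
        assert(pages_mapped != 0);
        assert(pages_mapped <= BITS_PER_LONG);

        /* top 'pages_mapped' bits of a long are set;
         * shift is well defined for 0 < pages_mapped < BITS_PER_LONG */
        unsigned long mask = ~(~0UL >> pages_mapped);

        if (pages_mapped <= 8) {
                /* 8-bit resource-map slot: shift the high bits down to byte width */
                uint8_t slot = 0xff;
                slot &= (uint8_t)~(mask >> (BITS_PER_LONG - 8));
                printf("8-bit slot after free: %#x\n", (unsigned)slot);
        } else if (pages_mapped <= 16) {
                uint16_t slot = 0xffff;
                slot &= (uint16_t)~(mask >> (BITS_PER_LONG - 16));
                printf("16-bit slot after free: %#x\n", (unsigned)slot);
        } else if (pages_mapped <= 32) {
                uint32_t slot = 0xffffffffu;
                slot &= (uint32_t)~(mask >> (BITS_PER_LONG - 32));
                printf("32-bit slot after free: %#x\n", (unsigned)slot);
        }
        /* counts above 32 are not handled in this illustrative sketch */

        printf("mask = %#lx\n", mask);
        return 0;
}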
/linux-5.19.10/drivers/infiniband/hw/mlx5/umr.c
698 size_t pages_mapped = 0; in mlx5r_umr_update_xlt() (local)
741 for (pages_mapped = 0; in mlx5r_umr_update_xlt()
742 pages_mapped < pages_to_map && !err; in mlx5r_umr_update_xlt()
743 pages_mapped += pages_iter, idx += pages_iter) { in mlx5r_umr_update_xlt()
744 npages = min_t(int, pages_iter, pages_to_map - pages_mapped); in mlx5r_umr_update_xlt()
753 if (pages_mapped + pages_iter >= pages_to_map) in mlx5r_umr_update_xlt()
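In the mlx5 driver the variable is a loop cursor: mlx5r_umr_update_xlt() walks pages_to_map translation entries in pages_iter-sized batches, clamps the final batch, and flags the last iteration. The following sketch keeps only that loop shape; process_chunk() is a hypothetical stand-in for posting a UMR work request, and the sizes are arbitrary.

/*
 * Chunked-update loop sketch: batch size, totals and process_chunk() are
 * assumptions for illustration; the real code posts UMR work requests.
 */
#include <stddef.h>
#include <stdio.h>

static int process_chunk(size_t idx, size_t npages, int is_last)
{
        printf("chunk at %zu: %zu pages%s\n", idx, npages,
               is_last ? " (final)" : "");
        return 0;                               /* 0 = success */
}

int main(void)
{
        size_t pages_to_map = 10;               /* total entries to update */
        size_t pages_iter   = 4;                /* batch size per work request */
        size_t pages_mapped, idx = 0;
        int err = 0;

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, idx += pages_iter) {
                /* last batch may be shorter than pages_iter */
                size_t npages = pages_iter;
                if (pages_to_map - pages_mapped < npages)
                        npages = pages_to_map - pages_mapped;

                /* the final iteration is flagged, as in the driver */
                int is_last = (pages_mapped + pages_iter >= pages_to_map);

                err = process_chunk(idx, npages, is_last);
        }
        return err;
}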
/linux-5.19.10/net/ipv4/tcp.c
2036 unsigned int pages_mapped; in tcp_zerocopy_vm_insert_batch() (local)
2041 pages_mapped = pages_to_map - (unsigned int)pages_remaining; in tcp_zerocopy_vm_insert_batch()
2042 bytes_mapped = PAGE_SIZE * pages_mapped; in tcp_zerocopy_vm_insert_batch()
2053 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, in tcp_zerocopy_vm_insert_batch()
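The TCP zerocopy receive path uses pages_mapped for partial-success accounting: the batched insert may leave some pages uninserted, so the code derives pages_mapped from pages_to_map minus pages_remaining, converts it to bytes, and passes the unhandled tail (pages + pages_mapped) to an error helper. A minimal sketch of that accounting, with insert_batch() and handle_remainder() as hypothetical stand-ins for vm_insert_pages() and the kernel's error path:

/*
 * Partial-success accounting sketch: insert_batch() and handle_remainder()
 * are illustrative stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Pretend to insert 'count' pages but stop after the first three. */
static int insert_batch(void **pages, unsigned int *count)
{
        unsigned int done = *count < 3 ? *count : 3;

        (void)pages;                           /* a real insert would fill PTEs from this array */
        *count -= done;                        /* entries still not inserted */
        return *count ? -1 : 0;                /* mimic a partial failure */
}

static void handle_remainder(void **rest, unsigned int left)
{
        printf("handling %u trailing pages starting at %p\n",
               left, (void *)rest);
}

int main(void)
{
        void *pages[8] = { 0 };
        unsigned int pages_to_map = 8;
        unsigned int pages_remaining = pages_to_map;

        int err = insert_batch(pages, &pages_remaining);

        /* whatever was not left over must have been mapped */
        unsigned int pages_mapped = pages_to_map - pages_remaining;
        unsigned long bytes_mapped = (unsigned long)PAGE_SIZE * pages_mapped;

        printf("mapped %u pages (%lu bytes)\n", pages_mapped, bytes_mapped);
        if (err)
                handle_remainder(pages + pages_mapped, pages_remaining);
        return 0;
}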