Lines matching refs:dp (references to the struct dpages iterator in drivers/md/dm-io.c, the device-mapper I/O client)

169 void (*get_page)(struct dpages *dp,
171 void (*next_page)(struct dpages *dp);
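The two callbacks above are the whole iteration contract of struct dpages: get_page() reports the current page, the byte offset into it and how many bytes of it remain, and next_page() advances the cursor. A minimal userspace sketch of that shape, keeping only the generic context fields that appear in this listing and replacing struct page with a plain byte pointer (the real struct also carries a bvec_iter and the VMA-invalidation fields used further down):

/*
 * Simplified, userspace-only sketch of the dpages iterator interface.
 * Not the kernel definition: struct page becomes char *, and only the
 * two generic context fields shown in this listing are kept.
 */
#define PAGE_SIZE 4096UL

struct dpages {
        /* Report the current page, the offset into it and the bytes left in it. */
        void (*get_page)(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset);
        /* Advance the cursor to the start of the next page. */
        void (*next_page)(struct dpages *dp);

        unsigned int context_u;         /* backend-specific byte offset */
        void *context_ptr;              /* backend-specific cursor */
};

Each backend below supplies these two callbacks plus whatever cursor state it needs in context_u and context_ptr.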
186 static void list_get_page(struct dpages *dp, in list_get_page() argument
189 unsigned int o = dp->context_u; in list_get_page()
190 struct page_list *pl = dp->context_ptr; in list_get_page()
197 static void list_next_page(struct dpages *dp) in list_next_page() argument
199 struct page_list *pl = dp->context_ptr; in list_next_page()
201 dp->context_ptr = pl->next; in list_next_page()
202 dp->context_u = 0; in list_next_page()
205 static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset) in list_dp_init() argument
207 dp->get_page = list_get_page; in list_dp_init()
208 dp->next_page = list_next_page; in list_dp_init()
209 dp->context_u = offset; in list_dp_init()
210 dp->context_ptr = pl; in list_dp_init()
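The page_list backend keeps a pointer to the current list node in context_ptr and a byte offset into that node's page in context_u; only the first page can start at a non-zero offset because list_next_page() always resets it. A self-contained userspace rendering of the same three functions, with a char buffer standing in for struct page:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified dpages as sketched after the interface listing above. */
struct dpages {
        void (*get_page)(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset);
        void (*next_page)(struct dpages *dp);
        unsigned int context_u;
        void *context_ptr;
};

/* Stand-in for the kernel's struct page_list: one page-sized buffer per node. */
struct page_list {
        struct page_list *next;
        char page[PAGE_SIZE];
};

/* Return the current node's page, the offset into it and the bytes left. */
static void list_get_page(struct dpages *dp, char **p,
                          unsigned long *len, unsigned int *offset)
{
        unsigned int o = dp->context_u;
        struct page_list *pl = dp->context_ptr;

        *p = pl->page;
        *offset = o;
        *len = PAGE_SIZE - o;
}

/* Step to the next node; every page after the first starts at offset 0. */
static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = dp->context_ptr;

        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl,
                         unsigned int offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

int main(void)
{
        struct page_list b = { .next = NULL }, a = { .next = &b };
        struct dpages dp;
        char *p;
        unsigned long len;
        unsigned int off;

        list_dp_init(&dp, &a, 100);     /* start 100 bytes into the first page */
        dp.get_page(&dp, &p, &len, &off);
        printf("first chunk: offset %u, %lu bytes\n", off, len);   /* 100, 3996 */
        dp.next_page(&dp);
        dp.get_page(&dp, &p, &len, &off);
        printf("second chunk: offset %u, %lu bytes\n", off, len);  /* 0, 4096 */
        return 0;
}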
216 static void bio_get_page(struct dpages *dp, struct page **p, in bio_get_page() argument
219 struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr, in bio_get_page()
220 dp->context_bi); in bio_get_page()
227 dp->context_bi.bi_sector = (sector_t)bvec.bv_len; in bio_get_page()
230 static void bio_next_page(struct dpages *dp) in bio_next_page() argument
232 unsigned int len = (unsigned int)dp->context_bi.bi_sector; in bio_next_page()
234 bvec_iter_advance((struct bio_vec *)dp->context_ptr, in bio_next_page()
235 &dp->context_bi, len); in bio_next_page()
238 static void bio_dp_init(struct dpages *dp, struct bio *bio) in bio_dp_init() argument
240 dp->get_page = bio_get_page; in bio_dp_init()
241 dp->next_page = bio_next_page; in bio_dp_init()
247 dp->context_ptr = bio->bi_io_vec; in bio_dp_init()
248 dp->context_bi = bio->bi_iter; in bio_dp_init()
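The bio backend points context_ptr at bio->bi_io_vec and keeps a private copy of bio->bi_iter in context_bi; the chunk length handed out by bio_get_page() is stashed in the otherwise unused context_bi.bi_sector field so bio_next_page() can bvec_iter_advance() by exactly that amount. A loose userspace analogue, replacing the bvec_iter machinery with an explicit segment index and intra-segment offset (struct seg and struct seg_iter are illustrative stand-ins, not kernel types):

/* Illustrative stand-ins: a "bio" becomes an array of byte segments. */
struct seg {
        char *buf;
        unsigned long len;
};

struct seg_iter {
        unsigned int idx;       /* current segment */
        unsigned long done;     /* bytes already consumed in that segment */
        unsigned long last_len; /* plays the role of context_bi.bi_sector */
};

struct dpages {
        void (*get_page)(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset);
        void (*next_page)(struct dpages *dp);
        void *context_ptr;      /* the segment array */
        struct seg_iter iter;   /* plays the role of context_bi */
};

static void bio_get_page(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset)
{
        struct seg *segs = dp->context_ptr;
        struct seg *s = &segs[dp->iter.idx];

        *p = s->buf + dp->iter.done;
        *offset = 0;            /* the kernel reports bvec.bv_offset here */
        *len = s->len - dp->iter.done;

        /* Remember how much was reported so next_page() can advance by it. */
        dp->iter.last_len = *len;
}

static void bio_next_page(struct dpages *dp)
{
        struct seg *segs = dp->context_ptr;

        dp->iter.done += dp->iter.last_len;
        if (dp->iter.done >= segs[dp->iter.idx].len) {
                dp->iter.idx++;
                dp->iter.done = 0;
        }
}

static void bio_dp_init(struct dpages *dp, struct seg *segs)
{
        dp->get_page = bio_get_page;
        dp->next_page = bio_next_page;
        dp->context_ptr = segs;
        dp->iter.idx = 0;
        dp->iter.done = 0;
        dp->iter.last_len = 0;
}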
254 static void vm_get_page(struct dpages *dp, in vm_get_page() argument
257 *p = vmalloc_to_page(dp->context_ptr); in vm_get_page()
258 *offset = dp->context_u; in vm_get_page()
259 *len = PAGE_SIZE - dp->context_u; in vm_get_page()
262 static void vm_next_page(struct dpages *dp) in vm_next_page() argument
264 dp->context_ptr += PAGE_SIZE - dp->context_u; in vm_next_page()
265 dp->context_u = 0; in vm_next_page()
268 static void vm_dp_init(struct dpages *dp, void *data) in vm_dp_init() argument
270 dp->get_page = vm_get_page; in vm_dp_init()
271 dp->next_page = vm_next_page; in vm_dp_init()
272 dp->context_u = offset_in_page(data); in vm_dp_init()
273 dp->context_ptr = data; in vm_dp_init()
279 static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len, in km_get_page() argument
282 *p = virt_to_page(dp->context_ptr); in km_get_page()
283 *offset = dp->context_u; in km_get_page()
284 *len = PAGE_SIZE - dp->context_u; in km_get_page()
287 static void km_next_page(struct dpages *dp) in km_next_page() argument
289 dp->context_ptr += PAGE_SIZE - dp->context_u; in km_next_page()
290 dp->context_u = 0; in km_next_page()
293 static void km_dp_init(struct dpages *dp, void *data) in km_dp_init() argument
295 dp->get_page = km_get_page; in km_dp_init()
296 dp->next_page = km_next_page; in km_dp_init()
297 dp->context_u = offset_in_page(data); in km_dp_init()
298 dp->context_ptr = data; in km_dp_init()
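The vmalloc and kmalloc backends are identical apart from the address-to-page translation (vmalloc_to_page() versus virt_to_page()): context_ptr holds the current virtual address, context_u its offset within the page, and each step jumps to the next page boundary. A userspace sketch of that stepping over a flat buffer; offset_in_page() is reimplemented locally and no real page translation is attempted:

#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Userspace stand-in for the kernel's offset_in_page(). */
static unsigned int offset_in_page(const void *addr)
{
        return (unsigned int)((uintptr_t)addr & (PAGE_SIZE - 1));
}

struct dpages {
        void (*get_page)(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset);
        void (*next_page)(struct dpages *dp);
        unsigned int context_u;
        void *context_ptr;
};

static void vm_get_page(struct dpages *dp, char **p,
                        unsigned long *len, unsigned int *offset)
{
        /* The kernel returns vmalloc_to_page(context_ptr) here; for a flat
         * userspace buffer the "page" is just the page-aligned base. */
        *p = (char *)dp->context_ptr - dp->context_u;
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        /* Jump to the next page boundary; later pages start at offset 0. */
        dp->context_ptr = (char *)dp->context_ptr + (PAGE_SIZE - dp->context_u);
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = offset_in_page(data);
        dp->context_ptr = data;
}

km_dp_init() wires up the same stepping with virt_to_page() in place of vmalloc_to_page(); that is the only difference between the two backends.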
307 struct dm_io_region *where, struct dpages *dp, in do_region() argument
368 dp->get_page(dp, &page, &len, &offset); in do_region()
375 dp->next_page(dp); in do_region()
385 struct dm_io_region *where, struct dpages *dp, in dispatch_io() argument
389 struct dpages old_pages = *dp; in dispatch_io()
401 *dp = old_pages; in dispatch_io()
403 do_region(opf, i, where + i, dp, io); in dispatch_io()
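do_region() is the only consumer of the iterator: for each region it repeatedly asks get_page() for the current chunk, uses at most as many bytes as the region still needs, and then calls next_page(). dispatch_io() snapshots the struct dpages into old_pages and restores it before every region, so a write replicated to several regions re-reads the same payload from the start each time. A hedged userspace sketch of that control flow, logging byte counts instead of building and submitting bios:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct dpages {
        void (*get_page)(struct dpages *dp, char **p,
                         unsigned long *len, unsigned int *offset);
        void (*next_page)(struct dpages *dp);
        unsigned int context_u;
        void *context_ptr;
};

/* Minimal stand-in for dm_io_region: just a name and a byte count. */
struct region {
        const char *name;
        unsigned long bytes;
};

/* Loosely modelled on do_region(): walk one region's worth of data. */
static void do_region(struct region *where, struct dpages *dp)
{
        unsigned long remaining = where->bytes;

        while (remaining) {
                char *page;
                unsigned long len;
                unsigned int offset;

                dp->get_page(dp, &page, &len, &offset);
                if (len > remaining)
                        len = remaining;

                /* The kernel adds page/offset/len to a bio here; just log it. */
                printf("%s: %lu bytes at page offset %u\n",
                       where->name, len, offset);

                remaining -= len;
                dp->next_page(dp);
        }
}

/* Loosely modelled on dispatch_io(): replay the same data for each region. */
static void dispatch_io(unsigned int num_regions, struct region *where,
                        struct dpages *dp)
{
        struct dpages old_pages = *dp;  /* snapshot of the iterator state */
        unsigned int i;

        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;        /* rewind so every region sees all data */
                do_region(&where[i], dp);
        }
}

Pairing this loop with any of the backend sketches above gives the data-walking half of the read/write path in miniature.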
427 struct dm_io_region *where, blk_opf_t opf, struct dpages *dp, in sync_io() argument
447 io->vma_invalidate_address = dp->vma_invalidate_address; in sync_io()
448 io->vma_invalidate_size = dp->vma_invalidate_size; in sync_io()
450 dispatch_io(opf, num_regions, where, dp, io, 1); in sync_io()
462 struct dpages *dp, io_notify_fn fn, void *context) in async_io() argument
479 io->vma_invalidate_address = dp->vma_invalidate_address; in async_io()
480 io->vma_invalidate_size = dp->vma_invalidate_size; in async_io()
482 dispatch_io(opf, num_regions, where, dp, io, 0); in async_io()
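sync_io() and async_io() share the same setup: both carry the VMA-invalidation range from the dpages into the per-request struct io and hand everything to dispatch_io(), with the final argument selecting synchronous (1) or asynchronous (0) completion; sync_io() then waits and reports error bits, while async_io() returns and lets the notify callback fire later. A structural userspace sketch with simplified stand-in types and a stub dispatch_io() that completes immediately:

#include <errno.h>

struct dpages {
        /* iterator callbacks elided; only the fields used here are kept */
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
};

struct region;                          /* opaque stand-in for dm_io_region */

typedef void (*io_notify_fn)(unsigned long error_bits, void *context);

/* Per-request bookkeeping, loosely modelled on dm-io's struct io. */
struct io {
        unsigned long error_bits;
        int completed;                  /* sync path: wait on this */
        io_notify_fn fn;                /* async path: fire this on completion */
        void *context;
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
};

/* Stub for dispatch_io(); the real one builds and submits bios per region. */
static void dispatch_io(unsigned int num_regions, struct region *where,
                        struct dpages *dp, struct io *io, int sync)
{
        (void)num_regions; (void)where; (void)dp; (void)sync;
        io->error_bits = 0;
        io->completed = 1;              /* pretend all sub-I/Os finished */
        if (io->fn)
                io->fn(io->error_bits, io->context);
}

static int sync_io(unsigned int num_regions, struct region *where,
                   struct dpages *dp, unsigned long *error_bits)
{
        struct io io = { .fn = NULL };

        /* Both paths carry the VMA-invalidation info from the dpages. */
        io.vma_invalidate_address = dp->vma_invalidate_address;
        io.vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(num_regions, where, dp, &io, 1);
        while (!io.completed)           /* the kernel sleeps here instead */
                ;
        if (error_bits)
                *error_bits = io.error_bits;
        return io.error_bits ? -EIO : 0;
}

static void async_io(unsigned int num_regions, struct region *where,
                     struct dpages *dp, io_notify_fn fn, void *context)
{
        /* The real code allocates io from a mempool because completion is
         * asynchronous; the stack is only safe here because the stub
         * dispatch_io() completes before returning. */
        struct io io = { .fn = fn, .context = context };

        io.vma_invalidate_address = dp->vma_invalidate_address;
        io.vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(num_regions, where, dp, &io, 0);
}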
486 static int dp_init(struct dm_io_request *io_req, struct dpages *dp, in dp_init() argument
491 dp->vma_invalidate_address = NULL; in dp_init()
492 dp->vma_invalidate_size = 0; in dp_init()
496 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); in dp_init()
500 bio_dp_init(dp, io_req->mem.ptr.bio); in dp_init()
506 dp->vma_invalidate_address = io_req->mem.ptr.vma; in dp_init()
507 dp->vma_invalidate_size = size; in dp_init()
509 vm_dp_init(dp, io_req->mem.ptr.vma); in dp_init()
513 km_dp_init(dp, io_req->mem.ptr.addr); in dp_init()
527 struct dpages dp; in dm_io() local
529 r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT); in dm_io()
535 io_req->bi_opf, &dp, sync_error_bits); in dm_io()
538 io_req->bi_opf, &dp, io_req->notify.fn, in dm_io()
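dp_init() selects one of the four backends from io_req->mem.type and, for vmalloc'd memory, records the address range so it can be invalidated after a read completes; dm_io() then builds the iterator over where->count << SECTOR_SHIFT bytes and routes the request to the synchronous or asynchronous path depending on whether a notify callback was supplied. A condensed userspace sketch of that routing; the types are simplified stand-ins that keep only the fields this listing touches, and the backend init calls are left as comments pointing at the sketches above:

#include <stdio.h>
#include <errno.h>

#define SECTOR_SHIFT 9

/* Memory-source selector; the enum names mirror the kernel's dm_io_mem_type. */
enum dm_io_mem_type { DM_IO_PAGE_LIST, DM_IO_BIO, DM_IO_VMA, DM_IO_KMEM };

struct dm_io_memory {
        enum dm_io_mem_type type;
        unsigned int offset;            /* used by the page_list backend */
        void *ptr;                      /* pl / bio / vma / addr collapsed here */
};

struct dm_io_notify {
        void (*fn)(unsigned long error_bits, void *context);
        void *context;
};

struct dm_io_request {
        struct dm_io_memory mem;
        struct dm_io_notify notify;     /* fn == NULL means synchronous */
};

struct dm_io_region {
        unsigned long long count;       /* size in 512-byte sectors */
};

struct dpages {
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
        /* iterator callbacks and cursor elided in this sketch */
};

/* Pick a backend for the iterator; mirrors the shape of dp_init(). */
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                   unsigned long size)
{
        dp->vma_invalidate_address = NULL;
        dp->vma_invalidate_size = 0;

        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                /* list_dp_init(dp, io_req->mem.ptr, io_req->mem.offset); */
                break;
        case DM_IO_BIO:
                /* bio_dp_init(dp, io_req->mem.ptr); */
                break;
        case DM_IO_VMA:
                /* Remember the range so a read can invalidate it on completion. */
                dp->vma_invalidate_address = io_req->mem.ptr;
                dp->vma_invalidate_size = size;
                /* vm_dp_init(dp, io_req->mem.ptr); */
                break;
        case DM_IO_KMEM:
                /* km_dp_init(dp, io_req->mem.ptr); */
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/* Mirrors dm_io()'s routing: synchronous when no notify callback is given. */
static int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
                 struct dm_io_region *where)
{
        struct dpages dp;
        int r;

        r = dp_init(io_req, &dp,
                    (unsigned long)where->count << SECTOR_SHIFT);
        if (r)
                return r;

        if (!io_req->notify.fn)
                printf("sync path over %u region(s)\n", num_regions);
        else
                printf("async path, completion reported through notify.fn\n");
        return 0;
}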