Lines Matching refs:brd
55 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page() argument
73 page = radix_tree_lookup(&brd->brd_pages, idx); in brd_lookup_page()
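The pattern behind these two references is a sector-to-page-index mapping feeding a plain radix-tree lookup. Below is a minimal, hedged sketch of that lookup plus the struct fields the rest of this listing dereferences; the helper name, the macro names and the exact field layout are illustrative assumptions, not copies of the driver, and the later sketches reuse these definitions.

#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/rcupdate.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)	/* sectors per page, as a shift */
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/* Field layout inferred from the references in this listing; other fields omitted. */
struct brd_device {
	int			brd_number;
	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;
	spinlock_t		brd_lock;	/* serializes radix-tree insert/delete */
	struct radix_tree_root	brd_pages;	/* sector >> PAGE_SECTORS_SHIFT -> struct page */
};

static struct page *lookup_backing_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;	/* several sectors share one backing page */
	struct page *page;

	rcu_read_lock();				/* lookups do not take brd_lock */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	return page;					/* NULL: that range was never written */
}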
86 static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) in brd_insert_page() argument
92 page = brd_lookup_page(brd, sector); in brd_insert_page()
118 spin_lock(&brd->brd_lock); in brd_insert_page()
121 if (radix_tree_insert(&brd->brd_pages, idx, page)) { in brd_insert_page()
123 page = radix_tree_lookup(&brd->brd_pages, idx); in brd_insert_page()
127 spin_unlock(&brd->brd_lock); in brd_insert_page()
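The insert path is where the locking gets interesting: allocation and radix_tree_preload() happen before brd_lock is taken, and a failed radix_tree_insert() is treated as "someone else won the race", so the freshly allocated page is dropped and the winner's page is looked up instead. A hedged sketch of that pattern, reusing the illustrative definitions above:

static struct page *insert_backing_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
	struct page *page;

	page = lookup_backing_page(brd, sector);
	if (page)
		return page;				/* already backed, nothing to do */

	page = alloc_page(GFP_NOIO | __GFP_ZERO);	/* GFP_NOIO: avoid recursing into I/O */
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {		/* reserve tree nodes before the spinlock */
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	page->index = idx;				/* lets teardown recover the slot from the page */
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);			/* lost the race; reuse the existing page */
		page = radix_tree_lookup(&brd->brd_pages, idx);
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();
	return page;
}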
134 static void brd_free_page(struct brd_device *brd, sector_t sector) in brd_free_page() argument
139 spin_lock(&brd->brd_lock); in brd_free_page()
141 page = radix_tree_delete(&brd->brd_pages, idx); in brd_free_page()
142 spin_unlock(&brd->brd_lock); in brd_free_page()
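Freeing is the mirror image: unlink the page under brd_lock, release it once the lock is dropped. Sketch, same illustrative naming:

static void free_backing_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	spin_lock(&brd->brd_lock);
	page = radix_tree_delete(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);	/* nothing was backing this range otherwise */
}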
147 static void brd_zero_page(struct brd_device *brd, sector_t sector) in brd_zero_page() argument
151 page = brd_lookup_page(brd, sector); in brd_zero_page()
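Zeroing keeps the backing page but clears its contents, which matters for the discard path further down: the data disappears without forcing a fresh allocation later. Sketch:

static void zero_backing_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = lookup_backing_page(brd, sector);
	if (page)
		clear_highpage(page);	/* a never-written range already reads back as zeroes */
}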
161 static void brd_free_pages(struct brd_device *brd) in brd_free_pages() argument
170 nr_pages = radix_tree_gang_lookup(&brd->brd_pages, in brd_free_pages()
178 ret = radix_tree_delete(&brd->brd_pages, pos); in brd_free_pages()
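Teardown drains the whole tree in batches: radix_tree_gang_lookup() returns up to a batch of pages starting at the current position, each one is deleted (using the index remembered at insert time) and freed, and the loop ends when a batch comes back short. Sketch with a small on-stack batch; the batch size is an assumption. This runs unlocked, which is only reasonable because the callers shown in this listing (device free and the flush ioctl) run when no I/O can be in flight.

#define FREE_BATCH 16

static void free_all_backing_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);
		for (i = 0; i < nr_pages; i++) {
			void *ret;

			pos = pages[i]->index;			/* index stored by the insert path */
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}
		pos++;						/* resume after the last entry seen */
	} while (nr_pages == FREE_BATCH);			/* a short batch means the tree is empty */
}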
196 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) in copy_to_brd_setup() argument
202 if (!brd_insert_page(brd, sector)) in copy_to_brd_setup()
206 if (!brd_insert_page(brd, sector)) in copy_to_brd_setup()
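copy_to_brd_setup() exists so a write never fails half-way: a per-segment span that starts mid-page can touch at most two backing pages, and both are created up front, which is why the copy loop can assume its lookups always succeed. Sketch, using the same two-page split as the copy helpers below:

static int setup_copy_to_brd(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy = min_t(size_t, n, PAGE_SIZE - offset);	/* bytes landing in the first page */

	if (!insert_backing_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {						/* the write spills into the next page */
		sector += copy >> SECTOR_SHIFT;
		if (!insert_backing_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}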
212 static void discard_from_brd(struct brd_device *brd, in discard_from_brd() argument
222 brd_free_page(brd, sector); in discard_from_brd()
224 brd_zero_page(brd, sector); in discard_from_brd()
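The discard path walks the range one page at a time and either returns the backing page to the allocator or zeroes it in place; the zeroing variant is what keeps the discard_zeroes_data limit set in brd_alloc() honest. A hedged sketch of the loop; the driver's actual rule for choosing between freeing and zeroing is not reproduced, the boolean below is purely an illustrative knob:

static void discard_range(struct brd_device *brd, sector_t sector, size_t n,
			  bool drop_pages /* illustrative knob, not a driver parameter */)
{
	while (n >= PAGE_SIZE) {			/* only whole pages are worth handling */
		if (drop_pages)
			free_backing_page(brd, sector);	/* give the memory back */
		else
			zero_backing_page(brd, sector);	/* keep the page, clear its contents */
		sector += PAGE_SECTORS;
		n -= PAGE_SIZE;
	}
}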
233 static void copy_to_brd(struct brd_device *brd, const void *src, in copy_to_brd() argument
242 page = brd_lookup_page(brd, sector); in copy_to_brd()
253 page = brd_lookup_page(brd, sector); in copy_to_brd()
265 static void copy_from_brd(void *dst, struct brd_device *brd, in copy_from_brd() argument
274 page = brd_lookup_page(brd, sector); in copy_from_brd()
286 page = brd_lookup_page(brd, sector); in copy_from_brd()
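copy_to_brd() and copy_from_brd() share one shape: map the first backing page, copy the part that fits, then repeat once for the spill into the following page. On reads, a missing backing page simply zero-fills the caller's buffer; on writes, the setup step has already created both pages, so the lookups cannot fail. A sketch of both sides with illustrative names, assuming the single-argument kmap_atomic():

static void copy_out_of_brd(void *dst, struct brd_device *brd,
			    sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy = min_t(size_t, n, PAGE_SIZE - offset);	/* first page's share */
	struct page *page;
	void *src;

	page = lookup_backing_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else {
		memset(dst, 0, copy);		/* never-written sectors read back as zeroes */
	}

	if (copy < n) {				/* second half starts at the next page boundary */
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = lookup_backing_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);	/* offset is zero on the second page */
			kunmap_atomic(src);
		} else {
			memset(dst, 0, copy);
		}
	}
}

static void copy_into_brd(struct brd_device *brd, const void *src,
			  sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
	struct page *page;
	void *dst;

	page = lookup_backing_page(brd, sector);	/* guaranteed by setup_copy_to_brd() */
	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		page = lookup_backing_page(brd, sector);
		dst = kmap_atomic(page);
		memcpy(dst, src, n - copy);
		kunmap_atomic(dst);
	}
}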
299 static int brd_do_bvec(struct brd_device *brd, struct page *page, in brd_do_bvec() argument
307 err = copy_to_brd_setup(brd, sector, len); in brd_do_bvec()
314 copy_from_brd(mem + off, brd, sector, len); in brd_do_bvec()
318 copy_to_brd(brd, mem + off, sector, len); in brd_do_bvec()
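brd_do_bvec() is the per-segment worker: writes first run the setup step so nothing can fail once data starts moving, the bio's own page is mapped, and then the matching copy helper runs. Sketch using the rw/READ convention of kernels of this vintage and the illustrative helpers above:

static int do_one_segment(struct brd_device *brd, struct page *page,
			  unsigned int len, unsigned int off,
			  int rw, sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = setup_copy_to_brd(brd, sector, len);	/* backing pages allocated up front */
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);				/* the bio's page, not the backing page */
	if (rw == READ) {
		copy_out_of_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);			/* data changed under a possible user mapping */
	} else {
		flush_dcache_page(page);
		copy_into_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);
out:
	return err;
}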
329 struct brd_device *brd = bdev->bd_disk->private_data; in brd_make_request() local
343 discard_from_brd(brd, sector, bio->bi_size); in brd_make_request()
353 err = brd_do_bvec(brd, bvec->bv_page, len, in brd_make_request()
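brd_make_request() is registered as the queue's make_request_fn in brd_alloc(): discards are short-circuited to the discard helper, everything else is walked segment by segment through the worker above, and the bio is completed directly, with no request queueing at all. The sketch targets the old bio layout this listing shows (bi_sector/bi_size, pointer-based bio_for_each_segment, two-argument bio_endio); the capacity check is omitted and the function name is illustrative:

static void make_request_sketch(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;	/* installed in brd_alloc() */
	sector_t sector = bio->bi_sector;
	int rw = bio_data_dir(bio);
	struct bio_vec *bvec;
	int i, err = 0;

	if (bio->bi_rw & REQ_DISCARD) {				/* no data to copy, just drop/zero pages */
		discard_range(brd, sector, bio->bi_size, false);
		goto out;
	}

	bio_for_each_segment(bvec, bio, i) {			/* one copy per bio segment */
		unsigned int len = bvec->bv_len;

		err = do_one_segment(brd, bvec->bv_page, len,
				     bvec->bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}
out:
	bio_endio(bio, err);
}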
368 struct brd_device *brd = bdev->bd_disk->private_data; in brd_direct_access() local
371 if (!brd) in brd_direct_access()
377 page = brd_insert_page(brd, sector); in brd_direct_access()
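brd_direct_access() backs the block device's direct-access hook: it makes sure a backing page exists for the requested sector and hands back that page's kernel address and pfn, so callers can reach RAM-disk storage without going through the bio path. The exact prototype of this hook changed across kernel versions; the sketch below only shows the core steps and assumes lowmem backing pages:

static int direct_access_core(struct brd_device *brd, sector_t sector,
			      void **kaddr, unsigned long *pfn)
{
	struct page *page;

	if (!brd)
		return -ENODEV;
	page = insert_backing_page(brd, sector);	/* allocate on first touch */
	if (!page)
		return -ENOSPC;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);
	return 0;
}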
391 struct brd_device *brd = bdev->bd_disk->private_data; in brd_ioctl() local
412 brd_free_pages(brd); in brd_ioctl()
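The only ioctl visible in this listing is the flush path: BLKFLSBUF on a RAM disk throws away every backing page, which is the traditional way to reclaim its memory. A sketch of the core action, assuming a single opener and using invalidate_bdev() to drop cached buffers first; the driver's exact invalidation call and outer locking may differ:

static int flush_ram_disk(struct block_device *bdev, struct brd_device *brd)
{
	int error = -EBUSY;

	mutex_lock(&bdev->bd_mutex);		/* keep open()/release() out while pages vanish */
	if (bdev->bd_openers <= 1) {
		invalidate_bdev(bdev);		/* stale buffer-cache pages must not be written back */
		free_all_backing_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	return error;
}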
465 struct brd_device *brd; in brd_alloc() local
468 brd = kzalloc(sizeof(*brd), GFP_KERNEL); in brd_alloc()
469 if (!brd) in brd_alloc()
471 brd->brd_number = i; in brd_alloc()
472 spin_lock_init(&brd->brd_lock); in brd_alloc()
473 INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); in brd_alloc()
475 brd->brd_queue = blk_alloc_queue(GFP_KERNEL); in brd_alloc()
476 if (!brd->brd_queue) in brd_alloc()
478 blk_queue_make_request(brd->brd_queue, brd_make_request); in brd_alloc()
479 blk_queue_max_hw_sectors(brd->brd_queue, 1024); in brd_alloc()
480 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); in brd_alloc()
482 brd->brd_queue->limits.discard_granularity = PAGE_SIZE; in brd_alloc()
483 brd->brd_queue->limits.max_discard_sectors = UINT_MAX; in brd_alloc()
484 brd->brd_queue->limits.discard_zeroes_data = 1; in brd_alloc()
485 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue); in brd_alloc()
487 disk = brd->brd_disk = alloc_disk(1 << part_shift); in brd_alloc()
493 disk->private_data = brd; in brd_alloc()
494 disk->queue = brd->brd_queue; in brd_alloc()
499 return brd; in brd_alloc()
502 blk_cleanup_queue(brd->brd_queue); in brd_alloc()
504 kfree(brd); in brd_alloc()
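brd_alloc() stitches the pieces together: the per-device struct with its lock and radix tree, a request queue that feeds bios straight to the make-request handler (no elevator, since RAM has no seek cost), discard limits sized to whole pages, and a gendisk whose private_data points back at the brd_device. A condensed sketch of that sequence with the error unwinding kept explicit; major-number setup, disk naming and capacity are omitted, and part_shift stands in for the driver's module parameter:

static int part_shift;	/* a module parameter in the driver; a plain static suffices for the sketch */

static struct brd_device *alloc_ram_disk(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number = i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);	/* inserts happen under a spinlock */

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, make_request_sketch);	/* brd_make_request in the driver */
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);	/* RAM pages never need bounce buffers */

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;	/* discard operates on whole pages */
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;			/* backed by the zeroing helper */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);		/* minors reserved for partitions */
	if (!disk)
		goto out_free_queue;
	disk->private_data = brd;					/* lets the bio handler find the device */
	disk->queue = brd->brd_queue;
	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}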
509 static void brd_free(struct brd_device *brd) in brd_free() argument
511 put_disk(brd->brd_disk); in brd_free()
512 blk_cleanup_queue(brd->brd_queue); in brd_free()
513 brd_free_pages(brd); in brd_free()
514 kfree(brd); in brd_free()
519 struct brd_device *brd; in brd_init_one() local
521 list_for_each_entry(brd, &brd_devices, brd_list) { in brd_init_one()
522 if (brd->brd_number == i) in brd_init_one()
526 brd = brd_alloc(i); in brd_init_one()
527 if (brd) { in brd_init_one()
528 add_disk(brd->brd_disk); in brd_init_one()
529 list_add_tail(&brd->brd_list, &brd_devices); in brd_init_one()
532 return brd; in brd_init_one()
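brd_init_one() is the find-or-create helper behind on-demand probing: scan the module's brd_devices list for an existing device with that number, and only allocate, register and enqueue a new one when the scan misses. Sketch; the list lives at module scope in the driver, and any locking around the walk is assumed to be handled by the caller:

static LIST_HEAD(brd_devices);	/* module-scope list of instantiated RAM disks */

static struct brd_device *find_or_create_ram_disk(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			return brd;			/* already instantiated by a previous probe */
	}

	brd = alloc_ram_disk(i);
	if (brd) {
		add_disk(brd->brd_disk);		/* userspace can open it from this point on */
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	return brd;
}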
535 static void brd_del_one(struct brd_device *brd) in brd_del_one() argument
537 list_del(&brd->brd_list); in brd_del_one()
538 del_gendisk(brd->brd_disk); in brd_del_one()
539 brd_free(brd); in brd_del_one()
544 struct brd_device *brd; in brd_probe() local
548 brd = brd_init_one(MINOR(dev) >> part_shift); in brd_probe()
549 kobj = brd ? get_disk(brd->brd_disk) : NULL; in brd_probe()
560 struct brd_device *brd, *next; in brd_init() local
610 brd = brd_alloc(i); in brd_init()
611 if (!brd) in brd_init()
613 list_add_tail(&brd->brd_list, &brd_devices); in brd_init()
618 list_for_each_entry(brd, &brd_devices, brd_list) in brd_init()
619 add_disk(brd->brd_disk); in brd_init()
628 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { in brd_init()
629 list_del(&brd->brd_list); in brd_init()
630 brd_free(brd); in brd_init()
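Module init builds the pre-allocated devices in two passes: allocate and list them all first, then add_disk() each one, so a mid-way allocation failure can unwind a list that was never exposed to userspace; the unwind uses the _safe list walker because freeing invalidates the node being stood on. Major-number registration and capacity setup are omitted from the sketch, and free_ram_disk below condenses the brd_free() steps this listing shows (put_disk, blk_cleanup_queue, page teardown, kfree):

static void free_ram_disk(struct brd_device *brd)
{
	put_disk(brd->brd_disk);		/* drop the reference taken by alloc_disk() */
	blk_cleanup_queue(brd->brd_queue);
	free_all_backing_pages(brd);
	kfree(brd);
}

static int __init ram_disk_init_sketch(int rd_nr)
{
	struct brd_device *brd, *next;
	int i;

	for (i = 0; i < rd_nr; i++) {			/* pass 1: build every pre-allocated device */
		brd = alloc_ram_disk(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);		/* pass 2: expose them once everything allocated */

	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		free_ram_disk(brd);
	}
	return -ENOMEM;
}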
640 struct brd_device *brd, *next; in brd_exit() local
644 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) in brd_exit()
645 brd_del_one(brd); in brd_exit()