Lines matching refs: b (a short sketch of the page/word index arithmetic these helpers share follows the listing)
115 struct drbd_bitmap *b = device->bitmap; in __bm_print_lock_info() local
120 func, b->bm_why ?: "?", in __bm_print_lock_info()
121 b->bm_task->comm, task_pid_nr(b->bm_task)); in __bm_print_lock_info()
126 struct drbd_bitmap *b = device->bitmap; in drbd_bm_lock() local
129 if (!b) { in drbd_bm_lock()
134 trylock_failed = !mutex_trylock(&b->bm_change); in drbd_bm_lock()
139 why, b->bm_why ?: "?", in drbd_bm_lock()
140 b->bm_task->comm, task_pid_nr(b->bm_task)); in drbd_bm_lock()
141 mutex_lock(&b->bm_change); in drbd_bm_lock()
143 if (BM_LOCKED_MASK & b->bm_flags) in drbd_bm_lock()
145 b->bm_flags |= flags & BM_LOCKED_MASK; in drbd_bm_lock()
147 b->bm_why = why; in drbd_bm_lock()
148 b->bm_task = current; in drbd_bm_lock()
153 struct drbd_bitmap *b = device->bitmap; in drbd_bm_unlock() local
154 if (!b) { in drbd_bm_unlock()
162 b->bm_flags &= ~BM_LOCKED_MASK; in drbd_bm_unlock()
163 b->bm_why = NULL; in drbd_bm_unlock()
164 b->bm_task = NULL; in drbd_bm_unlock()
165 mutex_unlock(&b->bm_change); in drbd_bm_unlock()
213 struct drbd_bitmap *b = device->bitmap; in bm_page_lock_io() local
214 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_lock_io()
215 wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr)); in bm_page_lock_io()
220 struct drbd_bitmap *b = device->bitmap; in bm_page_unlock_io() local
221 void *addr = &page_private(b->bm_pages[page_nr]); in bm_page_unlock_io()
256 struct drbd_bitmap *b = device->bitmap; in drbd_bm_mark_for_writeout() local
264 BUG_ON(b->n_bitmap_hints >= ARRAY_SIZE(b->al_bitmap_hints)); in drbd_bm_mark_for_writeout()
266 b->al_bitmap_hints[b->n_bitmap_hints++] = page_nr; in drbd_bm_mark_for_writeout()
296 static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b, unsigned long long_nr) in bm_word_to_page_idx() argument
300 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_word_to_page_idx()
304 static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr) in bm_bit_to_page_idx() argument
308 BUG_ON(page_nr >= b->bm_number_of_pages); in bm_bit_to_page_idx()
312 static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) in __bm_map_pidx() argument
314 struct page *page = b->bm_pages[idx]; in __bm_map_pidx()
318 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx) in bm_map_pidx() argument
320 return __bm_map_pidx(b, idx); in bm_map_pidx()
378 static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want) in bm_realloc_pages() argument
380 struct page **old_pages = b->bm_pages; in bm_realloc_pages()
383 unsigned long have = b->bm_number_of_pages; in bm_realloc_pages()
435 struct drbd_bitmap *b = device->bitmap; in drbd_bm_init() local
436 WARN_ON(b != NULL); in drbd_bm_init()
437 b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL); in drbd_bm_init()
438 if (!b) in drbd_bm_init()
440 spin_lock_init(&b->bm_lock); in drbd_bm_init()
441 mutex_init(&b->bm_change); in drbd_bm_init()
442 init_waitqueue_head(&b->bm_io_wait); in drbd_bm_init()
444 device->bitmap = b; in drbd_bm_init()
482 static int bm_clear_surplus(struct drbd_bitmap *b) in bm_clear_surplus() argument
490 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_clear_surplus()
497 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_clear_surplus()
519 static void bm_set_surplus(struct drbd_bitmap *b) in bm_set_surplus() argument
526 tmp = (b->bm_bits & BITS_PER_PAGE_MASK); in bm_set_surplus()
533 p_addr = bm_map_pidx(b, b->bm_number_of_pages - 1); in bm_set_surplus()
554 static unsigned long bm_count_bits(struct drbd_bitmap *b) in bm_count_bits() argument
558 unsigned long mask = (1UL << (b->bm_bits & BITS_PER_LONG_MASK)) -1; in bm_count_bits()
562 for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) { in bm_count_bits()
563 p_addr = __bm_map_pidx(b, idx); in bm_count_bits()
569 last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL; in bm_count_bits()
570 p_addr = __bm_map_pidx(b, idx); in bm_count_bits()
582 static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len) in bm_memset() argument
590 if (end > b->bm_words) { in bm_memset()
597 idx = bm_word_to_page_idx(b, offset); in bm_memset()
598 p_addr = bm_map_pidx(b, idx); in bm_memset()
606 bm_set_page_need_writeout(b->bm_pages[idx]); in bm_memset()
632 struct drbd_bitmap *b = device->bitmap; in drbd_bm_resize() local
639 if (!expect(b)) in drbd_bm_resize()
647 if (capacity == b->bm_dev_capacity) in drbd_bm_resize()
651 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
652 opages = b->bm_pages; in drbd_bm_resize()
653 onpages = b->bm_number_of_pages; in drbd_bm_resize()
654 owords = b->bm_words; in drbd_bm_resize()
655 b->bm_pages = NULL; in drbd_bm_resize()
656 b->bm_number_of_pages = in drbd_bm_resize()
657 b->bm_set = in drbd_bm_resize()
658 b->bm_bits = in drbd_bm_resize()
659 b->bm_words = in drbd_bm_resize()
660 b->bm_dev_capacity = 0; in drbd_bm_resize()
661 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
687 have = b->bm_number_of_pages; in drbd_bm_resize()
689 D_ASSERT(device, b->bm_pages != NULL); in drbd_bm_resize()
690 npages = b->bm_pages; in drbd_bm_resize()
695 npages = bm_realloc_pages(b, want); in drbd_bm_resize()
703 spin_lock_irq(&b->bm_lock); in drbd_bm_resize()
704 opages = b->bm_pages; in drbd_bm_resize()
705 owords = b->bm_words; in drbd_bm_resize()
706 obits = b->bm_bits; in drbd_bm_resize()
710 bm_set_surplus(b); in drbd_bm_resize()
712 b->bm_pages = npages; in drbd_bm_resize()
713 b->bm_number_of_pages = want; in drbd_bm_resize()
714 b->bm_bits = bits; in drbd_bm_resize()
715 b->bm_words = words; in drbd_bm_resize()
716 b->bm_dev_capacity = capacity; in drbd_bm_resize()
720 bm_memset(b, owords, 0xff, words-owords); in drbd_bm_resize()
721 b->bm_set += bits - obits; in drbd_bm_resize()
723 bm_memset(b, owords, 0x00, words-owords); in drbd_bm_resize()
732 (void)bm_clear_surplus(b); in drbd_bm_resize()
734 spin_unlock_irq(&b->bm_lock); in drbd_bm_resize()
738 b->bm_set = bm_count_bits(b); in drbd_bm_resize()
756 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_total_weight() local
760 if (!expect(b)) in _drbd_bm_total_weight()
762 if (!expect(b->bm_pages)) in _drbd_bm_total_weight()
765 spin_lock_irqsave(&b->bm_lock, flags); in _drbd_bm_total_weight()
766 s = b->bm_set; in _drbd_bm_total_weight()
767 spin_unlock_irqrestore(&b->bm_lock, flags); in _drbd_bm_total_weight()
785 struct drbd_bitmap *b = device->bitmap; in drbd_bm_words() local
786 if (!expect(b)) in drbd_bm_words()
788 if (!expect(b->bm_pages)) in drbd_bm_words()
791 return b->bm_words; in drbd_bm_words()
796 struct drbd_bitmap *b = device->bitmap; in drbd_bm_bits() local
797 if (!expect(b)) in drbd_bm_bits()
800 return b->bm_bits; in drbd_bm_bits()
811 struct drbd_bitmap *b = device->bitmap; in drbd_bm_merge_lel() local
819 if (!expect(b)) in drbd_bm_merge_lel()
821 if (!expect(b->bm_pages)) in drbd_bm_merge_lel()
825 WARN_ON(offset >= b->bm_words); in drbd_bm_merge_lel()
826 WARN_ON(end > b->bm_words); in drbd_bm_merge_lel()
828 spin_lock_irq(&b->bm_lock); in drbd_bm_merge_lel()
831 idx = bm_word_to_page_idx(b, offset); in drbd_bm_merge_lel()
832 p_addr = bm_map_pidx(b, idx); in drbd_bm_merge_lel()
839 b->bm_set += hweight_long(word) - bits; in drbd_bm_merge_lel()
842 bm_set_page_need_writeout(b->bm_pages[idx]); in drbd_bm_merge_lel()
849 if (end == b->bm_words) in drbd_bm_merge_lel()
850 b->bm_set -= bm_clear_surplus(b); in drbd_bm_merge_lel()
851 spin_unlock_irq(&b->bm_lock); in drbd_bm_merge_lel()
860 struct drbd_bitmap *b = device->bitmap; in drbd_bm_get_lel() local
866 if (!expect(b)) in drbd_bm_get_lel()
868 if (!expect(b->bm_pages)) in drbd_bm_get_lel()
871 spin_lock_irq(&b->bm_lock); in drbd_bm_get_lel()
872 if ((offset >= b->bm_words) || in drbd_bm_get_lel()
873 (end > b->bm_words) || in drbd_bm_get_lel()
878 (unsigned long) b->bm_words); in drbd_bm_get_lel()
882 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, offset)); in drbd_bm_get_lel()
890 spin_unlock_irq(&b->bm_lock); in drbd_bm_get_lel()
896 struct drbd_bitmap *b = device->bitmap; in drbd_bm_set_all() local
897 if (!expect(b)) in drbd_bm_set_all()
899 if (!expect(b->bm_pages)) in drbd_bm_set_all()
902 spin_lock_irq(&b->bm_lock); in drbd_bm_set_all()
903 bm_memset(b, 0, 0xff, b->bm_words); in drbd_bm_set_all()
904 (void)bm_clear_surplus(b); in drbd_bm_set_all()
905 b->bm_set = b->bm_bits; in drbd_bm_set_all()
906 spin_unlock_irq(&b->bm_lock); in drbd_bm_set_all()
912 struct drbd_bitmap *b = device->bitmap; in drbd_bm_clear_all() local
913 if (!expect(b)) in drbd_bm_clear_all()
915 if (!expect(b->bm_pages)) in drbd_bm_clear_all()
918 spin_lock_irq(&b->bm_lock); in drbd_bm_clear_all()
919 bm_memset(b, 0, 0, b->bm_words); in drbd_bm_clear_all()
920 b->bm_set = 0; in drbd_bm_clear_all()
921 spin_unlock_irq(&b->bm_lock); in drbd_bm_clear_all()
941 struct drbd_bitmap *b = device->bitmap; in drbd_bm_endio() local
945 !bm_test_page_unchanged(b->bm_pages[idx])) in drbd_bm_endio()
952 bm_set_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
959 bm_clear_page_io_err(b->bm_pages[idx]); in drbd_bm_endio()
983 struct drbd_bitmap *b = device->bitmap; in bm_page_io_async() local
1001 bm_set_page_unchanged(b->bm_pages[page_nr]); in bm_page_io_async()
1006 copy_highpage(page, b->bm_pages[page_nr]); in bm_page_io_async()
1009 page = b->bm_pages[page_nr]; in bm_page_io_async()
1033 struct drbd_bitmap *b = device->bitmap; in bm_rw() local
1071 WARN_ON(!(BM_LOCKED_MASK & b->bm_flags)); in bm_rw()
1077 num_pages = b->bm_number_of_pages; in bm_rw()
1093 for (hint = 0; hint < b->n_bitmap_hints; hint++) { in bm_rw()
1094 i = b->al_bitmap_hints[hint]; in bm_rw()
1099 &page_private(b->bm_pages[i]))) in bm_rw()
1102 if (bm_test_page_unchanged(b->bm_pages[i])) in bm_rw()
1114 bm_test_page_unchanged(b->bm_pages[i])) { in bm_rw()
1121 !bm_test_page_lazy_writeout(b->bm_pages[i])) { in bm_rw()
1166 b->bm_set = bm_count_bits(b); in bm_rw()
1170 now = b->bm_set; in bm_rw()
1257 struct drbd_bitmap *b = device->bitmap; in __bm_find_next() local
1263 if (bm_fo > b->bm_bits) { in __bm_find_next()
1264 drbd_err(device, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); in __bm_find_next()
1267 while (bm_fo < b->bm_bits) { in __bm_find_next()
1270 p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo)); in __bm_find_next()
1282 if (bm_fo >= b->bm_bits) in __bm_find_next()
1297 struct drbd_bitmap *b = device->bitmap; in bm_find_next() local
1300 if (!expect(b)) in bm_find_next()
1302 if (!expect(b->bm_pages)) in bm_find_next()
1305 spin_lock_irq(&b->bm_lock); in bm_find_next()
1306 if (BM_DONT_TEST & b->bm_flags) in bm_find_next()
1311 spin_unlock_irq(&b->bm_lock); in bm_find_next()
1351 struct drbd_bitmap *b = device->bitmap; in __bm_change_bits_to() local
1358 if (e >= b->bm_bits) { in __bm_change_bits_to()
1360 s, e, b->bm_bits); in __bm_change_bits_to()
1361 e = b->bm_bits ? b->bm_bits -1 : 0; in __bm_change_bits_to()
1364 unsigned int page_nr = bm_bit_to_page_idx(b, bitnr); in __bm_change_bits_to()
1369 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1371 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1374 p_addr = __bm_map_pidx(b, page_nr); in __bm_change_bits_to()
1385 bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1387 bm_set_page_need_writeout(b->bm_pages[last_page_nr]); in __bm_change_bits_to()
1389 b->bm_set += changed_total; in __bm_change_bits_to()
1401 struct drbd_bitmap *b = device->bitmap; in bm_change_bits_to() local
1404 if (!expect(b)) in bm_change_bits_to()
1406 if (!expect(b->bm_pages)) in bm_change_bits_to()
1409 spin_lock_irqsave(&b->bm_lock, flags); in bm_change_bits_to()
1410 if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags) in bm_change_bits_to()
1415 spin_unlock_irqrestore(&b->bm_lock, flags); in bm_change_bits_to()
1433 static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b, in bm_set_full_words_within_one_page() argument
1439 unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1453 bm_set_page_lazy_writeout(b->bm_pages[page_nr]); in bm_set_full_words_within_one_page()
1454 b->bm_set += changed; in bm_set_full_words_within_one_page()
1473 struct drbd_bitmap *b = device->bitmap; in _drbd_bm_set_bits() local
1484 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1486 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1492 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1509 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1512 spin_lock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1533 spin_unlock_irq(&b->bm_lock); in _drbd_bm_set_bits()
1546 struct drbd_bitmap *b = device->bitmap; in drbd_bm_test_bit() local
1550 if (!expect(b)) in drbd_bm_test_bit()
1552 if (!expect(b->bm_pages)) in drbd_bm_test_bit()
1555 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_test_bit()
1556 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_test_bit()
1558 if (bitnr < b->bm_bits) { in drbd_bm_test_bit()
1559 p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr)); in drbd_bm_test_bit()
1562 } else if (bitnr == b->bm_bits) { in drbd_bm_test_bit()
1565 drbd_err(device, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_test_bit()
1569 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_test_bit()
1577 struct drbd_bitmap *b = device->bitmap; in drbd_bm_count_bits() local
1587 if (!expect(b)) in drbd_bm_count_bits()
1589 if (!expect(b->bm_pages)) in drbd_bm_count_bits()
1592 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_count_bits()
1593 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_count_bits()
1596 unsigned int idx = bm_bit_to_page_idx(b, bitnr); in drbd_bm_count_bits()
1601 p_addr = bm_map_pidx(b, idx); in drbd_bm_count_bits()
1603 if (expect(bitnr < b->bm_bits)) in drbd_bm_count_bits()
1606 drbd_err(device, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); in drbd_bm_count_bits()
1610 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_count_bits()
1631 struct drbd_bitmap *b = device->bitmap; in drbd_bm_e_weight() local
1636 if (!expect(b)) in drbd_bm_e_weight()
1638 if (!expect(b->bm_pages)) in drbd_bm_e_weight()
1641 spin_lock_irqsave(&b->bm_lock, flags); in drbd_bm_e_weight()
1642 if (BM_DONT_TEST & b->bm_flags) in drbd_bm_e_weight()
1646 e = min((size_t)S2W(enr+1), b->bm_words); in drbd_bm_e_weight()
1648 if (s < b->bm_words) { in drbd_bm_e_weight()
1650 p_addr = bm_map_pidx(b, bm_word_to_page_idx(b, s)); in drbd_bm_e_weight()
1657 spin_unlock_irqrestore(&b->bm_lock, flags); in drbd_bm_e_weight()
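All of the call sites above go through the same paged-bitmap indexing scheme: a bit number or word number is first translated to a page index (bm_bit_to_page_idx(), bm_word_to_page_idx()), that page is mapped via bm_map_pidx()/__bm_map_pidx(), and only then is the word inside the page read or modified. The sketch below is a minimal userspace illustration of that arithmetic, not the kernel code; the 4096-byte page size, struct paged_bitmap, and the function names word_to_page_idx()/bit_to_page_idx() are assumptions made for the example.

    /*
     * Hypothetical userspace sketch of the page/word index arithmetic
     * mirrored by bm_bit_to_page_idx() and bm_word_to_page_idx() above.
     * PAGE_SIZE and struct paged_bitmap are assumptions for illustration.
     */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096UL                          /* assumed page size */
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))
    #define LONGS_PER_PAGE  (PAGE_SIZE / sizeof(unsigned long))
    #define BITS_PER_PAGE   (PAGE_SIZE * 8)

    struct paged_bitmap {
            unsigned long bm_bits;              /* total number of bits */
            unsigned int  bm_number_of_pages;   /* pages backing the bitmap */
    };

    /* word number -> page index, as bm_word_to_page_idx() does in the kernel */
    static unsigned int word_to_page_idx(const struct paged_bitmap *b,
                                         unsigned long long_nr)
    {
            unsigned int page_nr = long_nr / LONGS_PER_PAGE;
            assert(page_nr < b->bm_number_of_pages);
            return page_nr;
    }

    /* bit number -> page index, as bm_bit_to_page_idx() does in the kernel */
    static unsigned int bit_to_page_idx(const struct paged_bitmap *b,
                                        uint64_t bitnr)
    {
            unsigned int page_nr = bitnr / BITS_PER_PAGE;
            assert(page_nr < b->bm_number_of_pages);
            return page_nr;
    }

    int main(void)
    {
            struct paged_bitmap b = {
                    .bm_bits = 100000,
                    .bm_number_of_pages = (100000 + BITS_PER_PAGE - 1) / BITS_PER_PAGE,
            };
            unsigned long word_in_page = (70000UL / BITS_PER_LONG) % LONGS_PER_PAGE;

            printf("bit 70000 -> page %u, word %lu within that page\n",
                   bit_to_page_idx(&b, 70000), word_in_page);
            printf("word 600 -> page %u\n", word_to_page_idx(&b, 600));
            return 0;
    }

Splitting the bitmap across individual pages rather than one large allocation is what lets the write-out path in the listing track dirtiness per page (bm_set_page_need_writeout(), bm_set_page_lazy_writeout(), the al_bitmap_hints array) and flush only the pages that actually changed.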