Lines matching references to k (each line below gives: source line number, the code fragment, the enclosing function, and whether k is a local or an argument there)

23 struct bkey *k, *next; in bch_dump_bset() local
25 for (k = i->start; k < bset_bkey_last(i); k = next) { in bch_dump_bset()
26 next = bkey_next(k); in bch_dump_bset()
29 (unsigned int) ((u64 *) k - i->d), i->keys); in bch_dump_bset()
32 b->ops->key_dump(b, k); in bch_dump_bset()
34 pr_cont("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k)); in bch_dump_bset()
37 bkey_cmp(k, b->ops->is_extents ? in bch_dump_bset()
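
The fragments above walk a bset front to back. Keys are variable-length, so bkey_next() is pointer arithmetic over u64 words, and there is no way to step backwards. A minimal userspace sketch of the same walk; xkey and its layout are stand-ins of mine, not bcache's packed struct bkey:

#include <stdint.h>
#include <stdio.h>

struct xkey {			/* stand-in for struct bkey */
	uint64_t u64s;		/* total size of this key, in u64 words */
	uint64_t inode;
	uint64_t offset;
	uint64_t ptr[];		/* 0..n data pointers follow */
};

static struct xkey *xkey_next(struct xkey *k)
{
	/* Variable-length keys: the only way forward is to skip the
	 * current key's words, exactly what bkey_next() does. */
	return (struct xkey *)((uint64_t *)k + k->u64s);
}

int main(void)
{
	uint64_t buf[] = {
		4, 1, 100, 0xaa,	/* key 1: one pointer word */
		3, 1, 200,		/* key 2: no pointer words */
	};
	uint64_t *end = buf + sizeof(buf) / sizeof(buf[0]);

	for (struct xkey *k = (struct xkey *)buf;
	     (uint64_t *)k < end;
	     k = xkey_next(k))
		printf("key at word %u: %llu:%llu\n",	/* cf. pr_cont() above */
		       (unsigned)((uint64_t *)k - buf),
		       (unsigned long long)k->inode,
		       (unsigned long long)k->offset);
	return 0;
}
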
58 struct bkey *k; in __bch_count_data() local
61 for_each_key(b, k, &iter) in __bch_count_data()
62 ret += KEY_SIZE(k); in __bch_count_data()
69 struct bkey *k, *p = NULL; in __bch_check_keys() local
73 for_each_key(b, k, &iter) { in __bch_check_keys()
76 if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0) in __bch_check_keys()
79 if (bch_ptr_invalid(b, k)) in __bch_check_keys()
83 if (p && bkey_cmp(p, &START_KEY(k)) > 0) in __bch_check_keys()
86 if (bch_ptr_bad(b, k)) in __bch_check_keys()
90 if (p && !bkey_cmp(p, k)) in __bch_check_keys()
93 p = k; in __bch_check_keys()
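
__bch_count_data() and __bch_check_keys() above are debug checks: the first sums KEY_SIZE over live keys, the second verifies sorting, extent overlap, and duplicate invariants. A self-contained sketch of both, under the simplifying model that a key's offset names its end and its start is offset - size; xkey and the helpers are stand-ins, not bcache's types:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct xkey { uint64_t inode, offset, size; };	/* stand-in */

/* Order keys by (inode, offset), like bkey_cmp(). */
static int xkey_cmp(const struct xkey *l, const struct xkey *r)
{
	if (l->inode != r->inode)
		return l->inode < r->inode ? -1 : 1;
	if (l->offset != r->offset)
		return l->offset < r->offset ? -1 : 1;
	return 0;
}

static struct xkey start_key(const struct xkey *k)
{
	return (struct xkey){ k->inode, k->offset - k->size, 0 };
}

/* __bch_count_data() in miniature: total sectors held by the keys. */
static uint64_t count_data(const struct xkey *keys, unsigned n)
{
	uint64_t ret = 0;

	for (unsigned i = 0; i < n; i++)
		ret += keys[i].size;
	return ret;
}

/* __bch_check_keys() in miniature: starts must be sorted, no extent
 * may begin before the previous one ends, and non-extent sets may
 * not hold exact duplicates. */
static bool check_keys(const struct xkey *keys, unsigned n, bool extents)
{
	for (unsigned i = 1; i < n; i++) {
		struct xkey ps = start_key(&keys[i - 1]);
		struct xkey cs = start_key(&keys[i]);

		if (xkey_cmp(&ps, &cs) > 0)
			return false;		/* keys out of order */
		if (extents && xkey_cmp(&keys[i - 1], &cs) > 0)
			return false;		/* overlapping extents */
		if (!extents && !xkey_cmp(&keys[i - 1], &keys[i]))
			return false;		/* duplicate keys */
	}
	return true;
}

int main(void)
{
	struct xkey ok[]  = { {1, 10, 10}, {1, 20, 10} };
	struct xkey bad[] = { {1, 20, 10}, {1, 15, 10} };

	assert(check_keys(ok, 2, true));
	assert(!check_keys(bad, 2, true));
	assert(count_data(ok, 2) == 20);
	printf("invariants hold\n");
	return 0;
}
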
113 struct bkey *k = iter->data->k, *next = bkey_next(k); in bch_btree_iter_next_check() local
116 bkey_cmp(k, iter->b->ops->is_extents ? in bch_btree_iter_next_check()
161 struct bkey *k = l->keys; in bch_keylist_pop() local
163 if (k == l->top) in bch_keylist_pop()
166 while (bkey_next(k) != l->top) in bch_keylist_pop()
167 k = bkey_next(k); in bch_keylist_pop()
169 return l->top = k; in bch_keylist_pop()
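
bch_keylist_pop() shows the cost of the forward-only layout: to drop the last key you must walk from the front until bkey_next(k) lands on top. A sketch, with word 0 of each fake key holding its length in u64s (xlist stands in for bcache's keylist):

#include <stdint.h>
#include <stdio.h>

struct xlist {			/* stand-in for struct keylist */
	uint64_t *keys;		/* first key */
	uint64_t *top;		/* one past the last key */
};

static uint64_t *xkey_next(uint64_t *k)
{
	return k + k[0];	/* word 0 of a key = its length in u64s */
}

static uint64_t *xlist_pop(struct xlist *l)
{
	uint64_t *k = l->keys;

	if (k == l->top)	/* empty list */
		return NULL;

	while (xkey_next(k) != l->top)	/* walk to the last key */
		k = xkey_next(k);

	return l->top = k;	/* truncate; k is where the popped key was */
}

int main(void)
{
	uint64_t buf[] = { 3, 1, 100, 2, 7, 4, 2, 300, 0 };
	struct xlist l = { buf, buf + 9 };

	printf("popped key started at word %ld\n",
	       (long)(xlist_pop(&l) - buf));	/* word 5 */
	return 0;
}
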
197 bool __bch_cut_front(const struct bkey *where, struct bkey *k) in __bch_cut_front() argument
201 if (bkey_cmp(where, &START_KEY(k)) <= 0) in __bch_cut_front()
204 if (bkey_cmp(where, k) < 0) in __bch_cut_front()
205 len = KEY_OFFSET(k) - KEY_OFFSET(where); in __bch_cut_front()
207 bkey_copy_key(k, where); in __bch_cut_front()
209 for (i = 0; i < KEY_PTRS(k); i++) in __bch_cut_front()
210 SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len); in __bch_cut_front()
212 BUG_ON(len > KEY_SIZE(k)); in __bch_cut_front()
213 SET_KEY_SIZE(k, len); in __bch_cut_front()
217 bool __bch_cut_back(const struct bkey *where, struct bkey *k) in __bch_cut_back() argument
221 if (bkey_cmp(where, k) >= 0) in __bch_cut_back()
224 BUG_ON(KEY_INODE(where) != KEY_INODE(k)); in __bch_cut_back()
226 if (bkey_cmp(where, &START_KEY(k)) > 0) in __bch_cut_back()
227 len = KEY_OFFSET(where) - KEY_START(k); in __bch_cut_back()
229 bkey_copy_key(k, where); in __bch_cut_back()
231 BUG_ON(len > KEY_SIZE(k)); in __bch_cut_back()
232 SET_KEY_SIZE(k, len); in __bch_cut_back()
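
__bch_cut_front() and __bch_cut_back() trim an extent against a cut point: cutting the front moves the start forward and must advance every pointer's offset by the number of sectors removed, while cutting the back only moves the end and leaves the pointers alone. A single-pointer, single-inode sketch; xext is a stand-in, while real bcache keys pack these fields and may carry several pointers:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct xext {			/* stand-in for an extent bkey */
	uint64_t offset;	/* END of the extent in the keyspace */
	uint64_t size;		/* it covers [offset - size, offset) */
	uint64_t ptr_offset;	/* where its data starts on disk */
};

/* __bch_cut_front() in miniature: drop everything before `where`.
 * The surviving data no longer starts where the pointer points, so
 * the pointer advances by however much was cut (old size - len). */
static bool cut_front(uint64_t where, struct xext *k)
{
	uint64_t len = 0;

	if (where <= k->offset - k->size)
		return false;		/* cut point before the extent */

	if (where < k->offset)
		len = k->offset - where;	/* tail that survives */
	else
		k->offset = where;	/* whole extent consumed */

	k->ptr_offset += k->size - len;
	k->size = len;
	return true;
}

/* __bch_cut_back() in miniature: drop everything at or after
 * `where`. The front is untouched, so the pointer stays put. */
static bool cut_back(uint64_t where, struct xext *k)
{
	uint64_t start = k->offset - k->size;
	uint64_t len = 0;

	if (where >= k->offset)
		return false;		/* cut point past the extent */

	if (where > start)
		len = where - start;	/* head that survives */

	k->offset = where;
	k->size = len;
	return true;
}

int main(void)
{
	struct xext k = { .offset = 100, .size = 20, .ptr_offset = 640 };

	cut_front(90, &k);	/* [80,100) -> [90,100), ptr += 10 */
	assert(k.size == 10 && k.ptr_offset == 650);

	cut_back(95, &k);	/* [90,100) -> [90,95), ptr unchanged */
	assert(k.size == 5 && k.offset == 95 && k.ptr_offset == 650);

	printf("ok\n");
	return 0;
}
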
532 static unsigned int bkey_to_cacheline(struct bset_tree *t, struct bkey *k) in bkey_to_cacheline() argument
534 return ((void *) k - (void *) t->data) / BSET_CACHELINE; in bkey_to_cacheline()
539 struct bkey *k) in bkey_to_cacheline_offset() argument
541 return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0); in bkey_to_cacheline_offset()
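
bkey_to_cacheline() and bkey_to_cacheline_offset() above are the unit conversions behind the lookup table: which BSET_CACHELINE-sized chunk a key starts in, and how many u64 words into that chunk it sits. The arithmetic in isolation; the names and the 64-byte constant are local to this sketch:

#include <stdint.h>
#include <stdio.h>

#define XSET_CACHELINE 64	/* bytes per chunk, fixed like bcache's */

/* bkey_to_cacheline(): which chunk of the set a key starts in. */
static unsigned to_cacheline(const void *data, const void *k)
{
	return (unsigned)(((const char *)k - (const char *)data) /
			  XSET_CACHELINE);
}

/* cacheline_to_bkey(): a word position inside a given chunk. */
static uint64_t *to_bkey(void *data, unsigned cacheline, unsigned offset)
{
	return (uint64_t *)((char *)data + cacheline * XSET_CACHELINE)
	       + offset;
}

/* bkey_to_cacheline_offset(): the value the lookup table stores, a
 * key's distance in u64 words from the start of its chunk. */
static unsigned to_cacheline_offset(void *data, unsigned cacheline,
				    uint64_t *k)
{
	return (unsigned)(k - to_bkey(data, cacheline, 0));
}

int main(void)
{
	uint64_t set[32] = { 0 };
	uint64_t *k = set + 10;		/* a key at word 10 = byte 80 */
	unsigned c = to_cacheline(set, k);
	unsigned o = to_cacheline_offset(set, c, k);

	printf("cacheline %u, offset %u words\n", c, o);	/* 1, 2 */
	return 0;
}
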
584 static inline unsigned int bfloat_mantissa(const struct bkey *k, in bfloat_mantissa() argument
587 const uint64_t *p = &k->low - (f->exponent >> 6); in bfloat_mantissa()
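
bfloat_mantissa() reads a small window of bits out of a key for the compressed search-tree comparison: f->exponent >> 6 selects which u64 word to read, stepping backwards from k->low, and in the kernel the remaining low bits shift within that word before masking. A sketch of that bit-windowing trick; the mantissa width and the types here are mine, and the cross-member pointer step assumes high sits directly before low, as struct bkey lays it out:

#include <stdint.h>
#include <stdio.h>

#define MANTISSA_BITS 18	/* width is arbitrary in this sketch */
#define MANTISSA_MASK ((1U << MANTISSA_BITS) - 1)

/* The two words key comparison actually looks at, most significant
 * first, mirroring struct bkey's high/low. */
struct xkey { uint64_t high, low; };

static unsigned mantissa(const struct xkey *k, unsigned exponent)
{
	/* exponent >> 6 steps back one u64 per 64 bits: 0..63 reads
	 * from low, 64..127 from high, just like
	 * &k->low - (f->exponent >> 6) above. */
	const uint64_t *p = &k->low - (exponent >> 6);

	return (unsigned)(*p >> (exponent & 63)) & MANTISSA_MASK;
}

int main(void)
{
	struct xkey k = { .high = 0x1234, .low = 0xdeadbeefcafef00dULL };

	printf("%x\n", mantissa(&k, 12));	/* 18 bits of low, from bit 12 */
	printf("%x\n", mantissa(&k, 64));	/* 18 bits of high, from bit 0 */
	return 0;
}
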
693 struct bkey *prev = NULL, *k = t->data->start; in bch_bset_build_written_tree() local
715 while (bkey_to_cacheline(t, k) < cacheline) { in bch_bset_build_written_tree()
716 prev = k; in bch_bset_build_written_tree()
717 k = bkey_next(k); in bch_bset_build_written_tree()
721 t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); in bch_bset_build_written_tree()
724 while (bkey_next(k) != bset_bkey_last(t->data)) in bch_bset_build_written_tree()
725 k = bkey_next(k); in bch_bset_build_written_tree()
727 t->end = *k; in bch_bset_build_written_tree()
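
bch_bset_build_written_tree() makes one pass over a sorted, read-only set, remembering the first key at or after each cacheline boundary, plus a copy of the last key in t->end. The real code stores these in-order into its array-packed binary tree of bkey_float nodes (keeping the preceding key too); a flat table gives the same walk in miniature:

#include <stdint.h>
#include <stdio.h>

#define XSET_CACHELINE 64	/* bytes per chunk */

static uint64_t *xkey_next(uint64_t *k)
{
	return k + k[0];	/* word 0 of a key = its length in u64s */
}

static unsigned to_cacheline(uint64_t *start, uint64_t *k)
{
	return (unsigned)(((char *)k - (char *)start) / XSET_CACHELINE);
}

/* One pass over a sorted set: remember the first key at or after
 * each cacheline boundary, mirroring the advance-then-record loop
 * in bch_bset_build_written_tree(). */
static unsigned build_table(uint64_t *start, uint64_t *end,
			    uint64_t **table, unsigned max)
{
	uint64_t *k = start;
	unsigned cacheline = 1, size = 0;

	while (size < max && k < end) {
		table[size++] = k;
		while (k < end && to_cacheline(start, k) < cacheline)
			k = xkey_next(k);	/* skip to next chunk */
		cacheline++;
	}
	return size;
}

int main(void)
{
	uint64_t set[64], *k = set, *table[8];
	unsigned lens[] = { 3, 5, 4, 3, 6, 5, 4, 3, 5 };

	for (unsigned i = 0; i < 9; i++) {	/* lay out 9 fake keys */
		k[0] = lens[i];
		k = xkey_next(k);
	}

	for (unsigned i = 0, n = build_table(set, k, table, 8); i < n; i++)
		printf("cacheline %u -> key at word %ld\n",
		       i, (long)(table[i] - set));
	return 0;
}
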
738 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k) in bch_bset_fix_invalidated_key() argument
744 if (k < bset_bkey_last(t->data)) in bch_bset_fix_invalidated_key()
752 inorder = bkey_to_cacheline(t, k); in bch_bset_fix_invalidated_key()
754 if (k == t->data->start) in bch_bset_fix_invalidated_key()
757 if (bkey_next(k) == bset_bkey_last(t->data)) { in bch_bset_fix_invalidated_key()
758 t->end = *k; in bch_bset_fix_invalidated_key()
766 k == tree_to_bkey(t, j)) in bch_bset_fix_invalidated_key()
776 k == tree_to_prev_bkey(t, j)) in bch_bset_fix_invalidated_key()
785 struct bkey *k) in bch_bset_fix_lookup_table() argument
787 unsigned int shift = bkey_u64s(k); in bch_bset_fix_lookup_table()
788 unsigned int j = bkey_to_cacheline(t, k); in bch_bset_fix_lookup_table()
800 table_to_bkey(t, j) <= k) in bch_bset_fix_lookup_table()
811 k = table_to_bkey(t, j - 1); in bch_bset_fix_lookup_table()
813 while (k < cacheline_to_bkey(t, j, 0)) in bch_bset_fix_lookup_table()
814 k = bkey_next(k); in bch_bset_fix_lookup_table()
816 t->prev[j] = bkey_to_cacheline_offset(t, j, k); in bch_bset_fix_lookup_table()
825 for (k = table_to_bkey(t, t->size - 1); in bch_bset_fix_lookup_table()
826 k != bset_bkey_last(t->data); in bch_bset_fix_lookup_table()
827 k = bkey_next(k)) in bch_bset_fix_lookup_table()
828 if (t->size == bkey_to_cacheline(t, k)) { in bch_bset_fix_lookup_table()
830 bkey_to_cacheline_offset(t, t->size, k); in bch_bset_fix_lookup_table()
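
bch_bset_fix_lookup_table() patches the lookup table for the still-growing set: after a key of `shift` u64s is inserted, every entry past the insertion point slides right, and an entry pushed out of its cacheline is recomputed by walking keys forward from the previous entry. A sketch over a flat table of absolute word indexes; the real code also appends entries when the set grows into a fresh cacheline, which is elided here:

#include <stdint.h>
#include <stdio.h>

#define LINE_WORDS 8	/* one "cacheline" = 8 u64 words here */

static uint64_t *xkey_next(uint64_t *k)
{
	return k + k[0];	/* word 0 of a key = its length in u64s */
}

/* table[j] = word index of a key starting in cacheline j. */
static void fix_table(uint64_t *set, unsigned *table, unsigned nlines,
		      unsigned at, unsigned shift)
{
	for (unsigned j = 0; j < nlines; j++) {
		if (table[j] <= at)
			continue;	/* at or before the new key */
		table[j] += shift;

		if (table[j] >= (j + 1) * LINE_WORDS) {
			/* Slid out of cacheline j: rescan from the
			 * previous entry, like table_to_bkey(t, j - 1)
			 * above. (j == 0 can't get here: table[0]
			 * points at the set's first key.) */
			uint64_t *k = set + table[j - 1];

			while ((unsigned)(k - set) < j * LINE_WORDS)
				k = xkey_next(k);
			table[j] = (unsigned)(k - set);
		}
	}
}

int main(void)
{
	/* key layout *after* inserting a 4-word key at word 8 */
	uint64_t set[40] = { 0 }, *k = set;
	unsigned lens[] = { 3, 5, 4, 6, 8, 5 };
	unsigned table[] = { 0, 8, 22 };	/* stale, pre-insert values */

	for (unsigned i = 0; i < 6; i++) {
		k[0] = lens[i];
		k = xkey_next(k);
	}

	fix_table(set, table, 3, 8, 4);
	printf("%u %u %u\n", table[0], table[1], table[2]);	/* 0 8 18 */
	return 0;
}
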
876 unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, in bch_btree_insert_key() argument
886 BUG_ON(b->ops->is_extents && !KEY_SIZE(k)); in bch_btree_insert_key()
894 preceding_key(&START_KEY(k), &preceding_key_p); in bch_btree_insert_key()
896 preceding_key(k, &preceding_key_p); in bch_btree_insert_key()
900 if (b->ops->insert_fixup(b, k, &iter, replace_key)) in bch_btree_insert_key()
906 bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) { in bch_btree_insert_key()
914 bch_bkey_try_merge(b, prev, k)) in bch_btree_insert_key()
919 KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m)) in bch_btree_insert_key()
924 bch_bkey_try_merge(b, k, m)) in bch_btree_insert_key()
927 bch_bset_insert(b, m, k); in bch_btree_insert_key()
928 copy: bkey_copy(m, k); in bch_btree_insert_key()
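
bch_btree_insert_key() locates the first key sorting after the new one (by START_KEY for extents), tries to merge into the preceding key, then into the following one, and only shifts and copies when both fail; the zero-size exact-fit overwrite at line 919 is skipped here. A fixed-size-extent sketch of that tail; xext and try_merge are stand-ins, and the real bch_extent_merge() additionally demands the data be contiguous on disk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xext { uint64_t inode, offset, size; };	/* fixed-size stand-in */

/* Mergeable iff back to back in the keyspace. */
static bool try_merge(struct xext *l, const struct xext *r)
{
	if (l->inode != r->inode || l->offset != r->offset - r->size)
		return false;
	l->offset += r->size;
	l->size += r->size;
	return true;
}

/* Linear search here; the real code binary searches the bset tree. */
static unsigned insert_key(struct xext *a, unsigned n, struct xext k)
{
	unsigned m = 0;

	while (m < n && a[m].offset - a[m].size < k.offset - k.size)
		m++;

	if (m && try_merge(&a[m - 1], &k))
		return n;			/* absorbed leftwards */

	if (m < n && try_merge(&k, &a[m])) {
		a[m] = k;			/* absorbed the right key */
		return n;
	}

	memmove(a + m + 1, a + m, (n - m) * sizeof(*a));
	a[m] = k;				/* plain insertion */
	return n + 1;
}

int main(void)
{
	struct xext a[4] = { {1, 10, 10}, {1, 30, 10} };
	unsigned n = insert_key(a, 2, (struct xext){ 1, 20, 10 });

	printf("n=%u, first key covers [%llu,%llu)\n", n,
	       (unsigned long long)(a[0].offset - a[0].size),
	       (unsigned long long)a[0].offset);	/* n=2, [0,20) */
	return 0;
}
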
1086 return bkey_cmp(l.k, r.k) > 0; in btree_iter_cmp()
1094 void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k, in bch_btree_iter_push() argument
1097 if (k != end) in bch_btree_iter_push()
1099 ((struct btree_iter_set) { k, end }), in bch_btree_iter_push()
1141 ret = iter->data->k; in __bch_btree_iter_next()
1142 iter->data->k = bkey_next(iter->data->k); in __bch_btree_iter_next()
1144 if (iter->data->k > iter->data->end) { in __bch_btree_iter_next()
1146 iter->data->k = iter->data->end; in __bch_btree_iter_next()
1149 if (iter->data->k == iter->data->end) in __bch_btree_iter_next()
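
btree_iter_cmp(), bch_btree_iter_push() and __bch_btree_iter_next() above implement a multi-way merge: each source is a (k, end) cursor into one sorted set, and next always yields the smallest current key across sources. The kernel keeps the sources in a heap ordered by btree_iter_cmp(), and the bch_btree_iter_next_check() fragment earlier is the debug assertion that keys come out ordered. A sketch over plain integer keys, with a linear scan instead of the heap:

#include <stdint.h>
#include <stdio.h>

struct xset {			/* stand-in for struct btree_iter_set */
	const uint64_t *k, *end;
};

/* bch_btree_iter_push() in miniature: empty sets are never pushed. */
static void iter_push(struct xset *sets, unsigned *nr,
		      const uint64_t *k, const uint64_t *end)
{
	if (k != end)
		sets[(*nr)++] = (struct xset){ k, end };
}

/* __bch_btree_iter_next() in miniature: yield the smallest current
 * key, advance its set, drop the set once exhausted. */
static const uint64_t *iter_next(struct xset *sets, unsigned *nr)
{
	unsigned best = 0;

	if (!*nr)
		return NULL;

	for (unsigned i = 1; i < *nr; i++)
		if (*sets[i].k < *sets[best].k)
			best = i;

	const uint64_t *ret = sets[best].k++;

	if (sets[best].k == sets[best].end)
		sets[best] = sets[--(*nr)];

	return ret;
}

int main(void)
{
	const uint64_t a[] = { 1, 4, 9 }, b[] = { 2, 3, 7 };
	struct xset sets[2];
	unsigned nr = 0;
	const uint64_t *k;

	iter_push(sets, &nr, a, a + 3);
	iter_push(sets, &nr, b, b + 3);

	while ((k = iter_next(sets, &nr)))
		printf("%llu ", (unsigned long long)*k);
	printf("\n");		/* 1 2 3 4 7 9 */
	return 0;
}
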
1199 struct bkey *k, *last = NULL; in btree_mergesort() local
1200 BKEY_PADDED(k) tmp; in btree_mergesort()
1211 k = b->ops->sort_fixup(iter, &tmp.k); in btree_mergesort()
1213 k = NULL; in btree_mergesort()
1215 if (!k) in btree_mergesort()
1216 k = __bch_btree_iter_next(iter, b->ops->sort_cmp); in btree_mergesort()
1218 if (bad(b, k)) in btree_mergesort()
1223 bkey_copy(last, k); in btree_mergesort()
1224 } else if (!bch_bkey_try_merge(b, last, k)) { in btree_mergesort()
1226 bkey_copy(last, k); in btree_mergesort()
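
btree_mergesort() drains that iterator: each key is fetched (via sort_fixup() when the ops provide one), bad keys are dropped, and each survivor is either merged into the previously emitted key via bch_bkey_try_merge() or copied out after it. The consume loop in miniature, over plain integers, with runs of adjacent values standing in for mergeable extents; bad() and the types are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct xout { uint64_t start, end; };	/* a coalesced run [start, end) */

static bool bad(uint64_t k)
{
	return k == 0;		/* stand-in for bch_ptr_bad() */
}

static unsigned mergesort_consume(const uint64_t *in, unsigned n,
				  struct xout *out)
{
	unsigned nout = 0;

	for (unsigned i = 0; i < n; i++) {
		uint64_t k = in[i];	/* __bch_btree_iter_next() */

		if (bad(k))
			continue;	/* filtered, like bad(b, k) */

		if (nout && out[nout - 1].end == k) {
			out[nout - 1].end = k + 1;	/* try_merge hit */
		} else {
			out[nout].start = k;	/* bkey_copy(last, k) */
			out[nout].end = k + 1;
			nout++;
		}
	}
	return nout;
}

int main(void)
{
	const uint64_t in[] = { 1, 2, 3, 0, 5, 6, 9 };
	struct xout out[8];
	unsigned n = mergesort_consume(in, 7, out);

	for (unsigned i = 0; i < n; i++)
		printf("[%llu,%llu) ", (unsigned long long)out[i].start,
		       (unsigned long long)out[i].end);
	printf("\n");		/* [1,4) [5,7) [9,10) */
	return 0;
}
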