Lines matching refs: e (references to the struct entry pointer e throughout the dm-cache SMQ policy)
87 struct entry *e; in __get_entry() local
89 e = es->begin + block; in __get_entry()
90 BUG_ON(e >= es->end); in __get_entry()
92 return e; in __get_entry()
95 static unsigned to_index(struct entry_space *es, struct entry *e) in to_index() argument
97 BUG_ON(e < es->begin || e >= es->end); in to_index()
98 return e - es->begin; in to_index()
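
The two helpers above, __get_entry() and to_index(), convert between entry pointers and small integer indices into a flat entry array, with INDEXER_NULL standing for "no entry". Below is a minimal standalone sketch of that scheme; the struct layout, the INDEXER_NULL value and the use of assert() in place of BUG_ON() are simplifications, not the kernel's exact definitions.

        #include <assert.h>
        #include <stdbool.h>
        #include <stddef.h>

        #define INDEXER_NULL ((1u << 28u) - 1u)      /* "no entry" marker */

        struct entry {
                unsigned hash_next;
                unsigned prev;
                unsigned next;
                unsigned level;
                bool dirty, allocated, sentinel, pending_work;
                unsigned long oblock;                /* stand-in for dm_oblock_t */
        };

        struct entry_space {
                struct entry *begin;                 /* first entry in the array */
                struct entry *end;                   /* one past the last entry */
        };

        /* index -> pointer; INDEXER_NULL maps to NULL */
        static struct entry *to_entry(struct entry_space *es, unsigned block)
        {
                if (block == INDEXER_NULL)
                        return NULL;
                assert(es->begin + block < es->end);
                return es->begin + block;
        }

        /* pointer -> index; the entry must lie within the space */
        static unsigned to_index(struct entry_space *es, struct entry *e)
        {
                assert(e >= es->begin && e < es->end);
                return (unsigned)(e - es->begin);
        }
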
132 static struct entry *l_next(struct entry_space *es, struct entry *e) in l_next() argument
134 return to_entry(es, e->next); in l_next()
137 static struct entry *l_prev(struct entry_space *es, struct entry *e) in l_prev() argument
139 return to_entry(es, e->prev); in l_prev()
147 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_head() argument
151 e->next = l->head; in l_add_head()
152 e->prev = INDEXER_NULL; in l_add_head()
155 head->prev = l->head = to_index(es, e); in l_add_head()
157 l->head = l->tail = to_index(es, e); in l_add_head()
159 if (!e->sentinel) in l_add_head()
163 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e) in l_add_tail() argument
167 e->next = INDEXER_NULL; in l_add_tail()
168 e->prev = l->tail; in l_add_tail()
171 tail->next = l->tail = to_index(es, e); in l_add_tail()
173 l->head = l->tail = to_index(es, e); in l_add_tail()
175 if (!e->sentinel) in l_add_tail()
180 struct entry *old, struct entry *e) in l_add_before() argument
185 l_add_head(es, l, e); in l_add_before()
188 e->prev = old->prev; in l_add_before()
189 e->next = to_index(es, old); in l_add_before()
190 prev->next = old->prev = to_index(es, e); in l_add_before()
192 if (!e->sentinel) in l_add_before()
197 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e) in l_del() argument
199 struct entry *prev = l_prev(es, e); in l_del()
200 struct entry *next = l_next(es, e); in l_del()
203 prev->next = e->next; in l_del()
205 l->head = e->next; in l_del()
208 next->prev = e->prev; in l_del()
210 l->tail = e->prev; in l_del()
212 if (!e->sentinel) in l_del()
218 struct entry *e; in l_pop_head() local
220 for (e = l_head(es, l); e; e = l_next(es, e)) in l_pop_head()
221 if (!e->sentinel) { in l_pop_head()
222 l_del(es, l, e); in l_pop_head()
223 return e; in l_pop_head()
231 struct entry *e; in l_pop_tail() local
233 for (e = l_tail(es, l); e; e = l_prev(es, e)) in l_pop_tail()
234 if (!e->sentinel) { in l_pop_tail()
235 l_del(es, l, e); in l_pop_tail()
236 return e; in l_pop_tail()
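
The l_* lines above belong to a doubly-linked list whose links are entry indices rather than pointers; sentinel entries stay on the list but are excluded from nr_elts and are skipped when popping. A sketch of the core operations, reusing struct entry, struct entry_space, to_entry() and to_index() from the sketch above:

        struct ilist {
                unsigned nr_elts;                    /* excludes sentinel entries */
                unsigned head, tail;
        };

        static struct entry *l_next(struct entry_space *es, struct entry *e)
        {
                return to_entry(es, e->next);
        }

        static struct entry *l_head(struct entry_space *es, struct ilist *l)
        {
                return to_entry(es, l->head);
        }

        static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
        {
                struct entry *head = l_head(es, l);

                e->next = l->head;
                e->prev = INDEXER_NULL;

                if (head)
                        head->prev = l->head = to_index(es, e);
                else
                        l->head = l->tail = to_index(es, e);

                if (!e->sentinel)
                        l->nr_elts++;
        }

        static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
        {
                struct entry *tail = to_entry(es, l->tail);

                e->next = INDEXER_NULL;
                e->prev = l->tail;

                if (tail)
                        tail->next = l->tail = to_index(es, e);
                else
                        l->head = l->tail = to_index(es, e);

                if (!e->sentinel)
                        l->nr_elts++;
        }

        static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
        {
                struct entry *prev = to_entry(es, e->prev);
                struct entry *next = to_entry(es, e->next);

                if (prev)
                        prev->next = e->next;
                else
                        l->head = e->next;

                if (next)
                        next->prev = e->prev;
                else
                        l->tail = e->prev;

                if (!e->sentinel)
                        l->nr_elts--;
        }

        /* Pop the first non-sentinel entry, or NULL if only sentinels remain. */
        static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
        {
                struct entry *e;

                for (e = l_head(es, l); e; e = l_next(es, e))
                        if (!e->sentinel) {
                                l_del(es, l, e);
                                return e;
                        }

                return NULL;
        }
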
295 static void q_push(struct queue *q, struct entry *e) in q_push() argument
297 BUG_ON(e->pending_work); in q_push()
299 if (!e->sentinel) in q_push()
302 l_add_tail(q->es, q->qs + e->level, e); in q_push()
305 static void q_push_front(struct queue *q, struct entry *e) in q_push_front() argument
307 BUG_ON(e->pending_work); in q_push_front()
309 if (!e->sentinel) in q_push_front()
312 l_add_head(q->es, q->qs + e->level, e); in q_push_front()
315 static void q_push_before(struct queue *q, struct entry *old, struct entry *e) in q_push_before() argument
317 BUG_ON(e->pending_work); in q_push_before()
319 if (!e->sentinel) in q_push_before()
322 l_add_before(q->es, q->qs + e->level, old, e); in q_push_before()
325 static void q_del(struct queue *q, struct entry *e) in q_del() argument
327 l_del(q->es, q->qs + e->level, e); in q_del()
328 if (!e->sentinel) in q_del()
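
q_push(), q_push_front(), q_push_before() and q_del() place an entry on the ilist for its level inside a multi-level queue. A cut-down sketch, building on the list helpers above; MAX_LEVELS is illustrative here and the per-level sentinel counters kept by the real queue are omitted:

        #define MAX_LEVELS 64u

        struct queue {
                struct entry_space *es;
                unsigned nr_elts;
                unsigned nr_levels;
                struct ilist qs[MAX_LEVELS];         /* one list per level */
        };

        static void q_push(struct queue *q, struct entry *e)
        {
                assert(!e->pending_work);

                if (!e->sentinel)
                        q->nr_elts++;

                l_add_tail(q->es, q->qs + e->level, e);
        }

        static void q_push_front(struct queue *q, struct entry *e)
        {
                assert(!e->pending_work);

                if (!e->sentinel)
                        q->nr_elts++;

                l_add_head(q->es, q->qs + e->level, e);
        }

        static void q_del(struct queue *q, struct entry *e)
        {
                l_del(q->es, q->qs + e->level, e);
                if (!e->sentinel)
                        q->nr_elts--;
        }
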
338 struct entry *e; in q_peek() local
343 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { in q_peek()
344 if (e->sentinel) { in q_peek()
351 return e; in q_peek()
359 struct entry *e = q_peek(q, q->nr_levels, true); in q_pop() local
361 if (e) in q_pop()
362 q_del(q, e); in q_pop()
364 return e; in q_pop()
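
q_peek() and q_pop() scan the levels from the bottom up and return the first non-sentinel entry, so the coldest entries leave first. A sketch that drops the can_cross_sentinel refinement visible in the real q_peek():

        static struct entry *q_peek(struct queue *q, unsigned max_level)
        {
                unsigned level;
                struct entry *e;

                for (level = 0; level < max_level; level++)
                        for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
                                if (!e->sentinel)
                                        return e;

                return NULL;
        }

        static struct entry *q_pop(struct queue *q)
        {
                struct entry *e = q_peek(q, q->nr_levels);

                if (e)
                        q_del(q, e);

                return e;
        }
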
374 struct entry *e; in __redist_pop_from() local
377 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) in __redist_pop_from()
378 if (!e->sentinel) { in __redist_pop_from()
379 l_del(q->es, q->qs + e->level, e); in __redist_pop_from()
380 return e; in __redist_pop_from()
431 struct entry *e; in q_redistribute() local
443 e = __redist_pop_from(q, level + 1u); in q_redistribute()
444 if (!e) { in q_redistribute()
449 e->level = level; in q_redistribute()
450 l_add_tail(q->es, l, e); in q_redistribute()
458 e = l_pop_tail(q->es, l); in q_redistribute()
460 if (!e) in q_redistribute()
464 e->level = level + 1u; in q_redistribute()
465 l_add_tail(q->es, l_above, e); in q_redistribute()
470 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels, in q_requeue() argument
475 unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels); in q_requeue()
478 if (extra_levels && (e->level < q->nr_levels - 1u)) { in q_requeue()
484 de->level = e->level; in q_requeue()
503 q_del(q, e); in q_requeue()
504 e->level = new_level; in q_requeue()
505 q_push(q, e); in q_requeue()
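
__redist_pop_from() and q_redistribute() rebalance the per-level populations by shuffling entries between adjacent levels, while q_requeue() promotes a hit entry by extra_levels and swaps an entry down from the destination level so the level sizes stay roughly stable. A sketch of the requeue path only; the sentinel-relative placement of the displaced entry (the writeback sentinels passed in above) is omitted:

        static unsigned min_u(unsigned a, unsigned b)
        {
                return a < b ? a : b;
        }

        static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels)
        {
                struct entry *de;
                unsigned new_level = min_u(q->nr_levels - 1u, e->level + extra_levels);

                if (extra_levels && e->level < q->nr_levels - 1u) {
                        /* find a non-sentinel entry at the destination level */
                        for (de = l_head(q->es, q->qs + new_level);
                             de && de->sentinel;
                             de = l_next(q->es, de))
                                ;

                        if (de) {
                                q_del(q, de);
                                de->level = e->level;        /* demote it to make room */
                                q_push(q, de);
                        }
                }

                q_del(q, e);
                e->level = new_level;
                q_push(q, e);
        }
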
611 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e) in h_next() argument
613 return to_entry(ht->es, e->hash_next); in h_next()
616 static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e) in __h_insert() argument
618 e->hash_next = ht->buckets[bucket]; in __h_insert()
619 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
622 static void h_insert(struct smq_hash_table *ht, struct entry *e) in h_insert() argument
624 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_insert()
625 __h_insert(ht, h, e); in h_insert()
631 struct entry *e; in __h_lookup() local
634 for (e = h_head(ht, h); e; e = h_next(ht, e)) { in __h_lookup()
635 if (e->oblock == oblock) in __h_lookup()
636 return e; in __h_lookup()
638 *prev = e; in __h_lookup()
645 struct entry *e, struct entry *prev) in __h_unlink() argument
648 prev->hash_next = e->hash_next; in __h_unlink()
650 ht->buckets[h] = e->hash_next; in __h_unlink()
658 struct entry *e, *prev; in h_lookup() local
661 e = __h_lookup(ht, h, oblock, &prev); in h_lookup()
662 if (e && prev) { in h_lookup()
667 __h_unlink(ht, h, e, prev); in h_lookup()
668 __h_insert(ht, h, e); in h_lookup()
671 return e; in h_lookup()
674 static void h_remove(struct smq_hash_table *ht, struct entry *e) in h_remove() argument
676 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits); in h_remove()
683 e = __h_lookup(ht, h, e->oblock, &prev); in h_remove()
684 if (e) in h_remove()
685 __h_unlink(ht, h, e, prev); in h_remove()
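
The h_* lines implement a chained hash table whose buckets hold entry indices; h_lookup() moves a found entry to the head of its bucket so hot blocks are found quickly, and h_remove() has to rescan the bucket because the chains are singly linked. A sketch reusing the entry space above, with hash_fn() standing in for the kernel's hash_64():

        struct smq_hash_table {
                struct entry_space *es;
                unsigned hash_bits;
                unsigned *buckets;                   /* entry index or INDEXER_NULL */
        };

        /* simple Fibonacci hash; assumes 1 <= hash_bits <= 63 */
        static unsigned hash_fn(unsigned long long oblock, unsigned hash_bits)
        {
                return (unsigned)((oblock * 0x9E3779B97F4A7C15ull) >> (64 - hash_bits));
        }

        static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
        {
                return to_entry(ht->es, ht->buckets[bucket]);
        }

        static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
        {
                return to_entry(ht->es, e->hash_next);
        }

        static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
        {
                e->hash_next = ht->buckets[bucket];
                ht->buckets[bucket] = to_index(ht->es, e);
        }

        static void h_insert(struct smq_hash_table *ht, struct entry *e)
        {
                unsigned h = hash_fn(e->oblock, ht->hash_bits);

                __h_insert(ht, h, e);
        }

        static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h,
                                        unsigned long oblock, struct entry **prev)
        {
                struct entry *e;

                *prev = NULL;
                for (e = h_head(ht, h); e; e = h_next(ht, e)) {
                        if (e->oblock == oblock)
                                return e;

                        *prev = e;
                }

                return NULL;
        }

        static void __h_unlink(struct smq_hash_table *ht, unsigned h,
                               struct entry *e, struct entry *prev)
        {
                if (prev)
                        prev->hash_next = e->hash_next;
                else
                        ht->buckets[h] = e->hash_next;
        }

        /* Find oblock's entry; on a hit, move it to the head of its bucket. */
        static struct entry *h_lookup(struct smq_hash_table *ht, unsigned long oblock)
        {
                unsigned h = hash_fn(oblock, ht->hash_bits);
                struct entry *e, *prev;

                e = __h_lookup(ht, h, oblock, &prev);
                if (e && prev) {
                        __h_unlink(ht, h, e, prev);
                        __h_insert(ht, h, e);
                }

                return e;
        }

        /* Singly-linked buckets mean removal needs a rescan to find prev. */
        static void h_remove(struct smq_hash_table *ht, struct entry *e)
        {
                unsigned h = hash_fn(e->oblock, ht->hash_bits);
                struct entry *prev;

                e = __h_lookup(ht, h, e->oblock, &prev);
                if (e)
                        __h_unlink(ht, h, e, prev);
        }
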
712 static void init_entry(struct entry *e) in init_entry() argument
718 e->hash_next = INDEXER_NULL; in init_entry()
719 e->next = INDEXER_NULL; in init_entry()
720 e->prev = INDEXER_NULL; in init_entry()
721 e->level = 0u; in init_entry()
722 e->dirty = true; /* FIXME: audit */ in init_entry()
723 e->allocated = true; in init_entry()
724 e->sentinel = false; in init_entry()
725 e->pending_work = false; in init_entry()
730 struct entry *e; in alloc_entry() local
735 e = l_pop_head(ea->es, &ea->free); in alloc_entry()
736 init_entry(e); in alloc_entry()
739 return e; in alloc_entry()
747 struct entry *e = __get_entry(ea->es, ea->begin + i); in alloc_particular_entry() local
749 BUG_ON(e->allocated); in alloc_particular_entry()
751 l_del(ea->es, &ea->free, e); in alloc_particular_entry()
752 init_entry(e); in alloc_particular_entry()
755 return e; in alloc_particular_entry()
758 static void free_entry(struct entry_alloc *ea, struct entry *e) in free_entry() argument
761 BUG_ON(!e->allocated); in free_entry()
764 e->allocated = false; in free_entry()
765 l_add_tail(ea->es, &ea->free, e); in free_entry()
773 static unsigned get_index(struct entry_alloc *ea, struct entry *e) in get_index() argument
775 return to_index(ea->es, e) - ea->begin; in get_index()
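
The allocator keeps its free entries on an ilist covering a contiguous slice of the entry space; alloc_particular_entry() claims a specific slot (used when reloading a known cblock) and get_index()/get_entry() map between entries and allocator-relative indices. A sketch reusing the helpers above; the nr_allocated bookkeeping of the original is dropped:

        struct entry_alloc {
                struct entry_space *es;
                unsigned begin;                      /* first index owned by this allocator */
                struct ilist free;
        };

        static void init_entry(struct entry *e)
        {
                e->hash_next = INDEXER_NULL;
                e->next = INDEXER_NULL;
                e->prev = INDEXER_NULL;
                e->level = 0u;
                e->dirty = true;                     /* conservative default */
                e->allocated = true;
                e->sentinel = false;
                e->pending_work = false;
        }

        static struct entry *alloc_entry(struct entry_alloc *ea)
        {
                struct entry *e = l_pop_head(ea->es, &ea->free);

                if (!e)
                        return NULL;                 /* nothing left */

                init_entry(e);
                return e;
        }

        /* Claim a specific slot, e.g. when reloading a known cblock. */
        static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
        {
                struct entry *e = to_entry(ea->es, ea->begin + i);

                assert(!e->allocated);
                l_del(ea->es, &ea->free, e);
                init_entry(e);
                return e;
        }

        static void free_entry(struct entry_alloc *ea, struct entry *e)
        {
                assert(e->allocated);
                e->allocated = false;
                l_add_tail(ea->es, &ea->free, e);
        }

        /* Index of e relative to this allocator (e.g. the cblock). */
        static unsigned get_index(struct entry_alloc *ea, struct entry *e)
        {
                return to_index(ea->es, e) - ea->begin;
        }

        /* Fetch the entry for an allocator-relative index. */
        static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
        {
                return to_entry(ea->es, ea->begin + index);
        }
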
950 static void del_queue(struct smq_policy *mq, struct entry *e) in del_queue() argument
952 q_del(e->dirty ? &mq->dirty : &mq->clean, e); in del_queue()
955 static void push_queue(struct smq_policy *mq, struct entry *e) in push_queue() argument
957 if (e->dirty) in push_queue()
958 q_push(&mq->dirty, e); in push_queue()
960 q_push(&mq->clean, e); in push_queue()
964 static void push(struct smq_policy *mq, struct entry *e) in push() argument
966 h_insert(&mq->table, e); in push()
967 if (!e->pending_work) in push()
968 push_queue(mq, e); in push()
971 static void push_queue_front(struct smq_policy *mq, struct entry *e) in push_queue_front() argument
973 if (e->dirty) in push_queue_front()
974 q_push_front(&mq->dirty, e); in push_queue_front()
976 q_push_front(&mq->clean, e); in push_queue_front()
979 static void push_front(struct smq_policy *mq, struct entry *e) in push_front() argument
981 h_insert(&mq->table, e); in push_front()
982 if (!e->pending_work) in push_front()
983 push_queue_front(mq, e); in push_front()
986 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e) in infer_cblock() argument
988 return to_cblock(get_index(&mq->cache_alloc, e)); in infer_cblock()
991 static void requeue(struct smq_policy *mq, struct entry *e) in requeue() argument
996 if (e->pending_work) in requeue()
999 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) { in requeue()
1000 if (!e->dirty) { in requeue()
1001 q_requeue(&mq->clean, e, 1u, NULL, NULL); in requeue()
1005 q_requeue(&mq->dirty, e, 1u, in requeue()
1006 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels), in requeue()
1007 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels)); in requeue()
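
At the policy level, each cached block's entry sits on either the clean or the dirty queue depending on e->dirty, entries with pending background work sit on neither, and a cache hit promotes the entry one level via q_requeue(). A sketch with struct smq_policy cut down to the fields these paths touch; the cache_hit_bits throttling and the writeback sentinels passed to q_requeue() above are omitted:

        struct smq_policy {
                struct smq_hash_table table;
                struct queue clean;
                struct queue dirty;
                struct entry_alloc cache_alloc;      /* one entry per cache block */
        };

        static void del_queue(struct smq_policy *mq, struct entry *e)
        {
                q_del(e->dirty ? &mq->dirty : &mq->clean, e);
        }

        static void push_queue(struct smq_policy *mq, struct entry *e)
        {
                if (e->dirty)
                        q_push(&mq->dirty, e);
                else
                        q_push(&mq->clean, e);
        }

        static void push(struct smq_policy *mq, struct entry *e)
        {
                h_insert(&mq->table, e);
                if (!e->pending_work)                /* entries with queued work stay off the queues */
                        push_queue(mq, e);
        }

        static void push_queue_front(struct smq_policy *mq, struct entry *e)
        {
                if (e->dirty)
                        q_push_front(&mq->dirty, e);
                else
                        q_push_front(&mq->clean, e);
        }

        static void push_front(struct smq_policy *mq, struct entry *e)
        {
                h_insert(&mq->table, e);
                if (!e->pending_work)
                        push_queue_front(mq, e);
        }

        /* The cblock is just the entry's index within the cache allocator. */
        static unsigned infer_cblock(struct smq_policy *mq, struct entry *e)
        {
                return get_index(&mq->cache_alloc, e);
        }

        /* On a cache hit: promote the entry one level within its queue. */
        static void requeue(struct smq_policy *mq, struct entry *e)
        {
                if (e->pending_work)
                        return;

                q_requeue(e->dirty ? &mq->dirty : &mq->clean, e, 1u);
        }
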
1162 static void mark_pending(struct smq_policy *mq, struct entry *e) in mark_pending() argument
1164 BUG_ON(e->sentinel); in mark_pending()
1165 BUG_ON(!e->allocated); in mark_pending()
1166 BUG_ON(e->pending_work); in mark_pending()
1167 e->pending_work = true; in mark_pending()
1170 static void clear_pending(struct smq_policy *mq, struct entry *e) in clear_pending() argument
1172 BUG_ON(!e->pending_work); in clear_pending()
1173 e->pending_work = false; in clear_pending()
1180 struct entry *e; in queue_writeback() local
1182 e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle); in queue_writeback()
1183 if (e) { in queue_writeback()
1184 mark_pending(mq, e); in queue_writeback()
1185 q_del(&mq->dirty, e); in queue_writeback()
1188 work.oblock = e->oblock; in queue_writeback()
1189 work.cblock = infer_cblock(mq, e); in queue_writeback()
1193 clear_pending(mq, e); in queue_writeback()
1194 q_push_front(&mq->dirty, e); in queue_writeback()
1203 struct entry *e; in queue_demotion() local
1208 e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true); in queue_demotion()
1209 if (!e) { in queue_demotion()
1215 mark_pending(mq, e); in queue_demotion()
1216 q_del(&mq->clean, e); in queue_demotion()
1219 work.oblock = e->oblock; in queue_demotion()
1220 work.cblock = infer_cblock(mq, e); in queue_demotion()
1223 clear_pending(mq, e); in queue_demotion()
1224 q_push_front(&mq->clean, e); in queue_demotion()
1232 struct entry *e; in queue_promotion() local
1255 e = alloc_entry(&mq->cache_alloc); in queue_promotion()
1256 BUG_ON(!e); in queue_promotion()
1257 e->pending_work = true; in queue_promotion()
1260 work.cblock = infer_cblock(mq, e); in queue_promotion()
1263 free_entry(&mq->cache_alloc, e); in queue_promotion()
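
queue_writeback() (and, in the same pattern, queue_demotion() and queue_promotion()) peeks an entry, marks it pending, removes it from its queue and hands a work item to dm-cache's background tracker; if that fails, the pending flag is cleared and the entry goes back on the front of its queue. A sketch of the writeback case; struct policy_work is reduced to the fields used here and submit_background_work() is a hypothetical stand-in for the real queuing call, whose interface isn't shown in this listing:

        enum policy_op { POLICY_PROMOTE, POLICY_DEMOTE, POLICY_WRITEBACK };

        struct policy_work {
                enum policy_op op;
                unsigned long oblock;
                unsigned cblock;
        };

        /* Hypothetical stand-in for handing work to the background tracker;
         * returns 0 on success, non-zero if the work can't be queued now. */
        static int submit_background_work(struct smq_policy *mq, struct policy_work *work)
        {
                (void)mq;
                (void)work;
                return 0;
        }

        static void mark_pending(struct entry *e)
        {
                assert(!e->sentinel && e->allocated && !e->pending_work);
                e->pending_work = true;
        }

        static void clear_pending(struct entry *e)
        {
                assert(e->pending_work);
                e->pending_work = false;
        }

        static void queue_writeback(struct smq_policy *mq)
        {
                struct policy_work work;
                struct entry *e = q_peek(&mq->dirty, mq->dirty.nr_levels);

                if (!e)
                        return;

                mark_pending(e);
                q_del(&mq->dirty, e);

                work.op = POLICY_WRITEBACK;
                work.oblock = e->oblock;
                work.cblock = infer_cblock(mq, e);

                if (submit_background_work(mq, &work)) {
                        /* couldn't queue it: undo and keep the entry near the front */
                        clear_pending(e);
                        q_push_front(&mq->dirty, e);
                }
        }
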
1305 struct entry *e = h_lookup(&mq->hotspot_table, hb); in update_hotspot_queue() local
1307 if (e) { in update_hotspot_queue()
1308 stats_level_accessed(&mq->hotspot_stats, e->level); in update_hotspot_queue()
1310 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1311 q_requeue(&mq->hotspot, e, in update_hotspot_queue()
1319 e = alloc_entry(&mq->hotspot_alloc); in update_hotspot_queue()
1320 if (!e) { in update_hotspot_queue()
1321 e = q_pop(&mq->hotspot); in update_hotspot_queue()
1322 if (e) { in update_hotspot_queue()
1323 h_remove(&mq->hotspot_table, e); in update_hotspot_queue()
1324 hi = get_index(&mq->hotspot_alloc, e); in update_hotspot_queue()
1330 if (e) { in update_hotspot_queue()
1331 e->oblock = hb; in update_hotspot_queue()
1332 q_push(&mq->hotspot, e); in update_hotspot_queue()
1333 h_insert(&mq->hotspot_table, e); in update_hotspot_queue()
1337 return e; in update_hotspot_queue()
1371 struct entry *e, *hs_e; in __lookup() local
1376 e = h_lookup(&mq->table, oblock); in __lookup()
1377 if (e) { in __lookup()
1378 stats_level_accessed(&mq->cache_stats, e->level); in __lookup()
1380 requeue(mq, e); in __lookup()
1381 *cblock = infer_cblock(mq, e); in __lookup()
1464 struct entry *e = get_entry(&mq->cache_alloc, in __complete_background_work() local
1470 clear_pending(mq, e); in __complete_background_work()
1472 e->oblock = work->oblock; in __complete_background_work()
1473 e->level = NR_CACHE_LEVELS - 1; in __complete_background_work()
1474 push(mq, e); in __complete_background_work()
1477 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1485 h_remove(&mq->table, e); in __complete_background_work()
1486 free_entry(&mq->cache_alloc, e); in __complete_background_work()
1489 clear_pending(mq, e); in __complete_background_work()
1490 push_queue(mq, e); in __complete_background_work()
1497 clear_pending(mq, e); in __complete_background_work()
1498 push_queue(mq, e); in __complete_background_work()
1521 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in __smq_set_clear_dirty() local
1523 if (e->pending_work) in __smq_set_clear_dirty()
1524 e->dirty = set; in __smq_set_clear_dirty()
1526 del_queue(mq, e); in __smq_set_clear_dirty()
1527 e->dirty = set; in __smq_set_clear_dirty()
1528 push_queue(mq, e); in __smq_set_clear_dirty()
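
__smq_set_clear_dirty() changes which queue an entry sits on: flipping e->dirty means deleting it from the clean or dirty queue and reinserting it on the other, unless background work is pending and the entry is off the queues entirely. A sketch reusing the helpers above:

        static void __smq_set_clear_dirty(struct smq_policy *mq, unsigned cblock, bool set)
        {
                struct entry *e = get_entry(&mq->cache_alloc, cblock);

                if (e->pending_work) {
                        e->dirty = set;              /* not on any queue right now */
                } else {
                        del_queue(mq, e);            /* remove from clean or dirty... */
                        e->dirty = set;
                        push_queue(mq, e);           /* ...and reinsert on the right one */
                }
        }
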
1562 struct entry *e; in smq_load_mapping() local
1564 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_load_mapping()
1565 e->oblock = oblock; in smq_load_mapping()
1566 e->dirty = dirty; in smq_load_mapping()
1567 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock); in smq_load_mapping()
1568 e->pending_work = false; in smq_load_mapping()
1574 push_front(mq, e); in smq_load_mapping()
1582 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_invalidate_mapping() local
1584 if (!e->allocated) in smq_invalidate_mapping()
1588 del_queue(mq, e); in smq_invalidate_mapping()
1589 h_remove(&mq->table, e); in smq_invalidate_mapping()
1590 free_entry(&mq->cache_alloc, e); in smq_invalidate_mapping()
1597 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock)); in smq_get_hint() local
1599 if (!e->allocated) in smq_get_hint()
1602 return e->level; in smq_get_hint()
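
smq_load_mapping() claims the entry for a known cblock, fills in oblock, dirty and a level taken from the saved hint (or a spread-out default), and pushes it to the front of its queue; smq_invalidate_mapping() undoes all of that, and the hint persisted for each block is simply its level. A sketch reusing the earlier helpers; random_level() is only a placeholder for the kernel helper of the same name, and NR_CACHE_LEVELS mirrors the constant referenced above:

        #define NR_CACHE_LEVELS 64u

        static unsigned random_level(unsigned cblock)
        {
                return cblock % NR_CACHE_LEVELS;     /* placeholder spread, not the real policy */
        }

        static void smq_load_mapping(struct smq_policy *mq, unsigned long oblock,
                                     unsigned cblock, bool dirty,
                                     unsigned hint, bool hint_valid)
        {
                struct entry *e = alloc_particular_entry(&mq->cache_alloc, cblock);

                e->oblock = oblock;
                e->dirty = dirty;
                e->level = hint_valid ? min_u(hint, NR_CACHE_LEVELS - 1u) : random_level(cblock);
                e->pending_work = false;

                /* loaded entries start at the front so they age out first
                 * unless they are hit again */
                push_front(mq, e);
        }

        static void smq_invalidate_mapping(struct smq_policy *mq, unsigned cblock)
        {
                struct entry *e = get_entry(&mq->cache_alloc, cblock);

                if (!e->allocated)
                        return;

                del_queue(mq, e);
                h_remove(&mq->table, e);
                free_entry(&mq->cache_alloc, e);
        }

        /* The hint persisted to metadata is simply the entry's level. */
        static unsigned smq_get_hint(struct smq_policy *mq, unsigned cblock)
        {
                struct entry *e = get_entry(&mq->cache_alloc, cblock);

                return e->allocated ? e->level : 0;
        }
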