/linux-5.19.10/drivers/md/bcache/
D | writeback.h
      78  static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,    in bcache_dev_stripe_dirty()
     102  static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,    in should_writeback()
     128  static inline void bch_writeback_queue(struct cached_dev *dc)    in bch_writeback_queue()
     134  static inline void bch_writeback_add(struct cached_dev *dc)    in bch_writeback_add()
     152  void bch_cached_dev_writeback_init(struct cached_dev *dc);
     153  int bch_cached_dev_writeback_start(struct cached_dev *dc);
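The inline helpers above decide whether a write is a candidate for the writeback cache. Below is a minimal userspace sketch of that decision shape; the struct fields and threshold arithmetic are stand-ins, not bcache's actual logic (the real should_writeback() also consults per-stripe dirtiness and congestion):

    #include <stdbool.h>

    enum cache_mode { CACHE_MODE_WRITETHROUGH, CACHE_MODE_WRITEBACK };

    /* Stand-in for the handful of struct cached_dev fields the check needs. */
    struct cached_dev_sketch {
        enum cache_mode mode;
        unsigned long long dirty_sectors;   /* sectors waiting to be written back */
        unsigned long long total_sectors;   /* size of the backing device */
        unsigned int writeback_percent;     /* user tunable, 0-100 */
    };

    /* Roughly the shape of should_writeback(): cache the write only in
     * writeback mode, and only while the dirty fraction is under target. */
    static bool should_writeback_sketch(const struct cached_dev_sketch *dc)
    {
        if (dc->mode != CACHE_MODE_WRITEBACK)
            return false;
        return dc->dirty_sectors * 100 <
               dc->total_sectors * (unsigned long long)dc->writeback_percent;
    }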
D | request.c
      30  static unsigned int cache_mode(struct cached_dev *dc)    in cache_mode()
      35  static bool verify(struct cached_dev *dc)    in verify()
     358  static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)    in iohash()
     363  static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)    in check_should_bypass()
     582  struct cached_dev *dc;    in cache_lookup()
     608  dc = container_of(s->d, struct cached_dev, disk);    in cache_lookup()
     643  struct cached_dev *dc = container_of(s->d,    in backing_request_endio()
     644  struct cached_dev, disk);    in backing_request_endio()
     755  struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);    in cached_dev_bio_complete()
     822  struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);    in cached_dev_read_done()
     [all …]
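check_should_bypass() tracks recent I/O per device so large sequential streams can bypass the cache; iohash() (line 358) buckets that tracking by a 64-bit key. A sketch of the bucket selection, using the same multiply-and-shift idea as the kernel's hash_64() (the table size here is illustrative):

    #include <stdint.h>

    #define IOHASH_BITS 9          /* illustrative: 512 buckets */

    /* Multiply by a 64-bit golden-ratio constant and keep the top
     * IOHASH_BITS bits; the result indexes the hash table. */
    static unsigned int iohash_sketch(uint64_t k)
    {
        return (unsigned int)((k * 0x61c8864680b583ebULL) >> (64 - IOHASH_BITS));
    }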
D | debug.h
       6  struct cached_dev;
      12  void bch_data_verify(struct cached_dev *dc, struct bio *bio);
      21  static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}    in bch_data_verify()
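Lines 12 and 21 are the two halves of a common kernel idiom: a real prototype when the debug option is compiled in, and an empty static inline otherwise, so call sites never need #ifdef. Schematically, with the guard bcache uses:

    #ifdef CONFIG_BCACHE_DEBUG
    void bch_data_verify(struct cached_dev *dc, struct bio *bio);
    #else
    static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
    #endif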
D | writeback.c
      30  static uint64_t __calc_target_rate(struct cached_dev *dc)    in __calc_target_rate()
      61  static void __update_writeback_rate(struct cached_dev *dc)    in __update_writeback_rate()
     161  struct cached_dev *dc)    in set_at_max_writeback_rate()
     213  struct cached_dev *dc = container_of(to_delayed_work(work),    in update_writeback_rate()
     214  struct cached_dev,    in update_writeback_rate()
     281  static unsigned int writeback_delay(struct cached_dev *dc,    in writeback_delay()
     293  struct cached_dev *dc;
     324  struct cached_dev *dc = io->dc;    in write_dirty_finish()
     376  struct cached_dev *dc = io->dc;    in write_dirty()
     443  static void read_dirty(struct cached_dev *dc)    in read_dirty()
     [all …]
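update_writeback_rate() (line 213) shows the standard pattern for periodic per-device work: recover the owning cached_dev from the embedded delayed_work, recompute, and re-arm. A hedged sketch; the field names mirror bcache's struct cached_dev, but the body is abbreviated:

    #include <linux/workqueue.h>

    static void update_writeback_rate_sketch(struct work_struct *work)
    {
        /* to_delayed_work() + container_of() walk from the work item
         * back to the cached_dev that embeds it. */
        struct cached_dev *dc = container_of(to_delayed_work(work),
                                             struct cached_dev,
                                             writeback_rate_update);

        /* ... recompute dc->writeback_rate from dirty data vs. target ... */

        /* Re-arm so the rate keeps tracking the workload. */
        schedule_delayed_work(&dc->writeback_rate_update,
                              dc->writeback_rate_update_seconds * HZ);
    }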
D | stats.c
     201  struct cached_dev *dc = container_of(d, struct cached_dev, disk);    in bch_mark_cache_accounting()
     209  struct cached_dev *dc = container_of(d, struct cached_dev, disk);    in bch_mark_cache_miss_collision()
     215  void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,    in bch_mark_sectors_bypassed()
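All three hits here are the same idiom that recurs in request.c, sysfs.c, and btree.c: recovering the enclosing struct cached_dev from a pointer to its embedded disk member. A self-contained userspace demo with stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace equivalent of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bcache_device { int id; };    /* stand-in types */
    struct cached_dev { int flags; struct bcache_device disk; };

    int main(void)
    {
        struct cached_dev dc = { .flags = 42 };
        struct bcache_device *d = &dc.disk;    /* what callers pass around */
        struct cached_dev *back = container_of(d, struct cached_dev, disk);

        printf("%d\n", back->flags);           /* prints 42 */
        return 0;
    }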
D | bcache.h
     298  struct cached_dev {    struct
     891  static inline void cached_dev_put(struct cached_dev *dc)    in cached_dev_put()
     897  static inline bool cached_dev_get(struct cached_dev *dc)    in cached_dev_get()
     962  void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio);
     993  bool bch_cached_dev_error(struct cached_dev *dc);
     999  void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
    1023  int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
    1025  void bch_cached_dev_detach(struct cached_dev *dc);
    1026  int bch_cached_dev_run(struct cached_dev *dc);
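cached_dev_get()/cached_dev_put() (lines 891/897) implement the usual kernel lifetime pattern: get fails once the count has already hit zero, and the final put schedules teardown rather than freeing inline. An approximate sketch; the count and detach field names mirror bcache, the bodies are not the verbatim source:

    #include <linux/refcount.h>
    #include <linux/workqueue.h>

    static inline bool cached_dev_get_sketch(struct cached_dev *dc)
    {
        /* Refuses to resurrect a device whose count already reached zero. */
        return refcount_inc_not_zero(&dc->count);
    }

    static inline void cached_dev_put_sketch(struct cached_dev *dc)
    {
        /* Last reference: defer the detach to process context. */
        if (refcount_dec_and_test(&dc->count))
            schedule_work(&dc->detach);
    }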
D | super.c
     281  struct cached_dev *dc = bio->bi_private;    in write_bdev_super_endio()
     332  struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);    in bch_write_bdev_super_unlock()
     337  void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)    in bch_write_bdev_super()
     998  struct cached_dev *dc;    in calc_cached_dev_sectors()
    1009  struct cached_dev *dc = arg;    in cached_dev_status_update()
    1044  int bch_cached_dev_run(struct cached_dev *dc)    in bch_cached_dev_run()
    1117  static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)    in cancel_writeback_rate_update_dwork()
    1137  struct cached_dev *dc = container_of(w, struct cached_dev, detach);    in cached_dev_detach_finish()
    1169  void bch_cached_dev_detach(struct cached_dev *dc)    in bch_cached_dev_detach()
    1190  int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,    in bch_cached_dev_attach()
     [all …]
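write_bdev_super_endio() (line 281) is the completion side of bch_write_bdev_super(): the submitter stashes the cached_dev in bio->bi_private, and the endio handler recovers it and drops the closure reference held for the in-flight write. A sketch of that shape; the real function's error accounting is richer, and closure_put() is bcache's own closure primitive:

    #include <linux/bio.h>
    #include "closure.h"    /* bcache's closure primitives */

    static void write_bdev_super_endio_sketch(struct bio *bio)
    {
        struct cached_dev *dc = bio->bi_private;    /* set at submit time */

        if (bio->bi_status)
            pr_err("bcache: superblock write error on backing device\n");

        closure_put(&dc->sb_write);    /* release the in-flight write */
    }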
D | stats.h
      42  struct cached_dev;
      61  struct cached_dev *dc,
D | request.h
      39  void bch_cached_dev_request_init(struct cached_dev *dc);
D | sysfs.c
     173  struct cached_dev *dc = container_of(kobj, struct cached_dev,    in SHOW()
     293  struct cached_dev *dc = container_of(kobj, struct cached_dev,    in STORE()
     459  struct cached_dev *dc = container_of(kobj, struct cached_dev,    in STORE()
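All three SHOW()/STORE() hits open with the same container_of() step: the kobject handed to the handler is the one embedded in the device, so the handler walks back to the cached_dev before touching an attribute. An illustrative show handler; bcache actually generates these through its own SHOW() macro, and the generic kobj_attribute type here is a simplification:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static ssize_t writeback_percent_show_sketch(struct kobject *kobj,
                                                 struct kobj_attribute *attr,
                                                 char *buf)
    {
        /* kobj is &dc->disk.kobj, so walk back to the cached_dev. */
        struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj);

        return sysfs_emit(buf, "%u\n", dc->writeback_percent);
    }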
D | io.c
      56  void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)    in bch_count_backing_io_errors()
D | debug.c
     108  void bch_data_verify(struct cached_dev *dc, struct bio *bio)    in bch_data_verify()
D | btree.c
    1742  struct cached_dev *dc;    in bch_btree_gc_finish()
    1747  dc = container_of(d, struct cached_dev, disk);    in bch_btree_gc_finish()
/linux-5.19.10/arch/x86/kernel/
D | quirks.c
     178  static struct pci_dev *cached_dev;    variable
     191  if (!force_hpet_address || !cached_dev)    in old_ich_force_hpet_resume()
     194  pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);    in old_ich_force_hpet_resume()
     198  pci_write_config_dword(cached_dev, 0xD0, gen_cntl);    in old_ich_force_hpet_resume()
     199  pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);    in old_ich_force_hpet_resume()
     249  cached_dev = dev;    in old_ich_force_enable_hpet()
     287  if (!force_hpet_address || !cached_dev)    in vt8237_force_hpet_resume()
     291  pci_write_config_dword(cached_dev, 0x68, val);    in vt8237_force_hpet_resume()
     293  pci_read_config_dword(cached_dev, 0x68, &val);    in vt8237_force_hpet_resume()
     336  cached_dev = dev;    in vt8237_force_enable_hpet()
     [all …]
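Here cached_dev is something entirely different from bcache's struct: a file-scope pci_dev remembered when the HPET force-enable quirk first fires, so the register poke can be replayed after resume (PCI config space is lost across suspend). A sketch of the ICH-style replay at offset 0xD0; the specific enable-bit manipulation below is illustrative, not the chipset's documented encoding:

    #include <linux/pci.h>

    static struct pci_dev *cached_dev;    /* set once by the detection quirk */

    static void old_ich_hpet_resume_sketch(void)
    {
        u32 gen_cntl;

        if (!cached_dev)
            return;                       /* quirk never fired at boot */

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl |= 1u << 17;             /* illustrative: re-assert HPET enable */
        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        /* Read back so the write is not left posted. */
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
    }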
/linux-5.19.10/net/packet/
D | internal.h
     136  struct net_device __rcu *cached_dev;    member
D | af_packet.c
     290  dev = rcu_dereference(po->cached_dev);    in packet_cached_dev_get()
     300  rcu_assign_pointer(po->cached_dev, dev);    in packet_cached_dev_assign()
     305  RCU_INIT_POINTER(po->cached_dev, NULL);    in packet_cached_dev_reset()
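In net/packet, cached_dev is an RCU-protected shortcut to the socket's bound net_device (the __rcu member declared in internal.h above). The three helpers are the canonical RCU pointer lifecycle: dereference under the read lock and pin the device before leaving it, publish with rcu_assign_pointer(), and clear with RCU_INIT_POINTER() where no readers can race. Approximate bodies, assuming struct packet_sock from internal.h:

    #include <linux/netdevice.h>
    #include <linux/rcupdate.h>

    static struct net_device *packet_cached_dev_get_sketch(struct packet_sock *po)
    {
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(po->cached_dev);
        if (dev)
            dev_hold(dev);    /* pin the device beyond the RCU section */
        rcu_read_unlock();

        return dev;           /* caller must dev_put() when done */
    }

    static void packet_cached_dev_assign_sketch(struct packet_sock *po,
                                                struct net_device *dev)
    {
        rcu_assign_pointer(po->cached_dev, dev);    /* publishes with a barrier */
    }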