struct r5conf: per-array configuration for the MD RAID4/5/6 personality (drivers/md/raid5.h)

struct r5conf {
	struct hlist_head	*stripe_hashtbl;
	/* only protect corresponding hash list and inactive_list */
	spinlock_t		hash_locks[NR_STRIPE_HASH_LOCKS];
	struct mddev		*mddev;
	int			chunk_sectors;
	int			level, algorithm, rmw_level;
	int			max_degraded;
	int			raid_disks;
	int			max_nr_stripes;	/* current size of the stripe cache */
	int			min_nr_stripes;	/* floor the cache may shrink to */
#if PAGE_SIZE != DEFAULT_STRIPE_SIZE
	unsigned long		stripe_size;
	unsigned int		stripe_shift;
	unsigned long		stripe_sectors;
#endif
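	/*
	 * The three fields above are kept mutually consistent (see
	 * setup_conf() in raid5.c): stripe_sectors == stripe_size >> 9
	 * and stripe_shift == ilog2(stripe_size) - 9, so a 4096-byte
	 * stripe_size gives stripe_sectors == 8 and stripe_shift == 3.
	 * When PAGE_SIZE == DEFAULT_STRIPE_SIZE the RAID5_STRIPE_*()
	 * accessor macros resolve to compile-time constants instead.
	 */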
	sector_t		reshape_progress;	/* leading edge of a reshape; MaxSector when idle */
	sector_t		reshape_safe;		/* trailing edge: reshape known complete up to here */
	int			previous_raid_disks;	/* geometry before the reshape started */
	int			prev_chunk_sectors;
	int			prev_algo;
	short			generation;		/* increments with every reshape */
	seqcount_spinlock_t	gen_lock;		/* lock against generation changes */
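	/*
	 * Readers that map logical sectors to stripes sample
	 * 'generation' with read_seqcount_begin(&gen_lock) and redo
	 * the mapping via read_seqcount_retry() if a reshape bumped
	 * the generation underneath them.
	 */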
	unsigned long		reshape_checkpoint;	/* time we last updated metadata */
	long long		min_offset_diff;	/* minimum difference between data_offset and
							 * new_data_offset across all devices; may be
							 * negative, but is closest to zero */
	struct list_head	handle_list;	/* stripes needing handling */
	struct list_head	loprio_list;	/* low priority stripes */
	struct list_head	hold_list;	/* preread ready stripes */
	struct list_head	delayed_list;	/* stripes that have plugged requests */
	struct list_head	bitmap_list;	/* stripes delaying awaiting bitmap update */
	struct bio		*retry_read_aligned;	/* currently retrying aligned bios */
	unsigned int		retry_read_offset;	/* sector offset into retry_read_aligned */
	struct bio		*retry_read_aligned_list; /* aligned bios retry list */
	atomic_t		preread_active_stripes;	/* stripes with scheduled io */
	atomic_t		active_aligned_reads;
	atomic_t		pending_full_writes;	/* full write backlog */
	int			bypass_count;		/* bypassed prereads */
	int			bypass_threshold;	/* preread nice */
	int			skip_copy;		/* don't copy data from bio to stripe cache */
	struct list_head	*last_hold;		/* detect hold_list promotions */
	atomic_t		reshape_stripes; /* stripes with pending writes for reshape */
	/* unfortunately we need two cache names as we temporarily have
	 * two caches.
	 */
	int			active_name;
	char			cache_name[2][32];
	struct kmem_cache	*slab_cache;	/* for allocating stripes */
	struct mutex		cache_size_mutex; /* protect changes to cache size */
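	/*
	 * resize_stripes() is why two names exist: it creates the new
	 * kmem_cache under cache_name[1 - active_name], migrates the
	 * stripe_heads into it, destroys the old cache, and then flips
	 * active_name.
	 */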
	int			seq_flush, seq_write;
	int			quiesce;

	int			fullsync;	/* set to 1 if a full sync is needed
						 * (fresh device added); cleared when
						 * a sync completes */
	int			recovery_disabled;
	/* per cpu variables */
	struct raid5_percpu __percpu *percpu;
	int			scribble_disks;	/* disks the per-cpu scribble buffers cover */
	int			scribble_sectors; /* sectors the per-cpu scribble buffers cover */
	struct hlist_node	node;		/* entry in the cpu hotplug callback list */

	/* Free stripes pool */
	atomic_t		active_stripes;
	struct list_head	inactive_list[NR_STRIPE_HASH_LOCKS];
	/* write-back cache (r5c) accounting */
	atomic_t		r5c_cached_full_stripes;
	struct list_head	r5c_full_stripe_list;
	atomic_t		r5c_cached_partial_stripes;
	struct list_head	r5c_partial_stripe_list;
	atomic_t		r5c_flushing_full_stripes;
	atomic_t		r5c_flushing_partial_stripes;
	atomic_t		empty_inactive_list_nr;
	struct llist_head	released_stripes;
	wait_queue_head_t	wait_for_quiescent;
	wait_queue_head_t	wait_for_stripe;
	wait_queue_head_t	wait_for_overlap;
	unsigned long		cache_state;
	struct shrinker		shrinker;
	int			pool_size;	/* number of disks in stripeheads in pool */
	spinlock_t		device_lock;
	struct disk_info	*disks;
	struct bio_set		bio_split;
	/* When taking over an array from a different personality, we
	 * store the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
	struct list_head	temp_inactive_list[NR_STRIPE_HASH_LOCKS];
	struct r5worker_group	*worker_groups;
	int			group_cnt;
	int			worker_cnt_per_group;
	struct r5l_log		*log;
	void			*log_private;
	spinlock_t		pending_bios_lock;
	bool			batch_bio_dispatch;
	struct r5pending_data	*pending_data;
	struct list_head	free_list;
	struct list_head	pending_list;
	int			pending_data_cnt;
	struct r5pending_data	*next_pending_data;
};
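A stripe's start sector selects both an hlist chain in stripe_hashtbl and the matching hash_locks[]/inactive_list[] bucket, so one bucket lock covers a hash chain and its inactive list together. A minimal sketch of that mapping, modeled on the stripe_hash() and stripe_hash_locks_hash() helpers in raid5.c (NR_HASH and HASH_MASK are shown as raid5.c defines them; treat the sketch as illustrative rather than a verbatim copy):

#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)
#define STRIPE_HASH_LOCKS_MASK	(NR_STRIPE_HASH_LOCKS - 1)

/* Hash chain in stripe_hashtbl that a stripe's start sector maps to. */
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
	int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;

	return &conf->stripe_hashtbl[hash];
}

/* Index of the hash_locks[]/inactive_list[] bucket for the same sector. */
static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
{
	return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;
}

Both helpers shift by RAID5_STRIPE_SHIFT(conf) first, so all sectors inside one stripe chunk hash to the same bucket and are serialized by the same lock.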
static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
{
	/* Another bio covers this stripe chunk only if the current one
	 * ends before the chunk at 'sector' does.
	 */
	if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
		return bio->bi_next;
	else
		return NULL;
}
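Callers use r5_next_bio() to walk the singly linked bi_next chain of bios attached to one stripe, stopping once the next bio no longer falls inside the RAID5_STRIPE_SECTORS(conf)-sized chunk at 'sector'. A sketch of the canonical traversal loop follows; for_each_stripe_bio() and process_bio() are illustrative names, not raid5.c functions, but the loop shape mirrors the ones in raid5.c, where 'head' would be something like sh->dev[i].towrite:

/* Visit every queued bio that overlaps the stripe chunk at 'sector'. */
static void for_each_stripe_bio(struct r5conf *conf, struct bio *head,
				sector_t sector)
{
	struct bio *bio = head;

	while (bio && bio->bi_iter.bi_sector <
	       sector + RAID5_STRIPE_SECTORS(conf)) {
		/* Fetch the successor first: process_bio() (a placeholder
		 * for the caller's per-bio work) may complete or free 'bio'.
		 */
		struct bio *next = r5_next_bio(conf, bio, sector);

		process_bio(bio);
		bio = next;
	}
}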