/*
 *  linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  inode and block bitmap caching inspired by ext2
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

/*
 * Read a cylinder group into the cache.  The memory for the
 * ufs_cg_private_info structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr);
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;

	UCPI_UBH(ucpi)->fragment = ufs_cgcmin(cgno);
	UCPI_UBH(ucpi)->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We already have the first fragment of the cylinder group block
	 * in a buffer.
	 */
	UCPI_UBH(ucpi)->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++)
		if (!(UCPI_UBH(ucpi)->bh[i] = sb_bread(sb, UCPI_UBH(ucpi)->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;

	ucpi->c_cgx	= fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl	= fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk	= fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk	= fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor	= fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor	= fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor	= fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff	= fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff	= fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff	= fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD("EXIT\n");
	return;

failed:
	/*
	 * Release the buffers read in the loop above; bh[0] is owned by
	 * sbi->s_ucg[] and must not be dropped here.
	 */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH(ucpi)->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}
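
/*
 * Illustrative sketch (not part of the original UFS code): a minimal example
 * of the read-then-unwind pattern used by ufs_read_cylinder() above, shown on
 * a plain array of buffer heads.  The helper name "example_read_run" and its
 * parameters are hypothetical; sb_bread() and brelse() are the same
 * buffer-cache calls used by the loop above.
 */
static inline int example_read_run(struct super_block *sb, sector_t start,
				   unsigned count, struct buffer_head **bhs)
{
	unsigned i, j;

	/* Read "count" consecutive blocks starting at "start". */
	for (i = 0; i < count; i++) {
		bhs[i] = sb_bread(sb, start + i);
		if (!bhs[i])
			goto failed;
	}
	return 0;

failed:
	/* Drop only the buffers that were successfully read. */
	for (j = 0; j < i; j++)
		brelse(bhs[j]);
	return -EIO;
}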

/*
 * Remove a cylinder group from the cache.  This does not release the memory
 * allocated for the cylinder group; that is done only in ufs_put_super.
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD("ENTER, bitmap_nr %u\n", bitmap_nr);

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD("EXIT\n");
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}
	/*
	 * The rotor values are not critical data, so we only write them
	 * back to disk when we are finished with the cylinder group.
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	for (i = 1; i < UCPI_UBH(ucpi)->count; i++) {
		brelse (UCPI_UBH(ucpi)->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD("EXIT\n");
}
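
/*
 * Illustrative sketch (not part of the original UFS code): the same
 * "update the cached block, mark it dirty, release it" discipline used by
 * ufs_put_cylinder() above, shown with a single ordinary buffer_head.  The
 * helper name "example_putback" and the "offset" parameter are hypothetical;
 * __fs32 comes from ufs_fs.h, and mark_buffer_dirty() and brelse() are the
 * standard buffer-cache calls.
 */
static inline void example_putback(struct buffer_head *bh, unsigned offset,
				   __fs32 rotor)
{
	/* Copy the in-core value into the cached on-disk block. */
	*(__fs32 *)(bh->b_data + offset) = rotor;
	/* Let the normal write-back path push the block to disk later. */
	mark_buffer_dirty(bh);
	brelse(bh);
}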

/*
 * Find a cylinder group in the cache and return a pointer to it.  If the
 * cylinder group is not in the cache, load it from disk.
 *
 * The cache is managed with an LRU (move-to-front) policy.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD("ENTER, cgno %u\n", cgno);

	uspi = sbi->s_uspi;
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group cgno is in the cache and was the most recently used.
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD("EXIT\n");
		return sbi->s_ucpi[0];
	}
	/*
	 * The number of cylinder groups does not exceed UFS_MAX_GROUP_LOADED,
	 * so every group has its own cache slot (bitmap_nr == cgno).
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD("EXIT (FAILED)\n");
				return NULL;
			}
			else {
				UFSD("EXIT\n");
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD("EXIT\n");
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group cgno is in the cache but was not the most recently
	 * used; move it to the first position.
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group cgno is not in the cache; read it from disk and
	 * put it in the first position.
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD("EXIT\n");
	return sbi->s_ucpi[0];
}
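
/*
 * Illustrative sketch (not part of the original UFS code): the move-to-front
 * step that ufs_load_cylinder() performs on sbi->s_cgno[] and sbi->s_ucpi[],
 * isolated on a single plain array.  Slot 0 is the most recently used entry;
 * promoting slot "pos" shifts the entries in front of it one place to the
 * right.  The helper name "example_lru_promote" and its parameters are
 * hypothetical.
 */
static inline void example_lru_promote(unsigned *slots, unsigned pos)
{
	unsigned tmp = slots[pos];
	unsigned j;

	for (j = pos; j > 0; j--)
		slots[j] = slots[j - 1];
	slots[0] = tmp;
}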