/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31

#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
#define MTD_ERASE_DONE 0x08
#define MTD_ERASE_FAILED 0x10

#define MTD_FAIL_ADDR_UNKNOWN -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
};

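/*
 * Illustrative sketch only (not part of this interface): a caller fills in an
 * erase_info describing the region to erase, and completion is reported
 * through ->state and the optional ->callback. The wait-queue pattern below
 * mirrors what mtdchar does; "mtd" and "ofs" are placeholders and the error
 * handling a production driver would need is omitted.
 *
 *	static void erase_done(struct erase_info *done)
 *	{
 *		wake_up((wait_queue_head_t *)done->priv);
 *	}
 *
 *	static int erase_one_block(struct mtd_info *mtd, loff_t ofs)
 *	{
 *		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
 *		struct erase_info ei = {
 *			.mtd      = mtd,
 *			.addr     = ofs,
 *			.len      = mtd->erasesize,
 *			.callback = erase_done,
 *			.priv     = (u_long)&waitq,
 *		};
 *		int ret = mtd_erase(mtd, &ei);
 *
 *		if (ret)
 *			return ret;
 *		wait_event(waitq, ei.state == MTD_ERASE_DONE ||
 *				  ei.state == MTD_ERASE_FAILED);
 *		return ei.state == MTD_ERASE_FAILED ? -EIO : 0;
 *	}
 */
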
struct mtd_erase_region_info {
	uint64_t offset;	/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;	/* For this region */
	uint32_t numblocks;	/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;	/* If keeping bitmap of locks */
};

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode: operation mode
 *
 * @len: number of data bytes to write/read
 *
 * @retlen: number of data bytes written/read
 *
 * @ooblen: number of oob bytes to write/read
 * @oobretlen: number of oob bytes written/read
 * @ooboffs: offset of oob data in the oob area (only relevant when
 *	mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf: data buffer - if NULL only oob data are read/written
 * @oobbuf: oob data buffer
 *
 * Note, it is allowed to read more than one OOB area at one go, but not write.
 * The interface assumes that the OOB write requests program only one page's
 * OOB area.
 */
struct mtd_oob_ops {
	unsigned int mode;
	size_t len;
	size_t retlen;
	size_t ooblen;
	size_t oobretlen;
	uint32_t ooboffs;
	uint8_t *datbuf;
	uint8_t *oobbuf;
};

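/*
 * Illustrative sketch only: reading one page's OOB area through the
 * mtd_oob_ops interface. "page_addr" and "oobbuf" are placeholders; with
 * datbuf == NULL only OOB data is transferred, and oobretlen reports how
 * many OOB bytes were actually read.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode    = MTD_OPS_PLACE_OOB,
 *		.ooblen  = mtd->oobavail,
 *		.ooboffs = 0,
 *		.datbuf  = NULL,
 *		.oobbuf  = oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, page_addr, &ops);
 *
 *	if (!err)
 *		pr_debug("read %zu OOB bytes\n", ops.oobretlen);
 */
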
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	448
/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * nand_ecclayout should be expandable in the future simply by increasing the
 * macros above.
 */
struct nand_ecclayout {
	__u32 eccbytes;
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
	__u32 oobavail;
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};

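/*
 * Illustrative sketch only: the oobfree[] ranges describe which OOB bytes the
 * ECC layout leaves available to clients. Summing their lengths, as below,
 * should normally match mtd->oobavail.
 *
 *	int i;
 *	__u32 avail = 0;
 *
 *	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE &&
 *		    mtd->ecclayout->oobfree[i].length; i++)
 *		avail += mtd->ecclayout->oobfree[i].length;
 */
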
struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire.
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
	 * it is the ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with a 2 * writesize bytes
	 * buffer, the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift, otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	// Kernel-only stuff starts here.
	const char *name;
	int index;

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* max number of correctable bit errors per writesize */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
			unsigned long len,
			unsigned long offset,
			unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
			size_t len);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
			size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf,
			size_t len);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
			size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
			size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
			size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below exist only for such
	 * drivers.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier; /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;
};

int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
		void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
		unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
		u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		const u_char *buf);

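/*
 * Illustrative sketch only: a typical read through the mtd_read() wrapper.
 * "from", "len" and "buf" are placeholders. -EUCLEAN (corrected bitflips)
 * is not a hard failure, and retlen must be checked for short transfers.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err && !mtd_is_bitflip(err))
 *		return err;
 *	if (retlen != len)
 *		return -EIO;
 */
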
static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	return mtd->_read_oob(mtd, from, ops);
}

static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_write_oob)
		return -EOPNOTSUPP;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_write_oob(mtd, to, ops);
}

int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);

static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);

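/*
 * Illustrative sketch only: walking the device eraseblock by eraseblock and
 * querying the bad block information. mtd_block_isbad() returns a negative
 * errno on failure, 0 for a good block and a positive value for a bad one.
 *
 *	uint64_t ofs;
 *	int ret;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		ret = mtd_block_isbad(mtd, ofs);
 *		if (ret < 0)
 *			return ret;
 *		if (ret)
 *			pr_info("bad block at 0x%llx\n",
 *				(unsigned long long)ofs);
 *	}
 */
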
static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}

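/*
 * Illustrative sketch only: converting an absolute flash offset ("addr", a
 * placeholder) into an eraseblock number and an offset within that
 * eraseblock using the helpers above. They take the fast shift/mask path
 * whenever erasesize is a power of two.
 *
 *	uint32_t ebnum   = mtd_div_by_eb(addr, mtd);
 *	uint32_t eboffs  = mtd_mod_by_eb(addr, mtd);
 *	uint64_t ebstart = (uint64_t)ebnum * mtd->erasesize;
 */
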
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
	return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}

/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
		const char **part_probe_types,
		struct mtd_part_parser_data *parser_data,
		const struct mtd_partition *defparts,
		int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);

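/*
 * Illustrative sketch only: how these entry points are typically used. A
 * flash driver registers its mtd_info (letting the listed parsers split it
 * into partitions), while an MTD client looks a device up by name and drops
 * the reference when done. The identifiers below are placeholders.
 *
 * In a driver's probe():
 *
 *	static const char *probes[] = { "cmdlinepart", NULL };
 *	err = mtd_device_parse_register(mtd, probes, NULL,
 *					default_parts,
 *					ARRAY_SIZE(default_parts));
 *
 * In an MTD client:
 *
 *	struct mtd_info *m = get_mtd_device_nm("nand-data");
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	...
 *	put_mtd_device(m);
 */
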
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
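/*
 * Illustrative sketch only: an MTD user that wants to learn about devices
 * coming and going registers a notifier. register_mtd_user() also replays
 * ->add() for every MTD device that already exists. Names below are
 * placeholders.
 *
 *	static void my_mtd_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_mtd_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add    = my_mtd_add,
 *		.remove = my_mtd_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 *	...
 *	unregister_mtd_user(&my_notifier);
 */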
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);

void mtd_erase_callback(struct erase_info *instr);

static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

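/*
 * Illustrative sketch only: classifying the return value of a read.
 * -EUCLEAN means bitflips were corrected (data is valid, but the caller may
 * want to scrub the block); -EBADMSG means an uncorrectable ECC error (data
 * in the buffer is unreliable); anything else negative is an ordinary I/O
 * error.
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (mtd_is_eccerr(err))
 *		return -EBADMSG;
 *	if (mtd_is_bitflip(err))
 *		err = 0;
 *	if (err)
 *		return err;
 */
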
#endif /* __MTD_MTD_H__ */