/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
	unsigned target_request_nr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
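
/*
 * Illustration only: a minimal linear-style map function, assuming a
 * hypothetical private context (struct example_c) holding the destination
 * device and its start sector.  It remaps the bio and lets dm core submit it.
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start + dm_target_offset(ti, bio->bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;	// see DM_MAPIO_* below
 *	}
 */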

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (e.g.,
 *       a multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);
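
/*
 * Illustration only: a bio end_io hook for a target that wants failed io
 * retried.  example_end_io and ec->retry_on_error are hypothetical names.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		if (error && ec->retry_on_error)
 *			return DM_ENDIO_REQUEUE;	// dm core requeues the io
 *
 *		return 0;				// io has ended
 *	}
 */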

typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);
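
/*
 * Illustration only: a target with a single underlying device typically
 * implements iterate_devices by handing that one device to the callout.
 * example_iterate_devices and struct example_c are hypothetical.
 *
 *	static int example_iterate_devices(struct dm_target *ti,
 *					   iterate_devices_callout_fn fn,
 *					   void *data)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		return fn(ti, ec->dev, ec->start, ti->len, data);
 *	}
 */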

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

/*
 * Combine device limits.
 */
int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
			 sector_t start, sector_t len, void *data);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
						 struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
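
/*
 * Illustration only: a constructor/destructor pair built on these calls.
 * The names example_ctr, example_dtr and struct example_c are hypothetical;
 * argument parsing is reduced to a single <device> parameter.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_c *ec;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(ec);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */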

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_flush_fn flush;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* Always a power of 2 */
	sector_t split_io;

	/*
	 * A number of zero-length barrier requests that will be submitted
	 * to the target for the purpose of flushing the cache.
	 *
	 * The request number will be placed in union map_info->target_request_nr.
	 * It is the responsibility of the target driver to remap these requests
	 * to the real underlying devices.
	 */
	unsigned num_flush_requests;

	/*
	 * The number of discard requests that will be submitted to the
	 * target.  map_info->target_request_nr is used just like
	 * num_flush_requests.
	 */
	unsigned num_discard_requests;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	unsigned discards_supported:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	unsigned discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
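
/*
 * Illustration only: registering a target type from module init/exit.
 * example_target and the example_* hooks are hypothetical; DM_MSG_PREFIX is
 * assumed to be defined by the target source before the DM* log macros below
 * are used.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		int r = dm_register_target(&example_target);
 *
 *		if (r < 0)
 *			DMERR("register failed %d", r);
 *		return r;
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */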

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
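
/*
 * Illustration only: parsing an optional feature-argument group of the form
 * "<#features> [feature]*" from a constructor.  example_parse_features and
 * the feature name "example_feature" are hypothetical.
 *
 *	static int example_parse_features(struct dm_arg_set *as,
 *					  struct dm_target *ti)
 *	{
 *		int r;
 *		unsigned num_features;
 *		const char *arg_name;
 *		struct dm_arg _args[] = {
 *			{0, 1, "Invalid number of feature arguments"},
 *		};
 *
 *		r = dm_read_arg_group(_args, as, &num_features, &ti->error);
 *		if (r)
 *			return r;
 *
 *		while (num_features--) {
 *			arg_name = dm_shift_arg(as);
 *			if (!strcasecmp(arg_name, "example_feature"))
 *				continue;
 *
 *			ti->error = "Unrecognised feature requested";
 *			return -EINVAL;
 *		}
 *
 *		return 0;
 *	}
 */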

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
union map_info *dm_get_rq_mapinfo(struct request *rq);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
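
/*
 * Illustration only of the calling order (normally driven by device-mapper
 * core on behalf of the ioctl interface; error handling is omitted and the
 * "8:16 0" linear parameters and 1024-sector length are hypothetical):
 *
 *	struct dm_table *t;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	r = dm_table_add_target(t, "linear", 0, 1024, "8:16 0");
 *	r = dm_table_complete(t);
 *
 * The completed table can then be installed with dm_swap_table() below.
 */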

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
#  define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
#  define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
#  define DMDEBUG(f, arg...) do {} while (0)
#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
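
/*
 * The DM* logging macros expand DM_MSG_PREFIX, which each target source file
 * is expected to define before using them.  Illustration (the "example"
 * prefix and message are hypothetical):
 *
 *	#define DM_MSG_PREFIX "example"
 *
 *	DMWARN("%s: device degraded", dm_device_name(md));
 *
 * which prints a line of the form
 * "device-mapper: example: <name>: device degraded" at KERN_WARNING level.
 */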

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
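
/*
 * DMEMIT appends to the status buffer passed to a target's status method and
 * expects local variables named result, maxlen and sz to be in scope.
 * Illustration only (example_status and struct example_c are hypothetical):
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *	}
 */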

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}
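
/*
 * Worked example of the sector helpers above (values are illustrative):
 * with SECTOR_SHIFT 9 a sector is 512 bytes, so to_bytes(8) == 4096 and
 * to_sector(4096) == 8.  For a target beginning at sector 2048,
 * dm_target_offset(ti, 2052) == 4, i.e. the offset within the target, and
 * dm_round_up(4100, 512) == 4608 rounds a byte count up to whole sectors.
 */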

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */