/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices. These can be bound to any block
 * device to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface. ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
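
/*
 * A minimal userspace sketch of the binding sequence described above,
 * for illustration only.  The /dev/rawctl node name and the 8:1 block
 * device are assumptions, not part of this driver:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raw.h>
 *
 *	int ctl = open("/dev/rawctl", O_RDWR);	-- minor 0: control device
 *	struct raw_config_request rq;
 *	rq.raw_minor   = 1;			-- bind raw minor 1 ...
 *	rq.block_major = 8;			-- ... to block device 8:1
 *	rq.block_minor = 1;
 *	ioctl(ctl, RAW_SETBIND, &rq);
 *
 * After this, reads and writes on the raw minor go straight to 8:1.
 */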

#include <linux/fs.h>
#include <linux/iobuf.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>

#define dprintk(x...)

typedef struct raw_device_data_s {
	struct block_device *binding;		/* bound block device, NULL if unbound */
	int inuse, sector_size, sector_bits;	/* open count and cached sector geometry */
	struct semaphore mutex;			/* serialises bind/open/release */
} raw_device_data_t;

static raw_device_data_t raw_devices[256];

static ssize_t rw_raw_dev(int rw, struct file *, char *, size_t, loff_t *);

ssize_t raw_read(struct file *, char *, size_t, loff_t *);
ssize_t raw_write(struct file *, const char *, size_t, loff_t *);
int raw_open(struct inode *, struct file *);
int raw_release(struct inode *, struct file *);
int raw_ctl_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
int raw_ioctl(struct inode *, struct file *, unsigned int, unsigned long);


static struct file_operations raw_fops = {
	read:		raw_read,
	write:		raw_write,
	open:		raw_open,
	release:	raw_release,
	ioctl:		raw_ioctl,
};

static struct file_operations raw_ctl_fops = {
	ioctl:		raw_ctl_ioctl,
	open:		raw_open,
};

static int __init raw_init(void)
{
	int i;
	register_chrdev(RAW_MAJOR, "raw", &raw_fops);

	for (i = 0; i < 256; i++)
		init_MUTEX(&raw_devices[i].mutex);

	return 0;
}

__initcall(raw_init);

/*
 * Open/close code for raw IO.
 */

int raw_open(struct inode *inode, struct file *filp)
{
	int minor;
	struct block_device * bdev;
	kdev_t rdev; /* it should eventually go away */
	int err;
	int sector_size;
	int sector_bits;

	minor = MINOR(inode->i_rdev);

	/*
	 * Is it the control device?
	 */

	if (minor == 0) {
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	if (!filp->f_iobuf) {
		err = alloc_kiovec(1, &filp->f_iobuf);
		if (err)
			return err;
	}

	down(&raw_devices[minor].mutex);
	/*
	 * No, it is a normal raw device. All we need to do on open is
	 * to check that the device is bound, and force the underlying
	 * block device to a sector-size blocksize.
	 */

	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;

	atomic_inc(&bdev->bd_count);
	rdev = to_kdev_t(bdev->bd_dev);
	err = blkdev_get(bdev, filp->f_mode, 0, BDEV_RAW);
	if (err)
		goto out;

	/*
	 * Don't change the blocksize if we already have users using
	 * this device
	 */

	if (raw_devices[minor].inuse++)
		goto out;

	/*
	 * Don't interfere with mounted devices: we cannot safely set
	 * the blocksize on a device which is already mounted.
	 */

	sector_size = 512;
	if (is_mounted(rdev)) {
		if (blksize_size[MAJOR(rdev)])
			sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
	} else {
		if (hardsect_size[MAJOR(rdev)])
			sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
	}

	set_blocksize(rdev, sector_size);
	raw_devices[minor].sector_size = sector_size;

	/* sector_bits = log2(sector_size) */
	for (sector_bits = 0; !(sector_size & 1); )
		sector_size >>= 1, sector_bits++;
	raw_devices[minor].sector_bits = sector_bits;

 out:
	up(&raw_devices[minor].mutex);

	return err;
}

int raw_release(struct inode *inode, struct file *filp)
{
	int minor;
	struct block_device *bdev;

	minor = MINOR(inode->i_rdev);
	down(&raw_devices[minor].mutex);
	bdev = raw_devices[minor].binding;
	raw_devices[minor].inuse--;
	up(&raw_devices[minor].mutex);
	blkdev_put(bdev, BDEV_RAW);
	return 0;
}



/* Forward ioctls to the underlying block device. */
int raw_ioctl(struct inode *inode,
	      struct file *filp,
	      unsigned int command,
	      unsigned long arg)
{
	int minor = minor(inode->i_rdev), err;
	struct block_device *b;
	if (minor < 1 || minor > 255)
		return -ENODEV;

	b = raw_devices[minor].binding;
	err = -EINVAL;
	if (b && b->bd_inode && b->bd_op && b->bd_op->ioctl) {
		err = b->bd_op->ioctl(b->bd_inode, NULL, command, arg);
	}
	return err;
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */

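/*
 * RAW_GETBIND is the read-back direction; a rough userspace sketch
 * (the control-device fd "ctl" is assumed to be open as in the
 * example at the top of this file):
 *
 *	struct raw_config_request rq;
 *	rq.raw_minor = 1;
 *	ioctl(ctl, RAW_GETBIND, &rq);
 *	-- rq.block_major/rq.block_minor now hold the bound device,
 *	-- or 0/0 if that raw minor is unbound.
 */
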
int raw_ctl_ioctl(struct inode *inode,
		  struct file *filp,
		  unsigned int command,
		  unsigned long arg)
{
	struct raw_config_request rq;
	int err = 0;
	int minor;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void *) arg, sizeof(rq))) {
			err = -EFAULT;
			break;
		}

		minor = rq.raw_minor;
		if (minor <= 0 || minor > MINORMASK) {
			err = -EINVAL;
			break;
		}

		if (command == RAW_SETBIND) {
			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				break;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened. Just check that the
			 * major/minor numbers make sense.
			 */

			if ((rq.block_major == NODEV &&
			     rq.block_minor != NODEV) ||
			    rq.block_major > MAX_BLKDEV ||
			    rq.block_minor > MINORMASK) {
				err = -EINVAL;
				break;
			}

			down(&raw_devices[minor].mutex);
			if (raw_devices[minor].inuse) {
				up(&raw_devices[minor].mutex);
				err = -EBUSY;
				break;
			}
			if (raw_devices[minor].binding)
				bdput(raw_devices[minor].binding);
			raw_devices[minor].binding =
				bdget(kdev_t_to_nr(MKDEV(rq.block_major, rq.block_minor)));
			up(&raw_devices[minor].mutex);
		} else {
			struct block_device *bdev;
			kdev_t dev;

			bdev = raw_devices[minor].binding;
			if (bdev) {
				dev = to_kdev_t(bdev->bd_dev);
				rq.block_major = MAJOR(dev);
				rq.block_minor = MINOR(dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			if (copy_to_user((void *) arg, &rq, sizeof(rq)))
				err = -EFAULT;
		}
		break;

	default:
		err = -EINVAL;
	}

	return err;
}



ssize_t raw_read(struct file *filp, char * buf,
		 size_t size, loff_t *offp)
{
	return rw_raw_dev(READ, filp, buf, size, offp);
}

ssize_t raw_write(struct file *filp, const char *buf,
		  size_t size, loff_t *offp)
{
	return rw_raw_dev(WRITE, filp, (char *) buf, size, offp);
}

#define SECTOR_BITS 9
#define SECTOR_SIZE (1U << SECTOR_BITS)
#define SECTOR_MASK (SECTOR_SIZE - 1)

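/*
 * Common implementation of raw_read() and raw_write(): map the user
 * buffer into a kiobuf and perform direct, sector-aligned IO against
 * the bound block device, bypassing the page cache.
 */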
ssize_t rw_raw_dev(int rw, struct file *filp, char *buf,
		   size_t size, loff_t *offp)
{
	struct kiobuf * iobuf;
	int new_iobuf;
	int err = 0;
	unsigned long blocknr, blocks;
	size_t transferred;
	int iosize;
	int i;
	int minor;
	kdev_t dev;
	unsigned long limit;
	loff_t off = *offp;

	int sector_size, sector_bits, sector_mask;
	int max_sectors;

	/*
	 * First, a few checks on device size limits
	 */

	minor = MINOR(filp->f_dentry->d_inode->i_rdev);

	new_iobuf = 0;
	iobuf = filp->f_iobuf;
	if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
		/*
		 * A parallel read/write is using the preallocated iobuf
		 * so just run slow and allocate a new one.
		 */
		err = alloc_kiovec(1, &iobuf);
		if (err)
			goto out;
		new_iobuf = 1;
	}

	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
	sector_size = raw_devices[minor].sector_size;
	sector_bits = raw_devices[minor].sector_bits;
	sector_mask = sector_size - 1;
	max_sectors = KIO_MAX_SECTORS >> (sector_bits - 9);

	if (blk_size[MAJOR(dev)])
		limit = (((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]) << BLOCK_SIZE_BITS) >> sector_bits;
	else
		limit = INT_MAX;
	dprintk ("rw_raw_dev: dev %d:%d (+%d)\n",
		 MAJOR(dev), MINOR(dev), limit);

	err = -EINVAL;
	if ((off & sector_mask) || (size & sector_mask))
		goto out_free;
	err = 0;
	if (size)
		err = -ENXIO;
	if ((off >> sector_bits) >= limit)
		goto out_free;

	/*
	 * Split the IO into KIO_MAX_SECTORS chunks, mapping and
	 * unmapping the single kiobuf as we go to perform each chunk of
	 * IO.
	 */
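	/*
	 * Worked example (assuming KIO_MAX_SECTORS is 1024 and a
	 * 512-byte sector size, so max_sectors == 1024): a 1 MiB
	 * request is serviced as two 512 KiB chunks, each iteration
	 * mapping the next part of the user buffer, submitting it with
	 * brw_kiovec() and unmapping it before moving on.
	 */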

	transferred = 0;
	blocknr = off >> sector_bits;
	while (size > 0) {
		blocks = size >> sector_bits;
		if (blocks > max_sectors)
			blocks = max_sectors;
		if (blocks > limit - blocknr)
			blocks = limit - blocknr;
		if (!blocks)
			break;

		iosize = blocks << sector_bits;

		err = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
		if (err)
			break;

		for (i=0; i < blocks; i++)
			iobuf->blocks[i] = blocknr++;

		err = brw_kiovec(rw, 1, &iobuf, dev, iobuf->blocks, sector_size);

		if (rw == READ && err > 0)
			mark_dirty_kiobuf(iobuf, err);

		if (err >= 0) {
			transferred += err;
			size -= err;
			buf += err;
		}

		unmap_kiobuf(iobuf);

		if (err != iosize)
			break;
	}

	if (transferred) {
		*offp = off + transferred;
		err = transferred;
	}

 out_free:
	if (!new_iobuf)
		clear_bit(0, &filp->f_iobuf_lock);
	else
		free_kiovec(1, &iobuf);
 out:
	return err;
}
