/*
 *	Block OSM
 *
 *	Copyright (C) 1999-2002	Red Hat Software
 *
 *	Written by Alan Cox, Building Number Three Ltd
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; either version 2 of the License, or (at your
 *	option) any later version.
 *
 *	This program is distributed in the hope that it will be useful, but
 *	WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *	General Public License for more details.
 *
 *	For the purpose of avoiding doubt the preferred form of the work
 *	for making modifications shall be a standards compliant form such
 *	as gzipped tar and not one requiring a proprietary or patent
 *	encumbered tool to unpack.
 *
 *	Fixes/additions:
 *		Steve Ralston:
 *			Multiple device handling error fixes,
 *			Added a queue depth.
 *		Alan Cox:
 *			FC920 has an rmw bug. Don't OR in the end marker.
 *			Removed queue walk, fixed for 64bitness.
 *			Rewrote much of the code over time
 *			Added indirect block lists
 *			Handle 64K limits on many controllers
 *			Don't use indirects on the Promise (breaks)
 *			Heavily chop down the queue depths
 *		Deepak Saxena:
 *			Independent queues per IOP
 *			Support for dynamic device creation/deletion
 *			Code cleanup
 *			Support for larger I/Os through merge* functions
 *			(taken from DAC960 driver)
 *		Boji T Kannanthanam:
 *			Set the I2O Block devices to be detected in increasing
 *			order of TIDs during boot.
 *			Search and set the I2O block device that we boot off
 *			from as the first device to be claimed (as /dev/i2o/hda)
 *			Properly attach/detach I2O gendisk structure from the
 *			system gendisk list. The I2O block devices now appear in
 *			/proc/partitions.
 *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *			Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include <linux/mutex.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static DEFINE_MUTEX(i2o_block_mutex);
static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};
/**
 *	i2o_block_device_free - free the memory of the I2O Block device
 *	@dev: I2O Block device, which should be cleaned up
 *
 *	Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
};

/**
 *	i2o_block_remove - remove the I2O Block device from the system
 *	@dev: I2O Block device which should be removed
 *
 *	Removes the gendisk from the system and frees all allocated memory.
 *
 *	Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
};

/**
 *	i2o_block_device_flush - Flush all dirty data of I2O device dev
 *	@dev: I2O device which should be flushed
 *
 *	Flushes all dirty data on device dev.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

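	/*
	 * Every request this driver builds packs its header the same way:
	 * head[0] holds the message size and SGL offset, head[1] holds the
	 * function code in the top byte, the initiator address (HOST_TID)
	 * and the target TID.
	 */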
	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
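	/*
	 * The upper 16 bits of body[0] presumably carry the flush timeout
	 * in seconds, matching the 60 second post_wait timeout below.
	 */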
	msg->body[0] = cpu_to_le32(60 << 16);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
};

/**
 *	i2o_block_device_mount - Mount (load) the media of device dev
 *	@dev: I2O device which should receive the mount request
 *	@media_id: Media Identifier
 *
 *	Loads media into the drive. The identifier should be set to -1 because
 *	the spec does not support any other value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 *	i2o_block_device_lock - Locks the media of device dev
 *	@dev: I2O device which should receive the lock request
 *	@media_id: Media Identifier
 *
 *	Locks the media of device dev to prevent removal. The media identifier
 *	should be set to -1, because the spec does not support any other value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 *	i2o_block_device_unlock - Unlocks the media of device dev
 *	@dev: I2O device which should receive the unlock request
 *	@media_id: Media Identifier
 *
 *	Unlocks the media in device dev. The media identifier should be set to
 *	-1, because the spec does not support any other value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

238 
239 /**
240  *	i2o_block_device_power - Power management for device dev
241  *	@dev: I2O device which should receive the power management request
242  *	@op: Operation to send
243  *
244  *	Send a power management request to the device dev.
245  *
246  *	Returns 0 on success or negative error code on failure.
247  */
i2o_block_device_power(struct i2o_block_device * dev,u8 op)248 static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
249 {
250 	struct i2o_device *i2o_dev = dev->i2o_dev;
251 	struct i2o_controller *c = i2o_dev->iop;
252 	struct i2o_message *msg;
253 	int rc;
254 
255 	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
256 	if (IS_ERR(msg))
257 		return PTR_ERR(msg);
258 
259 	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
260 	msg->u.head[1] =
261 	    cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
262 			lct_data.tid);
263 	msg->body[0] = cpu_to_le32(op << 24);
264 	osm_debug("Power...\n");
265 
266 	rc = i2o_msg_post_wait(c, msg, 60);
267 	if (!rc)
268 		dev->power = op;
269 
270 	return rc;
271 };
272 
/**
 *	i2o_block_request_alloc - Allocate an I2O block request struct
 *
 *	Allocates an I2O block request struct and initializes its list head
 *	and SG table.
 *
 *	Returns an i2o_block_request pointer on success or a negative error
 *	code on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

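	/*
	 * GFP_ATOMIC: this is called from the prep function with the queue
	 * lock held, so we must not sleep here.
	 */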
	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

	return ireq;
};

/**
 *	i2o_block_request_free - Frees an I2O block request
 *	@ireq: I2O block request which should be freed
 *
 *	Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
};

/**
 *	i2o_block_sglist_alloc - Allocate the SG list and map it
 *	@c: I2O controller to which the request belongs
 *	@ireq: I2O block request
 *	@mptr: message body pointer
 *
 *	Builds the SG list and maps it so that it is accessible by the
 *	controller.
 *
 *	Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 **mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};

/**
 *	i2o_block_sglist_free - Frees the SG list
 *	@ireq: I2O block request from which the SG list should be freed
 *
 *	Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};

/**
 *	i2o_block_prep_req_fn - Allocates I2O block device specific struct
 *	@q: request queue for the request
 *	@req: the request to prepare
 *
 *	Allocate the necessary i2o_block_request struct and connect it to
 *	the request. This is needed so that we do not lose the SG list later
 *	on.
 *
 *	Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (IS_ERR(ireq)) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	}
	/* do not come back here */
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
};

/**
 *	i2o_block_delayed_request_fn - delayed request queue function
 *	@work: the delayed request with the queue to start
 *
 *	If the request queue is stopped for a disk, and there is no open
 *	request, a new event is created, which calls this function to start
 *	the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue would never
 *	be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
	struct i2o_block_delayed_request *dreq =
		container_of(work, struct i2o_block_delayed_request,
			     work.work);
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
};

414 
415 /**
416  *	i2o_block_end_request - Post-processing of completed commands
417  *	@req: request which should be completed
418  *	@error: 0 for success, < 0 for error
419  *	@nr_bytes: number of bytes to complete
420  *
421  *	Mark the request as complete. The lock must not be held when entering.
422  *
423  */
i2o_block_end_request(struct request * req,int error,int nr_bytes)424 static void i2o_block_end_request(struct request *req, int error,
425 				  int nr_bytes)
426 {
427 	struct i2o_block_request *ireq = req->special;
428 	struct i2o_block_device *dev = ireq->i2o_blk_dev;
429 	struct request_queue *q = req->q;
430 	unsigned long flags;
431 
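	/*
	 * blk_end_request() returns true while the request still has
	 * unfinished bytes; on an error path, fail whatever is left in one
	 * go instead of retrying it.
	 */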
	if (blk_end_request(req, error, nr_bytes))
		if (error)
			blk_end_request_all(req, -EIO);

	spin_lock_irqsave(q->queue_lock, flags);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
};

/**
 *	i2o_block_reply - Block OSM reply handler.
 *	@c: I2O controller from which the message arrives
 *	@m: message id of reply
 *	@msg: the actual I2O message reply
 *
 *	This function gets all the message replies.
 *
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int error = 0;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 *      Let's see what is cooking. We stuffed the
	 *      request in the context.
	 */

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);
		/*
		 *      Device not ready means two things. One is that the
		 *      device went offline (but the media was not removed).
		 *
		 *      The second is that you have a SuperTrak 100 and the
		 *      firmware got constipated. Unlike standard I2O card
		 *      setups, the SuperTrak returns an error rather than
		 *      blocking for the timeout in these cases.
		 *
		 *      Don't stick a SuperTrak 100 into cache aggressive modes.
		 */

		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		req->errors++;

		error = -EIO;
	}

	i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));

	return 1;
};

static void i2o_block_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");
	kfree(evt);
};

/*
 *	SCSI-CAM for ioctl geometry mapping
 *	Duplicated with SCSI - this should be moved into somewhere common
 *	perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size       | Sectors | Heads | Cylinders
 * ----------------+---------+-------+---------------------
 * 1 < X <= 528M   | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G  | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G   | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G  | 63      | 128   | X/(63 * 128 * 512)
 * 42G < X         | 63      | 255   | X/(63 * 255 * 512)
 *
 */
#define	BLOCK_SIZE_528M		1081344
#define	BLOCK_SIZE_1G		2097152
#define	BLOCK_SIZE_21G		4403200
#define	BLOCK_SIZE_42G		8806400
#define	BLOCK_SIZE_84G		17612800
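
/*
 * Example: a disk of 2097152 sectors (1 GiB) falls in the 528M..1G band,
 * so it is reported as 63 sectors/track, 32 heads and
 * 2097152 / (63 * 32) = 1040 cylinders.
 */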

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}

/**
 *	i2o_block_open - Open the block device
 *	@bdev: block device being opened
 *	@mode: file open mode
 *
 *	Power up the device, mount and lock the media. This function is called
 *	when the block device is opened for access.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
	struct i2o_block_device *dev = bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	mutex_lock(&i2o_block_mutex);
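	/*
	 * The release path leaves the unit in power state 0x21 or 0x24, so
	 * anything above 0x1f means it was powered down; 0x02 powers it
	 * back up.
	 */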
	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");
	mutex_unlock(&i2o_block_mutex);

	return 0;
};

/**
 *	i2o_block_release - Release the I2O block device
 *	@disk: gendisk device being released
 *	@mode: file open mode
 *
 *	Unlock and unmount the media, and power down the device. Called when
 *	the block device is closed.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it.  ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	mutex_lock(&i2o_block_mutex);
	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);
	mutex_unlock(&i2o_block_mutex);

	return 0;
}

static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);
	return 0;
}

/**
 *	i2o_block_ioctl - Issue device specific ioctl calls
 *	@bdev: block device on which the ioctl is issued
 *	@mode: file open mode
 *	@cmd: ioctl command
 *	@arg: arg
 *
 *	Handles ioctl requests for the block device.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	int ret = -ENOTTY;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
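
	/*
	 * Illustrative userspace usage (hypothetical fd on an I2O volume):
	 *
	 *	int cur;
	 *	ioctl(fd, BLKI2OGRSTRAT, &cur);             get read strategy
	 *	ioctl(fd, BLKI2OSRSTRAT, CACHE_SMARTFETCH); set read strategy
	 *
	 * The "get" calls take a pointer; the "set" calls pass the value
	 * directly in arg.
	 */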

	mutex_lock(&i2o_block_mutex);
	switch (cmd) {
	case BLKI2OGRSTRAT:
		ret = put_user(dev->rcache, (int __user *)arg);
		break;
	case BLKI2OGWSTRAT:
		ret = put_user(dev->wcache, (int __user *)arg);
		break;
	case BLKI2OSRSTRAT:
		ret = -EINVAL;
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			break;
		dev->rcache = arg;
		ret = 0;
		break;
	case BLKI2OSWSTRAT:
		ret = -EINVAL;
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			break;
		dev->wcache = arg;
		ret = 0;
		break;
	}
	mutex_unlock(&i2o_block_mutex);

	return ret;
};

/**
 *	i2o_block_check_events - Have we seen a media change?
 *	@disk: gendisk which should be verified
 *	@clearing: events being cleared
 *
 *	Verifies if the media has changed.
 *
 *	Returns DISK_EVENT_MEDIA_CHANGE if the media was changed, or 0
 *	otherwise.
 */
static unsigned int i2o_block_check_events(struct gendisk *disk,
					   unsigned int clearing)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return DISK_EVENT_MEDIA_CHANGE;
	}
	return 0;
}

/**
 *	i2o_block_transfer - Transfer a request to/from the I2O controller
 *	@req: the request which should be transferred
 *
 *	This function converts the request into an I2O message. The necessary
 *	DMA buffers are allocated and, after everything is set up, the message
 *	is posted to the I2O controller. No cleanup is done by this function;
 *	that happens on the interrupt side when the reply arrives.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	u32 tid;
	struct i2o_message *msg;
	u32 *mptr;
	struct i2o_block_request *ireq = req->special;
	u32 tcntxt;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;
	int rc;
	u32 cmd;

	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");
		rc = -ENODEV;
		goto exit;
	}

	tid = dev->i2o_dev->lct_data.tid;
	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg)) {
		rc = PTR_ERR(msg);
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;
			break;

		case CACHE_SMARTFETCH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;
			break;

		default:
			break;
		}
	} else {
		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;
			break;
		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTBACK:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTTHROUGH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		default:
			break;
		}
	}

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec) {
		u8 cmd[10];
		u32 scsi_flags;
		u16 hwsec;

		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
		memset(cmd, 0, 10);

		sgl_offset = SGL_OFFSET_12;

		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

		*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
		*mptr++ = cpu_to_le32(tid);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = READ_10;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = WRITE_10;
			scsi_flags = 0xa0a0000a;
		}

		*mptr++ = cpu_to_le32(scsi_flags);

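		/*
		 * Scatter the 32-bit start LBA (big-endian) into CDB bytes
		 * 2-5 and the 16-bit block count into bytes 7-8.
		 */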
		*((u32 *)&cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
		*((u16 *)&cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);

		memcpy(mptr, cmd, 10);
		mptr += 4;
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
	} else
#endif
	{
		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
		*mptr++ = cpu_to_le32(ctl_flags);
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
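		/* 64-bit byte offset of the transfer: low 32 bits, then high */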
		*mptr++ =
		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
		*mptr++ =
		    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, msg);

	return 0;

context_remove:
	i2o_cntxt_list_remove(c, req);

nop_msg:
	i2o_msg_nop(c, msg);

exit:
	return rc;
};

/**
 *	i2o_block_request_fn - request queue handling function
 *	@q: request queue from which the request could be fetched
 *
 *	Takes the next request from the queue, transfers it and, if no error
 *	occurs, dequeues it from the queue. On arrival of the reply the message
 *	will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (req->cmd_type == REQ_TYPE_FS) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

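			/*
			 * Throttle: allow at most I2O_BLOCK_MAX_OPEN_REQUESTS
			 * outstanding requests per device.
			 */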
			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blk_start_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else {
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
		}
	}
};

/* I2O Block device operations definition */
static const struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.compat_ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.check_events = i2o_block_check_events,
};

/**
 *	i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *	Allocate memory for the i2o_block_device struct, gendisk and request
 *	queue, and initialize them as far as possible without device-specific
 *	information.
 *
 *	Returns a pointer to the allocated I2O Block device on success or a
 *	negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

cleanup_queue:
	put_disk(gd);

cleanup_dev:
	kfree(dev);

exit:
	return ERR_PTR(rc);
};

/**
 *	i2o_block_probe - verify if dev is an I2O Block device and install it
 *	@dev: device to verify if it is an I2O Block device
 *
 *	We only check whether the user_tid of the device is 0xfff; if so, we
 *	install the device. Otherwise it is in use by some other device
 *	(e.g. RAID).
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 body_size = 4;
	u16 power;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device\n");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_hw_sectors(queue, max_sectors);
	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 *      Ask for the current media data. If that isn't supported
	 *      then we ask for the device capacity data
	 */
	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

claim_release:
	i2o_device_claim_release(i2o_dev);

exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 *	i2o_block_init - Block OSM initialization function
 *
 *	Allocates the slab and mempool for request structs, registers the
 *	i2o_block block device and finally registers the Block OSM with the
 *	I2O core.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

exit:
	return rc;
};

/**
 *	i2o_block_exit - Block OSM exit function
 *
 *	Unregisters Block OSM from I2O core, unregisters i2o_block block device
 *	and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);