/*
        pd.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
                            Under the terms of the GNU General Public License.

        This is the high-level driver for parallel port IDE hard
        drives based on chips supported by the paride module.

	By default, the driver will autoprobe for a single parallel
	port IDE drive, but if individual drive parameters are
        specified, the driver can handle up to 4 drives.

        The behaviour of the pd driver can be altered by setting
        some parameters from the insmod command line.  The following
        parameters are adjustable:

	    drive0  	These four arguments can be arrays of
	    drive1	1-8 integers as follows:
	    drive2
	    drive3	<prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>

			Where,

		<prt>	is the base of the parallel port address for
			the corresponding drive.  (required)

		<pro>   is the protocol number for the adapter that
			supports this drive.  These numbers are
                        logged by 'paride' when the protocol modules
			are initialised.  (0 if not given)

		<uni>   for those adapters that support chained
			devices, this is the unit selector for the
		        chain of devices on the given port.  It should
			be zero for devices that don't support chaining.
			(0 if not given)

		<mod>   this can be -1 to choose the best mode, or one
		        of the mode numbers supported by the adapter.
			(-1 if not given)

		<geo>   this defaults to 0 to indicate that the driver
			should use the CHS geometry provided by the drive
			itself.  If set to 1, the driver will provide
			a logical geometry with 64 heads and 32 sectors
			per track, to be consistent with most SCSI
		        drivers.  (0 if not given)

		<sby>   set this to zero to disable the power saving
			standby mode, if needed.  (1 if not given)

		<dly>   some parallel ports require the driver to
			go more slowly.  -1 sets a default value that
			should work with the chosen protocol.  Otherwise,
			set this to a small integer; the larger it is,
			the slower the port i/o.  In some cases, setting
			this to zero will speed up the device. (default -1)

		<slv>   IDE disks can be jumpered to master or slave.
                        Set this to 0 to choose the master drive, 1 to
                        choose the slave, -1 (the default) to choose the
                        first drive found.

			An illustrative example appears at the end
			of this comment.

            major       You may use this parameter to override the
                        default major number (45) that this driver
                        will use.  Be sure to change the device
                        name as well.

            name        This parameter is a character string that
                        contains the name the kernel will use for this
                        device (in /proc output, for instance).
			(default "pd")

	    cluster	The driver will attempt to aggregate requests
			for adjacent blocks into larger multi-block
			clusters.  The maximum cluster size (in 512
			byte sectors) is set with this parameter.
			(default 64)

	    verbose	This parameter controls the amount of logging
			that the driver will do.  Set it to 0 for
			normal operation, 1 to see autoprobe progress
			messages, or 2 to see additional debugging
			output.  (default 0)

            nice        This parameter controls the driver's use of
                        idle CPU time, at the expense of some speed.
                        (default 0)

        If this driver is built into the kernel, you can use the
        following kernel command line parameters, with the same
        values as the corresponding module parameters listed above:

            pd.drive0
            pd.drive1
            pd.drive2
            pd.drive3
            pd.cluster
            pd.nice

        In addition, you can use the parameter pd.disable to disable
        the driver entirely.

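        As a purely illustrative example (the port address, protocol
        number and other values below are hypothetical and must match
        your actual adapter), a single drive might be configured with

            insmod pd drive0=0x378,1,0,-1,1,0,-1,-1 verbose=1

        i.e. port 0x378, protocol 1, unit 0, best mode, SCSI-style
        geometry, standby disabled, default delay, first drive found.
        The equivalent built-in form would be

            pd.drive0=0x378,1,0,-1,1,0,-1,-1
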
*/

/* Changes:

	1.01	GRG 1997.01.24	Restored pd_reset()
				Added eject ioctl
	1.02    GRG 1998.05.06  SMP spinlock changes,
				Added slave support
	1.03    GRG 1998.06.16  Eliminate an Ugh.
	1.04	GRG 1998.08.15  Extra debugging, use HZ in loop timing
	1.05    GRG 1998.09.24  Added jumbo support

*/

#define PD_VERSION      "1.05"
#define PD_MAJOR	45
#define PD_NAME		"pd"
#define PD_UNITS	4

/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.

*/
#include <linux/types.h>

static bool verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;

static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };

static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};

enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};

/* end of parameters */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>	/* for the eject ioctl */
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

module_param(verbose, bool, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"

#define PD_BITS    4

/* numbers for "SCSI" geometry */

#define PD_LOG_HEADS    64
#define PD_LOG_SECTS    32

#define PD_ID_OFF       54
#define PD_ID_LEN       14

#define PD_MAX_RETRIES  5
#define PD_TMO          800	/* interrupt timeout in jiffies */
#define PD_SPIN_DEL     50	/* spin delay in micro-seconds  */

#define PD_SPIN         (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
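
/* Worked example of the spin count above, assuming HZ == 100 (other
   HZ values scale it accordingly):  PD_SPIN = (1000000*800)/(100*50)
   = 160000 polls of PD_SPIN_DEL (50 us) each, i.e. the same 8 seconds
   as the PD_TMO interrupt timeout of 800 jiffies.			*/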

#define STAT_ERR        0x00001
#define STAT_INDEX      0x00002
#define STAT_ECC        0x00004
#define STAT_DRQ        0x00008
#define STAT_SEEK       0x00010
#define STAT_WRERR      0x00020
#define STAT_READY      0x00040
#define STAT_BUSY       0x00080

#define ERR_AMNF        0x00100
#define ERR_TK0NF       0x00200
#define ERR_ABRT        0x00400
#define ERR_MCR         0x00800
#define ERR_IDNF        0x01000
#define ERR_MC          0x02000
#define ERR_UNC         0x04000
#define ERR_TMO         0x10000

#define IDE_READ        	0x20
#define IDE_WRITE       	0x30
#define IDE_READ_VRFY		0x40
#define IDE_INIT_DEV_PARMS	0x91
#define IDE_STANDBY     	0x96
#define IDE_ACKCHANGE   	0xdb
#define IDE_DOORLOCK    	0xde
#define IDE_DOORUNLOCK  	0xdf
#define IDE_IDENTIFY    	0xec
#define IDE_EJECT		0xed

#define PD_NAMELEN	8

struct pd_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;
	int access;		/* count of active opens ... */
	int capacity;		/* Size of this volume in sectors */
	int heads;		/* physical geometry */
	int sectors;
	int cylinders;
	int can_lba;
	int drive;		/* master=0 slave=1 */
	int changed;		/* Have we seen a disk change ? */
	int removable;		/* removable media device  ?  */
	int standby;
	int alt_geom;
	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
	struct gendisk *gd;
};

static struct pd_unit pd[PD_UNITS];

static char pd_scratch[512];	/* scratch block buffer */

static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
	"READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
	"IDNF", "MC", "UNC", "???", "TMO"
};

static inline int status_reg(struct pd_unit *disk)
{
	return pi_read_regr(disk->pi, 1, 6);
}

static inline int read_reg(struct pd_unit *disk, int reg)
{
	return pi_read_regr(disk->pi, 0, reg);
}

static inline void write_status(struct pd_unit *disk, int val)
{
	pi_write_regr(disk->pi, 1, 6, val);
}

static inline void write_reg(struct pd_unit *disk, int reg, int val)
{
	pi_write_regr(disk->pi, 0, reg, val);
}

static inline u8 DRIVE(struct pd_unit *disk)
{
	return 0xa0+0x10*disk->drive;
}

/*  ide command interface */

static void pd_print_error(struct pd_unit *disk, char *msg, int status)
{
	int i;

	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
	for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
		if (status & (1 << i))
			printk(" %s", pd_errs[i]);
	printk("\n");
}

static void pd_reset(struct pd_unit *disk)
{				/* called only for MASTER drive */
	write_status(disk, 4);
	udelay(50);
	write_status(disk, 0);
	udelay(250);
}

#define DBMSG(msg)	((verbose>1)?(msg):NULL)

static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
{				/* polled wait */
	int k, r, e;

	k = 0;
	while (k < PD_SPIN) {
		r = status_reg(disk);
		k++;
		if (((r & w) == w) && !(r & STAT_BUSY))
			break;
		udelay(PD_SPIN_DEL);
	}
	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
	if (k >= PD_SPIN)
		e |= ERR_TMO;
	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
		pd_print_error(disk, msg, e);
	return e;
}

static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
{
	write_reg(disk, 6, DRIVE(disk) + h);
	write_reg(disk, 1, 0);		/* the IDE task file */
	write_reg(disk, 2, n);
	write_reg(disk, 3, s);
	write_reg(disk, 4, c0);
	write_reg(disk, 5, c1);
	write_reg(disk, 7, func);

	udelay(1);
}

static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
{
	int c1, c0, h, s;

	if (disk->can_lba) {
		s = block & 255;
		c0 = (block >>= 8) & 255;
		c1 = (block >>= 8) & 255;
		h = ((block >>= 8) & 15) + 0x40;
	} else {
		s = (block % disk->sectors) + 1;
		h = (block /= disk->sectors) % disk->heads;
		c0 = (block /= disk->heads) % 256;
		c1 = (block >>= 8);
	}
	pd_send_command(disk, count, s, h, c0, c1, func);
}
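
/* Illustrative mapping only: for a non-LBA drive with sectors == 63
   and heads == 16, block 5000 gives s = 5000 % 63 + 1 = 24,
   h = (5000 / 63) % 16 = 15, c0 = (5000 / 63 / 16) % 256 = 4, c1 = 0,
   i.e. cylinder 4, head 15, sector 24.  With LBA the same block is
   simply split into s = 0x88, c0 = 0x13, c1 = 0, h = 0x40.		*/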

/* The i/o request engine */

enum action {Fail = 0, Ok = 1, Hold, Wait};

static struct request *pd_req;	/* current request */
static enum action (*phase)(void);

static void run_fsm(void);

static void ps_tq_int(struct work_struct *work);

static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);

static void schedule_fsm(void)
{
	if (!nice)
		schedule_delayed_work(&fsm_tq, 0);
	else
		schedule_delayed_work(&fsm_tq, nice-1);
}

static void ps_tq_int(struct work_struct *work)
{
	run_fsm();
}

static enum action do_pd_io_start(void);
static enum action pd_special(void);
static enum action do_pd_read_start(void);
static enum action do_pd_write_start(void);
static enum action do_pd_read_drq(void);
static enum action do_pd_write_done(void);

static struct request_queue *pd_queue;
static int pd_claimed;

static struct pd_unit *pd_current; /* current request's drive */
static PIA *pi_current; /* current request's PIA */

static void run_fsm(void)
{
	while (1) {
		enum action res;
		unsigned long saved_flags;
		int stop = 0;

		if (!phase) {
			pd_current = pd_req->rq_disk->private_data;
			pi_current = pd_current->pi;
			phase = do_pd_io_start;
		}

		switch (pd_claimed) {
			case 0:
				pd_claimed = 1;
				if (!pi_schedule_claimed(pi_current, run_fsm))
					return;
			case 1:
				pd_claimed = 2;
				pi_current->proto->connect(pi_current);
		}

		switch(res = phase()) {
			case Ok: case Fail:
				pi_disconnect(pi_current);
				pd_claimed = 0;
				phase = NULL;
				spin_lock_irqsave(&pd_lock, saved_flags);
				if (!__blk_end_request_cur(pd_req,
						res == Ok ? 0 : -EIO)) {
					pd_req = blk_fetch_request(pd_queue);
					if (!pd_req)
						stop = 1;
				}
				spin_unlock_irqrestore(&pd_lock, saved_flags);
				if (stop)
					return;
			case Hold:
				schedule_fsm();
				return;
			case Wait:
				pi_disconnect(pi_current);
				pd_claimed = 0;
		}
	}
}

static int pd_retries = 0;	/* i/o error retry count */
static int pd_block;		/* address of next requested block */
static int pd_count;		/* number of blocks still to do */
static int pd_run;		/* sectors in current cluster */
static int pd_cmd;		/* current command READ/WRITE */
static char *pd_buf;		/* buffer for request in progress */

static enum action do_pd_io_start(void)
{
	if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
		phase = pd_special;
		return pd_special();
	}

	pd_cmd = rq_data_dir(pd_req);
	if (pd_cmd == READ || pd_cmd == WRITE) {
		pd_block = blk_rq_pos(pd_req);
		pd_count = blk_rq_cur_sectors(pd_req);
		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
			return Fail;
		pd_run = blk_rq_sectors(pd_req);
		pd_buf = pd_req->buffer;
		pd_retries = 0;
		if (pd_cmd == READ)
			return do_pd_read_start();
		else
			return do_pd_write_start();
	}
	return Fail;
}

static enum action pd_special(void)
{
	enum action (*func)(struct pd_unit *) = pd_req->special;
	return func(pd_current);
}

static int pd_next_buf(void)
{
	unsigned long saved_flags;

	pd_count--;
	pd_run--;
	pd_buf += 512;
	pd_block++;
	if (!pd_run)
		return 1;
	if (pd_count)
		return 0;
	spin_lock_irqsave(&pd_lock, saved_flags);
	__blk_end_request_cur(pd_req, 0);
	pd_count = blk_rq_cur_sectors(pd_req);
	pd_buf = pd_req->buffer;
	spin_unlock_irqrestore(&pd_lock, saved_flags);
	return 0;
}

static unsigned long pd_timeout;

static enum action do_pd_read_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
	phase = do_pd_read_drq;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static enum action do_pd_write_start(void)
{
	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			return Wait;
		}
		return Fail;
	}
	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				return Wait;
			}
			return Fail;
		}
		pi_write_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	phase = do_pd_write_done;
	pd_timeout = jiffies + PD_TMO;
	return Hold;
}

static inline int pd_ready(void)
{
	return !(status_reg(pd_current) & STAT_BUSY);
}

static enum action do_pd_read_drq(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	while (1) {
		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
			if (pd_retries < PD_MAX_RETRIES) {
				pd_retries++;
				phase = do_pd_read_start;
				return Wait;
			}
			return Fail;
		}
		pi_read_block(pd_current->pi, pd_buf, 512);
		if (pd_next_buf())
			break;
	}
	return Ok;
}

static enum action do_pd_write_done(void)
{
	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
		return Hold;

	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
		if (pd_retries < PD_MAX_RETRIES) {
			pd_retries++;
			phase = do_pd_write_start;
			return Wait;
		}
		return Fail;
	}
	return Ok;
}

/* special io requests */

/* According to the ATA standard, the default CHS geometry should be
   available following a reset.  Some Western Digital drives come up
   in a mode where only LBA addresses are accepted until the device
   parameters are initialised.
*/

static void pd_init_dev_parms(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
			IDE_INIT_DEV_PARMS);
	udelay(300);
	pd_wait_for(disk, 0, "Initialise device parameters");
}

static enum action pd_door_lock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_door_unlock(struct pd_unit *disk)
{
	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
		pd_wait_for(disk, STAT_READY, "Lock done");
	}
	return Ok;
}

static enum action pd_eject(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
	pd_wait_for(disk, 0, DBMSG("before eject"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
	pd_wait_for(disk, 0, DBMSG("after eject"));
	return Ok;
}

static enum action pd_media_check(struct pd_unit *disk)
{
	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
	if (!(r & STAT_ERR)) {
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
	} else
		disk->changed = 1;	/* say changed if other error */
	if (r & ERR_MC) {
		disk->changed = 1;
		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
	}
	return Ok;
}

static void pd_standby_off(struct pd_unit *disk)
{
	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
}

static enum action pd_identify(struct pd_unit *disk)
{
	int j;
	char id[PD_ID_LEN + 1];

/* WARNING:  here there may be dragons.  reset() applies to both drives,
   but we call it only on probing the MASTER. This should allow most
   common configurations to work, but be warned that a reset can clear
   settings on the SLAVE drive.
*/

	if (disk->drive == 0)
		pd_reset(disk);

	write_reg(disk, 6, DRIVE(disk));
	pd_wait_for(disk, 0, DBMSG("before IDENT"));
	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);

	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
		return Fail;
	pi_read_block(disk->pi, pd_scratch, 512);
	disk->can_lba = pd_scratch[99] & 2;
	disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
	disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
	disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
	if (disk->can_lba)
		disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
	else
		disk->capacity = disk->sectors * disk->heads * disk->cylinders;

	for (j = 0; j < PD_ID_LEN; j++)
		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
	j = PD_ID_LEN - 1;
	while ((j >= 0) && (id[j] <= 0x20))
		j--;
	j++;
	id[j] = 0;

	disk->removable = pd_scratch[0] & 0x80;

	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
	       disk->name, id,
	       disk->drive ? "slave" : "master",
	       disk->capacity, disk->capacity / 2048,
	       disk->cylinders, disk->heads, disk->sectors,
	       disk->removable ? "removable" : "fixed");

	if (disk->capacity)
		pd_init_dev_parms(disk);
	if (!disk->standby)
		pd_standby_off(disk);

	return Ok;
}

/* end of io request engine */

static void do_pd_request(struct request_queue * q)
{
	if (pd_req)
		return;
	pd_req = blk_fetch_request(q);
	if (!pd_req)
		return;

	schedule_fsm();
}

static int pd_special_command(struct pd_unit *disk,
		      enum action (*func)(struct pd_unit *disk))
{
	struct request *rq;
	int err = 0;

	rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->special = func;

	err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);

	blk_put_request(rq);
	return err;
}

/* kernel glue structures */

static int pd_open(struct block_device *bdev, fmode_t mode)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	mutex_lock(&pd_mutex);
	disk->access++;

	if (disk->removable) {
		pd_special_command(disk, pd_media_check);
		pd_special_command(disk, pd_door_lock);
	}
	mutex_unlock(&pd_mutex);
	return 0;
}

static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	if (disk->alt_geom) {
		geo->heads = PD_LOG_HEADS;
		geo->sectors = PD_LOG_SECTS;
		geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
	} else {
		geo->heads = disk->heads;
		geo->sectors = disk->sectors;
		geo->cylinders = disk->cylinders;
	}

	return 0;
}

static int pd_ioctl(struct block_device *bdev, fmode_t mode,
	 unsigned int cmd, unsigned long arg)
{
	struct pd_unit *disk = bdev->bd_disk->private_data;

	switch (cmd) {
	case CDROMEJECT:
		mutex_lock(&pd_mutex);
		if (disk->access == 1)
			pd_special_command(disk, pd_eject);
		mutex_unlock(&pd_mutex);
		return 0;
	default:
		return -EINVAL;
	}
}

static int pd_release(struct gendisk *p, fmode_t mode)
{
	struct pd_unit *disk = p->private_data;

	mutex_lock(&pd_mutex);
	if (!--disk->access && disk->removable)
		pd_special_command(disk, pd_door_unlock);
	mutex_unlock(&pd_mutex);

	return 0;
}

static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
{
	struct pd_unit *disk = p->private_data;
	int r;
	if (!disk->removable)
		return 0;
	pd_special_command(disk, pd_media_check);
	r = disk->changed;
	disk->changed = 0;
	return r ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static int pd_revalidate(struct gendisk *p)
{
	struct pd_unit *disk = p->private_data;
	if (pd_special_command(disk, pd_identify) == 0)
		set_capacity(p, disk->capacity);
	else
		set_capacity(p, 0);
	return 0;
}

static const struct block_device_operations pd_fops = {
	.owner		= THIS_MODULE,
	.open		= pd_open,
	.release	= pd_release,
	.ioctl		= pd_ioctl,
	.getgeo		= pd_getgeo,
	.check_events	= pd_check_events,
	.revalidate_disk= pd_revalidate
};

/* probing */

static void pd_probe_drive(struct pd_unit *disk)
{
	struct gendisk *p = alloc_disk(1 << PD_BITS);
	if (!p)
		return;
	strcpy(p->disk_name, disk->name);
	p->fops = &pd_fops;
	p->major = major;
	p->first_minor = (disk - pd) << PD_BITS;
	disk->gd = p;
	p->private_data = disk;
	p->queue = pd_queue;

	if (disk->drive == -1) {
		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
			if (pd_special_command(disk, pd_identify) == 0)
				return;
	} else if (pd_special_command(disk, pd_identify) == 0)
		return;
	disk->gd = NULL;
	put_disk(p);
}

static int pd_detect(void)
{
	int found = 0, unit, pd_drive_count = 0;
	struct pd_unit *disk;

	for (unit = 0; unit < PD_UNITS; unit++) {
		int *parm = *drives[unit];
		struct pd_unit *disk = pd + unit;
		disk->pi = &disk->pia;
		disk->access = 0;
		disk->changed = 1;
		disk->capacity = 0;
		disk->drive = parm[D_SLV];
		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
		disk->alt_geom = parm[D_GEO];
		disk->standby = parm[D_SBY];
		if (parm[D_PRT])
			pd_drive_count++;
	}

	if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
		disk = pd;
		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
			    PI_PD, verbose, disk->name)) {
			pd_probe_drive(disk);
			if (!disk->gd)
				pi_release(disk->pi);
		}

	} else {
		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
			int *parm = *drives[unit];
			if (!parm[D_PRT])
				continue;
			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
				     parm[D_UNI], parm[D_PRO], parm[D_DLY],
				     pd_scratch, PI_PD, verbose, disk->name)) {
				pd_probe_drive(disk);
				if (!disk->gd)
					pi_release(disk->pi);
			}
		}
	}
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		if (disk->gd) {
			set_capacity(disk->gd, disk->capacity);
			add_disk(disk->gd);
			found = 1;
		}
	}
	if (!found)
		printk("%s: no valid drive found\n", name);
	return found;
}

static int __init pd_init(void)
{
	if (disable)
		goto out1;

	pd_queue = blk_init_queue(do_pd_request, &pd_lock);
	if (!pd_queue)
		goto out1;

	blk_queue_max_hw_sectors(pd_queue, cluster);

	if (register_blkdev(major, name))
		goto out2;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PD_VERSION, major, cluster, nice);
	if (!pd_detect())
		goto out3;

	return 0;

out3:
	unregister_blkdev(major, name);
out2:
	blk_cleanup_queue(pd_queue);
out1:
	return -ENODEV;
}

static void __exit pd_exit(void)
{
	struct pd_unit *disk;
	int unit;
	unregister_blkdev(major, name);
	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
		struct gendisk *p = disk->gd;
		if (p) {
			disk->gd = NULL;
			del_gendisk(p);
			put_disk(p);
			pi_release(disk->pi);
		}
	}
	blk_cleanup_queue(pd_queue);
}

MODULE_LICENSE("GPL");
module_init(pd_init)
module_exit(pd_exit)