1 /*
2  * linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
3  *
4  *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
5  *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
6  *  Copyright (C) 2001-2002	Klaus Smolin
7  *					IBM Storage Technology Division
8  *
9  *  The big, the bad and the ugly.
10  *
11  *  Problems to be fixed because of the BH interface, or the lack thereof.
12  *
13  *  Fill me in stupid !!!
14  *
15  *  HOST:
16  *	General refers to the Controller and Driver "pair".
17  *  DATA HANDLER:
18  *	Under the context of Linux it generally refers to an interrupt handler.
19  *	However, it correctly describes the 'HOST'.
20  *  DATA BLOCK:
21  *	The amount of data needed to be transferred as predefined in the
22  *	setup of the device.
23  *  STORAGE ATOMIC:
24  *	The 'DATA BLOCK' associated with the 'DATA HANDLER'; it can be as
25  *	small as a single sector or as large as the entire command block
26  *	request.
27  */
28 
29 #include <linux/config.h>
30 #define __NO_VERSION__
31 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/string.h>
34 #include <linux/kernel.h>
35 #include <linux/timer.h>
36 #include <linux/mm.h>
37 #include <linux/interrupt.h>
38 #include <linux/major.h>
39 #include <linux/errno.h>
40 #include <linux/genhd.h>
41 #include <linux/blkpg.h>
42 #include <linux/slab.h>
43 #include <linux/pci.h>
44 #include <linux/delay.h>
45 #include <linux/hdreg.h>
46 #include <linux/ide.h>
47 
48 #include <asm/byteorder.h>
49 #include <asm/irq.h>
50 #include <asm/uaccess.h>
51 #include <asm/io.h>
52 #include <asm/bitops.h>
53 
54 #define DEBUG_TASKFILE	0	/* unset when fixed */
55 
56 #if DEBUG_TASKFILE
57 #define DTF(x...) printk(x)
58 #else
59 #define DTF(x...)
60 #endif
61 
62 /*
63  * Byte offset into rq->buffer of the next sector to be transferred.
64  */
65 #define task_rq_offset(rq) \
66 	(((rq)->nr_sectors - (rq)->current_nr_sectors) * SECTOR_SIZE)
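
/*
 * Example: for a buffer-based (non-bh) request that covers 8 sectors and
 * still has current_nr_sectors == 5 to go, three sectors have already been
 * moved, so task_rq_offset() evaluates to (8 - 5) * SECTOR_SIZE == 1536,
 * the offset of the next sector's data within rq->buffer.
 */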
67 
68 /*
69  * for now, taskfile requests are special :/
70  *
71  * However, upon the creation of the atapi version of packet_command
72  * data-phase ISR plus its own diagnostics and extensions for direct access
73  * (ioctl,read,write,rip,stream -- atapi), the kmap/kunmap for PIO will
74  * come localized.
75  */
76 inline char *task_map_rq (struct request *rq, unsigned long *flags)
77 {
78 	if (rq->bh)
79 		return ide_map_buffer(rq, flags);
80 	return rq->buffer + task_rq_offset(rq);
81 }
82 
83 inline void task_unmap_rq (struct request *rq, char *buf, unsigned long *flags)
84 {
85 	if (rq->bh)
86 		ide_unmap_buffer(buf, flags);
87 }
88 
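/*
 * Illustrative sketch (disabled; the example_* helper is not part of this
 * driver): the map / transfer / unmap pattern that the PIO handlers below
 * follow for one sector of a request.
 */
#if 0
static void example_pio_read_one_sector (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;
	char *buf = task_map_rq(rq, &flags);

	/* pull one sector (SECTOR_WORDS 32-bit words) from the drive */
	taskfile_input_data(drive, buf, SECTOR_WORDS);
	task_unmap_rq(rq, buf, &flags);
}
#endif
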
89 inline u32 task_read_24 (ide_drive_t *drive)
90 {
91 	return	(HWIF(drive)->INB(IDE_HCYL_REG)<<16) |
92 		(HWIF(drive)->INB(IDE_LCYL_REG)<<8) |
93 		 HWIF(drive)->INB(IDE_SECTOR_REG);
94 }
95 
96 EXPORT_SYMBOL(task_read_24);
97 
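/*
 * Example: if the high cylinder, low cylinder and sector registers read
 * back 0x01, 0x02 and 0x03, task_read_24() returns 0x010203 -- the three
 * registers supply bits 23-16, 15-8 and 7-0 of a 24-bit LBA value.
 */
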
98 static void ata_bswap_data (void *buffer, int wcount)
99 {
100 	u16 *p = buffer;
101 
102 	while (wcount--) {
103 		*p = *p << 8 | *p >> 8; p++;
104 		*p = *p << 8 | *p >> 8; p++;
105 	}
106 }
107 
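/*
 * Example: ata_bswap_data() swaps two 16-bit words per count, so with
 * wcount == 1 the words 0x1234 0x5678 become 0x3412 0x7856 in place.
 */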
108 
109 void taskfile_input_data (ide_drive_t *drive, void *buffer, u32 wcount)
110 {
111 	HWIF(drive)->ata_input_data(drive, buffer, wcount);
112 	if (drive->bswap)
113 		ata_bswap_data(buffer, wcount);
114 }
115 
116 EXPORT_SYMBOL(taskfile_input_data);
117 
118 void taskfile_output_data (ide_drive_t *drive, void *buffer, u32 wcount)
119 {
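	/*
	 * For drives that need byte-swapped data, swap the buffer for the
	 * transfer and swap it back afterwards so the caller's copy is
	 * left unmodified.
	 */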
120 	if (drive->bswap) {
121 		ata_bswap_data(buffer, wcount);
122 		HWIF(drive)->ata_output_data(drive, buffer, wcount);
123 		ata_bswap_data(buffer, wcount);
124 	} else {
125 		HWIF(drive)->ata_output_data(drive, buffer, wcount);
126 	}
127 }
128 
129 EXPORT_SYMBOL(taskfile_output_data);
130 
131 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
132 {
133 	ide_task_t args;
134 	memset(&args, 0, sizeof(ide_task_t));
135 	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
136 	if (drive->media == ide_disk)
137 		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_IDENTIFY;
138 	else
139 		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_PIDENTIFY;
140 	args.command_type			= ide_cmd_type_parser(&args);
141 	return ide_raw_taskfile(drive, &args, buf);
142 }
143 
144 EXPORT_SYMBOL(taskfile_lib_get_identify);
145 
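/*
 * Illustrative sketch (disabled; example_dump_identify is not part of this
 * driver): pulling raw IDENTIFY data into a 512-byte buffer.  A zero return
 * is treated as success here; a real caller would likely kmalloc() the
 * buffer rather than put it on the stack.
 */
#if 0
static void example_dump_identify (ide_drive_t *drive)
{
	u8 id_buf[SECTOR_WORDS * 4];		/* one 512-byte sector */

	if (taskfile_lib_get_identify(drive, id_buf) == 0)
		printk(KERN_INFO "%s: IDENTIFY data read\n", drive->name);
}
#endif
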
146 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
147 void debug_taskfile (ide_drive_t *drive, ide_task_t *args)
148 {
149 	printk(KERN_INFO "%s: ", drive->name);
150 //	printk("TF.0=x%02x ", args->tfRegister[IDE_DATA_OFFSET]);
151 	printk("TF.1=x%02x ", args->tfRegister[IDE_FEATURE_OFFSET]);
152 	printk("TF.2=x%02x ", args->tfRegister[IDE_NSECTOR_OFFSET]);
153 	printk("TF.3=x%02x ", args->tfRegister[IDE_SECTOR_OFFSET]);
154 	printk("TF.4=x%02x ", args->tfRegister[IDE_LCYL_OFFSET]);
155 	printk("TF.5=x%02x ", args->tfRegister[IDE_HCYL_OFFSET]);
156 	printk("TF.6=x%02x ", args->tfRegister[IDE_SELECT_OFFSET]);
157 	printk("TF.7=x%02x\n", args->tfRegister[IDE_COMMAND_OFFSET]);
158 	printk(KERN_INFO "%s: ", drive->name);
159 //	printk("HTF.0=x%02x ", args->hobRegister[IDE_DATA_OFFSET_HOB]);
160 	printk("HTF.1=x%02x ", args->hobRegister[IDE_FEATURE_OFFSET_HOB]);
161 	printk("HTF.2=x%02x ", args->hobRegister[IDE_NSECTOR_OFFSET_HOB]);
162 	printk("HTF.3=x%02x ", args->hobRegister[IDE_SECTOR_OFFSET_HOB]);
163 	printk("HTF.4=x%02x ", args->hobRegister[IDE_LCYL_OFFSET_HOB]);
164 	printk("HTF.5=x%02x ", args->hobRegister[IDE_HCYL_OFFSET_HOB]);
165 	printk("HTF.6=x%02x ", args->hobRegister[IDE_SELECT_OFFSET_HOB]);
166 	printk("HTF.7=x%02x\n", args->hobRegister[IDE_CONTROL_OFFSET_HOB]);
167 }
168 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
169 
170 ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
171 {
172 	ide_hwif_t *hwif	= HWIF(drive);
173 	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
174 	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
175 	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;
176 
177 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
178 	debug_taskfile(drive, task);
179 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
180 
181 	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise noted */
182 	if (IDE_CONTROL_REG) {
183 		/* clear nIEN */
184 		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
185 	}
186 	SELECT_MASK(drive, 0);
187 
188 	if (drive->addressing == 1) {
189 		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
190 		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
191 		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
192 		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
193 		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
194 	}
195 
196 	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
197 	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
198 	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
199 	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
200 	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
201 
202 	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);
203 	if (task->handler != NULL) {
204 		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
205 		if (task->prehandler != NULL)
206 			return task->prehandler(drive, task->rq);
207 		return ide_started;
208 	}
209 	/* for dma commands we don't set the handler */
210 #if 0
211 	if (blk_fs_request(task->rq) && drive->using_dma) {
212 		if (rq_data_dir(task->rq) == READ) {
213 			if (hwif->ide_dma_read(drive))
214 				return ide_stopped;
215 		} else {
216 			if (hwif->ide_dma_write(drive))
217 				return ide_stopped;
218 		}
219 	} else {
220 		if (!drive->using_dma && (task->handler == NULL))
221 			return ide_stopped;
222 
223 		switch(taskfile->command) {
224 			case WIN_WRITEDMA_ONCE:
225 			case WIN_WRITEDMA:
226 			case WIN_WRITEDMA_EXT:
227 				hwif->ide_dma_write(drive);
228 				break;
229 			case WIN_READDMA_ONCE:
230 			case WIN_READDMA:
231 			case WIN_READDMA_EXT:
232 			case WIN_IDENTIFY_DMA:
233 				hwif->ide_dma_read(drive);
234 				break;
235 			default:
236 				if (task->handler == NULL)
237 					return ide_stopped;
238 		}
239 	}
240 	return ide_started;
241 #else
242 	switch(taskfile->command) {
243 		case WIN_WRITEDMA_ONCE:
244 		case WIN_WRITEDMA:
245 		case WIN_WRITEDMA_EXT:
246 			if (drive->using_dma && !(hwif->ide_dma_write(drive)))
247 				return ide_started;
248 		case WIN_READDMA_ONCE:
249 		case WIN_READDMA:
250 		case WIN_READDMA_EXT:
251 		case WIN_IDENTIFY_DMA:
252 			if (drive->using_dma && !(hwif->ide_dma_read(drive)))
253 				return ide_started;
254 		default:
255 			break;
256 	}
257 	return ide_stopped;
258 #endif
259 }
260 
261 EXPORT_SYMBOL(do_rw_taskfile);
262 
263 /*
264  * Error reporting, in human readable form (luxurious, but a memory hog).
265  */
266 u8 taskfile_dump_status (ide_drive_t *drive, const char *msg, u8 stat)
267 {
268 	ide_hwif_t *hwif = HWIF(drive);
269 	unsigned long flags;
270 	u8 err = 0;
271 
272 	local_irq_set(flags);
273 	printk("%s: %s: status=0x%02x", drive->name, msg, stat);
274 #if FANCY_STATUS_DUMPS
275 	printk(" { ");
276 	if (stat & BUSY_STAT) {
277 		printk("Busy ");
278 	} else {
279 		if (stat & READY_STAT)	printk("DriveReady ");
280 		if (stat & WRERR_STAT)	printk("DeviceFault ");
281 		if (stat & SEEK_STAT)	printk("SeekComplete ");
282 		if (stat & DRQ_STAT)	printk("DataRequest ");
283 		if (stat & ECC_STAT)	printk("CorrectedError ");
284 		if (stat & INDEX_STAT)	printk("Index ");
285 		if (stat & ERR_STAT)	printk("Error ");
286 	}
287 	printk("}");
288 #endif  /* FANCY_STATUS_DUMPS */
289 	printk("\n");
290 	if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
291 		err = hwif->INB(IDE_ERROR_REG);
292 		printk("%s: %s: error=0x%02x", drive->name, msg, err);
293 #if FANCY_STATUS_DUMPS
294 		if (drive->media == ide_disk)
295 			goto media_out;
296 
297 		printk(" { ");
298 		if (err & ABRT_ERR)	printk("DriveStatusError ");
299 		if (err & ICRC_ERR)	printk("Bad%s", (err & ABRT_ERR) ? "CRC " : "Sector ");
300 		if (err & ECC_ERR)	printk("UncorrectableError ");
301 		if (err & ID_ERR)	printk("SectorIdNotFound ");
302 		if (err & TRK0_ERR)	printk("TrackZeroNotFound ");
303 		if (err & MARK_ERR)	printk("AddrMarkNotFound ");
304 		printk("}");
305 		if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR ||
306 		    (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
307 			if (drive->addressing == 1) {
308 				u64 sectors = 0;
309 				u32 high = 0;
310 				u32 low = task_read_24(drive);
311 				hwif->OUTB(0x80, IDE_CONTROL_REG);
312 				high = task_read_24(drive);
313 				sectors = ((u64)high << 24) | low;
314 				printk(", LBAsect=%lld", sectors);
315 			} else {
316 				u8 cur  = hwif->INB(IDE_SELECT_REG);
317 				u8 low  = hwif->INB(IDE_LCYL_REG);
318 				u8 high = hwif->INB(IDE_HCYL_REG);
319 				u8 sect = hwif->INB(IDE_SECTOR_REG);
320 				/* using LBA? */
321 				if (cur & 0x40) {
322 					printk(", LBAsect=%d", (u32)
323 						((cur&0xf)<<24)|(high<<16)|
324 						(low<<8)|sect);
325 				} else {
326 					printk(", CHS=%d/%d/%d",
327 						((high<<8) + low),
328 						(cur & 0xf), sect);
329 				}
330 			}
331 			if (HWGROUP(drive)->rq)
332 				printk(", sector=%lu",
333 					HWGROUP(drive)->rq->sector);
334 		}
335 media_out:
336 #endif  /* FANCY_STATUS_DUMPS */
337 		printk("\n");
338 	}
339 	local_irq_restore(flags);
340 	return err;
341 }
342 
343 EXPORT_SYMBOL(taskfile_dump_status);
344 
345 /*
346  * Clean up after success/failure of an explicit taskfile operation.
347  */
348 void ide_end_taskfile (ide_drive_t *drive, u8 stat, u8 err)
349 {
350 	ide_hwif_t *hwif = HWIF(drive);
351 	unsigned long flags;
352 	struct request *rq;
353 	ide_task_t *args;
354 	task_ioreg_t command;
355 
356 	spin_lock_irqsave(&io_request_lock, flags);
357 	rq = HWGROUP(drive)->rq;
358 	spin_unlock_irqrestore(&io_request_lock, flags);
359 	args = (ide_task_t *) rq->special;
360 
361 	command = args->tfRegister[IDE_COMMAND_OFFSET];
362 
363 	if (rq->errors == 0)
364 		rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
365 
366 	if (args->tf_in_flags.b.data) {
367 		u16 data = hwif->INW(IDE_DATA_REG);
368 		args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
369 		args->hobRegister[IDE_DATA_OFFSET_HOB]	= (data >> 8) & 0xFF;
370 	}
371 	args->tfRegister[IDE_ERROR_OFFSET]   = err;
372 	args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
373 	args->tfRegister[IDE_SECTOR_OFFSET]  = hwif->INB(IDE_SECTOR_REG);
374 	args->tfRegister[IDE_LCYL_OFFSET]    = hwif->INB(IDE_LCYL_REG);
375 	args->tfRegister[IDE_HCYL_OFFSET]    = hwif->INB(IDE_HCYL_REG);
376 	args->tfRegister[IDE_SELECT_OFFSET]  = hwif->INB(IDE_SELECT_REG);
377 	args->tfRegister[IDE_STATUS_OFFSET]  = stat;
378 	if ((drive->id->command_set_2 & 0x0400) &&
379 	    (drive->id->cfs_enable_2 & 0x0400) &&
380 	    (drive->addressing == 1)) {
381 		hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG_HOB);
382 		args->hobRegister[IDE_FEATURE_OFFSET_HOB] = hwif->INB(IDE_FEATURE_REG);
383 		args->hobRegister[IDE_NSECTOR_OFFSET_HOB] = hwif->INB(IDE_NSECTOR_REG);
384 		args->hobRegister[IDE_SECTOR_OFFSET_HOB]  = hwif->INB(IDE_SECTOR_REG);
385 		args->hobRegister[IDE_LCYL_OFFSET_HOB]    = hwif->INB(IDE_LCYL_REG);
386 		args->hobRegister[IDE_HCYL_OFFSET_HOB]    = hwif->INB(IDE_HCYL_REG);
387 	}
388 
389 #if 0
390 /*	taskfile_settings_update(drive, args, command); */
391 
392 	if (args->posthandler != NULL)
393 		args->posthandler(drive, args);
394 #endif
395 
396 	spin_lock_irqsave(&io_request_lock, flags);
397 	blkdev_dequeue_request(rq);
398 	HWGROUP(drive)->rq = NULL;
399 	end_that_request_last(rq);
400 	spin_unlock_irqrestore(&io_request_lock, flags);
401 }
402 
403 EXPORT_SYMBOL(ide_end_taskfile);
404 
405 /*
406  * try_to_flush_leftover_data() is invoked in response to a drive
407  * unexpectedly having its DRQ_STAT bit set.  As an alternative to
408  * resetting the drive, this routine tries to clear the condition
409  * by reading a sector's worth of data from the drive.  Of course,
410  * this may not help if the drive is *waiting* for data from *us*.
411  */
412 void task_try_to_flush_leftover_data (ide_drive_t *drive)
413 {
414 	int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
415 
416 	if (drive->media != ide_disk)
417 		return;
418 	while (i > 0) {
419 		u32 buffer[16];
420 		unsigned int wcount = (i > 16) ? 16 : i;
421 		i -= wcount;
422 		taskfile_input_data(drive, buffer, wcount);
423 	}
424 }
425 
426 EXPORT_SYMBOL(task_try_to_flush_leftover_data);
427 
428 /*
429  * taskfile_error() takes action based on the error returned by the drive.
430  */
431 ide_startstop_t taskfile_error (ide_drive_t *drive, const char *msg, u8 stat)
432 {
433 	ide_hwif_t *hwif;
434 	struct request *rq;
435 	u8 err;
436 
437         err = taskfile_dump_status(drive, msg, stat);
438 	if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
439 		return ide_stopped;
440 
441 	hwif = HWIF(drive);
442 	/* retry only "normal" I/O: */
443 	if (rq->cmd == IDE_DRIVE_TASKFILE) {
444 		rq->errors = 1;
445 		ide_end_taskfile(drive, stat, err);
446 		return ide_stopped;
447 	}
448 	if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
449 		/* other bits are useless when BUSY */
450 		rq->errors |= ERROR_RESET;
451 	} else {
452 		if (drive->media != ide_disk)
453 			goto media_out;
454 		if (stat & ERR_STAT) {
455 			/* err has different meaning on cdrom and tape */
456 			if (err == ABRT_ERR) {
457 				if (drive->select.b.lba &&
458 				    (hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY))
459 					/* some newer drives don't
460 					 * support WIN_SPECIFY
461 					 */
462 					return ide_stopped;
463 			} else if ((err & BAD_CRC) == BAD_CRC) {
464 				/* UDMA crc error -- just retry the operation */
465 				drive->crc_count++;
466 			} else if (err & (BBD_ERR | ECC_ERR)) {
467 				/* retries won't help these */
468 				rq->errors = ERROR_MAX;
469 			} else if (err & TRK0_ERR) {
470 				/* help it find track zero */
471 				rq->errors |= ERROR_RECAL;
472 			}
473                 }
474 media_out:
475                 if ((stat & DRQ_STAT) && rq_data_dir(rq) != WRITE)
476                         task_try_to_flush_leftover_data(drive);
477 	}
478 	if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) {
479 		/* force an abort */
480 		hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
481 	}
482 	if (rq->errors >= ERROR_MAX) {
483 		DRIVER(drive)->end_request(drive, 0);
484 	} else {
485 		if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
486 			++rq->errors;
487 			return ide_do_reset(drive);
488 		}
489 		if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
490 			drive->special.b.recalibrate = 1;
491 		++rq->errors;
492 	}
493 	return ide_stopped;
494 }
495 
496 EXPORT_SYMBOL(taskfile_error);
497 
498 /*
499  * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
500  */
501 ide_startstop_t set_multmode_intr (ide_drive_t *drive)
502 {
503 	ide_hwif_t *hwif = HWIF(drive);
504 	u8 stat;
505 
506 	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
507 		drive->mult_count = drive->mult_req;
508 	} else {
509 		drive->mult_req = drive->mult_count = 0;
510 		drive->special.b.recalibrate = 1;
511 		(void) ide_dump_status(drive, "set_multmode", stat);
512 	}
513 	return ide_stopped;
514 }
515 
516 EXPORT_SYMBOL(set_multmode_intr);
517 
518 /*
519  * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
520  */
521 ide_startstop_t set_geometry_intr (ide_drive_t *drive)
522 {
523 	ide_hwif_t *hwif = HWIF(drive);
524 	int retries = 5;
525 	u8 stat;
526 
527 	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
528 		udelay(10);
529 
530 	if (OK_STAT(stat, READY_STAT, BAD_STAT))
531 		return ide_stopped;
532 
533 	if (stat & (ERR_STAT|DRQ_STAT))
534 		return DRIVER(drive)->error(drive, "set_geometry_intr", stat);
535 
536 	if (HWGROUP(drive)->handler != NULL)
537 		BUG();
538 	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
539 	return ide_started;
540 }
541 
542 EXPORT_SYMBOL(set_geometry_intr);
543 
544 /*
545  * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
546  */
547 ide_startstop_t recal_intr (ide_drive_t *drive)
548 {
549 	ide_hwif_t *hwif = HWIF(drive);
550 	u8 stat;
551 
552 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
553 		return DRIVER(drive)->error(drive, "recal_intr", stat);
554 	return ide_stopped;
555 }
556 
557 EXPORT_SYMBOL(recal_intr);
558 
559 /*
560  * Handler for commands without a data phase
561  */
562 ide_startstop_t task_no_data_intr (ide_drive_t *drive)
563 {
564 	ide_task_t *args	= HWGROUP(drive)->rq->special;
565 	ide_hwif_t *hwif	= HWIF(drive);
566 	u8 stat;
567 
568 	local_irq_enable();
569 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
570 		DTF("%s: command opcode 0x%02x\n", drive->name,
571 			args->tfRegister[IDE_COMMAND_OFFSET]);
572 		return DRIVER(drive)->error(drive, "task_no_data_intr", stat);
573 		/* calls ide_end_drive_cmd */
574 	}
575 	if (args)
576 		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
577 
578 	return ide_stopped;
579 }
580 
581 EXPORT_SYMBOL(task_no_data_intr);
582 
583 /*
584  * Handler for command with PIO data-in phase, READ
585  */
586 /*
587  * FIXME before 2.4 enable ...
588  *	DATA integrity issue upon error. <andre@linux-ide.org>
589  */
590 ide_startstop_t task_in_intr (ide_drive_t *drive)
591 {
592 	struct request *rq	= HWGROUP(drive)->rq;
593 	ide_hwif_t *hwif	= HWIF(drive);
594 	char *pBuf		= NULL;
595 	u8 stat;
596 	unsigned long flags;
597 
598 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
599 		if (stat & (ERR_STAT|DRQ_STAT)) {
600 #if 0
601 			DTF("%s: attempting to recover last " \
602 				"sector counter status=0x%02x\n",
603 				drive->name, stat);
604 			/*
605 			 * Expect a BUG BOMB if we attempt to rewind the
606 			 * offset in the BH aka PAGE in the current BLOCK
607 			 * segment.  This is different than the HOST segment.
608 			 */
609 #endif
610 			if (!rq->bh)
611 				rq->current_nr_sectors++;
612 			return DRIVER(drive)->error(drive, "task_in_intr", stat);
613 		}
614 		if (!(stat & BUSY_STAT)) {
615 			DTF("task_in_intr too soon, wait for next interrupt\n");
616 			if (HWGROUP(drive)->handler == NULL)
617 				ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
618 			return ide_started;
619 		}
620 	}
621 #if 0
622 
623 	/*
624 	 * Holding point for a brain dump of a thought :-/
625 	 */
626 
627 	if (!OK_STAT(stat,DRIVE_READY,drive->bad_wstat)) {
628 		DTF("%s: READ attempting to recover last " \
629 			"sector counter status=0x%02x\n",
630 			drive->name, stat);
631 		rq->current_nr_sectors++;
632 		return DRIVER(drive)->error(drive, "task_in_intr", stat);
633         }
634 	if (!rq->current_nr_sectors)
635 		if (!DRIVER(drive)->end_request(drive, 1))
636 			return ide_stopped;
637 
638 	if (--rq->current_nr_sectors <= 0)
639 		if (!DRIVER(drive)->end_request(drive, 1))
640 			return ide_stopped;
641 #endif
642 
643 	pBuf = task_map_rq(rq, &flags);
644 	DTF("Read: %p, rq->current_nr_sectors: %d, stat: %02x\n",
645 		pBuf, (int) rq->current_nr_sectors, stat);
646 	taskfile_input_data(drive, pBuf, SECTOR_WORDS);
647 	task_unmap_rq(rq, pBuf, &flags);
648 	/*
649 	 * FIXME :: We really can not legally get a new page/bh
650 	 * regardless, if this is the end of our segment.
651 	 * BH walking or segment can only be updated after we have a good
652 	 * hwif->INB(IDE_STATUS_REG); return.
653 	 */
654 	if (--rq->current_nr_sectors <= 0)
655 		if (!DRIVER(drive)->end_request(drive, 1))
656 			return ide_stopped;
657 	/*
658 	 * ERM, it is technically legal to leave/exit here but it makes
659 	 * a mess of the code ...
660 	 */
661 	if (HWGROUP(drive)->handler == NULL)
662 		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
663 	return ide_started;
664 }
665 
666 EXPORT_SYMBOL(task_in_intr);
667 
668 /*
669  * Handler for command with Read Multiple
670  */
671 ide_startstop_t task_mulin_intr (ide_drive_t *drive)
672 {
673 	ide_hwif_t *hwif	= HWIF(drive);
674 	struct request *rq	= HWGROUP(drive)->rq;
675 	char *pBuf		= NULL;
676 	unsigned int msect	= drive->mult_count;
677 	unsigned int nsect;
678 	unsigned long flags;
679 	u8 stat;
680 
681 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),DATA_READY,BAD_R_STAT)) {
682 		if (stat & (ERR_STAT|DRQ_STAT)) {
683 			if (!rq->bh) {
684 				rq->current_nr_sectors += drive->mult_count;
685 				/*
686 				 * NOTE: could rewind beyond beginning :-/
687 				 */
688 			} else {
689 				printk("%s: MULTI-READ assume all data " \
690 					"transferred is bad status=0x%02x\n",
691 					drive->name, stat);
692 			}
693 			return DRIVER(drive)->error(drive, "task_mulin_intr", stat);
694 		}
695 		/* no data yet, so wait for another interrupt */
696 		if (HWGROUP(drive)->handler == NULL)
697 			ide_set_handler(drive, &task_mulin_intr, WAIT_WORSTCASE, NULL);
698 		return ide_started;
699 	}
700 
701 	do {
702 		nsect = rq->current_nr_sectors;
703 		if (nsect > msect)
704 			nsect = msect;
705 		pBuf = task_map_rq(rq, &flags);
706 		DTF("Multiread: %p, nsect: %d, msect: %d, " \
707 			" rq->current_nr_sectors: %d\n",
708 			pBuf, nsect, msect, rq->current_nr_sectors);
709 		taskfile_input_data(drive, pBuf, nsect * SECTOR_WORDS);
710 		task_unmap_rq(rq, pBuf, &flags);
711 		rq->errors = 0;
712 		rq->current_nr_sectors -= nsect;
713 		msect -= nsect;
714 		/*
715 		 * FIXME :: We really can not legally get a new page/bh
716 		 * regardless, if this is the end of our segment.
717 		 * BH walking or segment can only be updated after we have a
718 		 * good hwif->INB(IDE_STATUS_REG); return.
719 		 */
720 		if (!rq->current_nr_sectors) {
721 			if (!DRIVER(drive)->end_request(drive, 1))
722 				return ide_stopped;
723 		}
724 	} while (msect);
725 	if (HWGROUP(drive)->handler == NULL)
726 		ide_set_handler(drive, &task_mulin_intr, WAIT_WORSTCASE, NULL);
727 	return ide_started;
728 }
729 
730 EXPORT_SYMBOL(task_mulin_intr);
731 
732 /*
733  * VERIFY ME before 2.4 ... unexpected race is possible based on details
734  * RMK with 74LS245/373/374 TTL buffer logic because of passthrough.
735  */
736 ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
737 {
738 	char *pBuf		= NULL;
739 	unsigned long flags;
740 	ide_startstop_t startstop;
741 
742 	if (ide_wait_stat(&startstop, drive, DATA_READY,
743 			drive->bad_wstat, WAIT_DRQ)) {
744 		printk(KERN_ERR "%s: no DRQ after issuing WRITE%s\n",
745 			drive->name,
746 			drive->addressing ? "_EXT" : "");
747 		return startstop;
748 	}
749 	/* For Write_sectors we need to stuff the first sector */
750 	pBuf = task_map_rq(rq, &flags);
751 	taskfile_output_data(drive, pBuf, SECTOR_WORDS);
752 	rq->current_nr_sectors--;
753 	task_unmap_rq(rq, pBuf, &flags);
754 	return ide_started;
755 }
756 
757 EXPORT_SYMBOL(pre_task_out_intr);
758 
759 /*
760  * Handler for command with PIO data-out phase WRITE
761  *
762  * WOOHOO this is a CORRECT STATE DIAGRAM NOW, <andre@linux-ide.org>
763  */
764 ide_startstop_t task_out_intr (ide_drive_t *drive)
765 {
766 	ide_hwif_t *hwif	= HWIF(drive);
767 	struct request *rq	= HWGROUP(drive)->rq;
768 	char *pBuf		= NULL;
769 	unsigned long flags;
770 	u8 stat;
771 
772 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), DRIVE_READY, drive->bad_wstat)) {
773 		DTF("%s: WRITE attempting to recover last " \
774 			"sector counter status=0x%02x\n",
775 			drive->name, stat);
776 		rq->current_nr_sectors++;
777 		return DRIVER(drive)->error(drive, "task_out_intr", stat);
778 	}
779 	/*
780 	 * Safe to update request for partial completions.
781 	 * We have a good STATUS CHECK!!!
782 	 */
783 	if (!rq->current_nr_sectors)
784 		if (!DRIVER(drive)->end_request(drive, 1))
785 			return ide_stopped;
786 	if ((rq->current_nr_sectors==1) ^ (stat & DRQ_STAT)) {
787 		rq = HWGROUP(drive)->rq;
788 		pBuf = task_map_rq(rq, &flags);
789 		DTF("write: %p, rq->current_nr_sectors: %d\n",
790 			pBuf, (int) rq->current_nr_sectors);
791 		taskfile_output_data(drive, pBuf, SECTOR_WORDS);
792 		task_unmap_rq(rq, pBuf, &flags);
793 		rq->errors = 0;
794 		rq->current_nr_sectors--;
795 	}
796 	if (HWGROUP(drive)->handler == NULL)
797 		ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
798 	return ide_started;
799 }
800 
801 EXPORT_SYMBOL(task_out_intr);
802 
803 #undef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
804 
805 ide_startstop_t pre_task_mulout_intr (ide_drive_t *drive, struct request *rq)
806 {
807 #ifdef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
808 	ide_hwif_t *hwif		= HWIF(drive);
809 	char *pBuf			= NULL;
810 	unsigned int nsect = 0, msect	= drive->mult_count;
811         u8 stat;
812 	unsigned long flags;
813 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
814 
815 	ide_task_t *args = rq->special;
816 	ide_startstop_t startstop;
817 
818 #if 0
819 	/*
820 	 * assign private copy for multi-write
821 	 */
822 	memcpy(&HWGROUP(drive)->wrq, rq, sizeof(struct request));
823 #endif
824 
825 	if (ide_wait_stat(&startstop, drive, DATA_READY,
826 			drive->bad_wstat, WAIT_DRQ)) {
827 		printk(KERN_ERR "%s: no DRQ after issuing %s\n",
828 			drive->name,
829 			drive->addressing ? "MULTWRITE_EXT" : "MULTWRITE");
830 		return startstop;
831 	}
832 #ifdef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
833 
834 	do {
835 		nsect = rq->current_nr_sectors;
836 		if (nsect > msect)
837 			nsect = msect;
838 		pBuf = task_map_rq(rq, &flags);
839 		DTF("Pre-Multiwrite: %p, nsect: %d, msect: %d, " \
840 			"rq->current_nr_sectors: %ld\n",
841 			pBuf, nsect, msect, rq->current_nr_sectors);
842 		msect -= nsect;
843 		taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
844 		task_unmap_rq(rq, pBuf, &flags);
845 		rq->current_nr_sectors -= nsect;
846 		if (!rq->current_nr_sectors) {
847 			if (!DRIVER(drive)->end_request(drive, 1))
848 				if (!rq->bh) {
849 					stat = hwif->INB(IDE_STATUS_REG);
850 					return ide_stopped;
851 				}
852 		}
853 	} while (msect);
854 	rq->errors = 0;
855 	return ide_started;
856 #else /* ! ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
857 	if (!(drive_is_ready(drive))) {
858 		int i;
859 		for (i=0; i<100; i++) {
860 			if (drive_is_ready(drive))
861 				break;
862 		}
863 	}
864 
865 	/*
866 	 * WARNING :: if the drive has not acked good status we may not
867 	 * move the DATA-TRANSFER T-Bar as BSY != 0. <andre@linux-ide.org>
868 	 */
869 	return args->handler(drive);
870 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
871 }
872 
873 EXPORT_SYMBOL(pre_task_mulout_intr);
874 
875 /*
876  * FIXME before enabling in 2.4 ... DATA integrity issue upon error.
877  */
878 /*
879  * Handler for command write multiple
880  * Called directly from execute_drive_cmd for the first bunch of sectors,
881  * afterwards only by the ISR
882  */
883 ide_startstop_t task_mulout_intr (ide_drive_t *drive)
884 {
885 	ide_hwif_t *hwif		= HWIF(drive);
886 	u8 stat				= hwif->INB(IDE_STATUS_REG);
887 	struct request *rq		= HWGROUP(drive)->rq;
888 	char *pBuf			= NULL;
889 	ide_startstop_t startstop	= ide_stopped;
890 	unsigned int msect		= drive->mult_count;
891 	unsigned int nsect;
892 	unsigned long flags;
893 
894 	/*
895 	 * (ks/hs): Handle last IRQ on multi-sector transfer,
896 	 * occurs after all data was sent in this chunk
897 	 */
898 	if (rq->current_nr_sectors == 0) {
899 		if (stat & (ERR_STAT|DRQ_STAT)) {
900 			if (!rq->bh) {
901                                 rq->current_nr_sectors += drive->mult_count;
902 				/*
903 				 * NOTE: could rewind beyond beginning :-/
904 				 */
905 			} else {
906 				printk(KERN_ERR "%s: MULTI-WRITE assume all data " \
907 					"transferred is bad status=0x%02x\n",
908 					drive->name, stat);
909 			}
910 			return DRIVER(drive)->error(drive, "task_mulout_intr", stat);
911 		}
912 		if (!rq->bh)
913 			DRIVER(drive)->end_request(drive, 1);
914 		return startstop;
915 	}
916 	/*
917 	 * DON'T be lazy and code the above and below together !!!
918 	 */
919 	if (!OK_STAT(stat,DATA_READY,BAD_R_STAT)) {
920 		if (stat & (ERR_STAT|DRQ_STAT)) {
921 			if (!rq->bh) {
922 				rq->current_nr_sectors += drive->mult_count;
923 				/*
924 				 * NOTE: could rewind beyond beginning :-/
925 				 */
926 			} else {
927 				printk(KERN_ERR "%s: MULTI-WRITE assume all data " \
928 					"transferred is bad status=0x%02x\n",
929 					drive->name, stat);
930 			}
931 			return DRIVER(drive)->error(drive, "task_mulout_intr", stat);
932 		}
933 		/* no data yet, so wait for another interrupt */
934 		if (HWGROUP(drive)->handler == NULL)
935 			ide_set_handler(drive, &task_mulout_intr, WAIT_WORSTCASE, NULL);
936 		return ide_started;
937 	}
938 
939 #ifndef ALTERNATE_STATE_DIAGRAM_MULTI_OUT
940 	if (HWGROUP(drive)->handler != NULL) {
941 		unsigned long lflags;
942 		spin_lock_irqsave(&io_request_lock, lflags);
943 		HWGROUP(drive)->handler = NULL;
944 		del_timer(&HWGROUP(drive)->timer);
945 		spin_unlock_irqrestore(&io_request_lock, lflags);
946 	}
947 #endif /* ALTERNATE_STATE_DIAGRAM_MULTI_OUT */
948 
949 	do {
950 		nsect = rq->current_nr_sectors;
951 		if (nsect > msect)
952 			nsect = msect;
953 		pBuf = task_map_rq(rq, &flags);
954 		DTF("Multiwrite: %p, nsect: %d, msect: %d, " \
955 			"rq->current_nr_sectors: %ld\n",
956 			pBuf, nsect, msect, rq->current_nr_sectors);
957 		msect -= nsect;
958 		taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
959 		task_unmap_rq(rq, pBuf, &flags);
960 		rq->current_nr_sectors -= nsect;
961 		/*
962 		 * FIXME :: We really can not legally get a new page/bh
963 		 * regardless, if this is the end of our segment.
964 		 * BH walking or segment can only be updated after we
965 		 * have a good  hwif->INB(IDE_STATUS_REG); return.
966 		 */
967 		if (!rq->current_nr_sectors) {
968 			if (!DRIVER(drive)->end_request(drive, 1))
969 				if (!rq->bh)
970 					return ide_stopped;
971 		}
972 	} while (msect);
973 	rq->errors = 0;
974 	if (HWGROUP(drive)->handler == NULL)
975 		ide_set_handler(drive, &task_mulout_intr, WAIT_WORSTCASE, NULL);
976 	return ide_started;
977 }
978 
979 EXPORT_SYMBOL(task_mulout_intr);
980 
981 /* Called internally to figure out the type of command being issued */
982 //ide_pre_handler_t * ide_pre_handler_parser (task_struct_t *taskfile, hob_struct_t *hobfile)
983 ide_pre_handler_t * ide_pre_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
984 {
985 	switch(taskfile->command) {
986 				/* IDE_DRIVE_TASK_RAW_WRITE */
987 		case CFA_WRITE_MULTI_WO_ERASE:
988 	//	case WIN_WRITE_LONG:
989 	//	case WIN_WRITE_LONG_ONCE:
990 		case WIN_MULTWRITE:
991 		case WIN_MULTWRITE_EXT:
992 			return &pre_task_mulout_intr;
993 
994 				/* IDE_DRIVE_TASK_OUT */
995 		case WIN_WRITE:
996 	//	case WIN_WRITE_ONCE:
997 		case WIN_WRITE_EXT:
998 		case WIN_WRITE_VERIFY:
999 		case WIN_WRITE_BUFFER:
1000 		case CFA_WRITE_SECT_WO_ERASE:
1001 		case WIN_DOWNLOAD_MICROCODE:
1002 			return &pre_task_out_intr;
1003 				/* IDE_DRIVE_TASK_OUT */
1004 		case WIN_SMART:
1005 			if (taskfile->feature == SMART_WRITE_LOG_SECTOR)
1006 				return &pre_task_out_intr;
1007 		case WIN_WRITEDMA:
1008 	//	case WIN_WRITEDMA_ONCE:
1009 		case WIN_WRITEDMA_QUEUED:
1010 		case WIN_WRITEDMA_EXT:
1011 		case WIN_WRITEDMA_QUEUED_EXT:
1012 				/* IDE_DRIVE_TASK_OUT */
1013 		default:
1014 			break;
1015 	}
1016 	return(NULL);
1017 }
1018 
1019 EXPORT_SYMBOL(ide_pre_handler_parser);
1020 
1021 /* Called internally to figure out the type of command being issued */
1022 //ide_handler_t * ide_handler_parser (task_struct_t *taskfile, hob_struct_t *hobfile)
1023 ide_handler_t * ide_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
1024 {
1025 	switch(taskfile->command) {
1026 		case WIN_IDENTIFY:
1027 		case WIN_PIDENTIFY:
1028 		case CFA_TRANSLATE_SECTOR:
1029 		case WIN_READ_BUFFER:
1030 		case WIN_READ:
1031 	//	case WIN_READ_ONCE:
1032 		case WIN_READ_EXT:
1033 			return &task_in_intr;
1034 		case WIN_SECURITY_DISABLE:
1035 		case WIN_SECURITY_ERASE_UNIT:
1036 		case WIN_SECURITY_SET_PASS:
1037 		case WIN_SECURITY_UNLOCK:
1038 		case WIN_DOWNLOAD_MICROCODE:
1039 		case CFA_WRITE_SECT_WO_ERASE:
1040 		case WIN_WRITE_BUFFER:
1041 		case WIN_WRITE_VERIFY:
1042 		case WIN_WRITE:
1043 	//	case WIN_WRITE_ONCE:
1044 		case WIN_WRITE_EXT:
1045 			return &task_out_intr;
1046 	//	case WIN_READ_LONG:
1047 	//	case WIN_READ_LONG_ONCE:
1048 		case WIN_MULTREAD:
1049 		case WIN_MULTREAD_EXT:
1050 			return &task_mulin_intr;
1051 	//	case WIN_WRITE_LONG:
1052 	//	case WIN_WRITE_LONG_ONCE:
1053 		case CFA_WRITE_MULTI_WO_ERASE:
1054 		case WIN_MULTWRITE:
1055 		case WIN_MULTWRITE_EXT:
1056 			return &task_mulout_intr;
1057 		case WIN_SMART:
1058 			switch(taskfile->feature) {
1059 				case SMART_READ_VALUES:
1060 				case SMART_READ_THRESHOLDS:
1061 				case SMART_READ_LOG_SECTOR:
1062 					return &task_in_intr;
1063 				case SMART_WRITE_LOG_SECTOR:
1064 					return &task_out_intr;
1065 				default:
1066 					return &task_no_data_intr;
1067 			}
1068 		case CFA_REQ_EXT_ERROR_CODE:
1069 		case CFA_ERASE_SECTORS:
1070 		case WIN_VERIFY:
1071 	//	case WIN_VERIFY_ONCE:
1072 		case WIN_VERIFY_EXT:
1073 		case WIN_SEEK:
1074 			return &task_no_data_intr;
1075 		case WIN_SPECIFY:
1076 			return &set_geometry_intr;
1077 		case WIN_RECAL:
1078 	//	case WIN_RESTORE:
1079 			return &recal_intr;
1080 		case WIN_NOP:
1081 		case WIN_DIAGNOSE:
1082 		case WIN_FLUSH_CACHE:
1083 		case WIN_FLUSH_CACHE_EXT:
1084 		case WIN_STANDBYNOW1:
1085 		case WIN_STANDBYNOW2:
1086 		case WIN_SLEEPNOW1:
1087 		case WIN_SLEEPNOW2:
1088 		case WIN_SETIDLE1:
1089 		case WIN_CHECKPOWERMODE1:
1090 		case WIN_CHECKPOWERMODE2:
1091 		case WIN_GETMEDIASTATUS:
1092 		case WIN_MEDIAEJECT:
1093 			return &task_no_data_intr;
1094 		case WIN_SETMULT:
1095 			return &set_multmode_intr;
1096 		case WIN_READ_NATIVE_MAX:
1097 		case WIN_SET_MAX:
1098 		case WIN_READ_NATIVE_MAX_EXT:
1099 		case WIN_SET_MAX_EXT:
1100 		case WIN_SECURITY_ERASE_PREPARE:
1101 		case WIN_SECURITY_FREEZE_LOCK:
1102 		case WIN_DOORLOCK:
1103 		case WIN_DOORUNLOCK:
1104 		case WIN_SETFEATURES:
1105 			return &task_no_data_intr;
1106 		case DISABLE_SEAGATE:
1107 		case EXABYTE_ENABLE_NEST:
1108 			return &task_no_data_intr;
1109 #ifdef CONFIG_BLK_DEV_IDEDMA
1110 		case WIN_READDMA:
1111 	//	case WIN_READDMA_ONCE:
1112 		case WIN_IDENTIFY_DMA:
1113 		case WIN_READDMA_QUEUED:
1114 		case WIN_READDMA_EXT:
1115 		case WIN_READDMA_QUEUED_EXT:
1116 		case WIN_WRITEDMA:
1117 	//	case WIN_WRITEDMA_ONCE:
1118 		case WIN_WRITEDMA_QUEUED:
1119 		case WIN_WRITEDMA_EXT:
1120 		case WIN_WRITEDMA_QUEUED_EXT:
1121 #endif
1122 		case WIN_FORMAT:
1123 		case WIN_INIT:
1124 		case WIN_DEVICE_RESET:
1125 		case WIN_QUEUED_SERVICE:
1126 		case WIN_PACKETCMD:
1127 		default:
1128 			return(NULL);
1129 	}
1130 }
1131 
1132 EXPORT_SYMBOL(ide_handler_parser);
1133 
1134 ide_post_handler_t * ide_post_handler_parser (struct hd_drive_task_hdr *taskfile, struct hd_drive_hob_hdr *hobfile)
1135 {
1136 	switch(taskfile->command) {
1137 		case WIN_SPECIFY:	/* set_geometry_intr */
1138 		case WIN_RESTORE:	/* recal_intr */
1139 		case WIN_SETMULT:	/* set_multmode_intr */
1140 		default:
1141 			return(NULL);
1142 	}
1143 }
1144 
1145 EXPORT_SYMBOL(ide_post_handler_parser);
1146 
1147 /* Called by ioctl to figure out the type of command being issued */
1148 int ide_cmd_type_parser (ide_task_t *args)
1149 {
1150 
1151 	task_struct_t *taskfile = (task_struct_t *) args->tfRegister;
1152 	hob_struct_t *hobfile   = (hob_struct_t *) args->hobRegister;
1153 
1154 	args->prehandler	= ide_pre_handler_parser(taskfile, hobfile);
1155 	args->handler		= ide_handler_parser(taskfile, hobfile);
1156 	args->posthandler	= ide_post_handler_parser(taskfile, hobfile);
1157 
1158 	switch(args->tfRegister[IDE_COMMAND_OFFSET]) {
1159 		case WIN_IDENTIFY:
1160 		case WIN_PIDENTIFY:
1161 			return IDE_DRIVE_TASK_IN;
1162 		case CFA_TRANSLATE_SECTOR:
1163 		case WIN_READ:
1164 	//	case WIN_READ_ONCE:
1165 		case WIN_READ_EXT:
1166 		case WIN_READ_BUFFER:
1167 			return IDE_DRIVE_TASK_IN;
1168 		case WIN_WRITE:
1169 	//	case WIN_WRITE_ONCE:
1170 		case WIN_WRITE_EXT:
1171 		case WIN_WRITE_VERIFY:
1172 		case WIN_WRITE_BUFFER:
1173 		case CFA_WRITE_SECT_WO_ERASE:
1174 		case WIN_DOWNLOAD_MICROCODE:
1175 			return IDE_DRIVE_TASK_RAW_WRITE;
1176 	//	case WIN_READ_LONG:
1177 	//	case WIN_READ_LONG_ONCE:
1178 		case WIN_MULTREAD:
1179 		case WIN_MULTREAD_EXT:
1180 			return IDE_DRIVE_TASK_IN;
1181 	//	case WIN_WRITE_LONG:
1182 	//	case WIN_WRITE_LONG_ONCE:
1183 		case CFA_WRITE_MULTI_WO_ERASE:
1184 		case WIN_MULTWRITE:
1185 		case WIN_MULTWRITE_EXT:
1186 			return IDE_DRIVE_TASK_RAW_WRITE;
1187 		case WIN_SECURITY_DISABLE:
1188 		case WIN_SECURITY_ERASE_UNIT:
1189 		case WIN_SECURITY_SET_PASS:
1190 		case WIN_SECURITY_UNLOCK:
1191 			return IDE_DRIVE_TASK_OUT;
1192 		case WIN_SMART:
1193 			args->tfRegister[IDE_LCYL_OFFSET] = SMART_LCYL_PASS;
1194 			args->tfRegister[IDE_HCYL_OFFSET] = SMART_HCYL_PASS;
1195 			switch(args->tfRegister[IDE_FEATURE_OFFSET]) {
1196 				case SMART_READ_VALUES:
1197 				case SMART_READ_THRESHOLDS:
1198 				case SMART_READ_LOG_SECTOR:
1199 					return IDE_DRIVE_TASK_IN;
1200 				case SMART_WRITE_LOG_SECTOR:
1201 					return IDE_DRIVE_TASK_OUT;
1202 				default:
1203 					return IDE_DRIVE_TASK_NO_DATA;
1204 			}
1205 #ifdef CONFIG_BLK_DEV_IDEDMA
1206 		case WIN_READDMA:
1207 	//	case WIN_READDMA_ONCE:
1208 		case WIN_IDENTIFY_DMA:
1209 		case WIN_READDMA_QUEUED:
1210 		case WIN_READDMA_EXT:
1211 		case WIN_READDMA_QUEUED_EXT:
1212 			return IDE_DRIVE_TASK_IN;
1213 		case WIN_WRITEDMA:
1214 	//	case WIN_WRITEDMA_ONCE:
1215 		case WIN_WRITEDMA_QUEUED:
1216 		case WIN_WRITEDMA_EXT:
1217 		case WIN_WRITEDMA_QUEUED_EXT:
1218 			return IDE_DRIVE_TASK_RAW_WRITE;
1219 #endif
1220 		case WIN_SETFEATURES:
1221 			switch(args->tfRegister[IDE_FEATURE_OFFSET]) {
1222 				case SETFEATURES_EN_8BIT:
1223 				case SETFEATURES_EN_WCACHE:
1224 					return IDE_DRIVE_TASK_NO_DATA;
1225 				case SETFEATURES_XFER:
1226 					return IDE_DRIVE_TASK_SET_XFER;
1227 				case SETFEATURES_DIS_DEFECT:
1228 				case SETFEATURES_EN_APM:
1229 				case SETFEATURES_DIS_MSN:
1230 				case SETFEATURES_DIS_RETRY:
1231 				case SETFEATURES_EN_AAM:
1232 				case SETFEATURES_RW_LONG:
1233 				case SETFEATURES_SET_CACHE:
1234 				case SETFEATURES_DIS_RLA:
1235 				case SETFEATURES_EN_RI:
1236 				case SETFEATURES_EN_SI:
1237 				case SETFEATURES_DIS_RPOD:
1238 				case SETFEATURES_DIS_WCACHE:
1239 				case SETFEATURES_EN_DEFECT:
1240 				case SETFEATURES_DIS_APM:
1241 				case SETFEATURES_EN_ECC:
1242 				case SETFEATURES_EN_MSN:
1243 				case SETFEATURES_EN_RETRY:
1244 				case SETFEATURES_EN_RLA:
1245 				case SETFEATURES_PREFETCH:
1246 				case SETFEATURES_4B_RW_LONG:
1247 				case SETFEATURES_DIS_AAM:
1248 				case SETFEATURES_EN_RPOD:
1249 				case SETFEATURES_DIS_RI:
1250 				case SETFEATURES_DIS_SI:
1251 				default:
1252 					return IDE_DRIVE_TASK_NO_DATA;
1253 			}
1254 		case WIN_NOP:
1255 		case CFA_REQ_EXT_ERROR_CODE:
1256 		case CFA_ERASE_SECTORS:
1257 		case WIN_VERIFY:
1258 	//	case WIN_VERIFY_ONCE:
1259 		case WIN_VERIFY_EXT:
1260 		case WIN_SEEK:
1261 		case WIN_SPECIFY:
1262 		case WIN_RESTORE:
1263 		case WIN_DIAGNOSE:
1264 		case WIN_FLUSH_CACHE:
1265 		case WIN_FLUSH_CACHE_EXT:
1266 		case WIN_STANDBYNOW1:
1267 		case WIN_STANDBYNOW2:
1268 		case WIN_SLEEPNOW1:
1269 		case WIN_SLEEPNOW2:
1270 		case WIN_SETIDLE1:
1271 		case DISABLE_SEAGATE:
1272 		case WIN_CHECKPOWERMODE1:
1273 		case WIN_CHECKPOWERMODE2:
1274 		case WIN_GETMEDIASTATUS:
1275 		case WIN_MEDIAEJECT:
1276 		case WIN_SETMULT:
1277 		case WIN_READ_NATIVE_MAX:
1278 		case WIN_SET_MAX:
1279 		case WIN_READ_NATIVE_MAX_EXT:
1280 		case WIN_SET_MAX_EXT:
1281 		case WIN_SECURITY_ERASE_PREPARE:
1282 		case WIN_SECURITY_FREEZE_LOCK:
1283 		case EXABYTE_ENABLE_NEST:
1284 		case WIN_DOORLOCK:
1285 		case WIN_DOORUNLOCK:
1286 			return IDE_DRIVE_TASK_NO_DATA;
1287 		case WIN_FORMAT:
1288 		case WIN_INIT:
1289 		case WIN_DEVICE_RESET:
1290 		case WIN_QUEUED_SERVICE:
1291 		case WIN_PACKETCMD:
1292 		default:
1293 			return IDE_DRIVE_TASK_INVALID;
1294 	}
1295 }
1296 
1297 EXPORT_SYMBOL(ide_cmd_type_parser);
1298 
1299 /*
1300  * This function is intended to be used prior to invoking ide_do_drive_cmd().
1301  */
1302 void ide_init_drive_taskfile (struct request *rq)
1303 {
1304 	memset(rq, 0, sizeof(*rq));
1305 	rq->cmd = IDE_DRIVE_TASK_NO_DATA;
1306 }
1307 
1308 EXPORT_SYMBOL(ide_init_drive_taskfile);
1309 
1310 #if 1
1311 
1312 int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
1313 {
1314 	struct request rq;
1315 
1316 	ide_init_drive_taskfile(&rq);
1317 	rq.cmd = IDE_DRIVE_TASKFILE;
1318 	rq.buffer = buf;
1319 
1320 	/*
1321 	 * (ks) We currently transfer only whole sectors.
1322 	 * This is sufficient for now, but it would be great
1323 	 * if we could find a solution to transfer any size,
1324 	 * to support special commands like READ LONG.
1325 	 */
1326 	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
1327 		if (data_size == 0)
1328 			rq.current_nr_sectors = rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET_HOB] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
1329 		/*	rq.hard_cur_sectors	*/
1330 		else
1331 			rq.current_nr_sectors = rq.nr_sectors = data_size / SECTOR_SIZE;
1332 		/*	rq.hard_cur_sectors	*/
1333 	}
1334 
1335 	if (args->tf_out_flags.all == 0) {
1336 		/*
1337 		 * Clean up kernel settings for driver sanity, regardless,
1338 		 * except for discrete diag services.
1339 		 */
1340 		args->posthandler = ide_post_handler_parser(
1341 				(struct hd_drive_task_hdr *) args->tfRegister,
1342 				(struct hd_drive_hob_hdr *) args->hobRegister);
1343 
1344 	}
1345 	rq.special = args;
1346 	return ide_do_drive_cmd(drive, &rq, ide_wait);
1347 }
1348 
1349 #else
1350 
1351 int ide_diag_taskfile (ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
1352 {
1353 	struct request *rq;
1354 	unsigned long flags;
1355 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
1356 	unsigned int major = HWIF(drive)->major;
1357 	struct list_head *queue_head = &drive->queue.queue_head;
1358 	DECLARE_COMPLETION(wait);
1359 
1360 	if (HWIF(drive)->chipset == ide_pdc4030 && buf != NULL)
1361 		return -ENOSYS; /* special drive cmds not supported */
1362 
1363 	memset(rq, 0, sizeof(*rq));
1364 	rq->cmd = IDE_DRIVE_TASKFILE;
1365 	rq->buffer = buf;
1366 
1367 	/*
1368 	 * (ks) We currently transfer only whole sectors.
1369 	 * This is sufficient for now, but it would be great
1370 	 * if we could find a solution to transfer any size,
1371 	 * to support special commands like READ LONG.
1372 	 */
1373 	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
1374 		if (data_size == 0) {
1375 			ata_nsector_t nsector;
1376 			nsector.b.low = args->hobRegister[IDE_NSECTOR_OFFSET_HOB];
1377 			nsector.b.high = args->tfRegister[IDE_NSECTOR_OFFSET];
1378 			rq.nr_sectors = nsector.all;
1379 		} else {
1380 			rq.nr_sectors = data_size / SECTOR_SIZE;
1381 		}
1382 		rq.current_nr_sectors = rq.nr_sectors;
1383 	//	rq.hard_cur_sectors = rq.nr_sectors;
1384 	}
1385 
1386 	if (args->tf_out_flags.all == 0) {
1387 		/*
1388 		 * Clean up kernel settings for driver sanity, regardless,
1389 		 * except for discrete diag services.
1390 		 */
1391 		args->posthandler = ide_post_handler_parser(
1392 				(struct hd_drive_task_hdr *) args->tfRegister,
1393 				(struct hd_drive_hob_hdr *) args->hobRegister);
1394 	}
1395 	rq->special = args;
1396 	rq->errors = 0;
1397 	rq->rq_status = RQ_ACTIVE;
1398 	rq->rq_dev = MKDEV(major,(drive->select.b.unit)<<PARTN_BITS);
1399 	rq->waiting = &wait;
1400 
1401 	spin_lock_irqsave(&io_request_lock, flags);
1402 	queue_head = queue_head->prev;
1403 	list_add(&rq->queue, queue_head);
1404 	ide_do_request(hwgroup, 0);
1405 	spin_unlock_irqrestore(&io_request_lock, flags);
1406 
1407 	wait_for_completion(&wait);	/* wait for it to be serviced */
1408 	return rq->errors ? -EIO : 0;	/* return -EIO if errors */
1409 }
1410 
1411 #endif
1412 
1413 EXPORT_SYMBOL(ide_diag_taskfile);
1414 
1415 int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
1416 {
1417 	return ide_diag_taskfile(drive, args, 0, buf);
1418 }
1419 
1420 EXPORT_SYMBOL(ide_raw_taskfile);
1421 
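/*
 * Illustrative sketch (disabled; example_flush_cache is not part of this
 * driver): a no-data command pushed through the same path, mirroring
 * taskfile_lib_get_identify() above.  ide_cmd_type_parser() fills in the
 * command_type and selects task_no_data_intr() as the completion handler.
 */
#if 0
static int example_flush_cache (ide_drive_t *drive)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
	args.command_type = ide_cmd_type_parser(&args);
	return ide_raw_taskfile(drive, &args, NULL);
}
#endif
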
1422 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
1423 char * ide_ioctl_verbose (unsigned int cmd)
1424 {
1425 	return("unknown");
1426 }
1427 
1428 char * ide_task_cmd_verbose (u8 task)
1429 {
1430 	return("unknown");
1431 }
1432 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
1433 
1434 #define MAX_DMA		(256*SECTOR_WORDS)
1435 
1436 ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *);
1437 ide_startstop_t flagged_task_no_data_intr(ide_drive_t *);
1438 ide_startstop_t flagged_task_in_intr(ide_drive_t *);
1439 ide_startstop_t flagged_task_mulin_intr(ide_drive_t *);
1440 ide_startstop_t flagged_pre_task_out_intr(ide_drive_t *, struct request *);
1441 ide_startstop_t flagged_task_out_intr(ide_drive_t *);
1442 ide_startstop_t flagged_pre_task_mulout_intr(ide_drive_t *, struct request *);
1443 ide_startstop_t flagged_task_mulout_intr(ide_drive_t *);
1444 
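/*
 * ide_taskfile_ioctl() expects the user buffer at 'arg' to be laid out as
 * an ide_task_request_t header, immediately followed by out_size bytes of
 * data to send to the drive (if any) and then in_size bytes of space for
 * data read back (if any).  The same layout is copied back to user space
 * on completion.
 */
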
1445 int ide_taskfile_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1446 {
1447 	ide_task_request_t	*req_task;
1448 	ide_task_t		args;
1449 	u8 *outbuf		= NULL;
1450 	u8 *inbuf		= NULL;
1451 	task_ioreg_t *argsptr	= args.tfRegister;
1452 	task_ioreg_t *hobsptr	= args.hobRegister;
1453 	int err			= 0;
1454 	int tasksize		= sizeof(struct ide_task_request_s);
1455 	int taskin		= 0;
1456 	int taskout		= 0;
1457 	u8 io_32bit		= drive->io_32bit;
1458 
1459 //	printk("IDE Taskfile ...\n");
1460 
1461 	req_task = kmalloc(tasksize, GFP_KERNEL);
1462 	if (req_task == NULL) return -ENOMEM;
1463 	memset(req_task, 0, tasksize);
1464 	if (copy_from_user(req_task, (void *) arg, tasksize)) {
1465 		kfree(req_task);
1466 		return -EFAULT;
1467 	}
1468 
1469 	taskout = (int) req_task->out_size;
1470 	taskin  = (int) req_task->in_size;
1471 
1472 	if (taskout) {
1473 		int outtotal = tasksize;
1474 		outbuf = kmalloc(taskout, GFP_KERNEL);
1475 		if (outbuf == NULL) {
1476 			err = -ENOMEM;
1477 			goto abort;
1478 		}
1479 		memset(outbuf, 0, taskout);
1480 		if (copy_from_user(outbuf, (void *)arg + outtotal, taskout)) {
1481 			err = -EFAULT;
1482 			goto abort;
1483 		}
1484 	}
1485 
1486 	if (taskin) {
1487 		int intotal = tasksize + taskout;
1488 		inbuf = kmalloc(taskin, GFP_KERNEL);
1489 		if (inbuf == NULL) {
1490 			err = -ENOMEM;
1491 			goto abort;
1492 		}
1493 		memset(inbuf, 0, taskin);
1494 		if (copy_from_user(inbuf, (void *)arg + intotal , taskin)) {
1495 			err = -EFAULT;
1496 			goto abort;
1497 		}
1498 	}
1499 
1500 	memset(&args, 0, sizeof(ide_task_t));
1501 	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
1502 	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);
1503 
1504 	args.tf_in_flags  = req_task->in_flags;
1505 	args.tf_out_flags = req_task->out_flags;
1506 	args.data_phase   = req_task->data_phase;
1507 	args.command_type = req_task->req_cmd;
1508 
1509 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
1510 	DTF("%s: ide_ioctl_cmd %s:  ide_task_cmd %s\n",
1511 		drive->name,
1512 		ide_ioctl_verbose(cmd),
1513 		ide_task_cmd_verbose(args.tfRegister[IDE_COMMAND_OFFSET]));
1514 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
1515 
1516 	drive->io_32bit = 0;
1517 	switch(req_task->data_phase) {
1518 		case TASKFILE_OUT_DMAQ:
1519 		case TASKFILE_OUT_DMA:
1520 			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
1521 			break;
1522 		case TASKFILE_IN_DMAQ:
1523 		case TASKFILE_IN_DMA:
1524 			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
1525 			break;
1526 		case TASKFILE_IN_OUT:
1527 #if 0
1528 			args.prehandler = &pre_task_out_intr;
1529 			args.handler = &task_out_intr;
1530 			args.posthandler = NULL;
1531 			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
1532 			args.prehandler = NULL;
1533 			args.handler = &task_in_intr;
1534 			args.posthandler = NULL;
1535 			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
1536 			break;
1537 #else
1538 			err = -EFAULT;
1539 			goto abort;
1540 #endif
1541 		case TASKFILE_MULTI_OUT:
1542 			if (!drive->mult_count) {
1543 				/* (hs): give up if multcount is not set */
1544 				printk(KERN_ERR "%s: %s Multimode Write " \
1545 					"multcount is not set\n",
1546 					drive->name, __FUNCTION__);
1547 				err = -EPERM;
1548 				goto abort;
1549 			}
1550 			if (args.tf_out_flags.all != 0) {
1551 				args.prehandler = &flagged_pre_task_mulout_intr;
1552 				args.handler = &flagged_task_mulout_intr;
1553 			} else {
1554 				args.prehandler = &pre_task_mulout_intr;
1555 				args.handler = &task_mulout_intr;
1556 			}
1557 			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
1558 			break;
1559 		case TASKFILE_OUT:
1560 			if (args.tf_out_flags.all != 0) {
1561 				args.prehandler = &flagged_pre_task_out_intr;
1562 				args.handler    = &flagged_task_out_intr;
1563 			} else {
1564 				args.prehandler = &pre_task_out_intr;
1565 				args.handler = &task_out_intr;
1566 			}
1567 			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
1568 			break;
1569 		case TASKFILE_MULTI_IN:
1570 			if (!drive->mult_count) {
1571 				/* (hs): give up if multcount is not set */
1572 				printk(KERN_ERR "%s: %s Multimode Read failure " \
1573 					"multcount is not set\n",
1574 					drive->name, __FUNCTION__);
1575 				err = -EPERM;
1576 				goto abort;
1577 			}
1578 			if (args.tf_out_flags.all != 0) {
1579 				args.handler = &flagged_task_mulin_intr;
1580 			} else {
1581 				args.handler = &task_mulin_intr;
1582 			}
1583 			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
1584 			break;
1585 		case TASKFILE_IN:
1586 			if (args.tf_out_flags.all != 0) {
1587 				args.handler = &flagged_task_in_intr;
1588 			} else {
1589 				args.handler = &task_in_intr;
1590 			}
1591 			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
1592 			break;
1593 		case TASKFILE_NO_DATA:
1594 			if (args.tf_out_flags.all != 0) {
1595 				args.handler = &flagged_task_no_data_intr;
1596 			} else {
1597 				args.handler = &task_no_data_intr;
1598 			}
1599 			err = ide_diag_taskfile(drive, &args, 0, NULL);
1600 			break;
1601 		default:
1602 			err = -EFAULT;
1603 			goto abort;
1604 	}
1605 
1606 	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
1607 	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
1608 	req_task->in_flags  = args.tf_in_flags;
1609 	req_task->out_flags = args.tf_out_flags;
1610 
1611 	if (copy_to_user((void *)arg, req_task, tasksize)) {
1612 		err = -EFAULT;
1613 		goto abort;
1614 	}
1615 	if (taskout) {
1616 		int outtotal = tasksize;
1617 		if (copy_to_user((void *)arg+outtotal, outbuf, taskout)) {
1618 			err = -EFAULT;
1619 			goto abort;
1620 		}
1621 	}
1622 	if (taskin) {
1623 		int intotal = tasksize + taskout;
1624 		if (copy_to_user((void *)arg+intotal, inbuf, taskin)) {
1625 			err = -EFAULT;
1626 			goto abort;
1627 		}
1628 	}
1629 abort:
1630 	kfree(req_task);
1631 	if (outbuf != NULL)
1632 		kfree(outbuf);
1633 	if (inbuf != NULL)
1634 		kfree(inbuf);
1635 
1636 //	printk("IDE Taskfile ioctl ended. rc = %i\n", err);
1637 
1638 	drive->io_32bit = io_32bit;
1639 
1640 	return err;
1641 }
1642 
1643 EXPORT_SYMBOL(ide_taskfile_ioctl);
1644 
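/*
 * ide_wait_cmd() buffer convention: the first four bytes of 'buf' carry
 * the command, nsect, feature and sector count values, and the following
 * 'sectors' * 512 bytes are reserved for any data the command returns.
 */
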
1645 int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
1646 {
1647 	struct request rq;
1648 	u8 buffer[4];
1649 
1650 	if (!buf)
1651 		buf = buffer;
1652 	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
1653 	ide_init_drive_cmd(&rq);
1654 	rq.buffer = buf;
1655 	*buf++ = cmd;
1656 	*buf++ = nsect;
1657 	*buf++ = feature;
1658 	*buf++ = sectors;
1659 	return ide_do_drive_cmd(drive, &rq, ide_wait);
1660 }
1661 
1662 EXPORT_SYMBOL(ide_wait_cmd);
1663 
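/*
 * Illustrative sketch (disabled; example_check_power_mode is not part of
 * this driver): a simple no-data drive command issued through
 * ide_wait_cmd().  With a zero sector count only the four header bytes are
 * used, so the internal scratch buffer is enough.
 */
#if 0
static int example_check_power_mode (ide_drive_t *drive)
{
	return ide_wait_cmd(drive, WIN_CHECKPOWERMODE1, 0, 0, 0, NULL);
}
#endif
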
1664 /*
1665  * FIXME : this needs to map into a taskfile. <andre@linux-ide.org>
1666  */
1667 int ide_cmd_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1668 {
1669 #if 1
1670 	int err = -EIO;
1671 	u8 args[4], *argbuf = args;
1672 	u8 xfer_rate = 0;
1673 	int argsize = 4;
1674 	ide_task_t tfargs;
1675 
1676 	if (NULL == (void *) arg) {
1677 		struct request rq;
1678 		ide_init_drive_cmd(&rq);
1679 		return ide_do_drive_cmd(drive, &rq, ide_wait);
1680 	}
1681 
1682 	if (copy_from_user(args, (void *)arg, 4))
1683 		return -EFAULT;
1684 
1685 	memset(&tfargs, 0, sizeof(ide_task_t));
1686 	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
1687 	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
1688 	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
1689 	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
1690 	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
1691 	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
1692 	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
1693 
1694 	if (args[3]) {
1695 		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
1696 		argbuf = kmalloc(argsize, GFP_KERNEL);
1697 		if (argbuf == NULL)
1698 			return -ENOMEM;
1699 		memcpy(argbuf, args, 4);
1700 	}
1701 	if (set_transfer(drive, &tfargs)) {
1702 		xfer_rate = args[1];
1703 		if (ide_ata66_check(drive, &tfargs))
1704 			goto abort;
1705 	}
1706 
1707 	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
1708 
1709 	if (!err && xfer_rate) {
1710 		/* active-retuning-calls future */
1711 		ide_set_xfer_rate(drive, xfer_rate);
1712 		ide_driveid_update(drive);
1713 	}
1714 abort:
1715 	if (copy_to_user((void *)arg, argbuf, argsize))
1716 		err = -EFAULT;
1717 	if (argsize > 4)
1718 		kfree(argbuf);
1719 	return err;
1720 
1721 #else
1722 
1723 	int err = 0;
1724 	u8 args[4], *argbuf = args;
1725 	u8 xfer_rate = 0;
1726 	int argsize = 0;
1727 	ide_task_t tfargs;
1728 
1729 	if (NULL == (void *) arg) {
1730 		struct request rq;
1731 		ide_init_drive_cmd(&rq);
1732 		return ide_do_drive_cmd(drive, &rq, ide_wait);
1733 	}
1734 
1735 	if (copy_from_user(args, (void *)arg, 4))
1736 		return -EFAULT;
1737 
1738 	memset(&tfargs, 0, sizeof(ide_task_t));
1739 	tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
1740 	tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
1741 	tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
1742 	tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
1743 	tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
1744 	tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
1745 	tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
1746 
1747 	if (args[3]) {
1748 		argsize = (SECTOR_WORDS * 4 * args[3]);
1749 		argbuf = kmalloc(argsize, GFP_KERNEL);
1750 		if (argbuf == NULL)
1751 			return -ENOMEM;
1752 	}
1753 
1754 	if (set_transfer(drive, &tfargs)) {
1755 		xfer_rate = args[1];
1756 		if (ide_ata66_check(drive, &tfargs))
1757 			goto abort;
1758 	}
1759 
1760 	tfargs.command_type = ide_cmd_type_parser(&tfargs);
1761 	err = ide_raw_taskfile(drive, &tfargs, argbuf);
1762 
1763 	if (!err && xfer_rate) {
1764 		/* active-retuning-calls future */
1765 		ide_set_xfer_rate(drive, xfer_rate);
1766 		ide_driveid_update(drive);
1767 	}
1768 abort:
1769 	args[0] = tfargs.tfRegister[IDE_COMMAND_OFFSET];
1770 	args[1] = tfargs.tfRegister[IDE_FEATURE_OFFSET];
1771 	args[2] = tfargs.tfRegister[IDE_NSECTOR_OFFSET];
1772 	args[3] = 0;
1773 
1774 	if (copy_to_user((void *)arg, argbuf, 4))
1775 		err = -EFAULT;
1776 	if (argbuf != NULL) {
1777 		if (copy_to_user((void *)arg, argbuf + 4, argsize))
1778 			err = -EFAULT;
1779 		kfree(argbuf);
1780 	}
1781 	return err;
1782 
1783 #endif
1784 }
1785 
1786 EXPORT_SYMBOL(ide_cmd_ioctl);
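/*
 * Illustrative only: a user-space sketch (never compiled into the driver)
 * of the HDIO_DRIVE_CMD layout handled above -- args[0] = command,
 * args[1] = sector register, args[2] = feature, args[3] = nsect, followed
 * by args[3] * 512 bytes of returned data.  The IDENTIFY DEVICE opcode and
 * the open() path are assumptions taken from <linux/hdreg.h>.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int identify_drive(const char *dev)		/* hypothetical helper */
{
	unsigned char args[4 + 512];		/* header + one data sector */
	int fd = open(dev, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(args, 0, sizeof(args));
	args[0] = 0xec;		/* WIN_IDENTIFY              */
	args[3] = 1;		/* one sector of PIO-in data */
	if (ioctl(fd, HDIO_DRIVE_CMD, args) == 0)
		printf("identify data returned after the 4 header bytes\n");
	close(fd);
	return 0;
}
#endif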
1787 
1788 int ide_wait_cmd_task (ide_drive_t *drive, u8 *buf)
1789 {
1790 	struct request rq;
1791 
1792 	ide_init_drive_cmd(&rq);
1793 	rq.cmd = IDE_DRIVE_TASK;
1794 	rq.buffer = buf;
1795 	return ide_do_drive_cmd(drive, &rq, ide_wait);
1796 }
1797 
1798 EXPORT_SYMBOL(ide_wait_cmd_task);
1799 
1800 /*
1801  * FIXME: this needs to map into a taskfile. <andre@linux-ide.org>
1802  */
1803 int ide_task_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1804 {
1805 	int err = 0;
1806 	u8 args[7], *argbuf = args;
1807 	int argsize = 7;
1808 
1809 	if (copy_from_user(args, (void *)arg, 7))
1810 		return -EFAULT;
1811 	err = ide_wait_cmd_task(drive, argbuf);
1812 	if (copy_to_user((void *)arg, argbuf, argsize))
1813 		err = -EFAULT;
1814 	return err;
1815 }
1816 
1817 EXPORT_SYMBOL(ide_task_ioctl);
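/*
 * Illustrative only: a user-space sketch of the 7-byte HDIO_DRIVE_TASK
 * buffer forwarded above.  The byte order assumed here -- {command,
 * feature, nsector, sector, lcyl, hcyl, select} -- comes from how the IDE
 * core traditionally consumes IDE_DRIVE_TASK requests, not from anything
 * visible in this file, so treat it as an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int smart_enable(const char *dev)		/* hypothetical helper */
{
	unsigned char args[7];
	int fd = open(dev, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	memset(args, 0, sizeof(args));
	args[0] = 0xb0;		/* command: WIN_SMART               */
	args[1] = 0xd8;		/* feature: SMART ENABLE OPERATIONS */
	args[4] = 0x4f;		/* lcyl:    SMART signature         */
	args[5] = 0xc2;		/* hcyl:    SMART signature         */
	err = ioctl(fd, HDIO_DRIVE_TASK, args);
	close(fd);
	return err;
}
#endif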
1818 
1819 /*
1820  * NOTICE: These are additions from IBM that provide a discrete interface
1821  * for selective taskfile register access operations.  Nice JOB Klaus!!!
1822  * Glad to be able to work and co-develop this with you and IBM.
1823  */
1824 ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
1825 {
1826 	ide_hwif_t *hwif	= HWIF(drive);
1827 	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
1828 	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
1829 #if DEBUG_TASKFILE
1830 	u8 status;
1831 #endif
1832 
1833 
1834 #ifdef CONFIG_IDE_TASK_IOCTL_DEBUG
1835 	debug_taskfile(drive, task);
1836 #endif /* CONFIG_IDE_TASK_IOCTL_DEBUG */
1837 
1838 	/*
1839 	 * (ks) Check taskfile in/out flags.
1840 	 * If set, then execute as it is defined.
1841 	 * If not set, then define default settings.
1842 	 * The default values are:
1843 	 *	write and read all taskfile registers (except data)
1844 	 *	write and read the hob registers (sector,nsector,lcyl,hcyl)
1845 	 */
1846 	if (task->tf_out_flags.all == 0) {
1847 		task->tf_out_flags.all = IDE_TASKFILE_STD_OUT_FLAGS;
1848 		if (drive->addressing == 1)
1849 			task->tf_out_flags.all |= (IDE_HOB_STD_OUT_FLAGS << 8);
1850 	}
1851 
1852 	if (task->tf_in_flags.all == 0) {
1853 		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
1854 		if (drive->addressing == 1)
1855 			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS  << 8);
1856 	}
1857 
1858 	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise specified */
1859 	if (IDE_CONTROL_REG)
1860 		/* clear nIEN */
1861 		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
1862 	SELECT_MASK(drive, 0);
1863 
1864 #if DEBUG_TASKFILE
1865 	status = hwif->INB(IDE_STATUS_REG);
1866 	if (status & 0x80) {
1867 		printk("flagged_taskfile -> Bad status. Status = %02x. wait 100 usec ...\n", status);
1868 		udelay(100);
1869 		status = hwif->INB(IDE_STATUS_REG);
1870 		printk("flagged_taskfile -> Status = %02x\n", status);
1871 	}
1872 #endif
1873 
1874 	if (task->tf_out_flags.b.data) {
1875 		u16 data =  taskfile->data + (hobfile->data << 8);
1876 		hwif->OUTW(data, IDE_DATA_REG);
1877 	}
1878 
1879 	/* (ks) send hob registers first */
1880 	if (task->tf_out_flags.b.nsector_hob)
1881 		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
1882 	if (task->tf_out_flags.b.sector_hob)
1883 		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
1884 	if (task->tf_out_flags.b.lcyl_hob)
1885 		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
1886 	if (task->tf_out_flags.b.hcyl_hob)
1887 		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
1888 
1889 	/* (ks) Send now the standard registers */
1890 	if (task->tf_out_flags.b.error_feature)
1891 		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
1892 	/* refers to number of sectors to transfer */
1893 	if (task->tf_out_flags.b.nsector)
1894 		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
1895 	/* refers to sector offset or start sector */
1896 	if (task->tf_out_flags.b.sector)
1897 		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
1898 	if (task->tf_out_flags.b.lcyl)
1899 		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
1900 	if (task->tf_out_flags.b.hcyl)
1901 		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);
1902 
1903 	/*
1904 	 * (ks) In the flagged taskfile approach, we use all specified
1905 	 * registers and do not change their values, except for the select
1906 	 * bit (master/slave) in the drive_head register.  We must make
1907 	 * sure that the desired drive is selected.
1908 	 */
1909 	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
1910 	switch(task->data_phase) {
1911 
1912 		case TASKFILE_OUT_DMAQ:
1913 		case TASKFILE_OUT_DMA:
1914 			hwif->ide_dma_write(drive);
1915 			break;
1916 
1917 		case TASKFILE_IN_DMAQ:
1918 		case TASKFILE_IN_DMA:
1919 			hwif->ide_dma_read(drive);
1920 			break;
1921 
1922 		default:
1923 			if (task->handler == NULL)
1924 				return ide_stopped;
1925 
1926 			/* Issue the command */
1927 			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
1928 			if (task->prehandler != NULL)
1929 				return task->prehandler(drive, HWGROUP(drive)->rq);
1930 	}
1931 
1932 	return ide_started;
1933 }
1934 
1935 EXPORT_SYMBOL(flagged_taskfile);
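/*
 * Illustrative only: a hedged in-kernel sketch of the flagged interface.
 * Only the registers whose flag bits are set get written; everything else
 * keeps whatever the drive last saw, which is the whole point of the
 * selective access described above.  The WIN_READ opcode, the one-sector
 * sizing and the helper itself are assumptions for illustration; the task
 * is submitted through ide_diag_taskfile() just as ide_taskfile_ioctl()
 * does for the TASKFILE_IN case.
 */
#if 0
static int example_flagged_read(ide_drive_t *drive, u8 *buf /* 512 bytes */)
{
	ide_task_t task;
	task_struct_t *tf = (task_struct_t *) task.tfRegister;

	memset(&task, 0, sizeof(task));
	tf->sector_count  = 1;
	tf->sector_number = 1;
	tf->command	  = WIN_READ;		/* 0x20 */

	/* non-zero out flags => flagged_taskfile() skips the defaults and
	 * writes only nsector, sector and (always) select + command */
	task.tf_out_flags.b.nsector = 1;
	task.tf_out_flags.b.sector  = 1;
	task.data_phase = TASKFILE_IN;
	task.handler	= &flagged_task_in_intr;

	return ide_diag_taskfile(drive, &task, 512, buf);
}
#endif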
1936 
1937 ide_startstop_t flagged_task_no_data_intr (ide_drive_t *drive)
1938 {
1939 	ide_hwif_t *hwif = HWIF(drive);
1940 	u8 stat;
1941 
1942 	local_irq_enable();
1943 
1944 	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT)) {
1945 		if (stat & ERR_STAT) {
1946 			return DRIVER(drive)->error(drive, "flagged_task_no_data_intr", stat);
1947 		}
1948 		/*
1949 		 * (ks) Unexpected ATA data phase detected.
1950 		 * This should not happen, but it can.
1951 		 * It is not clear which function is best suited to clean up
1952 		 * this situation, so ide_error() is used here.
1953 		 */
1954  		return DRIVER(drive)->error(drive, "flagged_task_no_data_intr (unexpected phase)", stat);
1955 	}
1956 
1957 	ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
1958 
1959 	return ide_stopped;
1960 }
1961 
1962 /*
1963  * Handler for command with PIO data-in phase
1964  */
1965 ide_startstop_t flagged_task_in_intr (ide_drive_t *drive)
1966 {
1967 	ide_hwif_t *hwif	= HWIF(drive);
1968 	u8 stat			= hwif->INB(IDE_STATUS_REG);
1969 	struct request *rq	= HWGROUP(drive)->rq;
1970 	char *pBuf		= NULL;
1971 	int retries             = 5;
1972 
1973 	if (rq->current_nr_sectors == 0)
1974 		return DRIVER(drive)->error(drive, "flagged_task_in_intr (no data requested)", stat);
1975 
1976 	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
1977 		if (stat & ERR_STAT) {
1978 			return DRIVER(drive)->error(drive, "flagged_task_in_intr", stat);
1979 		}
1980 		/*
1981 		 * (ks) Unexpected ATA data phase detected.
1982 		 * This should not happen, but it can.
1983 		 * It is not clear which function is best suited to clean up
1984 		 * this situation, so ide_error() is used here.
1985 		 */
1986 		return DRIVER(drive)->error(drive, "flagged_task_in_intr (unexpected data phase)", stat);
1987 	}
1988 
1989 	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
1990 	DTF("Read - rq->current_nr_sectors: %d, status: %02x\n", (int) rq->current_nr_sectors, stat);
1991 
1992 	taskfile_input_data(drive, pBuf, SECTOR_WORDS);
1993 
1994 	if (--rq->current_nr_sectors != 0) {
1995 		/*
1996 		 * (ks) We don't know which command was executed,
1997 		 * so we wait for the 'WORSTCASE' timeout.
1998 		 */
1999 		ide_set_handler(drive, &flagged_task_in_intr,  WAIT_WORSTCASE, NULL);
2000 		return ide_started;
2001 	}
2002 	/*
2003 	 * (ks) The last sector was transferred; wait until the drive is ready.
2004 	 * This can take up to 10 usec; we will wait at most 50 us.
2005 	 */
2006 	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
2007 		udelay(10);
2008 	ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
2009 
2010 	return ide_stopped;
2011 }
2012 
2013 ide_startstop_t flagged_task_mulin_intr (ide_drive_t *drive)
2014 {
2015 	ide_hwif_t *hwif	= HWIF(drive);
2016 	u8 stat			= hwif->INB(IDE_STATUS_REG);
2017 	struct request *rq	= HWGROUP(drive)->rq;
2018 	char *pBuf		= NULL;
2019 	int retries             = 5;
2020 	unsigned int msect, nsect;
2021 
2022 	if (rq->current_nr_sectors == 0)
2023 		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (no data requested)", stat);
2024 
2025 	msect = drive->mult_count;
2026 	if (msect == 0)
2027 		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (multimode not set)", stat);
2028 
2029 	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
2030 		if (stat & ERR_STAT) {
2031 			return DRIVER(drive)->error(drive, "flagged_task_mulin_intr", stat);
2032 		}
2033 		/*
2034 		 * (ks) Unexpected ATA data phase detected.
2035 		 * This should not happen, but it can.
2036 		 * It is not clear which function is best suited to clean up
2037 		 * this situation, so ide_error() is used here.
2038 		 */
2039 		return DRIVER(drive)->error(drive, "flagged_task_mulin_intr (unexpected data phase)", stat);
2040 	}
2041 
2042 	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
2043 	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
2044 
2045 	DTF("Multiread: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
2046 	    pBuf, nsect, rq->current_nr_sectors);
2047 
2048 	taskfile_input_data(drive, pBuf, nsect * SECTOR_WORDS);
2049 
2050 	rq->current_nr_sectors -= nsect;
2051 	if (rq->current_nr_sectors != 0) {
2052 		/*
2053 		 * (ks) We don't know which command was executed,
2054 		 * so we wait for the 'WORSTCASE' timeout.
2055 		 */
2056 		ide_set_handler(drive, &flagged_task_mulin_intr,  WAIT_WORSTCASE, NULL);
2057 		return ide_started;
2058 	}
2059 
2060 	/*
2061 	 * (ks) The last sector was transferred; wait until the drive is ready.
2062 	 * This can take up to 10 usec; we will wait at most 50 us.
2063 	 */
2064 	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
2065 		udelay(10);
2066 	ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
2067 
2068 	return ide_stopped;
2069 }
2070 
2071 /*
2072  * Pre handler for command with PIO data-out phase
2073  */
2074 ide_startstop_t flagged_pre_task_out_intr (ide_drive_t *drive, struct request *rq)
2075 {
2076 	ide_hwif_t *hwif	= HWIF(drive);
2077 	u8 stat			= hwif->INB(IDE_STATUS_REG);
2078 	ide_startstop_t startstop;
2079 
2080 	if (!rq->current_nr_sectors) {
2081 		return DRIVER(drive)->error(drive, "flagged_pre_task_out_intr (write data not specified)", stat);
2082 	}
2083 
2084 	if (ide_wait_stat(&startstop, drive, DATA_READY,
2085 			BAD_W_STAT, WAIT_DRQ)) {
2086 		printk(KERN_ERR "%s: No DRQ bit after issuing write command.\n", drive->name);
2087 		return startstop;
2088 	}
2089 
2090 	taskfile_output_data(drive, rq->buffer, SECTOR_WORDS);
2091 	--rq->current_nr_sectors;
2092 
2093 	return ide_started;
2094 }
2095 
2096 ide_startstop_t flagged_task_out_intr (ide_drive_t *drive)
2097 {
2098 	ide_hwif_t *hwif	= HWIF(drive);
2099 	u8 stat			= hwif->INB(IDE_STATUS_REG);
2100 	struct request *rq	= HWGROUP(drive)->rq;
2101 	char *pBuf		= NULL;
2102 
2103 	if (!OK_STAT(stat, DRIVE_READY, BAD_W_STAT))
2104 		return DRIVER(drive)->error(drive, "flagged_task_out_intr", stat);
2105 
2106 	if (!rq->current_nr_sectors) {
2107 		ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
2108 		return ide_stopped;
2109 	}
2110 
2111 	if (!OK_STAT(stat, DATA_READY, BAD_W_STAT)) {
2112 		/*
2113 		 * (ks) Unexpected ATA data phase detected.
2114 		 * This should not happen, but it can.
2115 		 * It is not clear which function is best suited to clean up
2116 		 * this situation, so ide_error() is used here.
2117 		 */
2118 		return DRIVER(drive)->error(drive, "flagged_task_out_intr (unexpected data phase)", stat);
2119 	}
2120 
2121 	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
2122 	DTF("Write - rq->current_nr_sectors: %d, status: %02x\n",
2123 		(int) rq->current_nr_sectors, stat);
2124 
2125 	taskfile_output_data(drive, pBuf, SECTOR_WORDS);
2126 	--rq->current_nr_sectors;
2127 
2128 	/*
2129 	 * (ks) We don't know which command was executed,
2130 	 * so we wait for the 'WORSTCASE' timeout.
2131 	 */
2132 	ide_set_handler(drive, &flagged_task_out_intr, WAIT_WORSTCASE, NULL);
2133 
2134 	return ide_started;
2135 }
2136 
2137 ide_startstop_t flagged_pre_task_mulout_intr (ide_drive_t *drive, struct request *rq)
2138 {
2139 	ide_hwif_t *hwif	= HWIF(drive);
2140 	u8 stat			= hwif->INB(IDE_STATUS_REG);
2141 	char *pBuf		= NULL;
2142 	ide_startstop_t startstop;
2143 	unsigned int msect, nsect;
2144 
2145 	if (!rq->current_nr_sectors)
2146 		return DRIVER(drive)->error(drive, "flagged_pre_task_mulout_intr (write data not specified)", stat);
2147 
2148 	msect = drive->mult_count;
2149 	if (msect == 0)
2150 		return DRIVER(drive)->error(drive, "flagged_pre_task_mulout_intr (multimode not set)", stat);
2151 
2152 	if (ide_wait_stat(&startstop, drive, DATA_READY,
2153 			BAD_W_STAT, WAIT_DRQ)) {
2154 		printk(KERN_ERR "%s: No DRQ bit after issuing write command.\n", drive->name);
2155 		return startstop;
2156 	}
2157 
2158 	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
2159 	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
2160 	DTF("Multiwrite: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
2161 	    pBuf, nsect, rq->current_nr_sectors);
2162 
2163 	taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
2164 
2165 	rq->current_nr_sectors -= nsect;
2166 
2167 	return ide_started;
2168 }
2169 
2170 ide_startstop_t flagged_task_mulout_intr (ide_drive_t *drive)
2171 {
2172 	ide_hwif_t *hwif	= HWIF(drive);
2173 	u8 stat			= hwif->INB(IDE_STATUS_REG);
2174 	struct request *rq	= HWGROUP(drive)->rq;
2175 	char *pBuf		= NULL;
2176 	unsigned int msect, nsect;
2177 
2178 	msect = drive->mult_count;
2179 	if (msect == 0)
2180 		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr (multimode not set)", stat);
2181 
2182 	if (!OK_STAT(stat, DRIVE_READY, BAD_W_STAT))
2183 		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr", stat);
2184 
2185 	if (!rq->current_nr_sectors) {
2186 		ide_end_drive_cmd (drive, stat, hwif->INB(IDE_ERROR_REG));
2187 		return ide_stopped;
2188 	}
2189 
2190 	if (!OK_STAT(stat, DATA_READY, BAD_W_STAT)) {
2191 		/*
2192 		 * (ks) Unexpected ATA data phase detected.
2193 		 * This should not happen, but it can.
2194 		 * It is not clear which function is best suited to clean up
2195 		 * this situation, so ide_error() is used here.
2196 		 */
2197 		return DRIVER(drive)->error(drive, "flagged_task_mulout_intr (unexpected data phase)", stat);
2198 	}
2199 
2200 	nsect = (rq->current_nr_sectors > msect) ? msect : rq->current_nr_sectors;
2201 	pBuf = rq->buffer + ((rq->nr_sectors - rq->current_nr_sectors) * SECTOR_SIZE);
2202 	DTF("Multiwrite: %p, nsect: %d , rq->current_nr_sectors: %ld\n",
2203 	    pBuf, nsect, rq->current_nr_sectors);
2204 
2205 	taskfile_output_data(drive, pBuf, nsect * SECTOR_WORDS);
2206 	rq->current_nr_sectors -= nsect;
2207 
2208 	/*
2209 	 * (ks) We don't know which command was executed,
2210 	 * so we wait for the 'WORSTCASE' timeout.
2211 	 */
2212 	ide_set_handler(drive, &flagged_task_mulout_intr, WAIT_WORSTCASE, NULL);
2213 
2214 	return ide_started;
2215 }
2216 
2217 /*
2218  * Beginning of Taskfile OPCODE Library and feature sets.
2219  */
2220 
2221 #ifdef CONFIG_PKT_TASK_IOCTL
2222 
2223 int pkt_taskfile_ioctl (ide_drive_t *drive, struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2224 {
2225 #if 0
2226 	switch(req_task->data_phase) {
2227 		case TASKFILE_P_OUT_DMAQ:
2228 		case TASKFILE_P_IN_DMAQ:
2229 		case TASKFILE_P_OUT_DMA:
2230 		case TASKFILE_P_IN_DMA:
2231 		case TASKFILE_P_OUT:
2232 		case TASKFILE_P_IN:
2233 	}
2234 #endif
2235 	return -ENOMSG;
2236 }
2237 
2238 EXPORT_SYMBOL(pkt_taskfile_ioctl);
2239 
2240 #endif /* CONFIG_PKT_TASK_IOCTL */
2241