1 /*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
8 *
9 * $Revision: 1.298.2.11 $
10 *
11 * History of changes (starts July 2000)
12 * 11/09/00 complete redesign after code review
13 * 02/01/01 added dynamic registration of ioctls
14 * fixed bug in registration of new majors
15 * fixed handling of request during dasd_end_request
16 * fixed handling of plugged queues
17 * fixed partition handling and HDIO_GETGEO
18 * fixed traditional naming scheme for devices beyond 702
19 * fixed some race conditions related to modules
20 * added devfs support
21 * 03/06/01 refined dynamic attach/detach for leaving devices which are online.
22 * 03/09/01 refined dynamic modification of devices
23 * 03/12/01 moved policy in dasd_format to dasdfmt (renamed BIODASDFORMAT)
24 * 03/19/01 added BIODASDINFO-ioctl
25 * removed 2.2 compatibility
26 * 04/27/01 fixed PL030119COT (dasd_disciplines does not work)
27 * 04/30/01 fixed PL030146HSM (module locking with dynamic ioctls)
28 * fixed PL030130SBA (handling of invalid ranges)
29 * 05/02/01 fixed PL030145SBA (killing dasdmt)
30 * fixed PL030149SBA (status of 'accepted' devices)
31 * fixed PL030146SBA (BUG in ibm.c after adding device)
32 * added BIODASDPRRD ioctl interface
33 * 05/11/01 fixed PL030164MVE (trap in probeonly mode)
34 * 05/15/01 fixed devfs support for unformatted devices
35 * 06/26/01 hopefully fixed PL030172SBA,PL030234SBA
36 * 07/09/01 fixed PL030324MSH (wrong statistics output)
37 * 07/16/01 merged in new fixes for handling low-mem situations
38 * 01/22/01 fixed PL030579KBE (wrong statistics)
39 * 08/07/03 fixed LTC BZ 3847 Erroneous message when formatting DASD
40 */
41
42 #include <linux/config.h>
43 #include <linux/version.h>
44 #include <linux/kmod.h>
45 #include <linux/init.h>
46 #include <linux/blkdev.h>
47 #include <linux/stddef.h>
48 #include <linux/kernel.h>
49 #include <linux/tqueue.h>
50 #include <linux/timer.h>
51 #include <linux/slab.h>
52 #include <linux/genhd.h>
53 #include <linux/hdreg.h>
54 #include <linux/interrupt.h>
55 #include <linux/ctype.h>
56 #ifdef CONFIG_PROC_FS
57 #include <linux/proc_fs.h>
58 #endif /* CONFIG_PROC_FS */
59 #include <linux/spinlock.h>
60 #include <linux/devfs_fs_kernel.h>
61 #include <linux/blkpg.h>
62 #include <linux/wait.h>
63
64 #include <asm/ccwcache.h>
65 #include <asm/debug.h>
66
67 #include <asm/atomic.h>
68 #include <asm/delay.h>
69 #include <asm/io.h>
70 #include <asm/semaphore.h>
71 #include <asm/ebcdic.h>
72 #include <asm/uaccess.h>
73 #include <asm/irq.h>
74 #include <asm/s390_ext.h>
75 #include <asm/s390dyn.h>
76 #include <asm/idals.h>
77
78 #include "dasd_int.h"
79
80 #ifdef CONFIG_DASD_ECKD
81 #include "dasd_eckd.h"
82 #endif /* CONFIG_DASD_ECKD */
83 #ifdef CONFIG_DASD_FBA
84 #include "dasd_fba.h"
85 #endif /* CONFIG_DASD_FBA */
86 #ifdef CONFIG_DASD_DIAG
87 #include "dasd_diag.h"
88 #endif /* CONFIG_DASD_DIAG */
89
90 /********************************************************************************
91 * SECTION: exported variables of dasd.c
92 ********************************************************************************/
93 debug_info_t *dasd_debug_area;
94
95 MODULE_AUTHOR ("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
96 MODULE_DESCRIPTION ("Linux on S/390 DASD device driver,"
97 " Copyright 2000 IBM Corporation");
98 MODULE_SUPPORTED_DEVICE ("dasd");
99 MODULE_PARM (dasd, "1-" __MODULE_STRING (256) "s");
100 MODULE_PARM (dasd_disciplines, "1-" __MODULE_STRING (8) "s");
101 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,12))
102 MODULE_LICENSE ("GPL");
103 #endif
104 EXPORT_SYMBOL (dasd_chanq_enq_head);
105 EXPORT_SYMBOL (dasd_debug_area);
106 EXPORT_SYMBOL (dasd_chanq_enq);
107 EXPORT_SYMBOL (dasd_chanq_deq);
108 EXPORT_SYMBOL (dasd_discipline_add);
109 EXPORT_SYMBOL (dasd_discipline_del);
110 EXPORT_SYMBOL (dasd_start_IO);
111 EXPORT_SYMBOL (dasd_term_IO);
112 EXPORT_SYMBOL (dasd_schedule_bh);
113 EXPORT_SYMBOL (dasd_schedule_bh_timed);
114 EXPORT_SYMBOL (dasd_int_handler);
115 EXPORT_SYMBOL (dasd_oper_handler);
116 EXPORT_SYMBOL (dasd_alloc_request);
117 EXPORT_SYMBOL (dasd_free_request);
118 EXPORT_SYMBOL (dasd_ioctl_no_register);
119 EXPORT_SYMBOL (dasd_ioctl_no_unregister);
120 EXPORT_SYMBOL (dasd_default_erp_action);
121 EXPORT_SYMBOL (dasd_default_erp_postaction);
122 EXPORT_SYMBOL (dasd_sleep_on_req);
123 EXPORT_SYMBOL (dasd_set_normalized_cda);
124 EXPORT_SYMBOL (dasd_device_from_kdev);
125
126
127 /********************************************************************************
128 * SECTION: Constant definitions to be used within this file
129 ********************************************************************************/
130
131 #define PRINTK_HEADER DASD_NAME":"
132 #define DASD_PROFILE /* fill profile information - used for */
133 /* statistics and performance */
134
135 #ifndef CONFIG_PROC_FS /* DASD_PROFILE doesn't make sense */
136 #undef DASD_PROFILE /* without procfs */
137 #endif /* not CONFIG_PROC_FS */
138
139 #define DASD_CHANQ_MAX_SIZE 4
140
141 /********************************************************************************
142 * SECTION: prototypes for static functions of dasd.c
143 ********************************************************************************/
144
145 static request_fn_proc do_dasd_request;
146 static int dasd_set_device_level (unsigned int, dasd_discipline_t *, int);
147 static request_queue_t *dasd_get_queue (kdev_t kdev);
148 static void cleanup_dasd (void);
149 static void dasd_plug_device (dasd_device_t * device);
150 static int dasd_fillgeo (int kdev, struct hd_geometry *geo);
151 static void dasd_enable_ranges (dasd_range_t *, dasd_discipline_t *, int);
152 static void dasd_disable_ranges (dasd_range_t *, dasd_discipline_t *, int, int);
153 static void dasd_enable_single_device ( unsigned long);
154 static inline int dasd_state_init_to_ready(dasd_device_t*);
155 static inline void dasd_setup_partitions ( dasd_device_t *);
156 static inline void dasd_destroy_partitions ( dasd_device_t *);
157 static inline int dasd_setup_blkdev(dasd_device_t*);
158 static void dasd_deactivate_queue (dasd_device_t *);
159 static inline int dasd_disable_blkdev(dasd_device_t*);
160 static void dasd_flush_chanq ( dasd_device_t * device, int destroy );
161 static void dasd_flush_request_queues ( dasd_device_t * device, int destroy );
162 static struct block_device_operations dasd_device_operations;
163 static inline dasd_device_t ** dasd_device_from_devno (int);
164 static void dasd_process_queues (dasd_device_t * device);
165 static int dasd_sleep_on_immediate (ccw_req_t *cqr);
166 static int dasd_devno_from_devindex (int devindex);
167 static int dasd_devindex_from_devno (int devno);
168
169 /********************************************************************************
170 * SECTION: static variables of dasd.c
171 ********************************************************************************/
172
173 static devfs_handle_t dasd_devfs_handle;
174 static wait_queue_head_t dasd_init_waitq;
175 static atomic_t dasd_init_pending = ATOMIC_INIT (0);
176
177 #ifdef CONFIG_DASD_DYNAMIC
178
179 /********************************************************************************
180 * SECTION: managing dynamic configuration of dasd_driver
181 ********************************************************************************/
182
183 static struct list_head dasd_devreg_head = LIST_HEAD_INIT (dasd_devreg_head);
184
185 /*
186 * function: dasd_create_devreg
187 * creates a dasd_devreg_t related to a devno
188 */
189 static inline dasd_devreg_t *
190 dasd_create_devreg (int devno)
191 {
192 dasd_devreg_t *r = kmalloc (sizeof (dasd_devreg_t), GFP_KERNEL);
193 if (r != NULL) {
194 memset (r, 0, sizeof (dasd_devreg_t));
195 r->devreg.ci.devno = devno;
196 r->devreg.flag = DEVREG_TYPE_DEVNO;
197 r->devreg.oper_func = dasd_oper_handler;
198 }
199 return r;
200 }
201
202 /*
203 * function: dasd_destroy_devreg
204 * destroys the dasd_devreg_t given as argument
205 */
206 static inline void
207 dasd_destroy_devreg (dasd_devreg_t * devreg)
208 {
209 kfree (devreg);
210 }
211
212 #endif /* CONFIG_DASD_DYNAMIC */
213
214 /********************************************************************************
215 * SECTION: managing setup of dasd_driver
216 ********************************************************************************/
217
218 /* default setting is probeonly, autodetect */
219 static int dasd_probeonly = 0; /* is true, when probeonly mode is active */
220 static int dasd_autodetect = 0; /* is true, when autodetection is active */
221
222 static dasd_range_t dasd_range_head =
223 { list:LIST_HEAD_INIT (dasd_range_head.list) };
224 static spinlock_t range_lock = SPIN_LOCK_UNLOCKED;
225
226 /*
227 * function: dasd_create_range
228 * creates a dasd_range_t according to the arguments
229 * FIXME: no check is performed for reoccurrence of a devno
230 */
231 static inline dasd_range_t *
232 dasd_create_range (int from, int to, int features)
233 {
234 dasd_range_t *range = NULL;
235
236 range = (dasd_range_t *) kmalloc (sizeof (dasd_range_t), GFP_KERNEL);
237 if (range == NULL)
238 return NULL;
239 memset (range, 0, sizeof (dasd_range_t));
240 range->from = from;
241 range->to = to;
242 range->features = features;
243 return range;
244 }
245
246 /*
247 * function dasd_destroy_range
248 * destroys a range allocated with dasd_create_range
249 * CAUTION: must not be called in a running system, because it destroys
250 * the mapping of DASDs
251 */
252 static inline void
253 dasd_destroy_range (dasd_range_t * range)
254 {
255 kfree (range);
256 }
257
258 /*
259 * function: dasd_append_range
260 * appends the range given as argument to the list anchored at dasd_range_head.
261 */
262 static inline void
263 dasd_append_range (dasd_range_t * range)
264 {
265 long flags;
266
267 spin_lock_irqsave (&range_lock, flags);
268 list_add_tail (&range->list, &dasd_range_head.list);
269 spin_unlock_irqrestore (&range_lock, flags);
270 }
271
272 /*
273 * function dasd_dechain_range
274 * removes a range from the chain of ranges
275 * CAUTION: must not be called in a running system because it destroys
276 * the mapping of devices
277 */
278 static inline void
279 dasd_dechain_range (dasd_range_t * range)
280 {
281 unsigned long flags;
282
283 spin_lock_irqsave (&range_lock, flags);
284 list_del (&range->list);
285 spin_unlock_irqrestore (&range_lock, flags);
286 }
287
288 /*
289 * function: dasd_add_range
290 * creates a dasd_range_t according to the arguments and
291 * appends it to the list of ranges.
292 * If a device in the range is already in another range, we split the
293 * range and add the subranges (no duplicate devices).
294 * additionally a devreg_t is created and added to the list of devregs
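 * Example: adding the range 0x0100-0x0104 while 0x0102 is already part of
 * another range results in the subranges 0x0100-0x0101 and 0x0103-0x0104.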
295 */
296 static inline void
297 dasd_add_range (int from, int to, int features)
298 {
299 dasd_range_t *range;
300 int start, end, index, i;
301
302 if (from > to) {
303 MESSAGE (KERN_DEBUG,
304 "Adding device range %04x-%04x: "
305 "range invalid, ignoring.",
306 from, to);
307 return;
308 }
309
310 /* loop over the given range, remove the already contained devices */
311 /* and add the remaining subranges */
312 for (start = index = from, end = -EINVAL; index <= to; index++) {
313
314 if (dasd_devindex_from_devno(index) >= 0) {
315 /* current device is already in range */
316 MESSAGE (KERN_DEBUG,
317 "dasd_add_range %04x-%04x: "
318 "device %04x is already in range",
319 from, to, index);
320
321 if (start == index)
322 start++; /* first already in range */
323 else
324 end = index -1; /* current already in range */
325 } else {
326 if (index == to)
327 end = to; /* end of original range reached */
328 }
329
330 range = NULL;
331 if (end != -EINVAL) {
332 MESSAGE (KERN_DEBUG,
333 "dasd_add_range %04x-%04x: "
334 "add (sub)range %04x-%04x",
335 from, to, start, end);
336
337 range = dasd_create_range (start, end, features);
338 end = -EINVAL;
339 start = index + 1;
340 }
341
342 if (range) {
343 dasd_append_range (range);
344 #ifdef CONFIG_DASD_DYNAMIC
345 /* allocate and chain devreg infos for the devnos... */
346 for (i = range->from; i <= range->to; i++) {
347 dasd_devreg_t *reg = dasd_create_devreg (i);
348 s390_device_register (&reg->devreg);
349 list_add (&reg->list, &dasd_devreg_head);
350 }
351 #endif /* CONFIG_DASD_DYNAMIC */
352 }
353 }
354 return;
355 }
356
357 /*
358 * function: dasd_remove_range
359 * removes a range and the corresponding devregs from all of the chains
360 * CAUTION: must not be called in a running system because it destroys
361 * the mapping of devices!
362 */
363 static inline void
364 dasd_remove_range (dasd_range_t * range)
365 {
366 #ifdef CONFIG_DASD_DYNAMIC
367 /* deallocate and dechain devreg infos for the devnos... */
368 {
369 int i;
370 for (i = range->from; i <= range->to; i++) {
371 struct list_head *l;
372 dasd_devreg_t *reg = NULL;
373 list_for_each (l, &dasd_devreg_head) {
374 reg = list_entry (l, dasd_devreg_t, list);
375 if (reg->devreg.flag == DEVREG_TYPE_DEVNO &&
376 reg->devreg.ci.devno == i &&
377 reg->devreg.oper_func == dasd_oper_handler)
378 break;
379 }
380 if (l == &dasd_devreg_head)
381 BUG ();
382 list_del(&reg->list);
383 s390_device_unregister (&reg->devreg);
384 dasd_destroy_devreg (reg);
385 }
386 }
387 #endif /* CONFIG_DASD_DYNAMIC */
388 dasd_dechain_range (range);
389 dasd_destroy_range (range);
390 }
391
392 /*
393 * function: dasd_devindex_from_devno
394 * finds the logical number of the devno supplied as argument in the list
395 * of dasd ranges and returns it, or -ENODEV when not found
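 * e.g. with the ranges 0x0100-0x0101 and 0x0200-0x0202 configured,
 * devno 0x0201 maps to devindex 3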
396 */
397 static int
398 dasd_devindex_from_devno (int devno)
399 {
400 dasd_range_t *temp;
401 int devindex = 0;
402 unsigned long flags;
403 struct list_head *l;
404
405 spin_lock_irqsave (&range_lock, flags);
406 list_for_each (l, &dasd_range_head.list) {
407 temp = list_entry (l, dasd_range_t, list);
408 if (devno >= temp->from && devno <= temp->to) {
409 spin_unlock_irqrestore (&range_lock, flags);
410 return devindex + devno - temp->from;
411 }
412 devindex += temp->to - temp->from + 1;
413 }
414 spin_unlock_irqrestore (&range_lock, flags);
415 return -ENODEV;
416 }
417
418 /*
419 * function: dasd_devno_from_devindex
 * maps a device index back to the corresponding devno (the inverse of
 * dasd_devindex_from_devno) or returns -ENODEV if the index is out of range
420 */
421 static int
422 dasd_devno_from_devindex (int devindex)
423 {
424 dasd_range_t *temp;
425 unsigned long flags;
426 struct list_head *l;
427
428 spin_lock_irqsave (&range_lock, flags);
429 list_for_each (l, &dasd_range_head.list) {
430 temp = list_entry (l, dasd_range_t, list);
431 if ( devindex < temp->to - temp->from + 1) {
432 spin_unlock_irqrestore (&range_lock, flags);
433 return temp->from + devindex;
434 }
435 devindex -= temp->to - temp->from + 1;
436 }
437 spin_unlock_irqrestore (&range_lock, flags);
438 return -ENODEV;
439 }
440
441 /********************************************************************************
442 * SECTION: parsing the dasd= parameter of the parmline/insmod cmdline
443 ********************************************************************************/
444
445 /*
446 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
447 * it is named 'dasd' to directly be filled by insmod with the comma separated
448 * strings when running as a module.
449 * a maximum of 256 ranges can be supplied, as the parmline is limited to
450 * <1024 Byte anyway.
451 */
452 char *dasd[256];
453 char *dasd_disciplines[8];
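
/*
 * Illustrative (not exhaustive) examples of dasd= values handled by the
 * parsing code below:
 * dasd=autodetect enable autodetection mode
 * dasd=probeonly probe devices but do not enable them
 * dasd=0x0150 a single device number (hexadecimal, the '0x' is optional)
 * dasd=0150-0155 a range of device numbers
 * dasd=0150-0155(ro) a range to be enabled read-only
 */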
454
455 #ifndef MODULE
456 /*
457 * function: dasd_split_parm_string
458 * splits the parmline given to the kernel into comma separated strings
459 * which are filled into the 'dasd[]' array, to be parsed later on
460 */
461 static void
462 dasd_split_parm_string (char *str)
463 {
464 char *tmp = str;
465 int count = 0;
466 while (tmp != NULL && *tmp != '\0') {
467 char *end;
468 int len;
469 end = strchr (tmp, ',');
470 if (end == NULL) {
471 len = strlen (tmp) + 1;
472 } else {
473 len = (long) end - (long) tmp + 1;
474 *end = '\0';
475 end++;
476 }
477 dasd[count] = kmalloc (len * sizeof (char),
478 GFP_ATOMIC);
479 if (dasd[count] == NULL) {
480
481 MESSAGE (KERN_WARNING,
482 "can't store dasd= parameter no"
483 " %d",
484 count + 1);
485 break;
486 }
487 memset (dasd[count], 0, len * sizeof (char));
488 memcpy (dasd[count], tmp, len * sizeof (char));
489 count++;
490 tmp = end;
491 };
492 }
493
494 /*
495 * dasd_parm_string holds a concatenated version of all 'dasd=' parameters
496 * supplied in the parmline, which is later to be split by
497 * dasd_split_parm_string
498 * FIXME: why first concatenate then split ?
499 */
500 static char dasd_parm_string[1024] __initdata = { 0, };
501
502 /*
503 * function: dasd_setup
504 * is invoked for any single 'dasd=' parameter supplied in the parmline
505 * it merges all the arguments into dasd_parm_string
506 */
507 void __init
508 dasd_setup (char *str, int *ints)
509 {
510 int len = strlen (dasd_parm_string);
511 if (len != 0) {
512 strcat (dasd_parm_string, ",");
513 }
514 strcat (dasd_parm_string, str);
515 }
516
517 /*
518 * function: dasd_call_setup
519 * is the 2.4 version of dasd_setup and
520 * is invoked for any single 'dasd=' parameter supplied in the parmline
521 */
522 int __init
523 dasd_call_setup (char *str)
524 {
525 int dummy;
526 dasd_setup (str, &dummy);
527 return 1;
528 }
529
530 int __init
531 dasd_disciplines_setup (char *str)
532 {
533 return 1;
534 }
535
536 __setup ("dasd=", dasd_call_setup);
537 __setup ("dasd_disciplines=", dasd_disciplines_setup);
538
539 #endif /* MODULE */
540
541 /*
542 * function: dasd_strtoul
543 * provides a wrapper to simple_strtoul to strip leading '0x' and
544 * interpret any argument to dasd=[range,...] as hexadecimal
545 */
546 static inline int
547 dasd_strtoul (char *str, char **stra, int* features)
548 {
549 char *temp=str;
550 char *buffer;
551 int val,i,start;
552
553 buffer=(char*)kmalloc((strlen(str)+1)*sizeof(char),GFP_ATOMIC);
554
555 if (buffer==NULL) {
556
557 MESSAGE_LOG (KERN_WARNING,
558 "can't parse dasd= parameter %s due "
559 "to low memory",
560 str);
return -EINVAL;
561 }
562
563 /* remove leading '0x' */
564 if (*temp == '0') {
565 temp++; /* strip leading zero */
566 if (*temp == 'x')
567 temp++; /* strip leading x */
568 }
569
570 /* copy device number to buffer and convert it from its hex representation */
571 for (i=0; temp[i]!='\0' && temp[i]!='(' &&
572 temp[i]!='-' && temp[i]!=' '; i++){
573 if (isxdigit(temp[i])) {
574 buffer[i]=temp[i];
575 } else {
576 return -EINVAL;
577 }
578 }
579
580 buffer[i]='\0';
581
582 val = simple_strtoul (buffer, &buffer, 16);
583
584 /* check for features - e.g. (ro) ; '\0', ')' and '-' stop the check */
585 *features = DASD_FEATURE_DEFAULT;
586
587 if (temp[i]=='(') {
588
589 while (temp[i]!='\0' && temp[i]!=')'&&temp[i]!='-') {
590 start=++i;
591
592 /* move next feature to buffer */
593 for (;temp[i]!='\0'&&temp[i]!=':'&&temp[i]!=')'&&temp[i]!='-';i++)
594 buffer[i-start]=temp[i];
595 buffer[i-start]='\0';
596
597 if (strlen(buffer)) {
598 if (!strcmp(buffer,"ro")) { /* handle 'ro' feature */
599 (*features) |= DASD_FEATURE_READONLY;
600 break;
601 }
602
603 MESSAGE_LOG (KERN_WARNING,
604 "unsupported feature: %s, "
605 "ignoring setting",
606 buffer);
607 }
608 }
609 }
610
611 *stra = temp+i;
612 if ((val > 0xFFFF) || (val < 0))
613 return -EINVAL;
614 return val;
615 }
616
617 /*
618 * function: dasd_parse
619 * examines the strings given in the string array str and
620 * creates and adds the ranges to the appropriate lists
621 */
622 static int
623 dasd_parse (char **str)
624 {
625 char *temp;
626 int from, to;
627 int features;
628 int rc = 0;
629
630 while (*str) {
631 temp = *str;
632 from = 0;
633 to = 0;
634 if (strcmp ("autodetect", *str) == 0) {
635 dasd_autodetect = 1;
636
637 MESSAGE (KERN_INFO, "%s",
638 "turning to autodetection mode");
639
640 break;
641 } else if (strcmp ("probeonly", *str) == 0) {
642 dasd_probeonly = 1;
643
644 MESSAGE (KERN_INFO, "%s",
645 "turning to probeonly mode");
646
647 break;
648 } else {
649 /* turn off autodetect mode, if any range is present */
650 dasd_autodetect = 0;
651 from = dasd_strtoul (temp, &temp, &features);
652 to = from;
653 if (*temp == '-') {
654 temp++;
655 to = dasd_strtoul (temp, &temp, &features);
656 }
657 if (from == -EINVAL ||
658 to == -EINVAL ) {
659 rc = -EINVAL;
660 break;
661 } else {
662 dasd_add_range (from, to ,features);
663 }
664 }
665 str++;
666 }
667
668 return rc;
669 }
670
671 /********************************************************************************
672 * SECTION: Dealing with devices registered to multiple major numbers
673 ********************************************************************************/
674
675 static spinlock_t dasd_major_lock = SPIN_LOCK_UNLOCKED;
676
677 static struct list_head dasd_major_info = LIST_HEAD_INIT(dasd_major_info);
678 static major_info_t dasd_major_static = {
679 gendisk:{INIT_GENDISK(94, DASD_NAME, DASD_PARTN_BITS, DASD_PER_MAJOR)},
680 flags: DASD_MAJOR_INFO_IS_STATIC
681 };
682
683 static major_info_t *
684 get_new_major_info (void)
685 {
686 major_info_t *major_info = NULL;
687
688 major_info = kmalloc (sizeof (major_info_t), GFP_KERNEL);
689 if (major_info) {
690 static major_info_t temp_major_info = {
691 gendisk:{
692 INIT_GENDISK (0, DASD_NAME, DASD_PARTN_BITS,
693 DASD_PER_MAJOR)}
694 };
695 memcpy (major_info, &temp_major_info, sizeof (major_info_t));
696 }
697 return major_info;
698 }
699
700 /*
701 * register major number
702 * is called with the 'static' major_info during init of the driver or 'NULL' to
703 * allocate an additional dynamic major.
704 */
705 static int
706 dasd_register_major (major_info_t * major_info)
707 {
708 int rc = 0;
709 int major;
710 unsigned long flags;
711
712 /* allocate dynamic major */
713 if (major_info == NULL) {
714 major_info = get_new_major_info ();
715 if (!major_info) {
716
717 MESSAGE (KERN_WARNING, "%s",
718 "Cannot get memory to allocate another "
719 "major number");
720
721 return -ENOMEM;
722 }
723 }
724
725 major = major_info->gendisk.major;
726
727 /* init devfs array */
728 major_info->gendisk.de_arr = (devfs_handle_t *)
729 kmalloc (DASD_PER_MAJOR * sizeof (devfs_handle_t), GFP_KERNEL);
730 if(major_info->gendisk.de_arr == NULL)
731 goto out_gd_de_arr;
732
733 memset (major_info->gendisk.de_arr, 0,
734 DASD_PER_MAJOR * sizeof (devfs_handle_t));
735
736 /* init flags */
737 major_info->gendisk.flags = (char *)
738 kmalloc (DASD_PER_MAJOR * sizeof (char), GFP_KERNEL);
739 if(major_info->gendisk.flags == NULL)
740 goto out_gd_flags;
741
742 memset (major_info->gendisk.flags, 0, DASD_PER_MAJOR * sizeof (char));
743
744 /* register blockdevice */
745 rc = devfs_register_blkdev (major, DASD_NAME, &dasd_device_operations);
746 if (rc < 0) {
747
748 MESSAGE (KERN_WARNING,
749 "Cannot register to major no %d, rc = %d",
750 major,
751 rc);
752
753 goto out_reg_blkdev;
754 } else {
755 major_info->flags |= DASD_MAJOR_INFO_REGISTERED;
756 }
757
758 /* Insert the new major info into dasd_major_info if needed (dynamic major) */
759 if (!(major_info->flags & DASD_MAJOR_INFO_IS_STATIC)) {
760 spin_lock_irqsave (&dasd_major_lock, flags);
761 list_add_tail (&major_info->list, &dasd_major_info);
762 spin_unlock_irqrestore (&dasd_major_lock, flags);
763 }
764
765 if (major == 0) {
766 major = rc;
767 rc = 0;
768 }
769
770 /* init array of devices */
771 major_info->dasd_device =
772 (dasd_device_t **) kmalloc (DASD_PER_MAJOR *
773 sizeof (dasd_device_t *), GFP_ATOMIC);
774 if (!major_info->dasd_device)
775 goto out_devices;
776 memset (major_info->dasd_device, 0,
777 DASD_PER_MAJOR * sizeof (dasd_device_t *));
778
779 /* init blk_size */
780 blk_size[major] =
781 (int *) kmalloc ((1 << MINORBITS) * sizeof (int), GFP_ATOMIC);
782 if (!blk_size[major])
783 goto out_blk_size;
784 memset (blk_size[major], 0, (1 << MINORBITS) * sizeof (int));
785
786 /* init blksize_size */
787 blksize_size[major] =
788 (int *) kmalloc ((1 << MINORBITS) * sizeof (int), GFP_ATOMIC);
789 if (!blksize_size[major])
790 goto out_blksize_size;
791 memset (blksize_size[major], 0, (1 << MINORBITS) * sizeof (int));
792
793 /* init_hardsect_size */
794 hardsect_size[major] =
795 (int *) kmalloc ((1 << MINORBITS) * sizeof (int), GFP_ATOMIC);
796 if (!hardsect_size[major])
797 goto out_hardsect_size;
798 memset (hardsect_size[major], 0, (1 << MINORBITS) * sizeof (int));
799
800 /* init max_sectors */
801 max_sectors[major] =
802 (int *) kmalloc ((1 << MINORBITS) * sizeof (int), GFP_ATOMIC);
803 if (!max_sectors[major])
804 goto out_max_sectors;
805 memset (max_sectors[major], 0, (1 << MINORBITS) * sizeof (int));
806
807 /* finally do the gendisk stuff */
808 major_info->gendisk.part = kmalloc ((1 << MINORBITS) *
809 sizeof (struct hd_struct),
810 GFP_ATOMIC);
811 if (!major_info->gendisk.part)
812 goto out_gendisk;
813 memset (major_info->gendisk.part, 0, (1 << MINORBITS) *
814 sizeof (struct hd_struct));
815
816 INIT_BLK_DEV (major, do_dasd_request, dasd_get_queue, NULL);
817
818 major_info->gendisk.sizes = blk_size[major];
819 major_info->gendisk.major = major;
820 add_gendisk (&major_info->gendisk);
821 return major;
822
823 /* error handling - free the prior allocated memory */
824 out_gendisk:
825 kfree (max_sectors[major]);
826 max_sectors[major] = NULL;
827
828 out_max_sectors:
829 kfree (hardsect_size[major]);
830 hardsect_size[major] = NULL;
831
832 out_hardsect_size:
833 kfree (blksize_size[major]);
834 blksize_size[major] = NULL;
835
836 out_blksize_size:
837 kfree (blk_size[major]);
838 blk_size[major] = NULL;
839
840 out_blk_size:
841 kfree (major_info->dasd_device);
842
843 out_devices:
844 /* Delete the new major info from dasd_major_info list if needed (dynamic) */
845 if (!(major_info->flags & DASD_MAJOR_INFO_IS_STATIC)) {
846 spin_lock_irqsave (&dasd_major_lock, flags);
847 list_del (&major_info->list);
848 spin_unlock_irqrestore (&dasd_major_lock, flags);
849 }
850
851 /* unregister blockdevice */
852 rc = devfs_unregister_blkdev (major, DASD_NAME);
853 if (rc < 0) {
854
855 MESSAGE (KERN_WARNING,
856 "Unable to unregister from major no %d, rc = %d",
857 major,
858 rc);
859 } else {
860 major_info->flags &= ~DASD_MAJOR_INFO_REGISTERED;
861 }
862
863 out_reg_blkdev:
864 kfree (major_info->gendisk.flags);
865
866 out_gd_flags:
867 kfree (major_info->gendisk.de_arr);
868
869 out_gd_de_arr:
870 /* Delete the new major info from dasd_major_info if needed */
871 if (!(major_info->flags & DASD_MAJOR_INFO_IS_STATIC)) {
872 kfree (major_info);
873 }
874
875 return -ENOMEM;
876 }
877
878 static int
879 dasd_unregister_major (major_info_t * major_info)
880 {
881 int rc = 0;
882 int major;
883 unsigned long flags;
884
885 if (major_info == NULL) {
886 return -EINVAL;
887 }
888 major = major_info->gendisk.major;
889 INIT_BLK_DEV (major, NULL, NULL, NULL);
890
891 del_gendisk (&major_info->gendisk);
892
893 kfree (major_info->dasd_device);
894 kfree (major_info->gendisk.part);
895
896 kfree (blk_size[major]);
897 kfree (blksize_size[major]);
898 kfree (hardsect_size[major]);
899 kfree (max_sectors[major]);
900
901 blk_size[major] = NULL;
902 blksize_size[major] = NULL;
903 hardsect_size[major] = NULL;
904 max_sectors[major] = NULL;
905
906 rc = devfs_unregister_blkdev (major, DASD_NAME);
907 if (rc < 0) {
908
909 MESSAGE (KERN_WARNING,
910 "Cannot unregister from major no %d, rc = %d",
911 major,
912 rc);
913
914 return rc;
915 } else {
916 major_info->flags &= ~DASD_MAJOR_INFO_REGISTERED;
917 }
918
919 kfree (major_info->gendisk.flags);
920 kfree (major_info->gendisk.de_arr);
921
922 /* Delete the new major info from dasd_major_info if needed */
923 if (!(major_info->flags & DASD_MAJOR_INFO_IS_STATIC)) {
924 spin_lock_irqsave (&dasd_major_lock, flags);
925 list_del (&major_info->list);
926 spin_unlock_irqrestore (&dasd_major_lock, flags);
927 kfree (major_info);
928 }
929 return rc;
930 }
931
932 /*
933 * function: dasd_device_from_kdev
934 * finds the device structure corresponding to the kdev supplied as argument
935 * in the major_info structures and returns it or NULL when not found
936 */
937 dasd_device_t *
938 dasd_device_from_kdev (kdev_t kdev)
939 {
940 major_info_t *major_info;
941 dasd_device_t *device;
942 struct list_head *l;
943 unsigned long flags;
944
945 device = NULL;
946 spin_lock_irqsave (&dasd_major_lock, flags);
947 list_for_each (l, &dasd_major_info) {
948 major_info = list_entry (l, major_info_t, list);
949 if (major_info->gendisk.major == MAJOR (kdev)) {
950 device = major_info->dasd_device[MINOR (kdev) >>
951 DASD_PARTN_BITS];
952 break;
953 }
954 }
955 spin_unlock_irqrestore (&dasd_major_lock, flags);
956 return device;
957 }
958
959 /*
960 * function: dasd_device_from_devno
961 * finds the address of the device structure corresponding to the devno
962 * supplied as argument in the major_info structures and returns
963 * it or NULL when not found
964 */
965 static inline dasd_device_t **
966 dasd_device_from_devno (int devno)
967 {
968 major_info_t *major_info;
969 dasd_device_t **device;
970 struct list_head *l;
971 int devindex;
972 unsigned long flags;
973
974 spin_lock_irqsave (&dasd_major_lock, flags);
975 devindex = dasd_devindex_from_devno (devno);
976 if (devindex < 0) {
977 spin_unlock_irqrestore (&dasd_major_lock, flags);
978 return NULL;
979 }
980
981 device = NULL;
982 list_for_each (l, &dasd_major_info) {
983 major_info = list_entry (l, major_info_t, list);
984 if (devindex < DASD_PER_MAJOR) {
985 device = &major_info->dasd_device[devindex];
986 break;
987 }
988 devindex -= DASD_PER_MAJOR;
989 }
990 spin_unlock_irqrestore (&dasd_major_lock, flags);
991 return device;
992 }
993
994 /*
995 * function: dasd_features_from_devno
996 * finds the device range corresponding to the devno
997 * supplied as argument in the major_info structures and returns
998 * the features set for it
999 */
1000
1001 static int
1002 dasd_features_from_devno (int devno)
1003 {
1004 dasd_range_t *temp;
1005 int devindex = 0;
1006 unsigned long flags;
1007 struct list_head *l;
1008
1009 spin_lock_irqsave (&range_lock, flags);
1010 list_for_each (l, &dasd_range_head.list) {
1011 temp = list_entry (l, dasd_range_t, list);
1012 if (devno >= temp->from && devno <= temp->to) {
1013 spin_unlock_irqrestore (&range_lock, flags);
1014 return temp->features;
1015 }
1016 devindex += temp->to - temp->from + 1;
1017 }
1018 spin_unlock_irqrestore (&range_lock, flags);
1019 return -ENODEV;
1020 }
1021
1022 /*
1023 * function: dasd_check_bp_block
1024 * checks the blocksize and returns 0 if valid.
1025 */
1026
1027 static int
1028 dasd_check_bp_block (dasd_device_t *device)
1029 {
1030 int rc;
1031
1032 switch (device->sizes.bp_block) {
1033 case 512:
1034 case 1024:
1035 case 2048:
1036 case 4096:
1037 rc = 0;
1038 break;
1039 default:
1040 rc = -EMEDIUMTYPE;
1041 }
1042
1043 return rc;
1044 }
1045
1046 /********************************************************************************
1047 * SECTION: managing dasd disciplines
1048 ********************************************************************************/
1049
1050 /* anchor and spinlock for list of disciplines */
1051 static struct list_head dasd_disc_head = LIST_HEAD_INIT(dasd_disc_head);
1052 static spinlock_t discipline_lock = SPIN_LOCK_UNLOCKED;
1053
1054 /*
1055 * function dasd_discipline_enq
1056 * chains the discipline given as argument to the tail of the list of disciplines.
1057 * Exception: DIAG is always queued to the head, to ensure that CMS RESERVED
1058 * minidisks are invariably accessed using DIAG.
1059 */
1060 static inline void
1061 dasd_discipline_enq (dasd_discipline_t *discipline)
1062 {
1063 if (strncmp (discipline->name, "DIAG", 4) == 0) {
1064 list_add (&discipline->list, &dasd_disc_head);
1065 } else {
1066 list_add_tail (&discipline->list, &dasd_disc_head);
1067 }
1068 }
1069
1070 /*
1071 * function dasd_discipline_deq
1072 * removes the discipline given as argument from the list of disciplines
1073 */
1074 static inline void
1075 dasd_discipline_deq (dasd_discipline_t * discipline)
1076 {
1077 if (&discipline->list) {
1078 list_del (&discipline->list);
1079 }
1080 }
1081
1082 void
1083 dasd_discipline_add (dasd_discipline_t * discipline)
1084 {
1085 unsigned long flags;
1086 MOD_INC_USE_COUNT;
1087 spin_lock_irqsave (&discipline_lock,flags);
1088 dasd_discipline_enq (discipline);
1089 spin_unlock_irqrestore (&discipline_lock,flags);
1090
1091 dasd_enable_ranges (&dasd_range_head,
1092 discipline,
1093 DASD_STATE_ONLINE);
1094 }
1095
1096 void dasd_discipline_del (dasd_discipline_t * discipline)
1097 {
1098 unsigned long flags;
1099
1100 dasd_disable_ranges(&dasd_range_head,
1101 discipline,
1102 DASD_STATE_DEL,
1103 1);
1104
1105 spin_lock_irqsave (&discipline_lock,flags);
1106 dasd_discipline_deq (discipline);
1107 spin_unlock_irqrestore (&discipline_lock,flags);
1108 MOD_DEC_USE_COUNT;
1109 }
1110
1111 /*
1112 * function dasd_find_disc
1113 * checks the list of disciplines for the first one able to access the device
1114 */
1115 static inline dasd_discipline_t *
1116 dasd_find_disc (dasd_device_t * device, dasd_discipline_t *discipline)
1117 {
1118 dasd_discipline_t *t;
1119 struct list_head *l = discipline ?
1120 &discipline->list : dasd_disc_head.next;
1121
1122 do {
1123 t = list_entry(l,dasd_discipline_t,list);
1124
1125 if ( ( t->id_check == NULL ||
1126 t->id_check (&device->devinfo) == 0 ) &&
1127 ( t->check_characteristics == NULL ||
1128 t->check_characteristics (device) == 0 ) )
1129 break;
1130 l = l->next;
1131 if ( discipline ||
1132 l == &dasd_disc_head ) {
1133 t = NULL;
1134 break;
1135 }
1136 } while ( 1 );
1137
1138 return t;
1139 }
1140
1141 /********************************************************************************
1142 * SECTION: profiling stuff
1143 ********************************************************************************/
1144
1145 #ifdef CONFIG_PROC_FS
1146
1147 static dasd_profile_info_t dasd_global_profile;
1148 #endif /* CONFIG_PROC_FS */
1149
1150 #ifdef DASD_PROFILE
1151
1152 #define DASD_PROFILE_ON 1
1153 #define DASD_PROFILE_OFF 0
1154
1155 static unsigned int dasd_profile_level = DASD_PROFILE_OFF;
1156
1157 /*
1158 * macro: dasd_profile_add_counter
1159 * increments counter in global and local profiling structures
1160 * according to the value
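 * (the bucket index grows with the logarithm of the value, so each
 * counter slot covers a power-of-two interval; the index is capped at 31)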
1161 */
1162 #define dasd_profile_add_counter( value, counter, device ) \
1163 { \
1164 int ind; \
1165 long help; \
1166 for (ind = 0, help = value >> 2; \
1167 ind < 31 && help; \
1168 help = help >> 1, ind++) {} \
1169 dasd_global_profile.counter[ind]++; \
1170 device->profile.counter[ind]++; \
1171 }
1172
1173 /*
1174 * function dasd_profile_add
1175 * adds the profiling information from the cqr given as argument to the
1176 * global and device specific profiling information
1177 */
1178 void
1179 dasd_profile_add (ccw_req_t * cqr)
1180 {
1181 long strtime, irqtime, endtime, tottime; /* in microseconds */
1182 long tottimeps, sectors;
1183 dasd_device_t *device = cqr->device;
1184
1185 if (!cqr->req) /* safeguard against abnormal cqrs */
1186 return;
1187
1188 if ((!cqr->buildclk) ||
1189 (!cqr->startclk) ||
1190 (!cqr->stopclk ) ||
1191 (!cqr->endclk ) ||
1192 (!(sectors = ((struct request *) (cqr->req))->nr_sectors)))
1193 return;
1194
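/* The TOD clock advances in bit 51 once per microsecond, so shifting */
/* the clock differences right by 12 converts them to microseconds. */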
1195 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
1196 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
1197 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
1198 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
1199 tottimeps = tottime / sectors;
1200
1201 if (!dasd_global_profile.dasd_io_reqs) {
1202 memset (&dasd_global_profile, 0, sizeof (dasd_profile_info_t));
1203 };
1204 if (!device->profile.dasd_io_reqs) {
1205 memset (&device->profile, 0, sizeof (dasd_profile_info_t));
1206 };
1207
1208 dasd_global_profile.dasd_io_reqs++;
1209 device->profile.dasd_io_reqs++;
1210 dasd_global_profile.dasd_io_sects+=sectors;
1211 device->profile.dasd_io_sects+=sectors;
1212 dasd_profile_add_counter (sectors, dasd_io_secs, device);
1213 dasd_profile_add_counter (tottime, dasd_io_times, device);
1214 dasd_profile_add_counter (tottimeps, dasd_io_timps, device);
1215 dasd_profile_add_counter (strtime, dasd_io_time1, device);
1216 dasd_profile_add_counter (irqtime, dasd_io_time2, device);
1217 dasd_profile_add_counter (irqtime / sectors, dasd_io_time2ps, device);
1218 dasd_profile_add_counter (endtime, dasd_io_time3, device);
1219 }
1220 #endif /* DASD_PROFILE */
1221
1222 /********************************************************************************
1223 * SECTION: All the gendisk stuff
1224 ********************************************************************************/
1225
1226
1227 /********************************************************************************
1228 * SECTION: Managing wrappers for ccwcache
1229 ********************************************************************************/
1230
1231 /*
1232 * function dasd_alloc_request
1233 * tries to return space for a channel program of length cplength with
1234 * additional data of size datasize.
1235 * If the ccwcache cannot fulfill the request, it falls back to pages from
1236 * the device's lowmem pool before finally giving up.
1237 * FIXME: initialization of ccw_req_t should be done by function of ccwcache
1238 */
1239 ccw_req_t *
1240 dasd_alloc_request (char *magic, int cplength, int datasize,
1241 dasd_device_t *device)
1242 {
1243 ccw_req_t *cqr;
1244 unsigned long size_needed;
1245 unsigned long data_offset, ccw_offset;
1246 dasd_lowmem_t *lowmem;
1247
1248 if ((cqr = ccw_alloc_request (magic, cplength, datasize)) != NULL) {
1249 return cqr;
1250 }
1251
1252 /* Sanity checks */
1253 if (magic == NULL || datasize > PAGE_SIZE ||
1254 cplength == 0 || (cplength * sizeof(ccw1_t)) > PAGE_SIZE)
1255 BUG();
1256
1257 /* use lowmem page only for ERP or */
1258 /* if there are less than 2 requests on queue */
1259 if (device->queue.head != NULL &&
1260 device->queue.head->next != NULL &&
1261 device->queue.head->status != CQR_STATUS_ERROR) {
1262 return NULL;
1263 }
1264
1265 /* We try to keep things together in memory */
1266 size_needed = (sizeof (ccw_req_t) + 7) & (~7L);
1267 data_offset = ccw_offset = 0;
1268 if (size_needed + datasize <= PAGE_SIZE) {
1269 /* Keep data with the request */
1270 data_offset = size_needed;
1271 size_needed += (datasize + 7) & (~7L);
1272 }
1273 if (size_needed + cplength*sizeof(ccw1_t) <= PAGE_SIZE) {
1274 /* Keep CCWs with request */
1275 ccw_offset = size_needed;
1276 size_needed += (cplength*sizeof(ccw1_t)) & (~7L);
1277 }
1278
1279 /* take page from lowmem_pool for request */
1280 list_for_each_entry (lowmem, &device->lowmem_pool, list) {
1281 list_del (&lowmem->list);
1282 cqr = (ccw_req_t *) lowmem;
1283 memset (cqr, 0, PAGE_SIZE);
1284 cqr->flags |= CQR_FLAGS_LM_CQR;
1285 break;
1286 }
1287 if (cqr == NULL)
1288 return NULL;
1289
1290 /* take page from lowmem_pool for the extra data */
1291 if (data_offset == 0) {
1292
1293 list_for_each_entry (lowmem, &device->lowmem_pool, list) {
1294 list_del (&lowmem->list);
1295 cqr->data = (void *) lowmem;
1296 memset (cqr->data, 0, PAGE_SIZE);
1297 break;
1298 }
1299 if (cqr->data == NULL) {
1300 printk(KERN_DEBUG PRINTK_HEADER
1301 "Couldn't allocate data area\n");
1302
1303 lowmem = (dasd_lowmem_t *) cqr;
1304 list_add (&lowmem->list, &device->lowmem_pool);
1305 return NULL;
1306 }
1307 } else {
1308 /* Extra data already allocated with the request */
1309 cqr->data = (void *) ((addr_t) cqr + data_offset);
1310 }
1311
1312 /* take page from lowmem_pool for the channel program */
1313 if (ccw_offset == 0) {
1314
1315 list_for_each_entry (lowmem, &device->lowmem_pool, list) {
1316 list_del (&lowmem->list);
1317 cqr->cpaddr = (ccw1_t *) lowmem;
1318 memset (cqr->cpaddr, 0, PAGE_SIZE);
1319 break;
1320 }
1321
1322 if (cqr->cpaddr == NULL) {
1323 printk (KERN_DEBUG PRINTK_HEADER
1324 "Couldn't allocate channel program area\n");
1325 if (data_offset == 0) {
1326 lowmem = (dasd_lowmem_t *) cqr->data;
1327 list_add (&lowmem->list, &device->lowmem_pool);
1328 }
1329 lowmem = (dasd_lowmem_t *) cqr;
1330 list_add (&lowmem->list, &device->lowmem_pool);
1331 return NULL;
1332 }
1333 } else {
1334 /* Channel program already allocated with the request */
1335 cqr->cpaddr = (ccw1_t *) ((addr_t) cqr + ccw_offset);
1336 }
1337
1338 /* use the remaining memory of the cqr page for IDALs */
1339 cqr->lowmem_idal_ptr = (void *) ((addr_t) cqr + size_needed);
1340
1341 strncpy ((char *)(&cqr->magic), magic, 4);
1342
1343 ASCEBC((char *)(&cqr->magic), 4);
1344 cqr->cplength = cplength;
1345 cqr->datasize = datasize;
1346
1347 return cqr;
1348 }
1349
1350 /*
1351 * function dasd_free_request
1352 * returns a ccw_req_t to the appropriate cache or the emergency (lowmem) pool
1353 */
1354 void
1355 dasd_free_request (ccw_req_t *cqr, dasd_device_t* device)
1356 {
1357 unsigned long size_needed;
1358 dasd_lowmem_t *lowmem;
1359
1360 #ifdef CONFIG_ARCH_S390X
1361 ccw1_t* ccw;
1362 /* clear any idals used for the chain (might be in the lowmem cqr page, */
1363 /* in a separate lowmem page or kmalloc'ed) */
1364 ccw=cqr->cpaddr-1;
1365 do {
1366 ccw++;
1367 if ((cqr->flags & CQR_FLAGS_LM_CQR) &&
1368 (ccw->cda >= (unsigned long) cqr) &&
1369 (ccw->cda < (unsigned long) cqr + PAGE_SIZE)) {
1370 /* IDAL is on the cqr lowmem page */
1371 continue;
1372 }
1373
1374 if ((cqr->flags & CQR_FLAGS_LM_IDAL) &&
1375 (ccw->cda >= (unsigned long) cqr->lowmem_idal) &&
1376 (ccw->cda < (unsigned long) cqr->lowmem_idal + PAGE_SIZE)) {
1377 /* IDAL is on a separate lowmem page */
1378 continue;
1379 }
1380
1381 /* IDAL was built by set_normalized_cda */
1382 clear_normalized_cda (ccw);
1383
1384 } while ((ccw->flags & CCW_FLAG_CC) ||
1385 (ccw->flags & CCW_FLAG_DC) );
1386 #endif
1387 /* give idal lowmem page back to lowmem_pool */
1388 if (cqr->flags & CQR_FLAGS_LM_IDAL) {
1389 lowmem = (dasd_lowmem_t *) cqr->lowmem_idal;
1390 list_add (&lowmem->list, &device->lowmem_pool);
1391 cqr->flags &= ~CQR_FLAGS_LM_IDAL;
1392 }
1393
1394 /* give cqr lowmem pages back to lowmem_pool */
1395 if (cqr->flags & CQR_FLAGS_LM_CQR) {
1396
1397 /* make the same decisions as in dasd_alloc_request */
1398 size_needed = (sizeof (ccw_req_t) + 7) & (~7L);
1399 if (size_needed + cqr->datasize <= PAGE_SIZE) {
1400 /* We kept the data with the request */
1401 size_needed += (cqr->datasize + 7) & (~7L);
1402 } else {
1403 lowmem = (dasd_lowmem_t *) cqr->data;
1404 list_add (&lowmem->list, &device->lowmem_pool);
1405 }
1406
1407 if (size_needed + cqr->cplength * sizeof(ccw1_t) > PAGE_SIZE) {
1408 /* We didn't keep the CCWs with request */
1409 lowmem = (dasd_lowmem_t *) cqr->cpaddr;
1410 list_add (&lowmem->list, &device->lowmem_pool);
1411 }
1412 lowmem = (dasd_lowmem_t *) cqr;
1413 list_add (&lowmem->list, &device->lowmem_pool);
1414 } else {
1415 ccw_free_request (cqr);
1416 }
1417 }
1418
1419 /*
1420 * function dasd_set_normalized_cda
1421 * calls set_normalized_cda to build IDALs.
1422 * If this did not work because of low memory, we try to use memory from the
1423 * lowmem pool.
1424 */
1425 int
1426 dasd_set_normalized_cda (ccw1_t *cp, unsigned long address,
1427 ccw_req_t *cqr, dasd_device_t *device)
1428 {
1429 #ifdef CONFIG_ARCH_S390X
1430 int rc;
1431 int nridaws;
1432 dasd_lowmem_t *lowmem;
1433 int count = cp->count;
1434
1435 /* use lowmem idal page if already assigned */
1436 if (!(cqr->flags & CQR_FLAGS_LM_IDAL)) {
1437 rc = set_normalized_cda (cp, (void *)address);
1438 if (rc !=-ENOMEM) {
1439 return rc;
1440 }
1441 }
1442
1443 /* get number of idal words needed */
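/* (one indirect-address word per IDA_BLOCK_SIZE-sized block touched */
/* by the data area starting at 'address' with length 'count') */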
1444 nridaws = ((address & (IDA_BLOCK_SIZE-1)) + count +
1445 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
1446
1447 /* check if we need an additional IDALs page */
1448 if (!(cqr->flags & CQR_FLAGS_LM_IDAL)) {
1449 /* we got no lowmem cqr page OR */
1450 /* there is no space left for IDALs */
1451 if ((!(cqr->flags & CQR_FLAGS_LM_CQR)) ||
1452 ((cqr->lowmem_idal_ptr + nridaws * sizeof(unsigned long)) >
1453 ((void *) cqr + PAGE_SIZE))) {
1454
1455 /* use lowmem page only for ERP or */
1456 /* if there are less than 2 requests on queue */
1457 if (device->queue.head != NULL &&
1458 device->queue.head->next != NULL &&
1459 device->queue.head->status != CQR_STATUS_ERROR) {
1460 return -ENOMEM;
1461 }
1462
1463 list_for_each_entry (lowmem, &device->lowmem_pool,
1464 list) {
1465 list_del (&lowmem->list);
1466 cqr->lowmem_idal = (void *)lowmem;
1467 cqr->lowmem_idal_ptr = (void *) lowmem;
1468 memset (cqr->lowmem_idal, 0, PAGE_SIZE);
1469 cqr->flags |= CQR_FLAGS_LM_IDAL;
1470 break;
1471 }
1472 }
1473
1474 }
1475
1476 /* now we (should) have a valid lowmem_idal_ptr and enough space for */
1477 /* the IDALs - fill the idals table */
1478 cp->flags |= CCW_FLAG_IDA;
1479 cp->cda = (__u32)(unsigned long)cqr->lowmem_idal_ptr;
1480 do {
1481 *((long*)cqr->lowmem_idal_ptr) = address;
1482 address = (address & -(IDA_BLOCK_SIZE)) + (IDA_BLOCK_SIZE);
1483 cqr->lowmem_idal_ptr += sizeof(unsigned long);
1484 nridaws --;
1485 } while ( nridaws > 0 );
1486 #else
1487 cp->cda = address;
1488 #endif
1489 return 0;
1490 }
1491
1492
1493 /********************************************************************************
1494 * SECTION: (de)queueing of requests to channel program queues
1495 ********************************************************************************/
1496
1497 /*
1498 * function dasd_chanq_enq
1499 * appends the cqr given as argument to the queue
1500 * has to be called with the queue lock (namely the s390_irq_lock) acquired
1501 */
1502 inline void
1503 dasd_chanq_enq (dasd_chanq_t * q, ccw_req_t * cqr)
1504 {
1505 if (q->head != NULL) {
1506 q->tail->next = cqr;
1507 } else
1508 q->head = cqr;
1509 cqr->next = NULL;
1510 q->tail = cqr;
1511 check_then_set (&cqr->status,
1512 CQR_STATUS_FILLED,
1513 CQR_STATUS_QUEUED);
1514
1515
1516 #ifdef DASD_PROFILE
1517 if (dasd_profile_level == DASD_PROFILE_ON) {
1518
1519 /* save profile information for non erp cqr */
1520 if (cqr->refers == NULL) {
1521 unsigned int counter = 0;
1522 ccw_req_t *ptr;
1523 dasd_device_t *device = cqr->device;
1524
1525 /* count the length of the chanq for statistics */
1526 for (ptr = q->head;
1527 ptr->next != NULL && counter <=31;
1528 ptr = ptr->next) {
1529 counter++;
1530 }
1531
1532 dasd_global_profile.dasd_io_nr_req[counter]++;
1533 device->profile.dasd_io_nr_req[counter]++;
1534 }
1535
1536 } /* end if DASD_PROFILE_ON */
1537 #endif
1538 }
1539
1540 /*
1541 * function dasd_chanq_enq_head
1542 * chains the cqr given as argument to the queue head
1543 * has to be called with the queue lock (namely the s390_irq_lock) acquired
1544 */
1545 inline void
1546 dasd_chanq_enq_head (dasd_chanq_t * q, ccw_req_t * cqr)
1547 {
1548 cqr->next = q->head;
1549 q->head = cqr;
1550 if (q->tail == NULL)
1551 q->tail = cqr;
1552
1553 check_then_set (&cqr->status,
1554 CQR_STATUS_FILLED,
1555 CQR_STATUS_QUEUED);
1556 }
1557
1558 /*
1559 * function dasd_chanq_deq
1560 * dechains the cqr given as argument from the queue
1561 * has to be called with the queue lock (namely the s390_irq_lock) acquired
1562 */
1563 inline void
1564 dasd_chanq_deq (dasd_chanq_t * q, ccw_req_t * cqr)
1565 {
1566 ccw_req_t *prev;
1567
1568 if (cqr == NULL)
1569 BUG ();
1570
1571 if (cqr == q->head) {
1572 q->head = cqr->next;
1573 if (q->head == NULL)
1574 q->tail = NULL;
1575
1576 } else {
1577 prev = q->head;
1578 while (prev && prev->next != cqr)
1579 prev = prev->next;
1580 if (prev == NULL)
1581 return; /* request not in chanq */
1582 prev->next = cqr->next;
1583 if (prev->next == NULL)
1584 q->tail = prev;
1585 }
1586 cqr->next = NULL;
1587 }
1588
1589 /********************************************************************************
1590 * SECTION: Managing the device queues etc.
1591 ********************************************************************************/
1592
1593 /*
1594 * DASD_RESREL_TIMEOUT
1595 *
1596 * A timer is used to cancel the current reserve/release request
1597 * if it doesn't complete within a certain time.
1598 */
1599 void
1600 dasd_resrel_timeout (unsigned long cqr_ptr)
1601 {
1602 dasd_device_t *device = ((ccw_req_t *) cqr_ptr)->device;
1603 ccw_req_t *cqr;
1604 unsigned long flags;
1605
1606 s390irq_spin_lock_irqsave (device->devinfo.irq,
1607 flags);
1608 cqr = device->queue.head;
1609
1610 switch (cqr->status) {
1611 case CQR_STATUS_FILLED:
1612 case CQR_STATUS_QUEUED:
1613 /* request was not started - just set to failed */
1614 cqr->status = CQR_STATUS_FAILED;
1615 break;
1616
1617 case CQR_STATUS_IN_IO:
1618 case CQR_STATUS_ERROR:
1619 if (device->discipline->term_IO (cqr) != 0)
1620 cqr->status = CQR_STATUS_FAILED;
1621 break;
1622
1623 default:
1624 ; /* DONE and FAILED are ok */
1625 }
1626
1627 dasd_schedule_bh (device);
1628
1629 s390irq_spin_unlock_irqrestore (device->devinfo.irq,
1630 flags);
1631
1632 } /* end dasd_resrel_timeout */
1633
1634 /*
1635 * Call unconditional reserve to break the reserve of another system.
1636 * Timeout the request if it doesn't succeed within a certain time.
1637 */
1638 static int
1639 dasd_steal_lock (dasd_device_t *device)
1640 {
1641 ccw_req_t *cqr;
1642 int rc = 0;
1643
1644 if (!device->discipline->steal_lock)
1645 return -EINVAL;
1646
1647 cqr = device->discipline->steal_lock (device);
1648
1649 if (cqr) {
1650 struct timer_list res_timer;
1651
1652 init_timer(&res_timer);
1653 res_timer.function = dasd_resrel_timeout;
1654 res_timer.data = (unsigned long) cqr;
1655 res_timer.expires = jiffies + 4 * HZ;
1656 add_timer(&res_timer);
1657
1658 rc = dasd_sleep_on_immediate (cqr);
1659
1660 del_timer_sync(&res_timer);
1661 dasd_free_request (cqr,
1662 device);
1663 } else {
1664 rc = -ENOMEM;
1665 }
1666
1667 return rc;
1668
1669 } /* end dasd_steal_lock */
1670
1671 /*
1672 * DASD_TERM_IO
1673 *
1674 * Attempts to terminate the current IO and set it to failed if termination
1675 * was successful.
1676 * Returns an appropriate return code.
1677 */
1678 int
1679 dasd_term_IO (ccw_req_t * cqr)
1680 {
1681 int rc = 0;
1682 dasd_device_t *device = cqr->device;
1683 int irq;
1684 int retries = 0;
1685
1686 if (!cqr) {
1687 BUG ();
1688 }
1689 irq = device->devinfo.irq;
1690 if (strncmp ((char *) &cqr->magic,
1691 device->discipline->ebcname, 4)) {
1692
1693 DEV_MESSAGE (KERN_WARNING, device,
1694 " ccw_req_t 0x%08x magic doesn't match"
1695 " discipline 0x%08x",
1696 cqr->magic,
1697 *(unsigned int *) device->discipline->name);
1698
1699 return -EINVAL;
1700 }
1701
1702 while ((retries < 5 ) &&
1703 (cqr->status == CQR_STATUS_IN_IO) ) {
1704
1705 rc = clear_IO (irq,
1706 (long)cqr,
1707 cqr->options);
1708
1709 switch (rc) {
1710 case 0: /* termination successful */
1711 check_then_set (&cqr->status,
1712 CQR_STATUS_IN_IO,
1713 CQR_STATUS_FAILED);
1714
1715 cqr->stopclk = get_clock ();
1716
1717 break;
1718 case -ENODEV:
1719 DBF_DEV_EVENT (DBF_ERR, device, "%s",
1720 "device gone, retry");
1721 break;
1722 case -EIO:
1723 DBF_DEV_EVENT (DBF_ERR, device, "%s",
1724 "I/O error, retry");
1725 break;
1726 case -EBUSY:
1727 DBF_DEV_EVENT (DBF_ERR, device, "%s",
1728 "device busy, retry later");
1729 break;
1730 default:
1731 DEV_MESSAGE (KERN_ERR, device,
1732 "line %d unknown RC=%d, please "
1733 "report to linux390@de.ibm.com",
1734 __LINE__,
1735 rc);
1736 BUG ();
1737 break;
1738 }
1739
1740 dasd_schedule_bh (device);
1741 retries ++;
1742 }
1743 return rc;
1744 }
1745
1746 /*
1747 * function dasd_start_IO
1748 * attempts to start the IO and returns an appropriate return code
1749 */
1750 int
1751 dasd_start_IO (ccw_req_t * cqr)
1752 {
1753 int rc = 0;
1754 dasd_device_t *device = cqr->device;
1755 int irq;
1756 unsigned long long now;
1757
1758 if (!cqr) {
1759 BUG ();
1760 }
1761 irq = device->devinfo.irq;
1762 if (strncmp ((char *) &cqr->magic,
1763 device->discipline->ebcname, 4)) {
1764
1765 DEV_MESSAGE (KERN_ERR, device,
1766 " ccw_req_t 0x%08x magic doesn't match"
1767 " discipline 0x%08x",
1768 cqr->magic,
1769 *(unsigned int *) device->discipline->name);
1770
1771 return -EINVAL;
1772 }
1773
1774 now = get_clock ();
1775
1776 cqr->startclk = now;
1777 if (!device->stopped)
1778 rc = do_IO (irq, cqr->cpaddr, (long) cqr, cqr->lpm, cqr->options);
1779 else
1780 rc = -EBUSY;
1781
1782 switch (rc) {
1783 case 0:
1784 if (cqr->options & DOIO_WAIT_FOR_INTERRUPT) {
1785 /* request already finished (synchronous IO) */
1786 check_then_set (&cqr->status,
1787 CQR_STATUS_QUEUED,
1788 CQR_STATUS_DONE);
1789
1790 cqr->stopclk = now;
1791 dasd_schedule_bh (device);
1792
1793 } else {
1794 check_then_set (&cqr->status,
1795 CQR_STATUS_QUEUED,
1796 CQR_STATUS_IN_IO);
1797 }
1798 break;
1799 case -EBUSY:
1800 DBF_DEV_EVENT (DBF_ERR, device, "%s",
1801 "device busy, retry later");
1802
1803 if (!timer_pending(&device->timer)) {
1804 init_timer (&device->timer);
1805 device->timer.function = dasd_schedule_bh_timed;
1806 device->timer.data = (unsigned long) device;
1807 device->timer.expires = jiffies + (HZ >> 4);
1808 add_timer (&device->timer);
1809 } else {
1810 mod_timer(&device->timer, jiffies + (HZ >> 4));
1811 }
1812 break;
1813 case -ETIMEDOUT:
1814 DBF_DEV_EVENT (DBF_ERR, device, "%s",
1815 "request timeout - terminated");
1816 case -ENODEV:
1817 case -EIO:
1818 check_then_set (&cqr->status,
1819 CQR_STATUS_QUEUED,
1820 CQR_STATUS_FAILED);
1821
1822 cqr->stopclk = now;
1823 dasd_schedule_bh (device);
1824 break;
1825 default:
1826 DEV_MESSAGE (KERN_ERR, device,
1827 "line %d unknown RC=%d, please report"
1828 " to linux390@de.ibm.com",
1829 __LINE__,
1830 rc);
1831 BUG ();
1832 break;
1833 }
1834
1835 return rc;
1836 }
1837
1838 /*
1839 * function dasd_sleep_on_req
1840 * attempts to start the IO and waits for completion
1841 */
1842 int
1843 dasd_sleep_on_req (ccw_req_t * cqr)
1844 {
1845 unsigned long flags;
1846 dasd_device_t *device = (dasd_device_t *) cqr->device;
1847
1848 if (signal_pending(current)) {
1849 return -ERESTARTSYS;
1850 }
1851 s390irq_spin_lock_irqsave (device->devinfo.irq,
1852 flags);
1853
1854 dasd_chanq_enq (&device->queue,
1855 cqr);
1856
1857 /* let the bh start the request to keep them in order */
1858 dasd_schedule_bh (device);
1859
1860 s390irq_spin_unlock_irqrestore (device->devinfo.irq,
1861 flags);
1862
1863 wait_event (device->wait_q,
1864 cqr->flags & CQR_FLAGS_FINALIZED);
1865
1866 if (cqr->status == CQR_STATUS_FAILED) {
1867 return -EIO;
1868 }
1869
1870 return 0;
1871
1872 } /* end dasd_sleep_on_req */
1873
1874 /*
1875 * function dasd_sleep_on_immediate
1876 * same as dasd_sleep_on_req, but attempts to start the IO immediately
1877 * (killing the actual running IO).
1878 */
1879 static int
1880 dasd_sleep_on_immediate (ccw_req_t *cqr)
1881 {
1882 unsigned long flags;
1883 dasd_device_t *device = (dasd_device_t *) cqr->device;
1884
1885 if (signal_pending(current))
1886 return -ERESTARTSYS;
1887
1888 s390irq_spin_lock_irqsave (device->devinfo.irq,
1889 flags);
1890
1891 /* terminate currently running IO */
1892 if (device->queue.head->status == CQR_STATUS_IN_IO) {
1893
1894 device->discipline->term_IO (device->queue.head);
1895
1896 device->queue.head->status = CQR_STATUS_QUEUED;
1897 }
1898
1899 dasd_chanq_enq_head (&device->queue,
1900 cqr);
1901
1902 /* let the bh start the request to keep them in order */
1903 dasd_schedule_bh (device);
1904
1905 s390irq_spin_unlock_irqrestore (device->devinfo.irq,
1906 flags);
1907
1908 wait_event (device->wait_q,
1909 cqr->flags & CQR_FLAGS_FINALIZED);
1910
1911 if (cqr->status == CQR_STATUS_FAILED) {
1912 return -EIO;
1913 }
1914
1915 return 0;
1916
1917 } /* end dasd_sleep_on_immediate */
1918
1919 /*
1920 * function dasd_end_request
1921 * notifies the buffer cache about a finalized request
1922 * FIXME: for requests split into several cqrs
1923 */
1924 static inline void
1925 dasd_end_request (struct request *req, int uptodate)
1926 {
1927 while (end_that_request_first (req, uptodate, DASD_NAME)) {
1928 }
1929 #ifndef DEVICE_NO_RANDOM
1930 add_blkdev_randomness (MAJOR (req->rq_dev));
1931 #endif
1932 end_that_request_last (req);
1933 return;
1934 }
1935
1936 /*
1937 * function dasd_get_queue
1938 * returns the queue corresponding to a device behind a kdev
1939 */
1940 static request_queue_t *
1941 dasd_get_queue (kdev_t kdev)
1942 {
1943 dasd_device_t *device = dasd_device_from_kdev (kdev);
1944
1945 if (!device) {
1946 return NULL;
1947 }
1948
1949 return device->request_queue;
1950 }
1951
1952 /*
1953 * function dasd_check_expire_time
1954 * check the request given as argument for expiration
1955 * and returns 0 if not yet expired, -EIO otherwise
1956 */
1957 static inline int
1958 dasd_check_expire_time (ccw_req_t * cqr)
1959 {
1960 unsigned long long now;
1961 int rc = 0;
1962
1963 now = get_clock ();
1964
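/* startclk and expires are raw TOD clock values; shifting right by
 * 12 bits converts them to microseconds for the debug message below
 * (assuming the usual S/390 TOD format with bit 51 = 1 microsecond) */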
1965 if (cqr->expires &&
1966 cqr->expires + cqr->startclk < now) {
1967
1968 DBF_DEV_EVENT (DBF_WARNING, ((dasd_device_t *) cqr->device),
1969 "IO timeout 0x%08lx%08lx usecs in req %p",
1970 (long) (cqr->expires >> 44),
1971 (long) (cqr->expires >> 12),
1972 cqr);
1973
1974 cqr->expires <<= 1;
1975 rc = -EIO;
1976 }
1977 return rc;
1978 }
1979
1980 /*
1981 * function dasd_finalize_request
1982 * implements the actions to perform when a request is finally finished,
1983 * namely in status CQR_STATUS_DONE || CQR_STATUS_FAILED
1984 */
1985 static inline void
1986 dasd_finalize_request (ccw_req_t * cqr)
1987 {
1988 dasd_device_t *device = cqr->device;
1989
1990 cqr->endclk = get_clock ();
1991
1992 if (cqr->req) {
1993
1994 #ifdef DASD_PROFILE
1995 if (dasd_profile_level == DASD_PROFILE_ON) {
1996 dasd_profile_add (cqr);
1997 }
1998 #endif
1999
2000 dasd_end_request (cqr->req, (cqr->status == CQR_STATUS_DONE));
2001 /* free request if nobody is waiting on it */
2002 dasd_free_request (cqr, cqr->device);
2003 } else {
2004 if (cqr == device->init_cqr && /* bring late devices online */
2005 device->level <= DASD_STATE_ONLINE ) {
2006 if (!timer_pending(&device->late_timer)) {
2007 init_timer(&device->late_timer);
2008 device->late_timer.function = dasd_enable_single_device;
2009 device->late_timer.data = (unsigned long) device;
2010 device->late_timer.expires = jiffies;
2011 add_timer(&device->late_timer);
2012 } else {
2013 mod_timer(&device->late_timer, jiffies);
2014 }
2015 } else {
2016 /* notify sleep_on_xxx about finished cqr */
2017 cqr->flags |= CQR_FLAGS_FINALIZED;
2018 }
2019
2020 /* notify sleeping task about finished postprocessing */
2021 wake_up (&device->wait_q);
2022
2023 }
2024 return;
2025 }
2026
2027 /*
2028 * function dasd_process_queues
2029 * transfers the requests on the queue given as argument to the chanq
2030 * if possible, the request is started on a fastpath
2031 */
2032 static void
2033 dasd_process_queues (dasd_device_t * device)
2034 {
2035 unsigned long flags;
2036 struct request *req;
2037 request_queue_t *queue = device->request_queue;
2038 dasd_chanq_t *qp = &device->queue;
2039 int irq = device->devinfo.irq;
2040 ccw_req_t *final_requests = NULL;
2041 int chanq_max_size = DASD_CHANQ_MAX_SIZE;
2042 ccw_req_t *cqr = NULL, *temp;
2043 dasd_erp_postaction_fn_t erp_postaction;
2044
2045 s390irq_spin_lock_irqsave (irq, flags);
2046
2047 /* First we dechain the requests that have been processed with completed status */
2048 while (qp->head &&
2049 ((qp->head->status == CQR_STATUS_DONE ) ||
2050 (qp->head->status == CQR_STATUS_FAILED) ||
2051 (qp->head->status == CQR_STATUS_ERROR ) )) {
2052
2053 dasd_erp_action_fn_t erp_action;
2054 ccw_req_t *erp_cqr = NULL;
2055
2056 /* preprocess requests with CQR_STATUS_ERROR */
2057 if (qp->head->status == CQR_STATUS_ERROR) {
2058
2059 qp->head->retries--;
2060
2061 if ((qp->head->dstat == NULL ) ||
2062 ((qp->head->dstat->flag & DEVSTAT_FLAG_SENSE_AVAIL) == 0 ) ||
2063 (device->discipline->erp_action == NULL ) ||
2064 ((erp_action = device->discipline->erp_action (qp->head)) == NULL) ) {
2065
2066 erp_cqr = dasd_default_erp_action (qp->head);
2067
2068 } else { /* call discipline ERP action */
2069
2070 erp_cqr = erp_action (qp->head);
2071 }
2072 continue;
2073
2074 } else if (qp->head->refers) { /* we deal with a finished ERP */
2075
2076 if (qp->head->status == CQR_STATUS_DONE) {
2077
2078 DBF_DEV_EVENT (DBF_NOTICE, device, "%s",
2079 "ERP successful");
2080 } else {
2081
2082 DEV_MESSAGE (KERN_WARNING, device, "%s",
2083 "ERP unsuccessful");
2084 }
2085
2086 if ((device->discipline->erp_postaction == NULL )||
2087 ((erp_postaction = device->discipline->erp_postaction (qp->head)) == NULL) ) {
2088
2089 dasd_default_erp_postaction (qp->head);
2090
2091 } else { /* call ERP postaction of discipline */
2092
2093 erp_postaction (qp->head);
2094 }
2095
2096 continue;
2097 }
2098
2099 /* dechain request now */
2100 if (final_requests == NULL)
2101 final_requests = qp->head;
2102
2103 cqr = qp->head;
2104 qp->head = qp->head->next;
2105
2106 if (qp->head == NULL)
2107 qp->tail = NULL;
2108
2109 } /* end while over completed requests */
2110
2111 if (cqr)
2112 cqr->next = NULL; /* terminate final_requests queue */
2113
2114 /* Now clean the requests with final status */
2115 while (final_requests) {
2116 temp = final_requests;
2117 final_requests = temp->next;
2118 dasd_finalize_request (temp);
2119 }
2120
2121 /* Now we try to fetch requests from the request queue */
2122 for (temp = qp->head; temp != NULL; temp = temp->next) {
2123 if (temp->status == CQR_STATUS_QUEUED)
2124 chanq_max_size--;
2125 }
2126
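/* transfer requests from the block request queue to the channel queue
 * as long as the device is unplugged, the block queue is not plugged,
 * requests are available, and the channel queue is empty or still has
 * room (chanq_max_size counts down from DASD_CHANQ_MAX_SIZE) */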
2127 while ((atomic_read(&device->plugged) == 0) &&
2128 (queue) &&
2129 (!queue->plugged) &&
2130 (!list_empty (&queue->queue_head)) &&
2131 (req = dasd_next_request (queue)) &&
2132 (qp->head == NULL || chanq_max_size > 0)) {
2133 /* queue empty or certain criteria fulfilled -> transfer */
2134 cqr = NULL;
2135 if (is_read_only(device->kdev) && req->cmd == WRITE) {
2136
2137 DBF_EVENT (DBF_ERR,
2138 "(%04x) Rejecting write request %p",
2139 device->devinfo.devno,
2140 req);
2141 dasd_dequeue_request (queue,req);
2142 dasd_end_request (req, 0);
2143 continue;
2144 }
2145 cqr = device->discipline->build_cp_from_req (device, req);
2146
2147 if (cqr == NULL || IS_ERR(cqr)) {
2148 if (cqr == ERR_PTR(-ENOMEM)) {
2149 break;
2150 }
2151
2152 MESSAGE (KERN_EMERG,
2153 "(%04x) CCW creation failed "
2154 "on request %p",
2155 device->devinfo.devno, req);
2156 dasd_dequeue_request (queue,req);
2157 dasd_end_request (req, 0);
2158 continue;
2159 }
2160 dasd_dequeue_request (queue, req);
2161 dasd_chanq_enq (qp, cqr);
2162 chanq_max_size--;
2163
2164 }
2165
2166 /* we process the requests with non-final status */
2167 if (qp->head) {
2168 switch (qp->head->status) {
2169 case CQR_STATUS_QUEUED:
2170 /* try to start the first I/O that can be started */
2171 if (device->discipline->start_IO == NULL)
2172 BUG ();
2173 device->discipline->start_IO(qp->head);
2174 break;
2175 case CQR_STATUS_IN_IO:
2176 /* Check whether to invoke the missing interrupt handler */
2177 if (dasd_check_expire_time (qp->head)) {
2178 /* to be filled with MIH */
2179 }
2180 break;
2181 default:
2182 MESSAGE (KERN_EMERG,
2183 "invalid cqr (%p) detected with status %02x ",
2184 qp->head,
2185 qp->head->status);
2186 BUG ();
2187 }
2188 }
2189 s390irq_spin_unlock_irqrestore (irq, flags);
2190
2191 } /* end dasd_process_queues */
2192
2193 /*
2194 * function dasd_run_bh
2195 * acquires the locks needed and then runs the bh
2196 */
2197 static void
2198 dasd_run_bh (dasd_device_t * device)
2199 {
2200 long flags;
2201 spin_lock_irqsave (&io_request_lock, flags);
2202 atomic_set (&device->bh_scheduled, 0);
2203 dasd_process_queues (device);
2204 spin_unlock_irqrestore (&io_request_lock, flags);
2205 }
2206
2207 /*
2208 * function dasd_schedule_bh_timed
2209 * retriggers the dasd_schedule_bh function (called by timer queue)
2210 */
2211 void
2212 dasd_schedule_bh_timed (unsigned long device_ptr)
2213 {
2214 dasd_device_t *device = (dasd_device_t *) device_ptr;
2215
2216 dasd_schedule_bh (device);
2217 }
2218
2219 /*
2220 * function dasd_schedule_bh
2221 * schedules the request_fn to run with next run_bh cycle
2222 */
2223 void
2224 dasd_schedule_bh (dasd_device_t * device)
2225 {
2226 /* Protect against rescheduling, when already running */
2227 if (atomic_compare_and_swap (0, 1, &device->bh_scheduled)) {
2228 return;
2229 }
2230
2231 INIT_LIST_HEAD (&device->bh_tq.list);
2232 device->bh_tq.sync = 0;
2233 device->bh_tq.routine = (void *) (void *) dasd_run_bh;
2234 device->bh_tq.data = device;
2235
2236 queue_task (&device->bh_tq, &tq_immediate);
2237 mark_bh (IMMEDIATE_BH);
2238 return;
2239 }
2240
2241 /*
2242 * function do_dasd_request
2243 * is called from ll_rw_blk.c and acts as the entry point that
2244 * calls dasd_process_queues
2245 */
2246 static void
2247 do_dasd_request (request_queue_t * queue)
2248 {
2249 dasd_device_t *device = (dasd_device_t *)queue->queuedata;
2250 dasd_process_queues (device);
2251 }
2252
2253 /*
2254 * function dasd_handle_state_change_pending
2255 *
2256 * handles the state change pending interrupt.
2257 */
2258 void
2259 dasd_handle_state_change_pending (devstat_t * stat)
2260 {
2261
2262 dasd_device_t **device_addr, *device;
2263 ccw_req_t *cqr;
2264
2265 device_addr = dasd_device_from_devno (stat->devno);
2266
2267 if (!device_addr)
2268 return;
2269
2270 device = *device_addr;
2271 if (!device)
2272 return;
2273
2274 /* restart all 'running' IO on queue */
2275 cqr = device->queue.head;
2276 while (cqr) {
2277 if (cqr->status == CQR_STATUS_IN_IO) {
2278 cqr->status = CQR_STATUS_QUEUED;
2279 }
2280 cqr = cqr->next;
2281 }
2282
2283 DEV_MESSAGE (KERN_DEBUG, device, "%s",
2284 "device request queue restarted by "
2285 "state change pending interrupt");
2286
2287 del_timer_sync (&(device->blocking_timer));
2288 device->stopped &= ~DASD_STOPPED_PENDING;
2289 dasd_schedule_bh (device);
2290
2291 } /* end dasd_handle_state_change_pending */
2292
2293 /*
2294 * function dasd_int_handler
2295 * is the DASD driver's default interrupt handler for SSCH-IO
2296 */
2297 void
2298 dasd_int_handler (int irq, void *ds, struct pt_regs *regs)
2299 {
2300 int ip;
2301 ccw_req_t *cqr;
2302 dasd_device_t *device;
2303 dasd_era_t era;
2304 devstat_t *stat = (devstat_t *)ds;
2305
2306 if (stat == NULL) {
2307 BUG();
2308 }
2309
2310 DBF_EVENT (DBF_DEBUG,
2311 "Int: IRQ %02x, CS/DS %04x, flag %08x, devno %04x, ip %08x",
2312 irq,
2313 ((stat->cstat<<8)|stat->dstat),
2314 stat->flag,
2315 stat->devno,
2316 stat->intparm);
2317
2318 /* first of all check for state change pending interrupt */
2319 if ((stat->dstat & DEV_STAT_ATTENTION ) &&
2320 (stat->dstat & DEV_STAT_DEV_END ) &&
2321 (stat->dstat & DEV_STAT_UNIT_EXCEP) ) {
2322
2323 DBF_EVENT (DBF_NOTICE,
2324 "State change Interrupt: %04x",
2325 stat->devno);
2326
2327 dasd_handle_state_change_pending (stat);
2328 return;
2329 }
2330
2331 ip = stat->intparm;
2332 if (!ip) { /* no intparm: unsolicited interrupt */
2333
2334 MESSAGE (KERN_DEBUG,
2335 "unsolicited interrupt: irq 0x%x devno %04x",
2336 irq,
2337 stat->devno);
2338 return;
2339 }
2340
2341 if (ip & 0x80000001) { /* check for invalid 'cqr' address */
2342
2343 MESSAGE (KERN_DEBUG,
2344 "spurious interrupt: irq 0x%x devno %04x, parm %08x",
2345 irq,
2346 stat->devno,ip);
2347 return;
2348 }
2349
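/* the interruption parameter holds the address of the ccw_req_t that
 * was handed to do_IO when the request was started */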
2350 cqr = (ccw_req_t *)(long)ip;
2351
2352 /* check status - the request might have been killed because of dynamic detach */
2353 if (cqr->status != CQR_STATUS_IN_IO) {
2354
2355 MESSAGE (KERN_DEBUG,
2356 "invalid status: irq 0x%x devno %04x, status %02x",
2357 irq,
2358 stat->devno,
2359 cqr->status);
2360 return;
2361 }
2362
2363 /* some consistency checks */
2364 device = (dasd_device_t *) cqr->device;
2365 if (device == NULL ||
2366 device != ds-offsetof(dasd_device_t,dev_status)) {
2367 BUG();
2368 }
2369 if (device->devinfo.irq != irq) {
2370 BUG();
2371 }
2372 if (strncmp (device->discipline->ebcname, (char *) &cqr->magic, 4)) {
2373 BUG();
2374 }
2375
2376 /* first of all lets try to find out the appropriate era_action */
2377 if (stat->flag & DEVSTAT_HALT_FUNCTION) {
2378 era = dasd_era_fatal;
2379
2380 } else if (stat->flag & DEVSTAT_FINAL_STATUS &&
2381 stat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
2382 stat->cstat == 0) {
2383 /* received 'ok' for running IO */
2384 era = dasd_era_none;
2385
2386 } else if (stat->flag & DEVSTAT_FLAG_SENSE_AVAIL) {
2387 /* got sense data */
2388 if (cqr->dstat == NULL)
2389 cqr->dstat = kmalloc (sizeof (devstat_t), GFP_ATOMIC);
2390 if (cqr->dstat) {
2391 memcpy (cqr->dstat, stat, sizeof (devstat_t));
2392 } else {
2393 MESSAGE (KERN_DEBUG, "%s",
2394 "no memory for dstat...ignoring");
2395 }
2396 #ifdef ERP_DEBUG
2397 if (device->discipline &&
2398 device->discipline->dump_sense ) {
2399
2400 device->discipline->dump_sense (device,
2401 cqr);
2402 }
2403 #endif
2404 if (device->discipline->examine_error == NULL) {
2405 era = dasd_era_recover;
2406 } else {
2407 era = device->discipline->examine_error (cqr, stat);
2408 }
2409
2410 } else if (stat->flag & DEVSTAT_NOT_OPER) {
2411 /* path became offline or similar */
2412 /* => retry to see if there are any other paths available */
2413 DEV_MESSAGE (KERN_DEBUG, device, "%s",
2414 "Device or a path became not operational while in IO");
2415 era = dasd_era_recover;
2416
2417 } else if (stat->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END) ||
2418 stat->cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN) ) {
2419 /* received device state apart from (channel end & device end) */
2420 /* OR any kind of channel check (e.g. IFCC, DATA_CHECK or ..... */
2421 /* we got no sense data, therefore we just retry */
2422 DEV_MESSAGE (KERN_DEBUG, device,
2423 "Status without sense (IFCC,...) CS/DS %04x flag %08x",
2424 ((stat->cstat<<8)|stat->dstat),
2425 stat->flag);
2426 era = dasd_era_recover;
2427
2428 } else {
2429 /* any other kind of interrupt - just retry */
2430 DEV_MESSAGE (KERN_DEBUG, device,
2431 "Got unclassified interrupt CS/DS %04x flag %08x",
2432 ((stat->cstat<<8)|stat->dstat),
2433 stat->flag);
2434 era = dasd_era_recover;
2435 }
2436
2437 switch (era) {
2438 case dasd_era_none:
2439 check_then_set(&cqr->status,
2440 CQR_STATUS_IN_IO,
2441 CQR_STATUS_DONE);
2442 cqr->stopclk = get_clock ();
2443 /* start the next queued request if possible -> fast_io */
2444 if (cqr->next &&
2445 cqr->next->status == CQR_STATUS_QUEUED) {
2446 if (device->discipline->start_IO (cqr->next) != 0) {
2447 MESSAGE (KERN_WARNING, "%s",
2448 "Interrupt fastpath failed!");
2449 }
2450 }
2451 break;
2452 case dasd_era_fatal:
2453 check_then_set (&cqr->status,
2454 CQR_STATUS_IN_IO,
2455 CQR_STATUS_FAILED);
2456 cqr->stopclk = get_clock ();
2457 break;
2458 case dasd_era_recover:
2459 check_then_set (&cqr->status,
2460 CQR_STATUS_IN_IO,
2461 CQR_STATUS_ERROR);
2462 break;
2463 default:
2464 BUG ();
2465 }
2466
2467 /* handle special device initialization request */
2468 if ( cqr == device->init_cqr &&
2469 ( cqr->status == CQR_STATUS_DONE ||
2470 cqr->status == CQR_STATUS_FAILED )){
2471 dasd_state_init_to_ready(device);
2472 if ( atomic_read(&dasd_init_pending) == 0)
2473 wake_up (&dasd_init_waitq);
2474 }
2475 dasd_schedule_bh (device);
2476
2477 } /* end dasd_int_handler */
2478
2479 /********************************************************************************
2480 * SECTION: Some stuff related to error recovery
2481 ********************************************************************************/
2482
2483 /*
2484 * DEFAULT_ERP_ACTION
2485 *
2486 * DESCRIPTION
2487 * just retries the current cqr
2488 *
2489 * PARAMETER
2490 * cqr failed CQR
2491 *
2492 * RETURN VALUES
2493 * cqr modified CQR
2494 */
2495 ccw_req_t *
2496 dasd_default_erp_action (ccw_req_t * cqr)
2497 {
2498
2499 dasd_device_t *device = cqr->device;
2500 // just retry - there is nothing to save ... I got no sense data....
2501 if (cqr->retries > 0) {
2502 DEV_MESSAGE (KERN_DEBUG, device,
2503 "default ERP called (%i retries left)",
2504 cqr->retries);
2505
2506 check_then_set (&cqr->status,
2507 CQR_STATUS_ERROR,
2508 CQR_STATUS_QUEUED);
2509 } else {
2510 DEV_MESSAGE (KERN_WARNING, device, "%s",
2511 "default ERP called (NO retry left)");
2512
2513 check_then_set (&cqr->status,
2514 CQR_STATUS_ERROR,
2515 CQR_STATUS_FAILED);
2516
2517 cqr->stopclk = get_clock ();
2518 }
2519 return cqr;
2520 } /* end dasd_default_erp_action */
2521
2522 /*
2523 * DEFAULT_ERP_POSTACTION
2524 *
2525 * DESCRIPTION
2526 * Frees all ERPs of the current ERP Chain and sets the status
2527 * of the original CQR either to CQR_STATUS_DONE if ERP was successful
2528 * or to CQR_STATUS_FAILED if ERP was NOT successful.
2529 * NOTE: This function is only called if no discipline postaction
2530 * is available
2531 *
2532 * PARAMETER
2533 * erp current erp_head
2534 *
2535 * RETURN VALUES
2536 * cqr pointer to the original CQR
2537 */
2538 ccw_req_t *
2539 dasd_default_erp_postaction (ccw_req_t *erp)
2540 {
2541
2542 ccw_req_t *cqr = NULL,
2543 *free_erp = NULL;
2544 dasd_device_t *device = erp->device;
2545 int success;
2546
2547 if (erp->refers == NULL ||
2548 erp->function == NULL ) {
2549
2550 BUG ();
2551 }
2552
2553 if (erp->status == CQR_STATUS_DONE)
2554 success = 1;
2555 else
2556 success = 0;
2557
2558 /* free all ERPs - but NOT the original cqr */
2559 while (erp->refers != NULL) {
2560
2561 free_erp = erp;
2562 erp = erp->refers;
2563
2564 /* remove the request from the device queue */
2565 dasd_chanq_deq (&device->queue,
2566 free_erp);
2567
2568 /* free the finished erp request */
2569 dasd_free_request (free_erp, free_erp->device);
2570 }
2571
2572 /* save ptr to original cqr */
2573 cqr = erp;
2574
2575 /* set corresponding status for original cqr */
2576 if (success) {
2577 cqr->status = CQR_STATUS_DONE;
2578 } else {
2579 cqr->status = CQR_STATUS_FAILED;
2580 cqr->stopclk = get_clock ();
2581 }
2582
2583 return cqr;
2584
2585 } /* end default_erp_postaction */
2586
2587 /********************************************************************************
2588 * SECTION: The helpers of the struct file_operations
2589 ********************************************************************************/
2590
2591 /*
2592 * function dasd_format
2593 * performs formatting of _device_ according to _fdata_
2594 * Note: The discipline's format_function is assumed to deliver formatting
2595 * commands to format a single unit of the device. In terms of the ECKD
2596 * devices this means CCWs are generated to format a single track.
2597 */
2598
2599 static int
2600 dasd_format (dasd_device_t * device, format_data_t * fdata)
2601 {
2602 int rc = 0;
2603 int openct = atomic_read (&device->open_count);
2604 ccw_req_t *req;
2605
2606 if (openct > 1) {
2607
2608 DEV_MESSAGE (KERN_WARNING, device, "%s",
2609 "dasd_format: device is open! "
2610 "expect errors.");
2611 }
2612
2613 DBF_DEV_EVENT (DBF_NOTICE, device,
2614 "formatting units %d to %d (%d B blocks) flags %d",
2615 fdata->start_unit,
2616 fdata->stop_unit,
2617 fdata->blksize,
2618 fdata->intensity);
2619
2620 while ((!rc) && (fdata->start_unit <= fdata->stop_unit)) {
2621
2622 if (device->discipline->format_device == NULL)
2623 break;
2624
2625 req = device->discipline->format_device (device, fdata);
2626 if (req == NULL) {
2627 rc = -ENOMEM;
2628 break;
2629 }
2630
2631 rc = dasd_sleep_on_req (req);
2632 dasd_free_request (req, device); /* request is no longer used */
2633
2634 if ( rc ) {
2635 if (rc != -ERESTARTSYS )
2636 DEV_MESSAGE (KERN_WARNING, device,
2637 " Formatting of unit %d failed"
2638 " with rc = %d",
2639 fdata->start_unit, rc);
2640 break;
2641 }
2642 fdata->start_unit++;
2643 }
2644 return rc;
2645 } /* end dasd_format */
2646
2647 static struct list_head dasd_ioctls = LIST_HEAD_INIT (dasd_ioctls);
2648
2649 static dasd_ioctl_list_t *
2650 dasd_find_ioctl (int no)
2651 {
2652 struct list_head *curr;
2653 list_for_each (curr, &dasd_ioctls) {
2654 if (list_entry (curr, dasd_ioctl_list_t, list)->no == no) {
2655 return list_entry (curr, dasd_ioctl_list_t, list);
2656 }
2657 }
2658 return NULL;
2659 }
2660
2661 int
2662 dasd_ioctl_no_register (struct module *owner, int no, dasd_ioctl_fn_t handler)
2663 {
2664 dasd_ioctl_list_t *new;
2665 if (dasd_find_ioctl (no))
2666 return -EBUSY;
2667 new = kmalloc (sizeof (dasd_ioctl_list_t), GFP_KERNEL);
2668 if (new == NULL)
2669 return -ENOMEM;
2670 new->owner = owner;
2671 new->no = no;
2672 new->handler = handler;
2673 list_add (&new->list, &dasd_ioctls);
2674 MOD_INC_USE_COUNT;
2675 return 0;
2676 }
2677
2678 int
2679 dasd_ioctl_no_unregister (struct module *owner, int no, dasd_ioctl_fn_t handler)
2680 {
2681 dasd_ioctl_list_t *old = dasd_find_ioctl (no);
2682 if (old == NULL)
2683 return -ENOENT;
2684 if (old->no != no || old->handler != handler || owner != old->owner )
2685 return -EINVAL;
2686 list_del (&old->list);
2687 kfree (old);
2688 MOD_DEC_USE_COUNT;
2689 return 0;
2690 }
2691
2692 /*
2693 * handle the re-read partition table IOCTL (BLKRRPART)
2694 */
2695 static int
2696 dasd_revalidate (dasd_device_t * device)
2697 {
2698 int rc = 0;
2699 int i;
2700 kdev_t kdev = device->kdev;
2701 int openct = atomic_read (&device->open_count);
2702 int start = MINOR (kdev);
2703 if (openct != 1) {
2704
2705 DEV_MESSAGE (KERN_WARNING, device, "%s",
2706 "BLKRRPART: device is open! expect errors.");
2707 }
2708 for (i = (1 << DASD_PARTN_BITS) - 1; i >= 0; i--) {
2709 int major = device->major_info->gendisk.major;
2710 invalidate_device(MKDEV (major, start+i), 1);
2711 }
2712 dasd_destroy_partitions(device);
2713 dasd_setup_partitions(device);
2714 return rc;
2715
2716 }
2717
2718 /*
2719 * function do_dasd_ioctl
2720 * Implementation of the DASD API.
2721 * Changes to the API have to stay binary compatible with previous
2722 * versions of the user-space applications, i.e. any already existing
2723 * tool (e.g. dasdfmt) must keep working with the new kernel API.
2724 */
2725
2726 static int
2727 do_dasd_ioctl (struct inode *inp, /* unsigned */ int no, unsigned long data)
2728 {
2729 int rc = 0;
2730 dasd_device_t *device = dasd_device_from_kdev (inp->i_rdev);
2731 major_info_t *major_info;
2732
2733 if (!device) {
2734
2735 MESSAGE (KERN_WARNING,
2736 "No device registered as device (%d:%d)",
2737 MAJOR (inp->i_rdev),
2738 MINOR (inp->i_rdev));
2739
2740 return -EINVAL;
2741 }
2742 if ((_IOC_DIR (no) != _IOC_NONE) && (data == 0)) {
2743 PRINT_DEBUG ("empty data ptr");
2744 return -EINVAL;
2745 }
2746 major_info = device->major_info;
2747
2748 DBF_DEV_EVENT (DBF_DEBUG, device,
2749 "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx",
2750 no,
2751 (_IOC_DIR (no) == _IOC_NONE ? "0" :
2752 _IOC_DIR (no) == _IOC_READ ? "r" :
2753 _IOC_DIR (no) == _IOC_WRITE ? "w" :
2754 _IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u"),
2755 _IOC_TYPE (no),
2756 _IOC_NR (no),
2757 _IOC_SIZE (no),
2758 data);
2759
2760 switch (no) {
2761 case DASDAPIVER: { /* return dasd API version */
2762 int ver = DASD_API_VERSION;
2763 rc = put_user(ver, (int *) data);
2764 break;
2765 }
2766 case BLKGETSIZE: { /* Return device size in # of sectors */
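/* gendisk.sizes[] is kept in 1 kB units, so shift left by one to
 * report 512-byte sectors here (and by 10 to report bytes for
 * BLKGETSIZE64 below) */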
2767 long blocks = major_info->gendisk.sizes
2768 [MINOR (inp->i_rdev)] << 1;
2769 rc = put_user(blocks, (long *) data);
2770 break;
2771 }
2772 case BLKGETSIZE64:{
2773 u64 blocks = major_info->gendisk.sizes
2774 [MINOR (inp->i_rdev)];
2775 rc = put_user(blocks << 10, (u64 *) data);
2776 break;
2777 }
2778 case BLKRRPART: { /* reread partition table */
2779 if (!capable (CAP_SYS_ADMIN)) {
2780 rc = -EACCES;
2781 break;
2782 }
2783 rc = dasd_revalidate (device);
2784 break;
2785 }
2786 case HDIO_GETGEO: { /* return disk geometry */
2787 struct hd_geometry geo = { 0, };
2788 rc = dasd_fillgeo (inp->i_rdev, &geo);
2789 if (rc)
2790 break;
2791
2792 rc = copy_to_user ((struct hd_geometry *) data, &geo,
2793 sizeof (struct hd_geometry));
2794 if (rc)
2795 rc = -EFAULT;
2796 break;
2797 }
2798 case BIODASDDISABLE: { /* disable device */
2799 if (!capable (CAP_SYS_ADMIN)) {
2800 rc = -EACCES;
2801 break;
2802 }
2803
2804 if ( device->level > DASD_STATE_ACCEPT) {
2805 dasd_deactivate_queue(device);
2806 if ( device->request_queue)
2807 dasd_flush_request_queues(device,0);
2808 dasd_flush_chanq(device,0);
2809 dasd_disable_blkdev(device);
2810 dasd_set_device_level (device->devinfo.devno,
2811 device->discipline,
2812 DASD_STATE_ACCEPT);
2813 }
2814
2815 break;
2816 }
2817 case BIODASDENABLE: { /* enable device */
2818 dasd_range_t range = {
2819 from: device->devinfo.devno,
2820 to: device->devinfo.devno
2821 };
2822 if (!capable (CAP_SYS_ADMIN)) {
2823 rc = -EACCES;
2824 break;
2825 }
2826 dasd_enable_ranges (&range, device->discipline, 0);
2827 break;
2828 }
2829 case BIODASDFMT: { /* format device */
2830 /* fdata == NULL is no longer a valid arg to dasd_format ! */
2831 int partn = MINOR (inp->i_rdev) &
2832 ((1 << major_info->gendisk.minor_shift) - 1);
2833 format_data_t fdata;
2834
2835 if (!capable (CAP_SYS_ADMIN)) {
2836 rc = -EACCES;
2837 break;
2838 }
2839 if (dasd_features_from_devno(device->devinfo.devno)&DASD_FEATURE_READONLY) {
2840 rc = -EROFS;
2841 break;
2842 }
2843 if (!data) {
2844 rc = -EINVAL;
2845 break;
2846 }
2847 rc = copy_from_user (&fdata, (void *) data,
2848 sizeof (format_data_t));
2849 if (rc) {
2850 rc = -EFAULT;
2851 break;
2852 }
2853 if (partn != 0) {
2854
2855 DEV_MESSAGE (KERN_WARNING, device, "%s",
2856 "Cannot low-level format a partition");
2857
2858 return -EINVAL;
2859 }
2860 rc = dasd_format (device, &fdata);
2861 break;
2862 }
2863 case BIODASDGATTR: { /* Get Attributes (cache operations) */
2864
2865 attrib_data_t attrib;
2866
2867 if (!capable (CAP_SYS_ADMIN)) {
2868 rc = -EACCES;
2869 break;
2870 }
2871
2872 if (!data) {
2873 rc = -EINVAL;
2874 break;
2875 }
2876
2877 if (!device->discipline->get_attrib) {
2878 rc = -EINVAL;
2879 break;
2880 }
2881
2882 device->discipline->get_attrib (device,
2883 &attrib);
2884
2885 rc = copy_to_user ((void *) data, &attrib,
2886 sizeof (attrib_data_t));
2887
2888 if (rc) {
2889 rc = -EFAULT;
2890 }
2891
2892 break;
2893 }
2894 case BIODASDSATTR: { /* Set Attributes (cache operations) */
2895
2896 attrib_data_t attrib;
2897
2898 if (!capable (CAP_SYS_ADMIN)) {
2899 rc = -EACCES;
2900 break;
2901 }
2902
2903 if (!data) {
2904 rc = -EINVAL;
2905 break;
2906 }
2907
2908 if (!device->discipline->set_attrib) {
2909 rc = -EINVAL;
2910 break;
2911 }
2912
2913 rc = copy_from_user (&attrib, (void *) data,
2914 sizeof (attrib_data_t));
2915 if (rc) {
2916 rc = -EFAULT;
2917 break;
2918 }
2919
2920 rc = device->discipline->set_attrib (device,
2921 &attrib);
2922 break;
2923 }
2924 case BIODASDPRRST: { /* reset device profile information */
2925 if (!capable (CAP_SYS_ADMIN)) {
2926 rc = -EACCES;
2927 break;
2928 }
2929 memset (&device->profile, 0,
2930 sizeof (dasd_profile_info_t));
2931 break;
2932 }
2933 case BIODASDPRRD: { /* return device profile information */
2934 rc = copy_to_user((long *)data,
2935 (long *)&device->profile,
2936 sizeof(dasd_profile_info_t));
2937 if (rc)
2938 rc = -EFAULT;
2939 break;
2940 }
2941 case BIODASDRSRV: { /* reserve device */
2942 ccw_req_t *cqr;
2943 if (!capable (CAP_SYS_ADMIN)) {
2944 rc = -EACCES;
2945 break;
2946 }
2947
2948 if (!device->discipline->reserve) {
2949 rc = -EINVAL;
2950 break;
2951 }
2952
2953 cqr = device->discipline->reserve (device);
2954
2955 if (cqr) {
2956 struct timer_list res_timer;
2957
2958 init_timer (&res_timer);
2959 res_timer.function = dasd_resrel_timeout;
2960 res_timer.data = (unsigned long) cqr;
2961 res_timer.expires = jiffies + 2 * HZ;
2962 add_timer (&res_timer);
2963
2964 rc = dasd_sleep_on_immediate (cqr);
2965
2966 del_timer_sync (&res_timer);
2967 dasd_free_request (cqr,
2968 device);
2969 } else {
2970 rc = -ENOMEM;
2971 }
2972 break;
2973 }
2974 case BIODASDRLSE: { /* release device */
2975 ccw_req_t *cqr;
2976 if (!capable (CAP_SYS_ADMIN)) {
2977 rc = -EACCES;
2978 break;
2979 }
2980
2981 if (!device->discipline->release) {
2982 rc = -EINVAL;
2983 break;
2984 }
2985
2986 cqr = device->discipline->release (device);
2987
2988 if (cqr) {
2989 struct timer_list rel_timer;
2990
2991 init_timer (&rel_timer);
2992 rel_timer.function = dasd_resrel_timeout;
2993 rel_timer.data = (unsigned long) cqr;
2994 rel_timer.expires = jiffies + 2 * HZ;
2995 add_timer (&rel_timer);
2996
2997 rc = dasd_sleep_on_immediate (cqr);
2998
2999 del_timer_sync (&rel_timer); /* in case of interrupt */
3000 dasd_free_request (cqr,
3001 device);
3002 } else {
3003 rc = -ENOMEM;
3004 }
3005 break;
3006 }
3007 case BIODASDSLCK: { /* steal lock - unconditional reserve device */
3008 if (!capable (CAP_SYS_ADMIN)) {
3009 rc = -EACCES;
3010 break;
3011 }
3012
3013 rc = dasd_steal_lock (device);
3014 break;
3015 }
3016 case BIODASDINFO: /* return dasd information */
3017 case BIODASDINFO2: { /* return dasd information2 (incl. format and features) */
3018 dasd_information2_t dasd_info;
3019
3020 unsigned long flags;
3021
3022 if (!device->discipline->fill_info) {
3023 rc = -EINVAL;
3024 break;
3025 }
3026
3027 rc = device->discipline->fill_info (device,
3028 &dasd_info);
3029
3030 dasd_info.label_block = device->sizes.pt_block;
3031 dasd_info.devno = device->devinfo.devno;
3032 dasd_info.schid = device->devinfo.irq;
3033 dasd_info.cu_type = device->devinfo.sid_data.cu_type;
3034 dasd_info.cu_model = device->devinfo.sid_data.cu_model;
3035 dasd_info.dev_type = device->devinfo.sid_data.dev_type;
3036 dasd_info.dev_model = device->devinfo.sid_data.dev_model;
3037 dasd_info.open_count =
3038 atomic_read (&device->open_count);
3039 dasd_info.status = device->level;
3040
3041 /* check if device is really formatted - LDL / CDL was returned by 'fill_info' */
3042 if ((device->level < DASD_STATE_READY) ||
3043 (dasd_check_bp_block (device) ) ) {
3044 dasd_info.format = DASD_FORMAT_NONE;
3045 }
3046
3047 dasd_info.features =
3048 dasd_features_from_devno (device->devinfo.devno);
3049
3050 if (device->discipline) {
3051 memcpy (dasd_info.type,
3052 device->discipline->name, 4);
3053 } else {
3054 memcpy (dasd_info.type, "none", 4);
3055 }
3056 dasd_info.req_queue_len = 0;
3057 dasd_info.chanq_len = 0;
3058
3059 if ((device->request_queue ) &&
3060 (device->request_queue->request_fn) ) {
3061 struct list_head *l;
3062 ccw_req_t *cqr = device->queue.head;
3063 spin_lock_irqsave (&io_request_lock, flags);
3064 list_for_each (l,
3065 &device->request_queue->
3066 queue_head) {
3067 dasd_info.req_queue_len++;
3068 }
3069 spin_unlock_irqrestore (&io_request_lock,
3070 flags);
3071 s390irq_spin_lock_irqsave (device->devinfo.irq,
3072 flags);
3073 while (cqr) {
3074 cqr = cqr->next;
3075 dasd_info.chanq_len++;
3076 }
3077 s390irq_spin_unlock_irqrestore (device->devinfo.
3078 irq, flags);
3079 }
3080
3081 rc = copy_to_user ((long *) data, (long *) &dasd_info,
3082 ((no == (unsigned int) BIODASDINFO2) ?
3083 sizeof (dasd_information2_t) :
3084 sizeof (dasd_information_t)));
3085
3086 if (rc)
3087 rc = -EFAULT;
3088 break;
3089 }
3090 case BIODASDPSRD: { /* Performance Statistics Read */
3091
3092 ccw_req_t *cqr;
3093 dasd_rssd_perf_stats_t *stats;
3094
3095 if ((!device->discipline->read_stats) ||
3096 (!device->discipline->ret_stats ) ) {
3097 rc = -EINVAL;
3098 break;
3099 }
3100
3101 cqr = device->discipline->read_stats (device);
3102
3103 if (cqr) {
3104
3105 if ((rc = dasd_sleep_on_req (cqr)) == 0) {
3106
3107 if ((stats = device->discipline->ret_stats (cqr)) != NULL) {
3108
3109 rc = copy_to_user ((long *) data,
3110 (long *) stats,
3111 sizeof (dasd_rssd_perf_stats_t));
3112 } else {
3113
3114 rc = -EFAULT;
3115 }
3116 }
3117
3118 dasd_free_request (cqr,
3119 device);
3120
3121 } else {
3122 rc = -ENOMEM;
3123 }
3124 break;
3125 }
3126 #if 0 /* needed for XFS */
3127 case BLKBSZSET: {
3128 int bsz;
3129 rc = copy_from_user ((long *)&bsz,(long *)data,sizeof(int));
3130 if ( rc ) {
3131 rc = -EFAULT;
3132 } else {
3133 if ( bsz >= device->sizes.bp_block )
3134 rc = blk_ioctl (inp->i_rdev, no, data);
3135 else
3136 rc = -EINVAL;
3137 }
3138 break;
3139 }
3140 #endif /* 0 */
3141 case BLKROSET: {
3142 int intval;
3143 dasd_range_t *temp;
3144 int devindex = 0;
3145 unsigned long flags;
3146 struct list_head *l;
3147 int major=MAJOR(device->kdev);
3148 int minor;
3149
3150 if (!capable(CAP_SYS_ADMIN))
3151 return -EACCES;
3152 if (inp->i_rdev != device->kdev)
3153 // ro setting is not allowed for partitions
3154 return -EINVAL;
3155 if (get_user(intval, (int *)(data)))
3156 return -EFAULT;
3157 spin_lock_irqsave (&range_lock, flags);
3158 list_for_each (l, &dasd_range_head.list) {
3159 temp = list_entry (l, dasd_range_t, list);
3160 if (device->devinfo.devno >= temp->from && device->devinfo.devno <= temp->to) {
3161 spin_unlock_irqrestore (&range_lock, flags);
3162 if (intval)
3163 temp->features |= DASD_FEATURE_READONLY;
3164 else
3165 temp->features &= ~DASD_FEATURE_READONLY;
3166 goto continue_blkroset;
3167 }
3168 devindex += temp->to - temp->from + 1;
3169 }
3170 spin_unlock_irqrestore (&range_lock, flags);
3171 return(-ENODEV);
3172 continue_blkroset:
3173 for (minor = MINOR(device->kdev); minor < MINOR(device->kdev) + (1 << DASD_PARTN_BITS); minor++)
3174 set_device_ro(MKDEV(major,minor), intval);
3175 return 0;
3176 }
3177 case BLKBSZGET:
3178 case BLKSSZGET:
3179 case BLKROGET:
3180 case BLKRASET:
3181 case BLKRAGET:
3182 case BLKFLSBUF:
3183 case BLKPG:
3184 case BLKELVGET:
3185 case BLKELVSET:
3186 return blk_ioctl (inp->i_rdev, no, data);
3187 break;
3188 default: {
3189
3190 dasd_ioctl_list_t *old = dasd_find_ioctl (no);
3191 if (old) {
3192 if ( old->owner )
3193 __MOD_INC_USE_COUNT(old->owner);
3194 rc = old->handler (inp, no, data);
3195 if ( old->owner )
3196 __MOD_DEC_USE_COUNT(old->owner);
3197 } else {
3198
3199 DBF_DEV_EVENT (DBF_INFO, device,
3200 "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx",
3201 no,
3202 (_IOC_DIR (no) == _IOC_NONE ? "0" :
3203 _IOC_DIR (no) == _IOC_READ ? "r" :
3204 _IOC_DIR (no) == _IOC_WRITE ? "w" :
3205 _IOC_DIR (no) ==
3206 (_IOC_READ | _IOC_WRITE) ? "rw" : "u"),
3207 _IOC_TYPE (no),
3208 _IOC_NR (no),
3209 _IOC_SIZE (no),
3210 data);
3211
3212 rc = -ENOTTY;
3213 }
3214 break;
3215 }
3216 }
3217 return rc;
3218 }
3219
3220 /********************************************************************************
3221 * SECTION: The members of the struct file_operations
3222 ********************************************************************************/
3223
3224 static int
3225 dasd_ioctl (struct inode *inp, struct file *filp,
3226 unsigned int no, unsigned long data)
3227 {
3228 int rc = 0;
3229 if ((!inp) || !(inp->i_rdev)) {
3230 return -EINVAL;
3231 }
3232 rc = do_dasd_ioctl (inp, no, data);
3233 return rc;
3234 }
3235
3236 static int
3237 dasd_open (struct inode *inp, struct file *filp)
3238 {
3239 int rc = 0;
3240 unsigned long flags;
3241 dasd_device_t *device;
3242
3243 if ((!inp) || !(inp->i_rdev)) {
3244 rc = -EINVAL;
3245 goto fail;
3246 }
3247 if (dasd_probeonly) {
3248
3249 MESSAGE (KERN_INFO,
3250 "No access to device (%d:%d) due to probeonly mode",
3251 MAJOR (inp->i_rdev),
3252 MINOR (inp->i_rdev));
3253
3254 rc = -EPERM;
3255 goto fail;
3256 }
3257 spin_lock_irqsave(&discipline_lock,flags);
3258 device = dasd_device_from_kdev (inp->i_rdev);
3259 if (!device) {
3260
3261 MESSAGE (KERN_WARNING,
3262 "No device registered as (%d:%d)",
3263 MAJOR (inp->i_rdev),
3264 MINOR (inp->i_rdev));
3265
3266 rc = -ENODEV;
3267 goto unlock;
3268 }
3269 if (device->level <= DASD_STATE_ACCEPT ) {
3270
3271 DBF_DEV_EVENT (DBF_ERR, device, " %s",
3272 " Cannot open unrecognized device");
3273
3274 rc = -ENODEV;
3275 goto unlock;
3276 }
3277 if (atomic_inc_return (&device->open_count) == 1 ) {
3278 if ( device->discipline->owner )
3279 __MOD_INC_USE_COUNT(device->discipline->owner);
3280 }
3281 unlock:
3282 spin_unlock_irqrestore(&discipline_lock,flags);
3283 fail:
3284 return rc;
3285 }
3286
3287 /*
3288 * DASD_RELEASE
3289 *
3290 * DESCRIPTION
3291 */
3292 static int
3293 dasd_release (struct inode *inp, struct file *filp)
3294 {
3295 int rc = 0;
3296 int count;
3297 dasd_device_t *device;
3298
3299 if ((!inp) || !(inp->i_rdev)) {
3300 rc = -EINVAL;
3301 goto out;
3302 }
3303 device = dasd_device_from_kdev (inp->i_rdev);
3304 if (!device) {
3305
3306 MESSAGE (KERN_WARNING,
3307 "No device registered as %d:%d",
3308 MAJOR (inp->i_rdev),
3309 MINOR (inp->i_rdev));
3310
3311 rc = -EINVAL;
3312 goto out;
3313 }
3314
3315 if (device->level < DASD_STATE_ACCEPT ) {
3316
3317 DBF_DEV_EVENT (DBF_ERR, device, " %s",
3318 " Cannot release unrecognized device");
3319
3320 rc = -ENODEV;
3321 goto out;
3322 }
3323 count = atomic_dec_return (&device->open_count);
3324 if ( count == 0) {
3325 invalidate_buffers (inp->i_rdev);
3326 if ( device->discipline->owner )
3327 __MOD_DEC_USE_COUNT(device->discipline->owner);
3328 } else if ( count == -1 ) { /* paranoia only */
3329 atomic_set (&device->open_count,0);
3330
3331 MESSAGE (KERN_WARNING, "%s",
3332 "release called with open count==0");
3333 }
3334 out:
3335 return rc;
3336 }
3337
3338 static struct
3339 block_device_operations dasd_device_operations =
3340 {
3341 owner:THIS_MODULE,
3342 open:dasd_open,
3343 release:dasd_release,
3344 ioctl:dasd_ioctl,
3345 };
3346
3347 /********************************************************************************
3348 * SECTION: Management of device list
3349 ********************************************************************************/
3350 int
3351 dasd_fillgeo(int kdev,struct hd_geometry *geo)
3352 {
3353 dasd_device_t *device = dasd_device_from_kdev (kdev);
3354
3355 if (!device)
3356 return -EINVAL;
3357
3358 if (!device->discipline->fill_geometry)
3359 return -EINVAL;
3360
3361 device->discipline->fill_geometry (device, geo);
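/* start_sect is kept in 512-byte sectors; shifting right by s2b_shift
 * (assumed to be the sectors-per-block shift) yields device blocks */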
3362 geo->start = device->major_info->gendisk.part[MINOR(kdev)].start_sect
3363 >> device->sizes.s2b_shift;
3364 return 0;
3365 }
3366
3367
3368 /* This one is needed for naming 18000+ possible dasd devices */
3369 int
3370 dasd_device_name (char *str, int index, int partition, struct gendisk *hd)
3371 {
3372 major_info_t *major_info;
3373 struct list_head *l;
3374 char first, second, third;
3375 int len;
3376
3377 if (hd == NULL)
3378 return -EINVAL;
3379
3380 major_info = NULL;
3381 list_for_each (l, &dasd_major_info) {
3382 major_info = list_entry (l, major_info_t, list);
3383 if (&major_info->gendisk == hd)
3384 break;
3385 index += DASD_PER_MAJOR;
3386 }
3387 if (major_info == NULL || &major_info->gendisk != hd) {
3388 /* list empty or hd not found in list */
3389 return -EINVAL;
3390 }
3391
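/* base-26 naming scheme: indices 0-25 map to dasda-dasdz, 26-701 to
 * dasdaa-dasdzz, and 702 onwards to three letters, which covers the
 * 18000+ devices mentioned above */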
3392 len = 0;
3393 third = index % 26;
3394 second = ((index - 26) / 26) % 26;
3395 first = (((index - 702) / 26) / 26) % 26;
3396
3397 len = sprintf (str, "dasd");
3398 if (index > 701) {
3399 len += sprintf (str + len, "%c", first + 'a');
3400 }
3401 if (index > 25) {
3402 len += sprintf (str + len, "%c", second + 'a');
3403 }
3404 len += sprintf (str + len, "%c", third + 'a');
3405 if (partition) {
3406 if (partition > 9) {
3407 return -EINVAL;
3408 } else {
3409 len += sprintf (str + len, "%d", partition);
3410 }
3411 }
3412 str[len] = '\0';
3413 return 0;
3414 }
3415
3416 static void
3417 dasd_plug_device (dasd_device_t * device)
3418 {
3419 atomic_set(&device->plugged,1);
3420 }
3421
3422 static void
3423 dasd_unplug_device (dasd_device_t * device)
3424 {
3425 atomic_set(&device->plugged,0);
3426 dasd_schedule_bh(device);
3427 }
3428
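/*
 * function dasd_flush_chanq
 * if 'destroy' is set, terminates running IO and marks all unfinished
 * requests on the channel queue as failed; in any case waits until the
 * channel queue has drained
 */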
3429 static void
3430 dasd_flush_chanq ( dasd_device_t * device, int destroy )
3431 {
3432 ccw_req_t *cqr;
3433 unsigned long flags;
3434 if ( destroy ) {
3435 s390irq_spin_lock_irqsave (device->devinfo.irq, flags);
3436 cqr = device->queue.head;
3437 while ( cqr != NULL ) {
3438 if ( cqr->status == CQR_STATUS_IN_IO )
3439 device->discipline->term_IO (cqr);
3440 if ( cqr->status != CQR_STATUS_DONE &&
3441 cqr->status != CQR_STATUS_FAILED ) {
3442
3443 cqr->status = CQR_STATUS_FAILED;
3444
3445 cqr->stopclk = get_clock ();
3446
3447 }
3448 dasd_schedule_bh(device);
3449 cqr = cqr->next;
3450 }
3451 s390irq_spin_unlock_irqrestore (device->devinfo.irq, flags);
3452 }
3453 wait_event( device->wait_q, device->queue.head == NULL );
3454 }
3455
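/*
 * function dasd_flush_request_queues
 * destroys or invalidates the buffers of all partitions of the device,
 * depending on the 'destroy' argument
 */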
3456 static void
3457 dasd_flush_request_queues ( dasd_device_t * device, int destroy )
3458 {
3459 int i;
3460 int major = MAJOR(device->kdev);
3461 int minor = MINOR(device->kdev);
3462 for ( i = 0; i < (1 << DASD_PARTN_BITS); i ++) {
3463 if ( destroy )
3464 destroy_buffers(MKDEV(major,minor+i));
3465 else
3466 invalidate_buffers(MKDEV(major,minor+i));
3467 }
3468 }
3469
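/*
 * function dasd_do_hotplug_event
 * invokes the /sbin/hotplug helper (hotplug_path) with a minimal
 * environment describing the device and the add/remove/partition event
 */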
3470 static inline void dasd_do_hotplug_event (dasd_device_t* device, int eventid) {
3471 #ifdef CONFIG_HOTPLUG
3472 int i;
3473 char *argv[3], *envp[8];
3474 char devno[20],major[20],minor[20],devname[26],action[20];
3475
3476 /* setup command line arguments */
3477 i=0;
3478 argv[i++] = hotplug_path;
3479 argv[i++] = "dasd";
3480 argv[i++] = 0;
3481
3482 /* minimal environment */
3483 i=0;
3484 envp[i++] = "HOME=/";
3485 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
3486
3487 /* device information and event*/
3488 sprintf (devno, "DEVNO=%04x", device->devinfo.devno);
3489 sprintf (major, "MAJOR=%d", MAJOR(device->kdev));
3490 sprintf (minor, "MINOR=%d", MINOR(device->kdev));
3491 sprintf (devname, "DASDNAME=%s",device->name);
3492 switch (eventid) {
3493 case DASD_HOTPLUG_EVENT_ADD:
3494 sprintf (action,"ACTION=add");
3495 break;
3496 case DASD_HOTPLUG_EVENT_REMOVE:
3497 sprintf (action,"ACTION=remove");
3498 break;
3499 case DASD_HOTPLUG_EVENT_PARTCHK:
3500 sprintf (action,"ACTION=partchk");
3501 break;
3502 case DASD_HOTPLUG_EVENT_PARTREMOVE:
3503 sprintf (action,"ACTION=partremove");
3504 break;
3505 default:
3506 BUG();
3507 }
3508 envp[i++] = devno;
3509 envp[i++] = major;
3510 envp[i++] = minor;
3511 envp[i++] = devname;
3512 envp[i++] = action;
3513 envp[i++] = 0;
3514
3515 call_usermodehelper (argv [0], argv, envp);
3516 #endif
3517 }
3518
3519
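/*
 * function dasd_disable_volume
 * takes a volume offline; with 'force' set the channel and request
 * queues are flushed and the device drops to DASD_STATE_DEL, otherwise
 * it only falls back to DASD_STATE_KNOWN
 */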
3520 static int
3521 dasd_disable_volume ( dasd_device_t * device, int force )
3522 {
3523 int rc = 0;
3524 int target = DASD_STATE_KNOWN;
3525 int count = atomic_read (&device->open_count);
3526
3527 if ( count ) {
3528
3529 DEV_MESSAGE (KERN_EMERG, device, "%s",
3530 "device has vanished although it was open!");
3531 }
3532 if ( force ) {
3533 dasd_deactivate_queue(device);
3534 dasd_flush_chanq(device,force);
3535 dasd_flush_request_queues(device,force);
3536 dasd_disable_blkdev(device);
3537 target = DASD_STATE_DEL;
3538 }
3539
3540 /* unregister partitions ('ungrok_partitions') */
3541 devfs_register_partitions(&device->major_info->gendisk,
3542 MINOR(device->kdev),1);
3543 dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTREMOVE);
3544
3545 DBF_DEV_EVENT (DBF_ERR, device,
3546 "disabling device, target state: %d",
3547 target);
3548
3549 dasd_set_device_level (device->devinfo.devno,
3550 device->discipline,
3551 target);
3552 return rc;
3553 }
3554
3555 static void
3556 dasd_disable_ranges (dasd_range_t *range,
3557 dasd_discipline_t *discipline,
3558 int all, int force )
3559 {
3560 dasd_device_t **dptr;
3561 dasd_device_t *device;
3562 dasd_range_t *rrange;
3563 int j;
3564
3565 if (range == &dasd_range_head) {
3566 rrange = list_entry (range->list.next,
3567 dasd_range_t, list);
3568 } else {
3569 rrange = range;
3570 }
3571 do {
3572 for (j = rrange->from; j <= rrange->to; j++) {
3573 dptr = dasd_device_from_devno(j);
3574 if (dptr == NULL) {
3575 continue;
3576 }
3577 device = *dptr;
3578 if (device == NULL ||
3579 (discipline != NULL &&
3580 device -> discipline != discipline))
3581 continue;
3582
3583 dasd_disable_volume(device, force);
3584 }
3585
3586 if (rrange->list.next == NULL)
3587 break;
3588 rrange = list_entry (rrange->list.next, dasd_range_t, list);
3589 } while ( all && rrange && rrange != range );
3590
3591 }
3592
3593 static void
3594 dasd_enable_single_device ( unsigned long arg ) {
3595 dasd_device_t * device =(dasd_device_t *) arg;
3596 int devno = device->devinfo.devno;
3597 dasd_range_t range = { from: devno, to:devno };
3598 dasd_enable_ranges (&range,NULL,0);
3599 }
3600
3601 static void
3602 dasd_enable_ranges (dasd_range_t *range,
3603 dasd_discipline_t *discipline,
3604 int all)
3605 {
3606 int retries = 0;
3607 int j;
3608 int do_again;
3609 kdev_t tempdev;
3610 dasd_range_t *rrange;
3611
3612 if (range == NULL)
3613 return;
3614
3615 do {
3616 do_again = 0;
3617 if (range == &dasd_range_head) {
3618 rrange = list_entry (range->list.next,
3619 dasd_range_t, list);
3620 } else {
3621 rrange = range;
3622 }
3623 do {
3624 for (j = rrange->from; j <= rrange->to; j++) {
3625 if ( dasd_devindex_from_devno(j) < 0 )
3626 continue;
3627 if (-EAGAIN == dasd_set_device_level
3628 (j, discipline, DASD_STATE_ONLINE))
3629 do_again = 1;
3630 }
3631 rrange = list_entry (rrange->list.next, dasd_range_t, list);
3632 } while ( all && rrange && rrange != range );
3633
3634 if ((atomic_read (&dasd_init_pending) == 0) &&
3635 (!do_again)) /* we are done, exit loop */
3636 break;
3637
3638 if ( retries == 0 ) {
3639
3640 MESSAGE (KERN_INFO, "%s",
3641 "waiting for responses...");
3642
3643 } else if ( retries < 5 ) {
3644
3645 DBF_EVENT (DBF_NOTICE, "%s",
3646 "waiting a little bit longer...");
3647
3648 } else {
3649
3650 MESSAGE (KERN_INFO, "%s",
3651 "giving up, enable late devices manually!");
3652 break;
3653 }
3654
3655 /* prevent scheduling if called by bh (timer) */
3656 if (!in_interrupt()) {
3657 interruptible_sleep_on_timeout (&dasd_init_waitq,
3658 (1 * HZ) );
3659 }
3660
3661 retries ++;
3662 } while (1);
3663 /* now setup block devices */
3664
3665 /* Now do block device and partition setup */
3666 if (range == &dasd_range_head) {
3667 rrange = list_entry (range->list.next,
3668 dasd_range_t, list);
3669 } else {
3670 rrange = range;
3671 }
3672 do {
3673 for (j = rrange->from; j <= rrange->to; j++) {
3674 dasd_device_t **dptr;
3675 dasd_device_t *device;
3676 if ( dasd_devindex_from_devno(j) < 0 )
3677 continue;
3678 dptr = dasd_device_from_devno(j);
3679 device = *dptr;
3680 if (device == NULL )
3681 continue;
3682 if ( ((discipline == NULL && device->discipline != NULL) ||
3683 (device->discipline == discipline )) &&
3684 device->level == DASD_STATE_ONLINE &&
3685 device->request_queue == NULL ) {
3686 if (dasd_features_from_devno(j)&DASD_FEATURE_READONLY) {
3687 for (tempdev=device->kdev;
3688 tempdev<(device->kdev +(1 << DASD_PARTN_BITS));
3689 tempdev++)
3690 set_device_ro (tempdev, 1);
3691
3692 DEV_MESSAGE (KERN_WARNING, device, "%s",
3693 "setting read-only mode ");
3694 }
3695 dasd_setup_blkdev(device);
3696 dasd_setup_partitions(device);
3697 }
3698 }
3699 rrange = list_entry (rrange->list.next, dasd_range_t, list);
3700 } while ( all && rrange && rrange != range );
3701 }
3702
3703 #ifdef CONFIG_DASD_DYNAMIC
3704 /*
3705 * DASD_NOT_OPER_HANDLER
3706 *
3707 * DESCRIPTION
3708 * handles leaving devices
3709 */
3710 static void
3711 dasd_not_oper_handler (int irq, int status)
3712 {
3713 dasd_device_t *device;
3714 major_info_t *major_info;
3715 ccw_req_t* cqr;
3716 struct list_head *l;
3717 unsigned long flags;
3718 int i, devno;
3719
3720 /* find out devno of leaving device: CIO has already deleted this information ! */
3721 devno = -ENODEV;
3722 device = NULL;
3723 list_for_each (l, &dasd_major_info) {
3724 major_info = list_entry (l, major_info_t, list);
3725 for (i = 0; i < DASD_PER_MAJOR; i++) {
3726 device = major_info->dasd_device[i];
3727 if (device && device->devinfo.irq == irq) {
3728 devno = device->devinfo.devno;
3729 break;
3730 }
3731 }
3732 if (devno != -ENODEV)
3733 break;
3734 }
3735
3736 if (devno < 0) {
3737
3738 MESSAGE (KERN_WARNING,
3739 "not_oper_handler called on irq 0x%04x no devno!",
3740 irq);
3741 return;
3742 }
3743 switch (status) {
3744 case DEVSTAT_DEVICE_GONE:
3745 case DEVSTAT_REVALIDATE: //FIXME
3746 DEV_MESSAGE (KERN_DEBUG, device, "%s",
3747 "device is gone, disabling it permanently\n");
3748 dasd_disable_volume(device, 1);
3749 break;
3750 case DEVSTAT_NOT_ACC:
3751 case DEVSTAT_NOT_ACC_ERR:
3752 DEV_MESSAGE (KERN_DEBUG, device, "%s",
3753 "device is not accessible, disabling it temporary\n");
3754 s390irq_spin_lock_irqsave (device->devinfo.irq,
3755 flags);
3756 device->stopped |= DASD_STOPPED_NOT_ACC;
3757
3758 if (status == DEVSTAT_NOT_ACC_ERR) {
3759 cqr = device->queue.head;
3760 while (cqr) {
3761 if (cqr->status == CQR_STATUS_QUEUED)
3762 break;
3763 if (cqr->status == CQR_STATUS_IN_IO)
3764 cqr->status = CQR_STATUS_QUEUED;
3765 cqr = cqr->next;
3766 }
3767 }
3768 s390irq_spin_unlock_irqrestore(device->devinfo.irq,
3769 flags);
3770
3771 break;
3772 default:
3773 panic ("dasd not operational handler was called with illegal status\n");
3774 }
3775 }
3776
3777 /*
3778 * DASD_OPER_HANDLER
3779 *
3780 * DESCRIPTION
3781 * called by the machine check handler to make an device operational
3782 */
3783 int
3784 dasd_oper_handler (int irq, devreg_t * devreg)
3785 {
3786 int devno;
3787 int rc = 0;
3788 major_info_t *major_info;
3789 dasd_range_t range;
3790 dasd_device_t *device;
3791 struct list_head *l;
3792 unsigned long flags;
3793 int i;
3794
3795
3796 devno = get_devno_by_irq (irq);
3797 if (devno == -ENODEV) {
3798 rc = -ENODEV;
3799 goto out;
3800 }
3801
3802 /* find out devno of device */
3803 device = NULL;
3804 list_for_each (l, &dasd_major_info) {
3805 major_info = list_entry (l, major_info_t, list);
3806 for (i = 0; i < DASD_PER_MAJOR; i++) {
3807 device = major_info->dasd_device[i];
3808 if (device && device->devinfo.irq == irq)
3809 break;
3810 else
3811 device = NULL;
3812 }
3813 if (device)
3814 break;
3815 }
3816
3817 if (device &&
3818 device->level >= DASD_STATE_NEW) {
3819 s390irq_spin_lock_irqsave (device->devinfo.irq,
3820 flags);
3821 DEV_MESSAGE (KERN_DEBUG, device, "%s",
3822 "device is accessible again, reenabling it\n");
3823 device->stopped &= ~DASD_STOPPED_NOT_ACC;
3824
3825 s390irq_spin_unlock_irqrestore(device->devinfo.irq,
3826 flags);
3827 dasd_schedule_bh(device);
3828 } else {
3829
3830 if (dasd_autodetect) {
3831 dasd_add_range (devno, devno, DASD_FEATURE_DEFAULT);
3832 }
3833 range.from = devno;
3834 range.to = devno;
3835 dasd_enable_ranges (&range, NULL, 0);
3836 }
3837 out:
3838 return rc;
3839 }
3840 #endif /* CONFIG_DASD_DYNAMIC */
3841
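/*
 * function dasd_find_device_addr
 * returns the address of the device slot for 'devno', registering
 * additional major numbers on demand; returns NULL if the devno is not
 * within a configured range or no major number is left
 */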
3842 static inline dasd_device_t **
3843 dasd_find_device_addr ( int devno )
3844 {
3845 dasd_device_t **device_addr;
3846
3847 DBF_EVENT (DBF_INFO,
3848 "devno %04x",
3849 devno);
3850
3851 if ( dasd_devindex_from_devno (devno) < 0 ) {
3852
3853 DBF_EXC (DBF_ALERT,
3854 "no dasd: devno %04x",
3855 devno);
3856
3857 return NULL;
3858 }
3859 /* allocate major numbers on demand for new devices */
3860 while ((device_addr = dasd_device_from_devno (devno)) == NULL) {
3861 int rc;
3862
3863 if ((rc = dasd_register_major (NULL)) <= 0) {
3864
3865 DBF_EXC (DBF_ALERT, "%s",
3866 "out of major numbers!");
3867 break;
3868 }
3869 }
3870 return device_addr;
3871 }
3872
3873 static inline int
3874 dasd_state_del_to_new (dasd_device_t **addr, int devno)
3875 {
3876 int i;
3877 dasd_device_t* device;
3878 dasd_lowmem_t *lowmem;
3879 int rc;
3880
3881
3882 /* allocate device descriptor on demand for new device */
3883 if (*addr != NULL) {
3884 BUG ();
3885 }
3886
3887 device = kmalloc (sizeof (dasd_device_t), GFP_ATOMIC);
3888 if (device == NULL) {
3889 return -ENOMEM;
3890 }
3891
3892 memset (device, 0, sizeof (dasd_device_t));
3893 dasd_plug_device (device);
3894 INIT_LIST_HEAD (&device->lowmem_pool);
3895
3896 /* allocate pages for lowmem pool */
3897 for (i = 0; i < DASD_LOWMEM_PAGES; i++) {
3898
3899 lowmem = (void *) get_free_page (GFP_ATOMIC|GFP_DMA);
3900 if (lowmem == NULL) {
3901 break;
3902 }
3903
3904 list_add (&lowmem->list, &device->lowmem_pool);
3905 }
3906
3907 if (i < DASD_LOWMEM_PAGES) {
3908 /* didn't get the needed lowmem pages */
3909 list_for_each_entry (lowmem, &device->lowmem_pool, list) {
3910 MESSAGE (KERN_DEBUG,
3911 "<devno: %04x> not enough memory - "
3912 "Free page again :%p",
3913 devno, lowmem);
3914 free_page ((unsigned long) lowmem);
3915 }
3916 kfree (device);
3917 rc = -ENOMEM;
3918 } else {
3919 *addr = device;
3920 rc = 0;
3921 }
3922 return rc;
3923 }
3924
3925 static inline int
3926 dasd_state_new_to_del (dasd_device_t **addr, int devno)
3927 {
3928 dasd_lowmem_t *lowmem;
3929
3930 dasd_device_t *device = *addr;
3931
3932 /* free private area */
3933 if (device && device->private) {
3934 kfree(device->private);
3935 }
3936
3937 /* free lowmem_pool */
3938 list_for_each_entry (lowmem, &device->lowmem_pool, list) {
3939 free_page ((unsigned long) lowmem);
3940 }
3941
3942 /* free device */
3943 kfree(device);
3944 *addr = NULL;
3945 return 0;
3946 }
3947
3948 static inline int
3949 dasd_state_new_to_known (dasd_device_t **dptr,
3950 int devno,
3951 dasd_discipline_t *discipline)
3952 {
3953 int rc = 0;
3954 umode_t devfs_perm = S_IFBLK | S_IRUSR | S_IWUSR;
3955 struct list_head *l;
3956 major_info_t *major_info, *tmp;
3957 int i;
3958 dasd_device_t *device = *dptr;
3959 devfs_handle_t dir;
3960 char buffer[5];
3961
3962 major_info = NULL;
3963 list_for_each (l, &dasd_major_info) {
3964 tmp = list_entry (l, major_info_t, list);
3965 for (i = 0; i < DASD_PER_MAJOR; i++) {
3966 if (tmp->dasd_device[i] == device) {
3967 device->kdev = MKDEV (tmp->gendisk.major,
3968 i << DASD_PARTN_BITS);
3969 major_info = tmp;
3970 break;
3971 }
3972 }
3973 if (major_info != NULL) /* we found one */
3974 break;
3975 }
3976 if ( major_info == NULL )
3977 BUG();
3978
3979 device->major_info = major_info;
3980 dasd_device_name (device->name,
3981 (((long)dptr -
3982 (long)device->major_info->dasd_device) /
3983 sizeof (dasd_device_t *)),
3984 0, &device->major_info->gendisk);
3985 init_waitqueue_head (&device->wait_q);
3986
3987 rc = get_dev_info_by_devno (devno, &device->devinfo);
3988 if ( rc ) {
3989 /* returns -EUSERS if boxed !!*/
3990 if (rc == -EUSERS) {
3991 device->level = DASD_STATE_BOXED;
3992 }
3993 goto out;
3994 }
3995
3996 DBF_EVENT (DBF_NOTICE,
3997 "got devinfo CU-type %04x and dev-type %04x",
3998 device->devinfo.sid_data.cu_type,
3999 device->devinfo.sid_data.dev_type);
4000
4001
4002 if ( devno != device->devinfo.devno )
4003 BUG();
4004
4005 device->discipline = dasd_find_disc (device,
4006 discipline);
4007 if ( device->discipline == NULL ) {
4008 rc = -ENODEV;
4009 goto out;
4010 }
4011 sprintf (buffer, "%04x",
4012 device->devinfo.devno);
4013 dir = devfs_mk_dir (dasd_devfs_handle, buffer, device);
4014 device->major_info->gendisk.de_arr[MINOR(device->kdev)
4015 >> DASD_PARTN_BITS] = dir;
4016 if (dasd_features_from_devno(device->devinfo.devno)&DASD_FEATURE_READONLY) {
4017 devfs_perm &= ~(S_IWUSR);
4018 }
4019 device->devfs_entry = devfs_register (dir,"device",DEVFS_FL_DEFAULT,
4020 MAJOR(device->kdev),
4021 MINOR(device->kdev),
4022 devfs_perm,
4023 &dasd_device_operations,NULL);
4024 dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_ADD);
4025 device->level = DASD_STATE_KNOWN;
4026 out:
4027 return rc;
4028 }
4029
4030 static inline int
4031 dasd_state_known_to_new (dasd_device_t *device )
4032 {
4033 int rc = 0;
4034 /* don't reset to zeros because of persistent data during detach/attach! */
4035 devfs_unregister(device->devfs_entry);
4036 devfs_unregister(device->major_info->gendisk.de_arr[MINOR(device->kdev) >> DASD_PARTN_BITS]);
4037 dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_REMOVE);
4038 return rc;
4039 }
4040
4041 static inline int
4042 dasd_state_known_to_accept (dasd_device_t *device)
4043 {
4044 int rc = 0;
4045
4046 /* register 'device' debug area, used for all DBF_DEV_XXX calls*/
4047 device->debug_area = debug_register (device->name,
4048 0, /* size of debug area */
4049 2, /* number of areas */
4050 8 * sizeof (long));
4051
4052 debug_register_view (device->debug_area,
4053 &debug_sprintf_view);
4054
4055 debug_set_level (device->debug_area,
4056 DBF_ERR);
4057
4058 DBF_DEV_EVENT (DBF_EMERG, device, "%s",
4059 "debug area created");
4060
4061 if (device->discipline->int_handler) {
4062 rc = s390_request_irq_special (device->devinfo.irq,
4063 device->discipline->int_handler,
4064 dasd_not_oper_handler,
4065 SA_DOPATHGROUP, DASD_NAME,
4066 &device->dev_status);
4067 if ( rc ) {
4068
4069 MESSAGE (KERN_DEBUG, "%s",
4070 "No request IRQ");
4071
4072 if (rc == -EUSERS) {
4073 /* Device is reserved by someone else. */
4074 device->level = DASD_STATE_BOXED;
4075 }
4076
4077 goto out;
4078 }
4079 }
4080 device->level = DASD_STATE_ACCEPT;
4081 out:
4082 return rc;
4083 }
4084
4085 static inline int
4086 dasd_state_accept_to_known (dasd_device_t *device )
4087 {
4088 if ( device->discipline == NULL )
4089 goto out;
4090 if (device->discipline->int_handler) {
4091 free_irq (device->devinfo.irq, &device->dev_status);
4092 }
4093
4094 DBF_DEV_EVENT (DBF_EMERG, device,
4095 "%p debug area deleted",
4096 device);
4097
4098 if (device->debug_area != NULL) {
4099 debug_unregister (device->debug_area);
4100 device->debug_area = NULL;
4101 }
4102 device->discipline = NULL;
4103 device->level = DASD_STATE_KNOWN;
4104 out:
4105 return 0;
4106 }
4107
4108 static inline int
4109 dasd_state_accept_to_init (dasd_device_t *device)
4110 {
4111 int rc = 0;
4112 unsigned long flags;
4113
4114 if ( device->discipline->init_analysis ) {
4115 device->init_cqr=device->discipline->init_analysis (device);
4116 if ( device->init_cqr != NULL ) {
4117 if ( device->discipline->start_IO == NULL )
4118 BUG();
4119 atomic_inc (&dasd_init_pending);
4120 s390irq_spin_lock_irqsave (device->devinfo.irq,
4121 flags);
4122 rc = device->discipline->start_IO (device->init_cqr);
4123 if ( ! rc )
4124 device->level = DASD_STATE_INIT;
4125 s390irq_spin_unlock_irqrestore(device->devinfo.irq,
4126 flags);
4127 } else {
4128 rc = -ENOMEM;
4129 }
4130 } else {
4131 rc = dasd_state_init_to_ready ( device );
4132 }
4133
4134 return rc;
4135 }
4136
4137 static inline int
4138 dasd_state_init_to_ready (dasd_device_t *device )
4139 {
4140 int rc = 0;
4141 if (device->discipline->do_analysis != NULL)
4142 if ( device->discipline->do_analysis (device) == 0 )
4143 rc = dasd_check_bp_block (device);
4144
4145 if ( device->init_cqr ) {
4146 /* This pointer is no longer needed, but do NOT free the memory; */
4147 /* that is done in the bh for the finished request. */
4148 atomic_dec(&dasd_init_pending);
4149 device->init_cqr = NULL;
4150 }
4151 device->level = DASD_STATE_READY;
4152 return rc;
4153 }
4154
4155 static inline int
4156 dasd_state_ready_to_accept (dasd_device_t *device )
4157 {
4158 int rc = 0;
4159 unsigned long flags;
4160
4161 s390irq_spin_lock_irqsave (device->devinfo.irq, flags);
4162 if ( device->init_cqr != NULL && atomic_read(&dasd_init_pending) != 0 ) {
4163 if ( device->discipline->term_IO == NULL )
4164 BUG();
4165 device->discipline->term_IO (device->init_cqr);
4166 atomic_dec (&dasd_init_pending);
4167 dasd_free_request (device->init_cqr, device);
4168 device->init_cqr = NULL;
4169 }
4170 s390irq_spin_unlock_irqrestore(device->devinfo.irq, flags);
4171 memset(&device->sizes,0,sizeof(dasd_sizes_t));
4172 device->level = DASD_STATE_ACCEPT;
4173 return rc;
4174 }
4175
4176 static inline int
4177 dasd_state_ready_to_online (dasd_device_t *device )
4178 {
4179 int rc = 0;
4180 if (!(rc = dasd_check_bp_block (device))) {
4181 dasd_unplug_device (device);
4182 device->level = DASD_STATE_ONLINE;
4183 }
4184 return rc;
4185 }
4186
4187 static inline int
4188 dasd_state_online_to_ready (dasd_device_t *device )
4189 {
4190 int rc = 0;
4191 dasd_plug_device (device);
4192 device->level = DASD_STATE_READY;
4193 return rc;
4194 }
4195
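/*
 * dasd_setup_blkdev initializes the per-minor gendisk and blk_dev arrays
 * (sizes, hardsect_size, blksize_size, max_sectors) for every partition of
 * the device and allocates a request queue that is served by
 * do_dasd_request using the noop elevator.
 */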
4196 static inline int
4197 dasd_setup_blkdev (dasd_device_t *device )
4198 {
4199 int rc = 0;
4200 int i;
4201 int major = MAJOR(device->kdev);
4202 int minor = MINOR(device->kdev);
4203 request_queue_t *request_queue;
4204
4205 for (i = 0; i < (1 << DASD_PARTN_BITS); i++) {
4206 if (i == 0)
4207 device->major_info->gendisk.sizes[minor] =
4208 (device->sizes.blocks << device->
4209 sizes.s2b_shift) >> 1;
4210 else
4211 device->major_info->gendisk.sizes[minor + i] = 0;
4212 hardsect_size[major][minor + i] = device->sizes.bp_block;
4213 blksize_size[major][minor + i] = device->sizes.bp_block;
4214 max_sectors[major][minor + i] =
4215 device->discipline->max_blocks <<
4216 device->sizes.s2b_shift;
4217 device->major_info->gendisk.part[minor+i].start_sect = 0;
4218 device->major_info->gendisk.part[minor+i].nr_sects = 0;
4219 }
4220
4221 request_queue = kmalloc(sizeof(request_queue_t),GFP_KERNEL);
4222 if (request_queue) {
4223 request_queue->queuedata = device;
4224 blk_init_queue (request_queue, do_dasd_request);
4225 blk_queue_headactive (request_queue, 1);
4226 elevator_init (&(request_queue->elevator),ELEVATOR_NOOP);
4227 }
4228 device->request_queue = request_queue;
4229
4230 return rc;
4231 }
4232
4233 static void
4234 dasd_deactivate_queue (dasd_device_t *device)
4235 {
4236 int i;
4237 int minor = MINOR(device->kdev);
4238
4239 for (i = 0; i < (1 << DASD_PARTN_BITS); i++) {
4240 device->major_info->gendisk.sizes[minor + i] = 0;
4241 }
4242 }
4243
4244 static inline int
4245 dasd_disable_blkdev (dasd_device_t *device )
4246 {
4247 int i;
4248 int major = MAJOR(device->kdev);
4249 int minor = MINOR(device->kdev);
4250 request_queue_t *q = device->request_queue;
4251 struct request *req;
4252 unsigned long flags;
4253
4254 spin_lock_irqsave(&io_request_lock, flags);
4255 while (q &&
4256 !list_empty(&q->queue_head) &&
4257 (req = dasd_next_request(q)) != NULL) {
4258
4259 dasd_end_request(req, 0);
4260 dasd_dequeue_request(q, req);
4261 }
4262 spin_unlock_irqrestore(&io_request_lock, flags);
4263
4264 for (i = 0; i < (1 << DASD_PARTN_BITS); i++) {
4265 destroy_buffers(MKDEV(major,minor+i));
4266 device->major_info->gendisk.sizes[minor + i] = 0;
4267 hardsect_size[major][minor + i] = 0;
4268 blksize_size[major][minor + i] = 0;
4269 max_sectors[major][minor + i] = 0;
4270 }
4271 if (device->request_queue) {
4272 blk_cleanup_queue (device->request_queue);
4273 kfree(device->request_queue);
4274 device->request_queue = NULL;
4275 }
4276 return 0;
4277 }
4278
4279
4280 /*
4281 * function dasd_setup_partitions
4282 * calls the genhd function that is appropriate to set up a partitioned disk
4283 */
4284 static inline void
4285 dasd_setup_partitions ( dasd_device_t * device )
4286 {
4287 register_disk (&device->major_info->gendisk,
4288 device->kdev,
4289 1 << DASD_PARTN_BITS,
4290 &dasd_device_operations,
4291 (device->sizes.blocks << device->sizes.s2b_shift));
4292 dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTCHK);
4293 }
4294
4295 static inline void
4296 dasd_destroy_partitions ( dasd_device_t * device )
4297 {
4298 int i;
4299 int minor = MINOR(device->kdev);
4300
4301 for (i = 0; i < (1 << DASD_PARTN_BITS); i++) {
4302 device->major_info->gendisk.part[minor+i].start_sect = 0;
4303 device->major_info->gendisk.part[minor+i].nr_sects = 0;
4304 }
4305 devfs_register_partitions(&device->major_info->gendisk,
4306 MINOR(device->kdev),1);
4307 dasd_do_hotplug_event (device, DASD_HOTPLUG_EVENT_PARTREMOVE);
4308 }
4309
4310 /*
4311 * function dasd_set_device_level
4312 */
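/*
 * dasd_set_device_level moves the device step by step between its current
 * level and 'to_state': on the way up it calls the dasd_state_*_to_*
 * bringup helpers above, on the way down the corresponding shutdown
 * helpers.  If a bringup step fails, the transitions done so far are
 * reverted through the shutdown path.
 */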
4313 static int
4314 dasd_set_device_level (unsigned int devno,
4315 dasd_discipline_t * discipline,
4316 int to_state)
4317 {
4318 int rc = 0;
4319 dasd_device_t **device_addr;
4320 dasd_device_t *device;
4321 int from_state;
4322
4323 device_addr = dasd_find_device_addr ( devno );
4324 if ( device_addr == NULL ) {
4325 rc = -ENODEV;
4326 goto out;
4327 }
4328 device = *device_addr;
4329
4330 if ( device == NULL ) {
4331 from_state = DASD_STATE_DEL;
4332 if ( to_state == DASD_STATE_DEL )
4333 goto out;
4334 } else {
4335 from_state = device->level;
4336 }
4337
4338 DBF_EVENT (DBF_INFO,
4339 "devno %04x; from %i to %i",
4340 devno,
4341 from_state,
4342 to_state);
4343
4344 if ( from_state == to_state )
4345 goto out;
4346
4347 if ( to_state < from_state )
4348 goto shutdown;
4349
4350 /* First check for bringup */
4351 if ( from_state <= DASD_STATE_DEL &&
4352 to_state >= DASD_STATE_NEW ) {
4353 rc = dasd_state_del_to_new(device_addr, devno);
4354 if ( rc ) {
4355 goto bringup_fail;
4356 }
4357 device = *device_addr;
4358 }
4359
4360 /* reprobe boxed devices */
4361 if (device->level == DASD_STATE_BOXED) {
4362 rc = s390_trigger_resense (device->devinfo.irq);
4363 if ( rc ) {
4364 goto bringup_fail;
4365 }
4366 }
4367
4368 if ( device->level <= DASD_STATE_BOXED &&
4369 to_state >= DASD_STATE_KNOWN ) {
4370 rc = dasd_state_new_to_known( device_addr, devno, discipline );
4371 if ( rc ) {
4372 goto bringup_fail;
4373 }
4374 }
4375
4376 if ( device->level <= DASD_STATE_KNOWN &&
4377 to_state >= DASD_STATE_ACCEPT ) {
4378 rc = dasd_state_known_to_accept(device);
4379 if ( rc ) {
4380 goto bringup_fail;
4381 }
4382 }
4383 if ( dasd_probeonly ) {
4384 goto out;
4385 }
4386
4387 if ( device->level <= DASD_STATE_ACCEPT &&
4388 to_state >= DASD_STATE_INIT ) {
4389 rc = dasd_state_accept_to_init(device);
4390 if ( rc ) {
4391 goto bringup_fail;
4392 }
4393 }
4394 if ( device->level <= DASD_STATE_INIT &&
4395 to_state >= DASD_STATE_READY ) {
4396 rc = -EAGAIN;
4397 goto out;
4398 }
4399
4400 if ( device->level <= DASD_STATE_READY &&
4401 to_state >= DASD_STATE_ONLINE ) {
4402 rc = dasd_state_ready_to_online(device);
4403 if ( rc ) {
4404 goto bringup_fail;
4405 }
4406 }
4407 goto out;
4408 bringup_fail: /* revert changes */
4409
4410 DBF_DEV_EVENT (DBF_ERR, device,
4411 "failed to set device from state %d to %d at "
4412 "level %d rc %d. Reverting...",
4413 from_state,
4414 to_state,
4415 device->level,
4416 rc);
4417
4418 if (device->level <= DASD_STATE_NEW) {
4419 /* Revert - device can not be accessed */
4420 to_state = from_state;
4421 from_state = device->level;
4422 }
4423
4424 /* now do a shutdown */
4425 shutdown:
4426 if ( device->level >= DASD_STATE_ONLINE &&
4427 to_state <= DASD_STATE_READY )
4428 if (dasd_state_online_to_ready(device))
4429 BUG();
4430
4431 if ( device->level >= DASD_STATE_READY &&
4432 to_state <= DASD_STATE_ACCEPT )
4433 if ( dasd_state_ready_to_accept(device))
4434 BUG();
4435
4436 if ( device->level >= DASD_STATE_ACCEPT &&
4437 to_state <= DASD_STATE_KNOWN )
4438 if ( dasd_state_accept_to_known(device))
4439 BUG();
4440
4441 if ( device->level >= DASD_STATE_KNOWN &&
4442 to_state <= DASD_STATE_NEW )
4443 if ( dasd_state_known_to_new(device))
4444 BUG();
4445
4446 if ( device->level >= DASD_STATE_NEW &&
4447 to_state <= DASD_STATE_DEL)
4448 if (dasd_state_new_to_del(device_addr, devno))
4449 BUG();
4450 goto out;
4451 out:
4452 return rc;
4453 }
4454
4455 /********************************************************************************
4456 * SECTION: Procfs stuff
4457 ********************************************************************************/
4458 #ifdef CONFIG_PROC_FS
4459
4460 typedef struct {
4461 char *data;
4462 int len;
4463 } tempinfo_t;
4464
4465 void
4466 dasd_fill_inode (struct inode *inode, int fill)
4467 {
4468 if (fill)
4469 MOD_INC_USE_COUNT;
4470 else
4471 MOD_DEC_USE_COUNT;
4472 }
4473
4474 static struct proc_dir_entry *dasd_proc_root_entry = NULL;
4475 static struct proc_dir_entry *dasd_devices_entry;
4476 static struct proc_dir_entry *dasd_statistics_entry;
4477
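/*
 * dasd_devices_open renders the whole /proc/dasd/devices listing into a
 * temporary buffer.  Each line has (roughly) the form
 *
 *   <devno>(<discipline>) at (<major>:<minor>) is <name> <(ro)?>: <status>
 *
 * e.g. (values are illustrative only):
 *
 *   0150(ECKD) at ( 94:  0) is dasda      : active at blocksize: 4096, ...
 */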
4478 static int
4479 dasd_devices_open (struct inode *inode, struct file *file)
4480 {
4481 int rc = 0;
4482 int size = 1;
4483 int len = 0;
4484 major_info_t *temp = NULL;
4485 struct list_head *l;
4486 tempinfo_t *info;
4487 int i;
4488 unsigned long flags;
4489 int index = 0;
4490
4491 MOD_INC_USE_COUNT;
4492
4493 spin_lock_irqsave(&discipline_lock,
4494 flags);
4495
4496 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
4497
4498 if (info == NULL) {
4499
4500 MESSAGE (KERN_WARNING, "%s",
4501 "No memory available for data (tempinfo)");
4502
4503 spin_unlock_irqrestore(&discipline_lock,
4504 flags);
4505
4506 MOD_DEC_USE_COUNT;
4507
4508 return -ENOMEM;
4509
4510 } else {
4511 file->private_data = (void *) info;
4512 }
4513 list_for_each (l, &dasd_major_info) {
4514 size += 128 * 1 << (MINORBITS - DASD_PARTN_BITS);
4515 }
4516 info->data = (char *) vmalloc (size);
4517
4518 if (size && info->data == NULL) {
4519
4520 MESSAGE (KERN_WARNING, "%s",
4521 "No memory available for data (info->data)");
4522
4523 vfree (info);
4524
4525 spin_unlock_irqrestore(&discipline_lock,
4526 flags);
4527
4528 MOD_DEC_USE_COUNT;
4529
4530 return -ENOMEM;
4531 }
4532
4533 DBF_EVENT (DBF_NOTICE,
4534 "procfs-area: %p, size 0x%x allocated",
4535 info->data,
4536 size);
4537
4538 list_for_each (l, &dasd_major_info) {
4539 temp = list_entry (l, major_info_t, list);
4540 for (i = 0; i < 1 << (MINORBITS - DASD_PARTN_BITS); i++) {
4541 dasd_device_t *device;
4542 int devno = dasd_devno_from_devindex(index+i);
4543 int features;
4544
4545 if ( devno == -ENODEV )
4546 continue;
4547
4548 features = dasd_features_from_devno(devno);
4549 if (features < DASD_FEATURE_DEFAULT)
4550 features = DASD_FEATURE_DEFAULT;
4551
4552 device = temp->dasd_device[i];
4553 if (device) {
4554
4555 len += sprintf (info->data + len,
4556 "%04x(%s) at (%3d:%3d) is %-7s%4s: ",
4557 device->devinfo.devno,
4558 device->discipline ?
4559 device->
4560 discipline->name : "none",
4561 temp->gendisk.major,
4562 i << DASD_PARTN_BITS,
4563 device->name,
4564 (features & DASD_FEATURE_READONLY) ?
4565 "(ro)" : " ");
4566
4567 switch (device->level) {
4568 case DASD_STATE_NEW:
4569 len +=
4570 sprintf (info->data + len,
4571 "new");
4572 break;
4573 case DASD_STATE_KNOWN:
4574 len +=
4575 sprintf (info->data + len,
4576 "detected");
4577 break;
4578 case DASD_STATE_BOXED:
4579 len +=
4580 sprintf (info->data + len,
4581 "boxed");
4582 break;
4583 case DASD_STATE_ACCEPT:
4584 len += sprintf (info->data + len,
4585 "accepted");
4586 break;
4587 case DASD_STATE_INIT:
4588 len += sprintf (info->data + len,
4589 "busy ");
4590 break;
4591 case DASD_STATE_READY:
4592 len += sprintf (info->data + len,
4593 "ready ");
4594 break;
4595 case DASD_STATE_ONLINE:
4596 len += sprintf (info->data + len,
4597 "active ");
4598
4599 if (dasd_check_bp_block (device))
4600 len +=
4601 sprintf (info->data + len,
4602 "n/f ");
4603 else
4604 len +=
4605 sprintf (info->data + len,
4606 "at blocksize: %d, %ld blocks, %ld MB",
4607 device->sizes.bp_block,
4608 device->sizes.blocks,
4609 ((device->
4610 sizes.bp_block >> 9) *
4611 device->sizes.
4612 blocks) >> 11);
4613 break;
4614 default:
4615 len +=
4616 sprintf (info->data + len,
4617 "no stat");
4618 break;
4619 }
4620 } else {
4621 char buffer[7];
4622 dasd_device_name (buffer, i, 0, &temp->gendisk);
4623 if ( devno < 0 ) {
4624 len += sprintf (info->data + len,
4625 "none");
4626 } else {
4627 len += sprintf (info->data + len,
4628 "%04x",devno);
4629 }
4630 len += sprintf (info->data + len,
4631 "(none) at (%3d:%3d) is %-7s%4s: unknown",
4632 temp->gendisk.major,
4633 i << DASD_PARTN_BITS,
4634 buffer,
4635 (features & DASD_FEATURE_READONLY) ?
4636 "(ro)" : " ");
4637 }
4638 if ( dasd_probeonly )
4639 len += sprintf(info->data + len,"(probeonly)");
4640 len += sprintf(info->data + len,"\n");
4641 }
4642 index += 1 << (MINORBITS - DASD_PARTN_BITS);
4643 }
4644 info->len = len;
4645
4646 spin_unlock_irqrestore(&discipline_lock,
4647 flags);
4648 return rc;
4649 }
4650
4651 #define MIN(a,b) ((a)<(b)?(a):(b))
4652
4653 static ssize_t
4654 dasd_generic_read (struct file *file, char *user_buf, size_t user_len,
4655 loff_t * offset)
4656 {
4657 loff_t len;
4658 loff_t n = *offset;
4659 unsigned pos = n;
4660 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
4661
4662 if (n != pos || pos >= p_info->len) {
4663 return 0; /* EOF */
4664 } else {
4665 len = MIN (user_len, (p_info->len - pos));
4666 if (copy_to_user (user_buf, &(p_info->data[pos]), len))
4667 return -EFAULT;
4668 *offset = pos + len;
4669 return len; /* number of bytes "read" */
4670 }
4671 }
4672
4673 /*
4674 * scan for device range in given string (e.g. 0x0150-0x0155).
4675 * devnos are always hex; a leading 0x is ignored.
4676 */
4677 static char *
4678 dasd_parse_range (char *buffer, dasd_range_t *range)
4679 {
4680 char *str;
4681
4682 /* remove optional 'device ' and 'range=' and search for next digit */
4683 for (str = buffer + 4; isspace(*str); str++);
4684
4685 if (strncmp (str, "device ", 7) == 0)
4686 for (str = str + 7; isspace(*str); str++);
4687
4688 if (strncmp (str, "range=", 6) == 0)
4689 for (str = str + 6; isspace(*str); str++);
4690
4691 range->to = range->from = dasd_strtoul (str,
4692 &str,
4693 &(range->features));
4694
4695 if (*str == '-') {
4696 str++;
4697 range->to = dasd_strtoul (str,
4698 &str,
4699 &(range->features));
4700 }
4701
4702 /* remove blanks after device range */
4703 for (; isspace(*str); str++);
4704
4705 if (range->from < 0 || range->to < 0) {
4706 MESSAGE_LOG (KERN_WARNING,
4707 "/proc/dasd/devices: range parse error in '%s'",
4708 buffer);
4709 return ERR_PTR (-EINVAL);
4710 }
4711
4712 return str;
4713
4714 } /* end dasd_parse_range */
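/*
 * Illustrative inputs (after the leading 4-character verb such as 'set ',
 * 'add ' or 'brk ') accepted by dasd_parse_range; the devnos are examples
 * only:
 *
 *   "0150", "0x0150-0x0155", "device 0150-0155", "range=0150-0155"
 *
 * A feature suffix following a devno is parsed by dasd_strtoul and is
 * returned in range->features.
 */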
4715
4716 /*
4717 * Enable / Disable the given devices
4718 */
4719 static void
4720 dasd_proc_set (char *buffer)
4721 {
4722 dasd_range_t range;
4723 char *str;
4724
4725 str = dasd_parse_range (buffer,
4726 &range);
4727
4728 /* Negative numbers in str/from/to indicate errors */
4729 if (IS_ERR (str) || (range.from < 0) || (range.to < 0)
4730 || (range.from > 0xFFFF) || (range.to > 0xFFFF))
4731 return;
4732
4733 if (strncmp (str, "on", 2) == 0) {
4734 dasd_enable_ranges (&range, NULL, 0);
4735
4736 } else if (strncmp (str, "off", 3) == 0) {
4737 dasd_disable_ranges (&range, NULL, 0, 1);
4738
4739 } else {
4740 MESSAGE_LOG (KERN_WARNING,
4741 "/proc/dasd/devices: "
4742 "only 'on' and 'off' are alowed in 'set' "
4743 "command ('%s'/'%s')",
4744 buffer,
4745 str);
4746 }
4747
4748 return;
4749
4750 } /* end dasd_proc_set */
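/*
 * Example usage (devnos are illustrative):
 *
 *   echo "set 0150-0155 on"  > /proc/dasd/devices    enable a range
 *   echo "set 0150 off"      > /proc/dasd/devices    disable a device
 */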
4751
4752 /*
4753 * Add the given devices
4754 */
4755 static void
4756 dasd_proc_add (char *buffer)
4757 {
4758 dasd_range_t range;
4759 char *str;
4760
4761 str = dasd_parse_range (buffer,
4762 &range);
4763
4764 /* Negative numbers in str/from/to indicate errors */
4765 if (IS_ERR (str) || (range.from < 0) || (range.to < 0)
4766 || (range.from > 0xFFFF) || (range.to > 0xFFFF))
4767 return;
4768
4769 dasd_add_range (range.from, range.to, range.features);
4770 dasd_enable_ranges (&range, NULL, 0);
4771
4772 return;
4773
4774 } /* end dasd_proc_add */
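/*
 * Example usage (devno is illustrative):
 *
 *   echo "add 0152" > /proc/dasd/devices    add the devno to the known
 *                                           ranges and enable it
 */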
4775
4776 /*
4777 * Break the lock of a given 'boxed' dasd.
4778 * If the dasd is not in status 'boxed', just return.
4779 */
4780 static int
4781 dasd_break_boxed (dasd_range_t *range,
4782 dasd_device_t *device)
4783 {
4784 int rc = 0;
4785 dasd_discipline_t *discipline;
4786 struct list_head *lh = dasd_disc_head.next;
4787
4788 /* check device status */
4789 if (device->level != DASD_STATE_BOXED) {
4790 MESSAGE (KERN_WARNING,
4791 "/proc/dasd/devices: the given device (%04X) "
4792 "is not 'boxed')",
4793 device->devinfo.devno);
4794 rc = -EINVAL;
4795 goto out;
4796 }
4797
4798 /* force eckd discipline */
4799 do {
4800 discipline = list_entry(lh,
4801 dasd_discipline_t,
4802 list);
4803
4804 if (strncmp (discipline->name, range->discipline, 4) == 0)
4805 break; /* discipline found */
4806
4807 lh = lh->next; /* check next discipline in list */
4808 if (lh == &dasd_disc_head) {
4809 discipline = NULL;
4810 break;
4811 }
4812 } while ( 1 );
4813 device->discipline = discipline;
4814
4815 if (device->discipline == NULL) {
4816 MESSAGE (KERN_WARNING, "%s",
4817 "/proc/dasd/devices: discipline not found "
4818 "in discipline list");
4819 rc = -EINVAL;
4820 goto out;
4821 }
4822
4823 /* register the int handler to enable IO */
4824 rc = s390_request_irq_special (device->devinfo.irq,
4825 device->discipline->int_handler,
4826 dasd_not_oper_handler,
4827 SA_DOPATHGROUP | SA_FORCE,
4828 DASD_NAME,
4829 &device->dev_status);
4830 if ( rc )
4831 goto out;
4832
4833 rc = dasd_steal_lock (device);
4834
4835 /* unregister the int handler to enable re-sensing */
4836 free_irq (device->devinfo.irq,
4837 &device->dev_status);
4838
4839 device->discipline = NULL;
4840 device->level = DASD_STATE_NEW;
4841
4842 out:
4843 return rc;
4844
4845 } /* end dasd_break_boxed */
4846
4847 /*
4848 * Handle the procfs call 'brk <devno> <discipline>'.
4849 */
4850 static void
4851 dasd_proc_brk (char *buffer)
4852 {
4853 char *str;
4854 dasd_range_t range;
4855 dasd_device_t *device;
4856 int rc = 0;
4857
4858 str = dasd_parse_range (buffer,
4859 &range);
4860
4861 if (IS_ERR (str))
4862 return;
4863
4864 if (range.from != range.to) {
4865 MESSAGE (KERN_WARNING, "%s",
4866 "/proc/dasd/devices: 'brk <devno> <discipline> "
4867 "is only allowed for a single device (no ranges)");
4868 return;
4869 }
4870
4871 /* check for discipline = 'eckd' */
4872 if (strncmp(str, "eckd", 4) != 0) {
4873 MESSAGE_LOG (KERN_WARNING,
4874 "/proc/dasd/devices: 'brk <devno> <discipline> "
4875 "is only allowed for 'eckd' (%s)",
4876 str);
4877 return;
4878 }
4879
4880 memcpy (range.discipline, "ECKD", 4);
4881
4882 device = *(dasd_device_from_devno (range.from));
4883 if (device == NULL) {
4884 MESSAGE (KERN_WARNING,
4885 "/proc/dasd/devices: no device found for devno (%04X)",
4886 range.from);
4887 return;
4888 }
4889
4890 rc = dasd_break_boxed (&range,
4891 device);
4892 if (rc == 0) {
4893 /* trigger CIO to resense the device */
4894 s390_trigger_resense (device->devinfo.irq);
4895
4896 /* get the device online now */
4897 dasd_enable_ranges (&range,
4898 NULL,
4899 0);
4900 }
4901
4902 } /* end dasd_proc_brk */
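/*
 * Example usage (devno is illustrative); 'brk' accepts only a single devno
 * and only the 'eckd' discipline:
 *
 *   echo "brk 0150 eckd" > /proc/dasd/devices
 */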
4903
4904 static ssize_t
4905 dasd_devices_write (struct file *file, const char *user_buf,
4906 size_t user_len, loff_t * offset)
4907 {
4908 char *buffer;
4909
4910 if (user_len > PAGE_SIZE)
4911 return -EINVAL;
4912
4913 buffer = vmalloc (user_len+1);
4914 if (buffer == NULL)
4915 return -ENOMEM;
4916
4917 if (copy_from_user (buffer, user_buf, user_len)) {
4918 vfree (buffer);
4919 return -EFAULT;
4920 }
4921
4922 /* replace LF with '\0' */
4923 if (user_len && buffer[user_len - 1] == '\n') {
4924 buffer[user_len -1] = '\0';
4925 } else {
4926 buffer[user_len] = '\0';
4927 }
4928
4929 MESSAGE_LOG (KERN_INFO,
4930 "/proc/dasd/devices: '%s'",
4931 buffer);
4932
4933 if (strncmp (buffer, "set ", 4) == 0) {
4934 /* handle 'set <devno> on/off' */
4935 dasd_proc_set (buffer);
4936
4937 } else if (strncmp (buffer, "add ", 4) == 0) {
4938 /* handle 'add <devno>' */
4939 dasd_proc_add (buffer);
4940
4941 } else if (strncmp (buffer, "brk ", 4) == 0) {
4942 /* handle 'brk <devno> <discipline>' */
4943 dasd_proc_brk (buffer);
4944
4945 } else {
4946 MESSAGE (KERN_WARNING, "%s",
4947 "/proc/dasd/devices: only 'set' ,'add' and "
4948 "'brk' are supported verbs");
4949 vfree (buffer);
4950 return -EINVAL;
4951 }
4952
4953 vfree (buffer);
4954 return user_len;
4955 }
4956
4957 static int
4958 dasd_devices_close (struct inode *inode, struct file *file)
4959 {
4960 int rc = 0;
4961 tempinfo_t *p_info = (tempinfo_t *) file->private_data;
4962 if (p_info) {
4963 if (p_info->data)
4964 vfree (p_info->data);
4965 vfree (p_info);
4966 }
4967 MOD_DEC_USE_COUNT;
4968 return rc;
4969 }
4970
4971 static struct file_operations dasd_devices_file_ops = {
4972 read:dasd_generic_read, /* read */
4973 write:dasd_devices_write, /* write */
4974 open:dasd_devices_open, /* open */
4975 release:dasd_devices_close, /* close */
4976 };
4977
4978 static struct inode_operations dasd_devices_inode_ops = {
4979 };
4980
4981 static int
4982 dasd_statistics_open (struct inode *inode, struct file *file)
4983 {
4984 int rc = 0;
4985 int len = 0;
4986 tempinfo_t *info;
4987 int shift, i, help = 0;
4988
4989 MOD_INC_USE_COUNT;
4990 info = (tempinfo_t *) vmalloc (sizeof (tempinfo_t));
4991
4992 if (info == NULL) {
4993
4994 MESSAGE (KERN_WARNING, "%s",
4995 "No memory available for data (tempinfo)");
4996
4997 MOD_DEC_USE_COUNT;
4998 return -ENOMEM;
4999 } else {
5000 file->private_data = (void *) info;
5001 }
5002 /* FIXME! determine space needed in a better way */
5003 info->data = (char *) vmalloc (PAGE_SIZE);
5004
5005 if (info->data == NULL) {
5006
5007 MESSAGE (KERN_WARNING, "%s",
5008 "No memory available for data (info->data)");
5009
5010 vfree (info);
5011 file->private_data = NULL;
5012 MOD_DEC_USE_COUNT;
5013 return -ENOMEM;
5014 }
5015
5016 /* check for active profiling */
5017 if (dasd_profile_level == DASD_PROFILE_OFF) {
5018
5019 info->len = sprintf (info->data,
5020 "Statistics are off - they might be "
5021 "switched on using 'echo set on > "
5022 "/proc/dasd/statistics'\n");
5023 return rc;
5024 }
5025
5026 /* prevent counter overflow on output */
5027 for (shift = 0, help = dasd_global_profile.dasd_io_reqs;
5028 help > 9999999; help = help >> 1, shift++) ;
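/* The scale factor 2^-shift, derived from the total request count, is */
/* applied to every histogram bucket below so that the 7-character     */
/* columns do not overflow.                                            */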
5029
5030 len = sprintf (info->data, "%d dasd I/O requests\n",
5031 dasd_global_profile.dasd_io_reqs);
5032 len += sprintf (info->data + len, "with %d sectors(512B each)\n",
5033 dasd_global_profile.dasd_io_sects);
5034
5035 len += sprintf (info->data + len,
5036 " __<4 ___8 __16 __32 __64 "
5037 " _128 _256 _512 __1k __2k "
5038 " __4k __8k _16k _32k _64k "
5039 " 128k\n");
5040
5041 len += sprintf (info->data + len,
5042 " _256 _512 __1M __2M __4M "
5043 " __8M _16M _32M _64M 128M "
5044 " 256M 512M __1G __2G __4G "
5045 " _>4G\n");
5046
5047 len += sprintf (info->data + len, "Histogram of sizes (512B secs)\n");
5048 for (i = 0; i < 16; i++) {
5049 len += sprintf (info->data + len, "%7d ",
5050 dasd_global_profile.dasd_io_secs[i] >> shift);
5051 }
5052 len += sprintf (info->data + len, "\n");
5053
5054 for (; i < 32; i++) {
5055 len += sprintf (info->data + len, "%7d ",
5056 dasd_global_profile.dasd_io_secs[i] >> shift);
5057 }
5058 len += sprintf (info->data + len, "\n");
5059
5060 len += sprintf (info->data + len,
5061 "Histogram of I/O times (microseconds)\n");
5062
5063 for (i = 0; i < 16; i++) {
5064 len += sprintf (info->data + len, "%7d ",
5065 dasd_global_profile.dasd_io_times[i] >> shift);
5066 }
5067 len += sprintf (info->data + len, "\n");
5068 for (; i < 32; i++) {
5069 len += sprintf (info->data + len, "%7d ",
5070 dasd_global_profile.dasd_io_times[i] >> shift);
5071 }
5072 len += sprintf (info->data + len, "\n");
5073
5074 len += sprintf (info->data + len, "Histogram of I/O times per sector\n");
5075 for (i = 0; i < 16; i++) {
5076 len += sprintf (info->data + len, "%7d ",
5077 dasd_global_profile.dasd_io_timps[i] >> shift);
5078 }
5079 len += sprintf (info->data + len, "\n");
5080 for (; i < 32; i++) {
5081 len += sprintf (info->data + len, "%7d ",
5082 dasd_global_profile.dasd_io_timps[i] >> shift);
5083 }
5084 len += sprintf (info->data + len, "\n");
5085
5086 len += sprintf (info->data + len, "Histogram of I/O time till ssch\n");
5087 for (i = 0; i < 16; i++) {
5088 len += sprintf (info->data + len, "%7d ",
5089 dasd_global_profile.dasd_io_time1[i] >> shift);
5090 }
5091 len += sprintf (info->data + len, "\n");
5092 for (; i < 32; i++) {
5093 len += sprintf (info->data + len, "%7d ",
5094 dasd_global_profile.dasd_io_time1[i] >> shift);
5095 }
5096 len += sprintf (info->data + len, "\n");
5097
5098 len += sprintf (info->data + len,
5099 "Histogram of I/O time between ssch and irq\n");
5100 for (i = 0; i < 16; i++) {
5101 len += sprintf (info->data + len, "%7d ",
5102 dasd_global_profile.dasd_io_time2[i] >> shift);
5103 }
5104 len += sprintf (info->data + len, "\n");
5105 for (; i < 32; i++) {
5106 len += sprintf (info->data + len, "%7d ",
5107 dasd_global_profile.dasd_io_time2[i] >> shift);
5108 }
5109 len += sprintf (info->data + len, "\n");
5110
5111 len += sprintf (info->data + len,
5112 "Histogram of I/O time between ssch and irq per "
5113 "sector\n");
5114
5115 for (i = 0; i < 16; i++) {
5116 len += sprintf (info->data + len, "%7d ",
5117 dasd_global_profile.dasd_io_time2ps[i] >> shift);
5118 }
5119 len += sprintf (info->data + len, "\n");
5120 for (; i < 32; i++) {
5121 len += sprintf (info->data + len, "%7d ",
5122 dasd_global_profile.dasd_io_time2ps[i] >> shift);
5123 }
5124 len += sprintf (info->data + len, "\n");
5125
5126 len += sprintf (info->data + len,
5127 "Histogram of I/O time between irq and end\n");
5128 for (i = 0; i < 16; i++) {
5129 len +=
5130 sprintf (info->data + len, "%7d ",
5131 dasd_global_profile.dasd_io_time3[i] >> shift);
5132 }
5133 len += sprintf (info->data + len, "\n");
5134 for (; i < 32; i++) {
5135 len += sprintf (info->data + len, "%7d ",
5136 dasd_global_profile.dasd_io_time3[i] >> shift);
5137 }
5138 len += sprintf (info->data + len, "\n");
5139
5140 len += sprintf (info->data + len,
5141 "# of req in chanq at enqueuing (1..32) \n");
5142 for (i = 0; i < 16; i++) {
5143 len += sprintf (info->data + len, "%7d ",
5144 dasd_global_profile.dasd_io_nr_req[i] >> shift);
5145 }
5146 len += sprintf (info->data + len, "\n");
5147 for (; i < 32; i++) {
5148 len += sprintf (info->data + len, "%7d ",
5149 dasd_global_profile.dasd_io_nr_req[i] >> shift);
5150 }
5151 len += sprintf (info->data + len, "\n");
5152
5153 info->len = len;
5154 return rc;
5155 }
5156
5157 static ssize_t
5158 dasd_statistics_write (struct file *file, const char *user_buf,
5159 size_t user_len, loff_t * offset)
5160 {
5161 char *buffer;
5162
5163 if(user_len > 65536)
5164 user_len = 65536;
5165
5166 buffer = vmalloc (user_len + 1); /* + 1 for the terminating '\0' below */
5167
5168 if (buffer == NULL)
5169 return -ENOMEM;
5170
5171 if (copy_from_user (buffer, user_buf, user_len)) {
5172 vfree (buffer);
5173 return -EFAULT;
5174 }
5175
5176 buffer[user_len] = 0;
5177
5178 MESSAGE (KERN_INFO,
5179 "/proc/dasd/statictics: '%s'",
5180 buffer);
5181
5182 #ifdef DASD_PROFILE
5183 /* check for valid verbs */
5184 if (strncmp (buffer, "reset", 5) &&
5185 strncmp (buffer, "set ", 4) ) {
5186
5187 MESSAGE (KERN_WARNING, "%s",
5188 "/proc/dasd/statistics: only 'set' and "
5189 "'reset' are supported verbs");
5190
5191 vfree (buffer);
5192 return -EINVAL;
5193 }
5194
5195 if (!strncmp (buffer, "reset", 5)) {
5196
5197 /* reset the statistics */
5198 memset (&dasd_global_profile,
5199 0,
5200 sizeof (dasd_profile_info_t));
5201
5202 MESSAGE (KERN_INFO, "%s",
5203 "Statictics reset");
5204
5205 } else {
5206
5207 /* 'set xxx' was given */
5208 int offset = 4;
5209
5210 while (buffer[offset] && !isalnum (buffer[offset]))
5211 offset++;
5212
5213 if (!strncmp (buffer + offset, "on", 2)) {
5214
5215 /* switch on statistics profiling */
5216 dasd_profile_level = DASD_PROFILE_ON;
5217
5218 MESSAGE (KERN_INFO, "%s",
5219 "Statictics switched on");
5220
5221 } else if (!strncmp (buffer + offset, "off", 3)) {
5222
5223 /* switch off and reset statistics profiling */
5224 memset (&dasd_global_profile,
5225 0,
5226 sizeof (dasd_profile_info_t));
5227
5228 dasd_profile_level = DASD_PROFILE_OFF;
5229
5230 MESSAGE (KERN_INFO, "%s",
5231 "Statictics switched off");
5232
5233 } else {
5234
5235 MESSAGE (KERN_WARNING, "%s",
5236 "/proc/dasd/statistics: only 'set on' and "
5237 "'set off' are supported verbs");
5238 }
5239 }
5240 #else
5241 MESSAGE (KERN_WARNING, "%s",
5242 "/proc/dasd/statistics: is not activated in this "
5243 "kernel");
5244
5245
5246 #endif /* DASD_PROFILE */
5247 vfree (buffer);
5248 return user_len;
5249 }
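/*
 * Example usage:
 *
 *   echo "set on"  > /proc/dasd/statistics    switch profiling on
 *   echo "set off" > /proc/dasd/statistics    switch it off and clear data
 *   echo "reset"   > /proc/dasd/statistics    clear the collected data
 */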
5250
5251 static struct file_operations dasd_statistics_file_ops = {
5252 read:dasd_generic_read, /* read */
5253 write:dasd_statistics_write, /* write */
5254 open:dasd_statistics_open, /* open */
5255 release:dasd_devices_close, /* close */
5256 };
5257
5258 static struct inode_operations dasd_statistics_inode_ops = {
5259 };
5260
5261 int
5262 dasd_proc_init (void)
5263 {
5264 int rc = 0;
5265 dasd_proc_root_entry = proc_mkdir ("dasd", &proc_root);
5266 dasd_devices_entry = create_proc_entry ("devices",
5267 S_IFREG | S_IRUGO | S_IWUSR,
5268 dasd_proc_root_entry);
5269 dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
5270 dasd_devices_entry->proc_iops = &dasd_devices_inode_ops;
5271 dasd_statistics_entry = create_proc_entry ("statistics",
5272 S_IFREG | S_IRUGO | S_IWUSR,
5273 dasd_proc_root_entry);
5274 dasd_statistics_entry->proc_fops = &dasd_statistics_file_ops;
5275 dasd_statistics_entry->proc_iops = &dasd_statistics_inode_ops;
5276 return rc;
5277 }
5278
5279 void
5280 dasd_proc_cleanup (void)
5281 {
5282 remove_proc_entry ("devices", dasd_proc_root_entry);
5283 remove_proc_entry ("statistics", dasd_proc_root_entry);
5284 remove_proc_entry ("dasd", &proc_root);
5285 }
5286
5287 #endif /* CONFIG_PROC_FS */
5288
5289 /********************************************************************************
5290 * SECTION: Initializing the driver
5291 ********************************************************************************/
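/*
 * dasd_request_module runs as a kernel thread: it waits until the root
 * filesystem is mounted and then retries request_module() for the given
 * discipline module in 5-second steps until the load succeeds.
 */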
5292 int
5293 dasd_request_module ( void *name ) {
5294 int rc = -ERESTARTSYS;
5295 strcpy(current->comm, name);
5296 daemonize();
5297 while ( current->fs->root == NULL ) { /* wait for root-FS */
5298 DECLARE_WAIT_QUEUE_HEAD(wait);
5299 sleep_on_timeout(&wait,HZ); /* wait in steps of one second */
5300 }
5301 while ( (rc=request_module(name)) != 0 ) {
5302 DECLARE_WAIT_QUEUE_HEAD(wait);
5303
5304 MESSAGE_LOG (KERN_INFO,
5305 "request_module returned %d for %s",
5306 rc,
5307 (char*)name);
5308
5309 sleep_on_timeout(&wait,5* HZ); /* wait in steps of 5 seconds */
5310 }
5311 return rc;
5312 }
5313
5314 int __init
5315 dasd_init (void)
5316 {
5317 int rc = 0;
5318 int irq;
5319 major_info_t *major_info = NULL;
5320 struct list_head *l;
5321
5322 MESSAGE (KERN_INFO, "%s",
5323 "initializing...");
5324
5325 init_waitqueue_head (&dasd_init_waitq);
5326
5327 /* register 'common' DASD debug area, used for all DBF_XXX calls */
5328 dasd_debug_area = debug_register (DASD_NAME,
5329 0, /* size of debug area */
5330 2, /* number of areas */
5331 8 * sizeof (long));
5332
5333 debug_register_view (dasd_debug_area,
5334 &debug_sprintf_view);
5335
5336 if (dasd_debug_area == NULL) {
5337 goto failed;
5338 }
5339
5340 debug_set_level (dasd_debug_area,
5341 DBF_ERR);
5342
5343 DBF_EVENT (DBF_EMERG, "%s",
5344 "debug area created");
5345
5346 dasd_devfs_handle = devfs_mk_dir (NULL,
5347 DASD_NAME,
5348 NULL);
5349
5350 if (dasd_devfs_handle < 0) {
5351
5352 DBF_EVENT (DBF_ALERT, "%s",
5353 "no devfs");
5354 goto failed;
5355 }
5356
5357 list_add_tail(&dasd_major_static.list, &dasd_major_info);
5358 list_for_each (l, &dasd_major_info) {
5359 major_info = list_entry (l, major_info_t, list);
5360 if ((rc = dasd_register_major (major_info)) > 0) {
5361
5362 MESSAGE (KERN_INFO,
5363 "Registered successfully to major no %u",
5364 major_info->gendisk.major);
5365 } else {
5366
5367 MESSAGE (KERN_WARNING,
5368 "Couldn't register successfully to "
5369 "major no %d",
5370 major_info->gendisk.major);
5371
5372 /* revert registration of major infos */
5373 goto failed;
5374 }
5375 }
5376 #ifndef MODULE
5377 dasd_split_parm_string (dasd_parm_string);
5378 #endif /* ! MODULE */
5379 rc = dasd_parse (dasd);
5380 if (rc) {
5381
5382 DBF_EVENT (DBF_ALERT, "%s",
5383 "invalid range found");
5384 goto failed;
5385 }
5386
5387 #ifdef CONFIG_PROC_FS
5388 rc = dasd_proc_init ();
5389 if (rc) {
5390
5391 DBF_EVENT (DBF_ALERT, "%s",
5392 "no proc-FS");
5393 goto failed;
5394 }
5395 #endif /* CONFIG_PROC_FS */
5396
5397 genhd_dasd_name = dasd_device_name;
5398 genhd_dasd_ioctl = dasd_ioctl;
5399
5400 if (dasd_autodetect) { /* update device range to all devices */
5401 for (irq = get_irq_first (); irq != -ENODEV;
5402 irq = get_irq_next (irq)) {
5403 int devno = get_devno_by_irq (irq);
5404 int index = dasd_devindex_from_devno (devno);
5405 if (index < 0) { /* not included in ranges */
5406
5407 DBF_EVENT (DBF_CRIT,
5408 "add %04x to range",
5409 devno);
5410
5411 dasd_add_range (devno, devno,
5412 DASD_FEATURE_DEFAULT);
5413 }
5414 }
5415 }
5416
5417 if (MACHINE_IS_VM) {
5418 #ifdef CONFIG_DASD_DIAG
5419 rc = dasd_diag_init ();
5420 if (rc == 0) {
5421
5422 MESSAGE (KERN_INFO, "%s",
5423 "Registered DIAG discipline successfully");
5424
5425 } else {
5426
5427 DBF_EVENT (DBF_ALERT, "%s",
5428 "Register DIAG discipline failed");
5429
5430 goto failed;
5431 }
5432 #endif /* CONFIG_DASD_DIAG */
5433 #if defined(CONFIG_DASD_DIAG_MODULE) && defined(CONFIG_DASD_AUTO_DIAG)
5434 kernel_thread(dasd_request_module,"dasd_diag_mod",SIGCHLD);
5435 #endif /* CONFIG_DASD_AUTO_DIAG */
5436 }
5437 #ifdef CONFIG_DASD_ECKD
5438 rc = dasd_eckd_init ();
5439 if (rc == 0) {
5440
5441 MESSAGE (KERN_INFO, "%s",
5442 "Registered ECKD discipline successfully");
5443 } else {
5444
5445 DBF_EVENT (DBF_ALERT, "%s",
5446 "Register ECKD discipline failed");
5447
5448 goto failed;
5449 }
5450 #endif /* CONFIG_DASD_ECKD */
5451 #if defined(CONFIG_DASD_ECKD_MODULE) && defined(CONFIG_DASD_AUTO_ECKD)
5452 kernel_thread(dasd_request_module,"dasd_eckd_mod",SIGCHLD);
5453 #endif /* CONFIG_DASD_AUTO_ECKD */
5454 #ifdef CONFIG_DASD_FBA
5455 rc = dasd_fba_init ();
5456 if (rc == 0) {
5457
5458 MESSAGE (KERN_INFO, "%s",
5459 "Registered FBA discipline successfully");
5460 } else {
5461
5462 DBF_EVENT (DBF_ALERT, "%s",
5463 "Register FBA discipline failed");
5464
5465 goto failed;
5466 }
5467 #endif /* CONFIG_DASD_FBA */
5468 #if defined(CONFIG_DASD_FBA_MODULE) && defined(CONFIG_DASD_AUTO_FBA)
5469 kernel_thread(dasd_request_module,"dasd_fba_mod",SIGCHLD);
5470 #endif /* CONFIG_DASD_AUTO_FBA */
5471 {
5472 char **disc=dasd_disciplines;
5473 while (*disc) {
5474 kernel_thread(dasd_request_module,*disc,SIGCHLD);
5475 disc++;
5476 }
5477 }
5478
5479 rc = 0;
5480 goto out;
5481 failed:
5482
5483 MESSAGE (KERN_INFO, "%s",
5484 "initialization not performed due to errors");
5485
5486 cleanup_dasd ();
5487 out:
5488
5489 MESSAGE (KERN_INFO, "%s",
5490 "initialization finished");
5491
5492 return rc;
5493 }
5494
5495 static void
5496 cleanup_dasd (void)
5497 {
5498 int i,rc=0;
5499 major_info_t *major_info = NULL;
5500 struct list_head *l,*n;
5501 dasd_range_t *range;
5502
5503 MESSAGE (KERN_INFO, "%s",
5504 "shutting down");
5505
5506 dasd_disable_ranges (&dasd_range_head,
5507 NULL, 1, 1);
5508
5509 #ifdef CONFIG_DASD_DIAG
5510 if (MACHINE_IS_VM) {
5511 dasd_diag_cleanup ();
5512
5513 MESSAGE (KERN_INFO, "%s",
5514 "De-Registered DIAG discipline successfully");
5515 }
5516 #endif /* CONFIG_DASD_DIAG */
5517
5518 #ifdef CONFIG_DASD_FBA
5519 dasd_fba_cleanup ();
5520
5521 MESSAGE (KERN_INFO, "%s",
5522 "De-Registered FBA discipline successfully");
5523 #endif /* CONFIG_DASD_FBA */
5524
5525 #ifdef CONFIG_DASD_ECKD
5526 dasd_eckd_cleanup ();
5527
5528 MESSAGE (KERN_INFO, "%s",
5529 "De-Registered ECKD discipline successfully");
5530 #endif /* CONFIG_DASD_ECKD */
5531
5532 genhd_dasd_name = NULL;
5533 genhd_dasd_ioctl = NULL;
5534
5535 #ifdef CONFIG_PROC_FS
5536 dasd_proc_cleanup ();
5537 #endif /* CONFIG_PROC_FS */
5538
5539 list_for_each_safe (l, n, &dasd_major_info) {
5540 major_info = list_entry (l, major_info_t, list);
5541 for (i = 0; i < DASD_PER_MAJOR; i++) {
5542 kfree (major_info->dasd_device[i]);
5543 }
5544 if ((major_info->flags & DASD_MAJOR_INFO_REGISTERED) &&
5545 (rc = dasd_unregister_major (major_info)) == 0) {
5546
5547 MESSAGE (KERN_INFO,
5548 "Unregistered successfully from major no %u",
5549 major_info->gendisk.major);
5550 } else {
5551
5552 MESSAGE (KERN_WARNING,
5553 "Couldn't unregister successfully from major "
5554 "no %d rc = %d",
5555 major_info->gendisk.major,
5556 rc);
5557 }
5558 }
5559 list_for_each_safe (l, n, &dasd_range_head.list) {
5560 range = list_entry (l, dasd_range_t, list);
5561 dasd_remove_range(range);
5562 }
5563
5564 #ifndef MODULE
5565 for( i = 0; i < 256; i++ )
5566 if ( dasd[i] ) {
5567 kfree(dasd[i]);
5568 dasd[i] = NULL;
5569 }
5570 #endif /* MODULE */
5571 if (dasd_devfs_handle)
5572 devfs_unregister(dasd_devfs_handle);
5573
5574 if (dasd_debug_area != NULL ) {
5575 debug_unregister(dasd_debug_area);
5576 dasd_debug_area = NULL;
5577 }
5578
5579 MESSAGE (KERN_INFO, "%s",
5580 "shutdown completed");
5581 }
5582
5583 #ifdef MODULE
5584 int
5585 init_module (void)
5586 {
5587 int rc = 0;
5588 rc = dasd_init ();
5589 return rc;
5590 }
5591
5592 void
5593 cleanup_module (void)
5594 {
5595 cleanup_dasd ();
5596 return;
5597 }
5598 #endif
5599
5600 /*
5601 * Overrides for Emacs so that we follow Linus's tabbing style.
5602 * Emacs will notice this stuff at the end of the file and automatically
5603 * adjust the settings for this buffer only. This must remain at the end
5604 * of the file.
5605 * ---------------------------------------------------------------------------
5606 * Local variables:
5607 * c-indent-level: 4
5608 * c-brace-imaginary-offset: 0
5609 * c-brace-offset: -4
5610 * c-argdecl-indent: 4
5611 * c-label-offset: -4
5612 * c-continued-statement-offset: 4
5613 * c-continued-brace-offset: 0
5614 * indent-tabs-mode: nil
5615 * tab-width: 8
5616 * End:
5617 */
5618