/*
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"
/**
 * ccw_device_set_options_mask() - set some options and unset the rest
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, all flags not specified in @flags
 * are cleared.
 * Returns:
 *   %0 on success, -%EINVAL on an invalid flag combination.
 */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *   %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}
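
/*
 * Illustrative sketch (not part of the original driver): a CCW device
 * driver would typically request path grouping and multipathing from its
 * set_online callback before bringing the device up. The callback name
 * below is hypothetical; only the ccw_device_set_options() call and the
 * CCWDEV_* flags are taken from this file.
 *
 *	static int example_set_online(struct ccw_device *cdev)
 *	{
 *		int rc;
 *
 *		rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *						  CCWDEV_DO_MULTIPATH);
 *		if (rc)
 *			return rc;
 *		// ... continue with device-specific initialization ...
 *		return 0;
 *	}
 */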

/**
 * ccw_device_clear_options() - clear some options
 * @cdev: device for which the options are to be cleared
 * @flags: options to be cleared
 *
 * All flags specified in @flags are cleared, the remainder is left untouched.
 */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}

/**
 * ccw_device_is_pathgroup() - determine if paths to this device are grouped
 * @cdev: ccw device
 *
 * Return non-zero if there is a path group, zero otherwise.
 */
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
	return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);

/**
 * ccw_device_is_multipath() - determine if device is operating in multipath mode
 * @cdev: ccw device
 *
 * Return non-zero if device is operating in multipath mode, zero otherwise.
 */
int ccw_device_is_multipath(struct ccw_device *cdev)
{
	return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);

/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *   %0 on success,
 *   -%ENODEV on device not operational,
 *   -%EINVAL on invalid device state.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

/**
 * ccw_device_start_key() - start a s390 channel program with key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *   %0, if the operation was successful;
 *   -%EBUSY, if the device is busy, or status pending;
 *   -%EACCES, if no path specified in @lpm is operational;
 *   -%ENODEV, if the device is not operational.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, __u8 key,
			 unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		break;
	case -EACCES:
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}
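
/*
 * Illustrative sketch (not part of the original driver): starting a single
 * no-operation CCW from a device driver. The caller must hold the ccw
 * device lock with interrupts disabled; completion is reported through the
 * driver's registered interrupt handler. The helper name is hypothetical;
 * ccw_device_start(), get_ccwdev_lock() and struct ccw1 are the existing
 * interfaces.
 *
 *	static int example_start_nop(struct ccw_device *cdev)
 *	{
 *		struct ccw1 *ccw;
 *		unsigned long flags;
 *		int rc;
 *
 *		// The channel program must be 31-bit addressable.
 *		ccw = kzalloc(sizeof(*ccw), GFP_KERNEL | GFP_DMA);
 *		if (!ccw)
 *			return -ENOMEM;
 *		ccw->cmd_code = 0x03;		// no-operation
 *		ccw->flags = CCW_FLAG_SLI;	// suppress incorrect length
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_start(cdev, ccw, (unsigned long)ccw, 0, 0);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		// On success the CCW must stay allocated until the interrupt
 *		// handler reports completion; free it here only on error.
 *		if (rc)
 *			kfree(ccw);
 *		return rc;
 *	}
 */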

/**
 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *   %0, if the operation was successful;
 *   -%EBUSY, if the device is busy, or status pending;
 *   -%EACCES, if no path specified in @lpm is operational;
 *   -%ENODEV, if the device is not operational.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

/**
 * ccw_device_start() - start a s390 channel program
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *   %0, if the operation was successful;
 *   -%EBUSY, if the device is busy, or status pending;
 *   -%EACCES, if no path specified in @lpm is operational;
 *   -%ENODEV, if the device is not operational.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

/**
 * ccw_device_start_timeout() - start a s390 channel program with timeout
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *   %0, if the operation was successful;
 *   -%EBUSY, if the device is busy, or status pending;
 *   -%EACCES, if no path specified in @lpm is operational;
 *   -%ENODEV, if the device is not operational.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm,
			     unsigned long flags, int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}


/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O request
 *	     is returned
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * Returns:
 *   %0 on success,
 *   -%ENODEV on device not operational,
 *   -%EINVAL on invalid device state,
 *   -%EBUSY on device busy or interrupt pending.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
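
/*
 * Illustrative sketch (not part of the original driver): cancelling an
 * outstanding request. A common pattern is to try the gentler halt first
 * and to fall back to clear if halt cannot be initiated; both calls require
 * the ccw device lock with interrupts disabled. The function name is
 * hypothetical.
 *
 *	static void example_cancel_io(struct ccw_device *cdev)
 *	{
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_halt(cdev, 0);
 *		if (rc == -EBUSY || rc == -EINVAL)
 *			rc = ccw_device_clear(cdev, 0);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		// Termination, if accepted, completes asynchronously through
 *		// the driver's interrupt handler.
 *	}
 */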

/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *   %0 on success,
 *   -%ENODEV on device not operational,
 *   -%EINVAL on invalid device state,
 *   -%EBUSY on device busy or interrupt pending.
 * Context:
 *   Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
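
/*
 * Illustrative sketch (not part of the original driver): resuming a
 * suspended channel program. This only applies if the program was started
 * with the suspend option and contains a CCW with the suspend flag set, so
 * that the subchannel is currently in the suspended state (the check on
 * SCSW_ACTL_SUSPENDED above). The function name is hypothetical.
 *
 *	static int example_resume(struct ccw_device *cdev)
 *	{
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_resume(cdev);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		return rc;
 *	}
 */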

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * We call the device driver's interrupt handler if
	 *  - we received ending status,
	 *  - the action handler requested to see all interrupts,
	 *  - we received an intermediate status,
	 *  - fast notification was requested (primary status), or
	 *  - the interrupt was unsolicited.
	 */
	stctl = scsw_stctl(&cdev->private->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}

/**
 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
 * @cdev: ccw device to inspect
 * @ct: command type to look for
 *
 * During SenseID, command information words (CIWs) describing special
 * commands available to the device may have been stored in the extended
 * sense data. This function searches for CIWs of a specified command
 * type in the extended sense data.
 * Returns:
 *   %NULL if no extended sense data has been stored or if no CIW of the
 *   specified command type could be found,
 *   else a pointer to the CIW of the specified command type.
 */
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}
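
/*
 * Illustrative sketch (not part of the original driver): looking up the
 * device-specific command for reading configuration data (RCD) and using
 * its command code and byte count to set up a CCW. CIW_TYPE_RCD and the
 * struct ciw fields are existing interfaces; the surrounding code is
 * hypothetical.
 *
 *	struct ciw *ciw;
 *	struct ccw1 ccw;
 *
 *	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *	if (!ciw)
 *		return -EOPNOTSUPP;
 *	ccw.cmd_code = ciw->cmd;	// device-specific RCD command
 *	ccw.count = ciw->count;		// length of the RCD data
 *	// ... fill in ccw.cda, then start the channel program ...
 */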

/**
 * ccw_device_get_path_mask() - get currently available paths
 * @cdev: ccw device to be queried
 * Returns:
 *   %0 if no subchannel for the device is available,
 *   else the mask of currently available paths for the ccw device's subchannel.
 */
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev->dev.parent)
		return 0;

	sch = to_subchannel(cdev->dev.parent);
	return sch->lpm;
}
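
/*
 * Illustrative sketch (not part of the original driver): the returned mask
 * has one bit per channel path, 0x80 denoting the first path. A driver
 * might use it to decide whether retrying a request on another path is
 * worthwhile:
 *
 *	__u8 lpm = ccw_device_get_path_mask(cdev);
 *
 *	if (!lpm)
 *		return -ENODEV;		// no usable path left
 *	if (hweight8(lpm) > 1)
 *		;			// more than one path still available
 */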

struct stlck_data {
	struct completion done;
	int rc;
};

void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}

void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_no];
	return chp_get_chp_desc(chpid);
}

/**
 * ccw_device_get_id() - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
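
/*
 * Illustrative sketch (not part of the original driver): struct ccw_dev_id
 * carries the subchannel set id and device number, which drivers commonly
 * use for logging or for matching a device against configuration data:
 *
 *	struct ccw_dev_id dev_id;
 *
 *	ccw_device_get_id(cdev, &dev_id);
 *	pr_info("ssid %x devno %04x\n", dev_id.ssid, dev_id.devno);
 */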

/**
 * ccw_device_tm_start_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
			    unsigned long intparm, u8 lpm, u8 key)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0)
		cdev->private->intparm = intparm;
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_key);
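
/*
 * Illustrative sketch (not part of the original driver): submitting a
 * previously built transport-command word. Construction of the tcw/tccb
 * (see asm/fcx.h) is elided here; as with command-mode I/O, the caller
 * holds the ccw device lock and completion arrives via the interrupt
 * handler. The function name is hypothetical.
 *
 *	static int example_tm_start(struct ccw_device *cdev, struct tcw *tcw)
 *	{
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *		rc = ccw_device_tm_start(cdev, tcw, (unsigned long)tcw, 0);
 *		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *		return rc;
 *	}
 */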

/**
 * ccw_device_tm_start_timeout_key() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	int ret;

	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

/**
 * ccw_device_tm_start() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
			unsigned long intparm, u8 lpm)
{
	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
				       PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);

/**
 * ccw_device_tm_start_timeout() - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
				unsigned long intparm, u8 lpm, int expires)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
					       PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);

/**
 * ccw_device_get_mdc() - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-byte blocks that all paths support, at a minimum,
 * for a transport command. Return values <= 0 indicate failures.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path_desc_fmt1 desc;
	struct chp_id chpid;
	int mdc = 0, ret, i;

	/* Adjust requested path mask to exclude varied off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
		if (ret)
			return ret;
		if (!desc.f)
			return 0;
		if (!desc.r)
			mdc = 1;
		mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
	}

	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
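
/*
 * Illustrative sketch (not part of the original driver): converting the
 * accumulated max data count into a byte limit for transport-mode requests.
 * The variable names are hypothetical; the 64K-byte block unit comes from
 * the description above.
 *
 *	int mdc;
 *	unsigned int max_bytes;
 *
 *	mdc = ccw_device_get_mdc(cdev, 0);	// 0: consider all usable paths
 *	if (mdc <= 0)
 *		return -EOPNOTSUPP;		// treat as "no transport mode"
 *	max_bytes = mdc * 64 * 1024;		// smallest limit over all paths
 */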

/**
 * ccw_device_tm_intrg() - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->schid.sch_no;
}


MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);