/*
 * drivers/s390/cio/chp.c
 *
 * Copyright IBM Corp. 1999,2010
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>

#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"

#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL	(1 * HZ)

enum cfg_task_t {
	cfg_none,
	cfg_configure,
	cfg_deconfigure
};

/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_MUTEX(cfg_lock);
static int cfg_busy;

/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);

/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;

/* Workqueue to perform pending configure tasks. */
static struct workqueue_struct *chp_wq;
static struct work_struct cfg_work;

/* Wait queue for configure completion events. */
static wait_queue_head_t cfg_wait_queue;

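/*
 * Note: the "vary" operations below change only Linux' logical view of a
 * channel path (struct channel_path->state), while the "configure"
 * operations request an actual hardware (de)configuration through SCLP.
 */
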
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
	chpid_to_chp(chpid)->state = onoff;
}

/* On success return 0 if channel-path is varied offline, 1 if it is varied
 * online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
	return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}

/**
 * chp_get_sch_opm - return opm for subchannel
 * @sch: subchannel
 *
 * Calculate and return the operational path mask (opm) based on the chpids
 * used by the subchannel and the status of the associated channel-paths.
 * The mask is built most-significant bit first, i.e. bit 7 - i corresponds
 * to chpid[i].
 */
u8 chp_get_sch_opm(struct subchannel *sch)
{
	struct chp_id chpid;
	int opm;
	int i;

	opm = 0;
	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		opm <<= 1;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (chp_get_status(chpid) != 0)
			opm |= 1;
	}
	return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);

/**
 * chp_is_registered - check if a channel-path is registered
 * @chpid: channel-path ID
 *
 * Return non-zero if a channel-path with the given chpid is registered,
 * zero otherwise.
 */
int chp_is_registered(struct chp_id chpid)
{
	return chpid_to_chp(chpid) != NULL;
}

/*
 * Function: s390_vary_chpid
 * Varies the specified chpid online or offline
 */
static int s390_vary_chpid(struct chp_id chpid, int on)
{
	char dbf_text[15];
	int status;

	sprintf(dbf_text, on ? "varyon%x.%02x" : "varyoff%x.%02x", chpid.cssid,
		chpid.id);
	CIO_TRACE_EVENT(2, dbf_text);

	status = chp_get_status(chpid);
	if (!on && !status)
		return 0;

	set_chp_logically_online(chpid, on);
	chsc_chp_vary(chpid, on);
	return 0;
}

/*
 * Channel measurement related functions
 */
static ssize_t chp_measurement_chars_read(struct file *filp,
					  struct kobject *kobj,
					  struct bin_attribute *bin_attr,
					  char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct device *device;

	device = container_of(kobj, struct device, kobj);
	chp = to_channelpath(device);
	if (!chp->cmg_chars)
		return 0;

	return memory_read_from_buffer(buf, count, &off,
				       chp->cmg_chars,
				       sizeof(struct cmg_chars));
}

static struct bin_attribute chp_measurement_chars_attr = {
	.attr = {
		.name = "measurement_chars",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_chars),
	.read = chp_measurement_chars_read,
};

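/*
 * Copy the measurement block for @chpid from the channel-measurement area.
 * The hardware may update the block concurrently, so the block is re-read
 * until two consecutive reads return the same leading value, ensuring the
 * caller never sees a torn entry.
 */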
static void chp_measurement_copy_block(struct cmg_entry *buf,
				       struct channel_subsystem *css,
				       struct chp_id chpid)
{
	void *area;
	struct cmg_entry *entry, reference_buf;
	int idx;

	if (chpid.id < 128) {
		area = css->cub_addr1;
		idx = chpid.id;
	} else {
		area = css->cub_addr2;
		idx = chpid.id - 128;
	}
	entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	struct device *device;
	unsigned int size;

	device = container_of(kobj, struct device, kobj);
	chp = to_channelpath(device);
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

void chp_remove_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

int chp_add_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t chp_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	int status;

	mutex_lock(&chp->lock);
	status = chp->state;
	mutex_unlock(&chp->lock);

	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}

static ssize_t chp_status_write(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_path *cp = to_channelpath(dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 1);
		mutex_unlock(&cp->lock);
	} else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 0);
		mutex_unlock(&cp->lock);
	} else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_write, chp_status_write);
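
/*
 * Usage example (illustrative path; the chp device sits below its channel
 * subsystem, e.g. css0):
 *   echo on  > /sys/devices/css0/chp0.4a/status	- vary chpid 0.4a online
 *   echo off > /sys/devices/css0/chp0.4a/status	- vary it offline
 */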

static ssize_t chp_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct channel_path *cp;
	int status;

	cp = to_channelpath(dev);
	status = chp_info_get_status(cp->chpid);
	if (status < 0)
		return status;

	return snprintf(buf, PAGE_SIZE, "%d\n", status);
}

static int cfg_wait_idle(void);

static ssize_t chp_configure_write(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct channel_path *cp;
	int val;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cp = to_channelpath(dev);
	chp_cfg_schedule(cp->chpid, val);
	cfg_wait_idle();

	return count;
}

static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
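
/*
 * Reading "configure" returns the SCLP configure state (see
 * chp_info_get_status()): 0 standby, 1 configured, 2 reserved,
 * 3 not recognized. Writing 1 schedules a configure request, writing 0
 * a deconfigure request; the write blocks until all pending requests
 * have been processed.
 */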

static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	u8 type;

	mutex_lock(&chp->lock);
	type = chp->desc.desc;
	mutex_unlock(&chp->lock);
	return sprintf(buf, "%x\n", type);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t chp_shared_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	NULL,
};

static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};

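/* Device release callback: frees the channel_path once the last device
 * reference is dropped (put_device()/device_unregister()). */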
static void chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = to_channelpath(dev);
	kfree(cp);
}

/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_path *chp;
	int ret;

	if (chp_is_registered(chpid))
		return 0;
	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Get channel-measurement characteristics. */
	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		chp->cmg = -1;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		put_device(&chp->dev);
		goto out;
	}
	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
	if (ret) {
		device_unregister(&chp->dev);
		goto out;
	}
	mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
	if (channel_subsystems[chpid.cssid]->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
			device_unregister(&chp->dev);
			mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
			goto out;
		}
	}
	channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
	mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
	goto out;
out_free:
	kfree(chp);
out:
	return ret;
}

/**
 * chp_get_chp_desc - return newly allocated channel-path description
 * @chpid: channel-path ID
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel-path ID. Return %NULL on error.
 * The caller is responsible for freeing the returned copy.
 */
void *chp_get_chp_desc(struct chp_id chpid)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = chpid_to_chp(chpid);
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	mutex_unlock(&chp->lock);
	return desc;
}

/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has become available. */
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has become unavailable. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}

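/*
 * Return the mask of the subchannel paths (bit 0x80 = first path) described
 * by @ssd that lead through the chpid of @link, also matching the full link
 * address where one is valid, or 0 if no path matches.
 */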
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & link->fla_mask) != link->fla))
			continue;
		return mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);

static inline int info_bit_num(struct chp_id id)
{
	return id.id + id.cssid * (__MAX_CHPID + 1);
}

/* Force chp_info refresh on next call to info_update(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}

/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
	int rc;

	mutex_lock(&info_lock);
	rc = 0;
	if (time_after(jiffies, chp_info_expires)) {
		/* Data is too old, update. */
		rc = sclp_chp_read_info(&chp_info);
		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
	}
	mutex_unlock(&info_lock);

	return rc;
}

/**
 * chp_info_get_status - retrieve configure status of a channel-path
 * @chpid: channel-path ID
 *
 * On success, return 0 for standby, 1 for configured, 2 for reserved,
 * 3 for not recognized. Return negative error code on error.
 */
int chp_info_get_status(struct chp_id chpid)
{
	int rc;
	int bit;

	rc = info_update();
	if (rc)
		return rc;

	bit = info_bit_num(chpid);
	mutex_lock(&info_lock);
	if (!chp_test_bit(chp_info.recognized, bit))
		rc = CHP_STATUS_NOT_RECOGNIZED;
	else if (chp_test_bit(chp_info.configured, bit))
		rc = CHP_STATUS_CONFIGURED;
	else if (chp_test_bit(chp_info.standby, bit))
		rc = CHP_STATUS_STANDBY;
	else
		rc = CHP_STATUS_RESERVED;
	mutex_unlock(&info_lock);

	return rc;
}

/* Return configure task for chpid. Must be called with cfg_lock held. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}

/* Set configure task for chpid. Must be called with cfg_lock held. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}

/* Perform one pending configure/deconfigure request, then requeue the work
 * function until no request remains. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	mutex_lock(&cfg_lock);
	t = cfg_none;
	chp_id_for_each(&chpid) {
		t = cfg_get_task(chpid);
		if (t != cfg_none) {
			cfg_set_task(chpid, cfg_none);
			break;
		}
	}
	mutex_unlock(&cfg_lock);

	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		mutex_lock(&cfg_lock);
		cfg_busy = 0;
		mutex_unlock(&cfg_lock);
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	queue_work(chp_wq, &cfg_work);
}

/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	mutex_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	cfg_busy = 1;
	mutex_unlock(&cfg_lock);
	queue_work(chp_wq, &cfg_work);
}

/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	mutex_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	mutex_unlock(&cfg_lock);
}

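/* Wait until all scheduled configure/deconfigure requests have been
 * processed (cfg_busy is cleared by cfg_func()). Interruptible. */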
static int cfg_wait_idle(void)
{
	if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
		return -ERESTARTSYS;
	return 0;
}

static int __init chp_init(void)
{
	struct chp_id chpid;
	int ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	chp_wq = create_singlethread_workqueue("cio_chp");
	if (!chp_wq) {
		crw_unregister_handler(CRW_RSC_CPATH);
		return -ENOMEM;
	}
	INIT_WORK(&cfg_work, cfg_func);
	init_waitqueue_head(&cfg_wait_queue);
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);