/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2010
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

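/*
 * sei_page is reserved for store-event-information requests issued from
 * the machine-check handler, which serializes all access to it;
 * chsc_page is a scratch page shared by all other CHSC requests and is
 * protected by chsc_page_lock.
 */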
static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

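/*
 * Request/response block for the store-subchannel-description CHSC
 * command ("ssd", command code 0x0004), issued by chsc_get_ssd_info()
 * below for one subchannel at a time (f_sch == l_sch).
 */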
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

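/*
 * Extract the CHPID from a link-incident record (LIR). The record is
 * only usable if the incident qualifier marks it as non-null and the
 * incident-node descriptor is valid; the CHPID itself is byte 3 of the
 * first incident-node-descriptor word.
 */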
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

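/*
 * Request/response block for the store-event-information CHSC command
 * (code 0x000e), issued from chsc_process_crw(). The content-code
 * dependent field is sized so that a complete link-incident record fits.
 */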
struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

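/*
 * Handle a channel-report word for the channel subsystem: repeatedly
 * issue store-event-information and dispatch each stored event until
 * the response no longer flags further event information as pending
 * (bit 0x80 in the flags field).
 */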
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &link);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &link);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

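/*
 * Enable or disable the channel-measurement facility via a "secm" CHSC
 * request (command code 0x0016). The cub_addr fields carry the addresses
 * of the two measurement blocks that chsc_secm() below allocates from
 * ZONE_DMA.
 */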
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

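/*
 * Store channel-path description ("scpd", CHSC command code 0x0002).
 * Response format 1 requires the fcs facility and response format 2 the
 * cib facility, hence the css_general_characteristics checks below.
 * @page must point to a 4K scratch area for the request/response block.
 */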
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

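/*
 * Copy only those measurement characteristics whose bit is set in the
 * validity mask (cmcv); entries not marked valid are zeroed.
 */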
static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

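/*
 * Store channel-path measurement characteristics ("scmc", CHSC command
 * code 0x0022) for a single channel path. The characteristics buffer is
 * only kept for cmg 2 and 3; in all other cases the preallocated buffer
 * is freed again before returning.
 */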
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

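/*
 * Enable a channel-subsystem facility identified by @operation_code via
 * an "sda" CHSC request (command code 0x0031). Response code 0x0101
 * indicates that the requested facility is not available and is mapped
 * to -EOPNOTSUPP.
 */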
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

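/*
 * Store channel-subsystem characteristics ("scsc", CHSC command code
 * 0x0010): fill the global css_general_characteristics and
 * css_chsc_characteristics bit fields that the rest of cio consults for
 * facility checks.
 */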
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

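/*
 * chsc_sstpc() and chsc_sstpi() (CHSC command codes 0x0033 and 0x0038)
 * are used by the s390 Server Time Protocol (STP) code to set STP
 * controls and to store STP information; the caller supplies the 4K
 * @page that the request/response block lives in.
 */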
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

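/*
 * chsc_siosl(): ask the channel subsystem to initiate logging for the
 * given subchannel (CHSC command code 0x0046; "siosl" presumably
 * store-I/O-operation-status and initiate logging).
 */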
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);