1 /*
2  * linux/drivers/s390/crypto/ap_bus.c
3  *
4  * Copyright (C) 2006 IBM Corporation
5  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
7  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
8  *	      Felix Beck <felix.beck@de.ibm.com>
9  *	      Holger Dengler <hd@linux.vnet.ibm.com>
10  *
11  * Adjunct processor bus.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2, or (at your option)
16  * any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26  */
27 
28 #define KMSG_COMPONENT "ap"
29 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
30 
31 #include <linux/kernel_stat.h>
32 #include <linux/module.h>
33 #include <linux/init.h>
34 #include <linux/delay.h>
35 #include <linux/err.h>
36 #include <linux/interrupt.h>
37 #include <linux/workqueue.h>
38 #include <linux/slab.h>
39 #include <linux/notifier.h>
40 #include <linux/kthread.h>
41 #include <linux/mutex.h>
42 #include <asm/reset.h>
43 #include <asm/airq.h>
44 #include <linux/atomic.h>
45 #include <asm/isc.h>
46 #include <linux/hrtimer.h>
47 #include <linux/ktime.h>
48 #include <asm/facility.h>
49 
50 #include "ap_bus.h"
51 
52 /* Some prototypes. */
53 static void ap_scan_bus(struct work_struct *);
54 static void ap_poll_all(unsigned long);
55 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
56 static int ap_poll_thread_start(void);
57 static void ap_poll_thread_stop(void);
58 static void ap_request_timeout(unsigned long);
59 static inline void ap_schedule_poll_timer(void);
60 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
61 static int ap_device_remove(struct device *dev);
62 static int ap_device_probe(struct device *dev);
63 static void ap_interrupt_handler(void *unused1, void *unused2);
64 static void ap_reset(struct ap_device *ap_dev);
65 static void ap_config_timeout(unsigned long ptr);
66 static int ap_select_domain(void);
67 
68 /*
69  * Module description.
70  */
71 MODULE_AUTHOR("IBM Corporation");
72 MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
73 		   "Copyright 2006 IBM Corporation");
74 MODULE_LICENSE("GPL");
75 
76 /*
77  * Module parameter
78  */
79 int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
80 module_param_named(domain, ap_domain_index, int, 0000);
81 MODULE_PARM_DESC(domain, "domain index for ap devices");
82 EXPORT_SYMBOL(ap_domain_index);
83 
84 static int ap_thread_flag = 0;
85 module_param_named(poll_thread, ap_thread_flag, int, 0000);
86 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
87 
88 static struct device *ap_root_device = NULL;
89 static DEFINE_SPINLOCK(ap_device_list_lock);
90 static LIST_HEAD(ap_device_list);
91 
92 /*
93  * Workqueue & timer for bus rescan.
94  */
95 static struct workqueue_struct *ap_work_queue;
96 static struct timer_list ap_config_timer;
97 static int ap_config_time = AP_CONFIG_TIME;
98 static DECLARE_WORK(ap_config_work, ap_scan_bus);
99 
100 /*
101  * Tasklet & timer for AP request polling and interrupts
102  */
103 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
104 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
105 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
106 static struct task_struct *ap_poll_kthread = NULL;
107 static DEFINE_MUTEX(ap_poll_thread_mutex);
108 static DEFINE_SPINLOCK(ap_poll_timer_lock);
109 static void *ap_interrupt_indicator;
110 static struct hrtimer ap_poll_timer;
111 /* In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
112  * Under z/VM change to 1500000 nanoseconds to adjust to the z/VM polling rate. */
113 static unsigned long long poll_timeout = 250000;
114 
115 /* Suspend flag */
116 static int ap_suspend_flag;
117 /* Flag to check if the domain was set through the module parameter domain=.
118  * This is important when suspend and resume are done in a z/VM environment
119  * where the domain might change. */
120 static int user_set_domain = 0;
121 static struct bus_type ap_bus_type;
122 
123 /**
124  * ap_using_interrupts() - Returns non-zero if interrupt support is
125  * available.
126  */
127 static inline int ap_using_interrupts(void)
128 {
129 	return ap_interrupt_indicator != NULL;
130 }
131 
132 /**
133  * ap_instructions_available() - Test if AP instructions are available.
134  *
135  * Returns 0 if the AP instructions are installed.
136  */
137 static inline int ap_instructions_available(void)
138 {
139 	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
140 	register unsigned long reg1 asm ("1") = -ENODEV;
141 	register unsigned long reg2 asm ("2") = 0UL;
142 
143 	asm volatile(
144 		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
145 		"0: la    %1,0\n"
146 		"1:\n"
147 		EX_TABLE(0b, 1b)
148 		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
149 	return reg1;
150 }
151 
152 /**
153  * ap_interrupts_available(): Test if AP interrupts are available.
154  *
155  * Returns 1 if AP interrupts are available.
156  */
157 static int ap_interrupts_available(void)
158 {
159 	return test_facility(2) && test_facility(65);
160 }
161 
162 /**
163  * ap_test_queue(): Test adjunct processor queue.
164  * @qid: The AP queue number
165  * @queue_depth: Pointer to queue depth value
166  * @device_type: Pointer to device type value
167  *
168  * Returns AP queue status structure.
169  */
170 static inline struct ap_queue_status
171 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
172 {
173 	register unsigned long reg0 asm ("0") = qid;
174 	register struct ap_queue_status reg1 asm ("1");
175 	register unsigned long reg2 asm ("2") = 0UL;
176 
177 	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
178 		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
179 	*device_type = (int) (reg2 >> 24);
180 	*queue_depth = (int) (reg2 & 0xff);
181 	return reg1;
182 }
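
/*
 * Note on ap_test_queue() above, as used by ap_query_queue(): general
 * register 0 is loaded with the qid (PQAP function code TAPQ), and after
 * the instruction this code takes the device type from (reg2 >> 24) and
 * the queue depth from the low byte (reg2 & 0xff) of general register 2.
 */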
183 
184 /**
185  * ap_reset_queue(): Reset adjunct processor queue.
186  * @qid: The AP queue number
187  *
188  * Returns AP queue status structure.
189  */
190 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
191 {
192 	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
193 	register struct ap_queue_status reg1 asm ("1");
194 	register unsigned long reg2 asm ("2") = 0UL;
195 
196 	asm volatile(
197 		".long 0xb2af0000"		/* PQAP(RAPQ) */
198 		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
199 	return reg1;
200 }
201 
202 #ifdef CONFIG_64BIT
203 /**
204  * ap_queue_interruption_control(): Enable interruption for a specific AP.
205  * @qid: The AP queue number
206  * @ind: The notification indicator byte
207  *
208  * Returns AP queue status.
209  */
210 static inline struct ap_queue_status
211 ap_queue_interruption_control(ap_qid_t qid, void *ind)
212 {
213 	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
214 	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
215 	register struct ap_queue_status reg1_out asm ("1");
216 	register void *reg2 asm ("2") = ind;
217 	asm volatile(
218 		".long 0xb2af0000"		/* PQAP(AQIC) */
219 		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
220 		:
221 		: "cc" );
222 	return reg1_out;
223 }
224 #endif
225 
226 #ifdef CONFIG_64BIT
227 static inline struct ap_queue_status
228 __ap_query_functions(ap_qid_t qid, unsigned int *functions)
229 {
230 	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
231 	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
232 	register unsigned long reg2 asm ("2");
233 
234 	asm volatile(
235 		".long 0xb2af0000\n"
236 		"0:\n"
237 		EX_TABLE(0b, 0b)
238 		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
239 		:
240 		: "cc");
241 
242 	*functions = (unsigned int)(reg2 >> 32);
243 	return reg1;
244 }
245 #endif
246 
247 /**
248  * ap_query_functions(): Query supported functions.
249  * @qid: The AP queue number
250  * @functions: Pointer to functions field.
251  *
252  * Returns
253  *   0	     on success.
254  *   -ENODEV  if queue not valid.
255  *   -EBUSY   if device busy.
256  *   -EINVAL  if query function is not supported
257  */
258 static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
259 {
260 #ifdef CONFIG_64BIT
261 	struct ap_queue_status status;
262 	int i;
263 	status = __ap_query_functions(qid, functions);
264 
265 	for (i = 0; i < AP_MAX_RESET; i++) {
266 		if (ap_queue_status_invalid_test(&status))
267 			return -ENODEV;
268 
269 		switch (status.response_code) {
270 		case AP_RESPONSE_NORMAL:
271 			return 0;
272 		case AP_RESPONSE_RESET_IN_PROGRESS:
273 		case AP_RESPONSE_BUSY:
274 			break;
275 		case AP_RESPONSE_Q_NOT_AVAIL:
276 		case AP_RESPONSE_DECONFIGURED:
277 		case AP_RESPONSE_CHECKSTOPPED:
278 		case AP_RESPONSE_INVALID_ADDRESS:
279 			return -ENODEV;
280 		case AP_RESPONSE_OTHERWISE_CHANGED:
281 			break;
282 		default:
283 			break;
284 		}
285 		if (i < AP_MAX_RESET - 1) {
286 			udelay(5);
287 			status = __ap_query_functions(qid, functions);
288 		}
289 	}
290 	return -EBUSY;
291 #else
292 	return -EINVAL;
293 #endif
294 }
295 
296 /**
297  * ap_4096_commands_available(): Check for availability of 4096 bit RSA
298  * support.
299  * @qid: The AP queue number
300  *
301  * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
302  */
303 int ap_4096_commands_available(ap_qid_t qid)
304 {
305 	unsigned int functions;
306 
307 	if (ap_query_functions(qid, &functions))
308 		return 0;
309 
310 	return test_ap_facility(functions, 1) &&
311 	       test_ap_facility(functions, 2);
312 }
313 EXPORT_SYMBOL(ap_4096_commands_available);
314 
315 /**
316  * ap_queue_enable_interruption(): Enable interruption on an AP.
317  * @qid: The AP queue number
318  * @ind: the notification indicator byte
319  *
320  * Enables interruption on an AP queue via ap_queue_interruption_control().
321  * Based on the return value it waits a while and then uses ap_test_queue()
322  * to check whether interrupts have been switched on.
323  */
324 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
325 {
326 #ifdef CONFIG_64BIT
327 	struct ap_queue_status status;
328 	int t_depth, t_device_type, rc, i;
329 
330 	rc = -EBUSY;
331 	status = ap_queue_interruption_control(qid, ind);
332 
333 	for (i = 0; i < AP_MAX_RESET; i++) {
334 		switch (status.response_code) {
335 		case AP_RESPONSE_NORMAL:
336 			if (status.int_enabled)
337 				return 0;
338 			break;
339 		case AP_RESPONSE_RESET_IN_PROGRESS:
340 		case AP_RESPONSE_BUSY:
341 			break;
342 		case AP_RESPONSE_Q_NOT_AVAIL:
343 		case AP_RESPONSE_DECONFIGURED:
344 		case AP_RESPONSE_CHECKSTOPPED:
345 		case AP_RESPONSE_INVALID_ADDRESS:
346 			return -ENODEV;
347 		case AP_RESPONSE_OTHERWISE_CHANGED:
348 			if (status.int_enabled)
349 				return 0;
350 			break;
351 		default:
352 			break;
353 		}
354 		if (i < AP_MAX_RESET - 1) {
355 			udelay(5);
356 			status = ap_test_queue(qid, &t_depth, &t_device_type);
357 		}
358 	}
359 	return rc;
360 #else
361 	return -EINVAL;
362 #endif
363 }
364 
365 /**
366  * __ap_send(): Send message to adjunct processor queue.
367  * @qid: The AP queue number
368  * @psmid: The program supplied message identifier
369  * @msg: The message text
370  * @length: The message length
371  * @special: Special Bit
372  *
373  * Returns AP queue status structure.
374  * Condition code 1 on NQAP can't happen because the L bit is 1.
375  * Condition code 2 on NQAP also means the send is incomplete,
376  * because a segment boundary was reached. The NQAP is repeated.
377  */
378 static inline struct ap_queue_status
379 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
380 	  unsigned int special)
381 {
382 	typedef struct { char _[length]; } msgblock;
383 	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
384 	register struct ap_queue_status reg1 asm ("1");
385 	register unsigned long reg2 asm ("2") = (unsigned long) msg;
386 	register unsigned long reg3 asm ("3") = (unsigned long) length;
387 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
388 	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
389 
390 	if (special == 1)
391 		reg0 |= 0x400000UL;
392 
393 	asm volatile (
394 		"0: .long 0xb2ad0042\n"		/* NQAP */
395 		"   brc   2,0b"
396 		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
397 		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
398 		: "cc" );
399 	return reg1;
400 }
401 
402 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
403 {
404 	struct ap_queue_status status;
405 
406 	status = __ap_send(qid, psmid, msg, length, 0);
407 	switch (status.response_code) {
408 	case AP_RESPONSE_NORMAL:
409 		return 0;
410 	case AP_RESPONSE_Q_FULL:
411 	case AP_RESPONSE_RESET_IN_PROGRESS:
412 		return -EBUSY;
413 	case AP_RESPONSE_REQ_FAC_NOT_INST:
414 		return -EINVAL;
415 	default:	/* Device is gone. */
416 		return -ENODEV;
417 	}
418 }
419 EXPORT_SYMBOL(ap_send);
420 
421 /**
422  * __ap_recv(): Receive message from adjunct processor queue.
423  * @qid: The AP queue number
424  * @psmid: Pointer to program supplied message identifier
425  * @msg: The message text
426  * @length: The message length
427  *
428  * Returns AP queue status structure.
429  * Condition code 1 on DQAP means the receive has taken place
430  * but only partially.	The response is incomplete, hence the
431  * DQAP is repeated.
432  * Condition code 2 on DQAP also means the receive is incomplete,
433  * this time because a segment boundary was reached. Again, the
434  * DQAP is repeated.
435  * Note that gpr2 is used by the DQAP instruction to keep track of
436  * any 'residual' length, in case the instruction gets interrupted.
437  * Hence it gets zeroed before the instruction.
438  */
439 static inline struct ap_queue_status
440 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
441 {
442 	typedef struct { char _[length]; } msgblock;
443 	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
444 	register struct ap_queue_status reg1 asm ("1");
445 	register unsigned long reg2 asm("2") = 0UL;
446 	register unsigned long reg4 asm("4") = (unsigned long) msg;
447 	register unsigned long reg5 asm("5") = (unsigned long) length;
448 	register unsigned long reg6 asm("6") = 0UL;
449 	register unsigned long reg7 asm("7") = 0UL;
450 
451 
452 	asm volatile(
453 		"0: .long 0xb2ae0064\n"		/* DQAP */
454 		"   brc   6,0b\n"
455 		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
456 		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
457 		"=m" (*(msgblock *) msg) : : "cc" );
458 	*psmid = (((unsigned long long) reg6) << 32) + reg7;
459 	return reg1;
460 }
461 
462 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
463 {
464 	struct ap_queue_status status;
465 
466 	status = __ap_recv(qid, psmid, msg, length);
467 	switch (status.response_code) {
468 	case AP_RESPONSE_NORMAL:
469 		return 0;
470 	case AP_RESPONSE_NO_PENDING_REPLY:
471 		if (status.queue_empty)
472 			return -ENOENT;
473 		return -EBUSY;
474 	case AP_RESPONSE_RESET_IN_PROGRESS:
475 		return -EBUSY;
476 	default:
477 		return -ENODEV;
478 	}
479 }
480 EXPORT_SYMBOL(ap_recv);
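
/*
 * A minimal usage sketch of the exported send/receive pair, modelled on
 * ap_probe_device_type() below. The qid, psmid value, message buffer and
 * delays are illustrative only:
 *
 *	unsigned long long psmid;
 *	void *reply = (void *) get_zeroed_page(GFP_KERNEL);
 *	int rc;
 *
 *	rc = ap_send(qid, 0x0102030405060708ULL, msg, sizeof(msg));
 *	if (!rc) {
 *		do {
 *			mdelay(300);
 *			rc = ap_recv(qid, &psmid, reply, PAGE_SIZE);
 *		} while (rc == -ENOENT || rc == -EBUSY);
 *	}
 *	free_page((unsigned long) reply);
 */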
481 
482 /**
483  * ap_query_queue(): Check if an AP queue is available.
484  * @qid: The AP queue number
485  * @queue_depth: Pointer to queue depth value
486  * @device_type: Pointer to device type value
487  *
488  * The test is repeated up to AP_MAX_RESET times.
489  */
490 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
491 {
492 	struct ap_queue_status status;
493 	int t_depth, t_device_type, rc, i;
494 
495 	rc = -EBUSY;
496 	for (i = 0; i < AP_MAX_RESET; i++) {
497 		status = ap_test_queue(qid, &t_depth, &t_device_type);
498 		switch (status.response_code) {
499 		case AP_RESPONSE_NORMAL:
500 			*queue_depth = t_depth + 1;
501 			*device_type = t_device_type;
502 			rc = 0;
503 			break;
504 		case AP_RESPONSE_Q_NOT_AVAIL:
505 			rc = -ENODEV;
506 			break;
507 		case AP_RESPONSE_RESET_IN_PROGRESS:
508 			break;
509 		case AP_RESPONSE_DECONFIGURED:
510 			rc = -ENODEV;
511 			break;
512 		case AP_RESPONSE_CHECKSTOPPED:
513 			rc = -ENODEV;
514 			break;
515 		case AP_RESPONSE_INVALID_ADDRESS:
516 			rc = -ENODEV;
517 			break;
518 		case AP_RESPONSE_OTHERWISE_CHANGED:
519 			break;
520 		case AP_RESPONSE_BUSY:
521 			break;
522 		default:
523 			BUG();
524 		}
525 		if (rc != -EBUSY)
526 			break;
527 		if (i < AP_MAX_RESET - 1)
528 			udelay(5);
529 	}
530 	return rc;
531 }
532 
533 /**
534  * ap_init_queue(): Reset an AP queue.
535  * @qid: The AP queue number
536  *
537  * Reset an AP queue and wait for it to become available again.
538  */
539 static int ap_init_queue(ap_qid_t qid)
540 {
541 	struct ap_queue_status status;
542 	int rc, dummy, i;
543 
544 	rc = -ENODEV;
545 	status = ap_reset_queue(qid);
546 	for (i = 0; i < AP_MAX_RESET; i++) {
547 		switch (status.response_code) {
548 		case AP_RESPONSE_NORMAL:
549 			if (status.queue_empty)
550 				rc = 0;
551 			break;
552 		case AP_RESPONSE_Q_NOT_AVAIL:
553 		case AP_RESPONSE_DECONFIGURED:
554 		case AP_RESPONSE_CHECKSTOPPED:
555 			i = AP_MAX_RESET;	/* return with -ENODEV */
556 			break;
557 		case AP_RESPONSE_RESET_IN_PROGRESS:
558 			rc = -EBUSY;
559 		case AP_RESPONSE_BUSY:
560 		default:
561 			break;
562 		}
563 		if (rc != -ENODEV && rc != -EBUSY)
564 			break;
565 		if (i < AP_MAX_RESET - 1) {
566 			udelay(5);
567 			status = ap_test_queue(qid, &dummy, &dummy);
568 		}
569 	}
570 	if (rc == 0 && ap_using_interrupts()) {
571 		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
572 		/* If interruption mode is supported by the machine,
573 		 * but an AP cannot be enabled for interruption then
574 		 * the AP will be discarded. */
575 		if (rc)
576 			pr_err("Registering adapter interrupts for "
577 			       "AP %d failed\n", AP_QID_DEVICE(qid));
578 	}
579 	return rc;
580 }
581 
582 /**
583  * ap_increase_queue_count(): Arm request timeout.
584  * @ap_dev: Pointer to an AP device.
585  *
586  * Arm request timeout if an AP device was idle and a new request is submitted.
587  */
588 static void ap_increase_queue_count(struct ap_device *ap_dev)
589 {
590 	int timeout = ap_dev->drv->request_timeout;
591 
592 	ap_dev->queue_count++;
593 	if (ap_dev->queue_count == 1) {
594 		mod_timer(&ap_dev->timeout, jiffies + timeout);
595 		ap_dev->reset = AP_RESET_ARMED;
596 	}
597 }
598 
599 /**
600  * ap_decrease_queue_count(): Decrease queue count.
601  * @ap_dev: Pointer to an AP device.
602  *
603  * If AP device is still alive, re-schedule request timeout if there are still
604  * pending requests.
605  */
606 static void ap_decrease_queue_count(struct ap_device *ap_dev)
607 {
608 	int timeout = ap_dev->drv->request_timeout;
609 
610 	ap_dev->queue_count--;
611 	if (ap_dev->queue_count > 0)
612 		mod_timer(&ap_dev->timeout, jiffies + timeout);
613 	else
614 		/*
615 		 * The timeout timer should be disabled now - since
616 		 * del_timer_sync() is very expensive, we just tell via the
617 		 * reset flag to ignore the pending timeout timer.
618 		 */
619 		ap_dev->reset = AP_RESET_IGNORE;
620 }
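
/*
 * Summary of the per-device reset flag life cycle as implemented in this
 * file: ap_increase_queue_count() arms the request timeout (AP_RESET_ARMED),
 * ap_decrease_queue_count() tells an expiring timer to be ignored once the
 * queue is idle (AP_RESET_IGNORE), ap_request_timeout() escalates an armed
 * timeout to AP_RESET_DO, and __ap_poll_device() then calls ap_reset().
 */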
621 
622 /*
623  * AP device related attributes.
624  */
625 static ssize_t ap_hwtype_show(struct device *dev,
626 			      struct device_attribute *attr, char *buf)
627 {
628 	struct ap_device *ap_dev = to_ap_dev(dev);
629 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
630 }
631 
632 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
633 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
634 			     char *buf)
635 {
636 	struct ap_device *ap_dev = to_ap_dev(dev);
637 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
638 }
639 
640 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
641 static ssize_t ap_request_count_show(struct device *dev,
642 				     struct device_attribute *attr,
643 				     char *buf)
644 {
645 	struct ap_device *ap_dev = to_ap_dev(dev);
646 	int rc;
647 
648 	spin_lock_bh(&ap_dev->lock);
649 	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
650 	spin_unlock_bh(&ap_dev->lock);
651 	return rc;
652 }
653 
654 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
655 
656 static ssize_t ap_modalias_show(struct device *dev,
657 				struct device_attribute *attr, char *buf)
658 {
659 	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
660 }
661 
662 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
663 
664 static struct attribute *ap_dev_attrs[] = {
665 	&dev_attr_hwtype.attr,
666 	&dev_attr_depth.attr,
667 	&dev_attr_request_count.attr,
668 	&dev_attr_modalias.attr,
669 	NULL
670 };
671 static struct attribute_group ap_dev_attr_group = {
672 	.attrs = ap_dev_attrs
673 };
674 
675 /**
676  * ap_bus_match()
677  * @dev: Pointer to device
678  * @drv: Pointer to device_driver
679  *
680  * AP bus device/driver matching: returns 1 if the driver supports the device type.
681  */
682 static int ap_bus_match(struct device *dev, struct device_driver *drv)
683 {
684 	struct ap_device *ap_dev = to_ap_dev(dev);
685 	struct ap_driver *ap_drv = to_ap_drv(drv);
686 	struct ap_device_id *id;
687 
688 	/*
689 	 * Compare device type of the device with the list of
690 	 * supported types of the device_driver.
691 	 */
692 	for (id = ap_drv->ids; id->match_flags; id++) {
693 		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
694 		    (id->dev_type != ap_dev->device_type))
695 			continue;
696 		return 1;
697 	}
698 	return 0;
699 }
700 
701 /**
702  * ap_uevent(): Uevent function for AP devices.
703  * @dev: Pointer to device
704  * @env: Pointer to kobj_uevent_env
705  *
706  * It sets up a single environment variable DEV_TYPE which contains the
707  * hardware device type.
708  */
709 static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
710 {
711 	struct ap_device *ap_dev = to_ap_dev(dev);
712 	int retval = 0;
713 
714 	if (!ap_dev)
715 		return -ENODEV;
716 
717 	/* Set up DEV_TYPE environment variable. */
718 	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
719 	if (retval)
720 		return retval;
721 
722 	/* Add MODALIAS= */
723 	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
724 
725 	return retval;
726 }
727 
728 static int ap_bus_suspend(struct device *dev, pm_message_t state)
729 {
730 	struct ap_device *ap_dev = to_ap_dev(dev);
731 	unsigned long flags;
732 
733 	if (!ap_suspend_flag) {
734 		ap_suspend_flag = 1;
735 
736 		/* Disable scanning for devices, since we do not want to scan
737 		 * for them again after they have been removed.
738 		 */
739 		del_timer_sync(&ap_config_timer);
740 		if (ap_work_queue != NULL) {
741 			destroy_workqueue(ap_work_queue);
742 			ap_work_queue = NULL;
743 		}
744 
745 		tasklet_disable(&ap_tasklet);
746 	}
747 	/* Poll on the device until all requests are finished. */
748 	do {
749 		flags = 0;
750 		spin_lock_bh(&ap_dev->lock);
751 		__ap_poll_device(ap_dev, &flags);
752 		spin_unlock_bh(&ap_dev->lock);
753 	} while ((flags & 1) || (flags & 2));
754 
755 	spin_lock_bh(&ap_dev->lock);
756 	ap_dev->unregistered = 1;
757 	spin_unlock_bh(&ap_dev->lock);
758 
759 	return 0;
760 }
761 
762 static int ap_bus_resume(struct device *dev)
763 {
764 	int rc = 0;
765 	struct ap_device *ap_dev = to_ap_dev(dev);
766 
767 	if (ap_suspend_flag) {
768 		ap_suspend_flag = 0;
769 		if (!ap_interrupts_available())
770 			ap_interrupt_indicator = NULL;
771 		if (!user_set_domain) {
772 			ap_domain_index = -1;
773 			ap_select_domain();
774 		}
775 		init_timer(&ap_config_timer);
776 		ap_config_timer.function = ap_config_timeout;
777 		ap_config_timer.data = 0;
778 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
779 		add_timer(&ap_config_timer);
780 		ap_work_queue = create_singlethread_workqueue("kapwork");
781 		if (!ap_work_queue)
782 			return -ENOMEM;
783 		tasklet_enable(&ap_tasklet);
784 		if (!ap_using_interrupts())
785 			ap_schedule_poll_timer();
786 		else
787 			tasklet_schedule(&ap_tasklet);
788 		if (ap_thread_flag)
789 			rc = ap_poll_thread_start();
790 	}
791 	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
792 		spin_lock_bh(&ap_dev->lock);
793 		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
794 				       ap_domain_index);
795 		spin_unlock_bh(&ap_dev->lock);
796 	}
797 	queue_work(ap_work_queue, &ap_config_work);
798 
799 	return rc;
800 }
801 
802 static struct bus_type ap_bus_type = {
803 	.name = "ap",
804 	.match = &ap_bus_match,
805 	.uevent = &ap_uevent,
806 	.suspend = ap_bus_suspend,
807 	.resume = ap_bus_resume
808 };
809 
810 static int ap_device_probe(struct device *dev)
811 {
812 	struct ap_device *ap_dev = to_ap_dev(dev);
813 	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
814 	int rc;
815 
816 	ap_dev->drv = ap_drv;
817 	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
818 	if (!rc) {
819 		spin_lock_bh(&ap_device_list_lock);
820 		list_add(&ap_dev->list, &ap_device_list);
821 		spin_unlock_bh(&ap_device_list_lock);
822 	}
823 	return rc;
824 }
825 
826 /**
827  * __ap_flush_queue(): Flush requests.
828  * @ap_dev: Pointer to the AP device
829  *
830  * Flush all requests from the request/pending queue of an AP device.
831  */
832 static void __ap_flush_queue(struct ap_device *ap_dev)
833 {
834 	struct ap_message *ap_msg, *next;
835 
836 	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
837 		list_del_init(&ap_msg->list);
838 		ap_dev->pendingq_count--;
839 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
840 	}
841 	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
842 		list_del_init(&ap_msg->list);
843 		ap_dev->requestq_count--;
844 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
845 	}
846 }
847 
848 void ap_flush_queue(struct ap_device *ap_dev)
849 {
850 	spin_lock_bh(&ap_dev->lock);
851 	__ap_flush_queue(ap_dev);
852 	spin_unlock_bh(&ap_dev->lock);
853 }
854 EXPORT_SYMBOL(ap_flush_queue);
855 
856 static int ap_device_remove(struct device *dev)
857 {
858 	struct ap_device *ap_dev = to_ap_dev(dev);
859 	struct ap_driver *ap_drv = ap_dev->drv;
860 
861 	ap_flush_queue(ap_dev);
862 	del_timer_sync(&ap_dev->timeout);
863 	spin_lock_bh(&ap_device_list_lock);
864 	list_del_init(&ap_dev->list);
865 	spin_unlock_bh(&ap_device_list_lock);
866 	if (ap_drv->remove)
867 		ap_drv->remove(ap_dev);
868 	spin_lock_bh(&ap_dev->lock);
869 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
870 	spin_unlock_bh(&ap_dev->lock);
871 	return 0;
872 }
873 
874 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
875 		       char *name)
876 {
877 	struct device_driver *drv = &ap_drv->driver;
878 
879 	drv->bus = &ap_bus_type;
880 	drv->probe = ap_device_probe;
881 	drv->remove = ap_device_remove;
882 	drv->owner = owner;
883 	drv->name = name;
884 	return driver_register(drv);
885 }
886 EXPORT_SYMBOL(ap_driver_register);
887 
888 void ap_driver_unregister(struct ap_driver *ap_drv)
889 {
890 	driver_unregister(&ap_drv->driver);
891 }
892 EXPORT_SYMBOL(ap_driver_unregister);
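
/*
 * A minimal sketch of how a card driver hooks into the AP bus, based on the
 * ap_driver and ap_device_id fields referenced in this file (ids, probe,
 * remove, receive, request_timeout, match_flags, dev_type). The driver name,
 * callbacks and timeout value are illustrative only:
 *
 *	static struct ap_device_id example_ids[] = {
 *		{ .dev_type = AP_DEVICE_TYPE_CEX3A,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver example_driver = {
 *		.ids = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.receive = example_receive,
 *		.request_timeout = 60 * HZ,
 *	};
 *
 *	rc = ap_driver_register(&example_driver, THIS_MODULE, "example");
 */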
893 
894 /*
895  * AP bus attributes.
896  */
897 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
898 {
899 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
900 }
901 
902 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
903 
904 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
905 {
906 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
907 }
908 
909 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
910 {
911 	return snprintf(buf, PAGE_SIZE, "%d\n",
912 			ap_using_interrupts() ? 1 : 0);
913 }
914 
915 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
916 
917 static ssize_t ap_config_time_store(struct bus_type *bus,
918 				    const char *buf, size_t count)
919 {
920 	int time;
921 
922 	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
923 		return -EINVAL;
924 	ap_config_time = time;
925 	if (!timer_pending(&ap_config_timer) ||
926 	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
927 		ap_config_timer.expires = jiffies + ap_config_time * HZ;
928 		add_timer(&ap_config_timer);
929 	}
930 	return count;
931 }
932 
933 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
934 
935 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
936 {
937 	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
938 }
939 
940 static ssize_t ap_poll_thread_store(struct bus_type *bus,
941 				    const char *buf, size_t count)
942 {
943 	int flag, rc;
944 
945 	if (sscanf(buf, "%d\n", &flag) != 1)
946 		return -EINVAL;
947 	if (flag) {
948 		rc = ap_poll_thread_start();
949 		if (rc)
950 			return rc;
951 	}
952 	else
953 		ap_poll_thread_stop();
954 	return count;
955 }
956 
957 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
958 
959 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
960 {
961 	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
962 }
963 
964 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
965 				  size_t count)
966 {
967 	unsigned long long time;
968 	ktime_t hr_time;
969 
970 	/* 120 seconds = maximum poll interval */
971 	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
972 	    time > 120000000000ULL)
973 		return -EINVAL;
974 	poll_timeout = time;
975 	hr_time = ktime_set(0, poll_timeout);
976 
977 	if (!hrtimer_is_queued(&ap_poll_timer) ||
978 	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
979 		hrtimer_set_expires(&ap_poll_timer, hr_time);
980 		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
981 	}
982 	return count;
983 }
984 
985 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
986 
987 static struct bus_attribute *const ap_bus_attrs[] = {
988 	&bus_attr_ap_domain,
989 	&bus_attr_config_time,
990 	&bus_attr_poll_thread,
991 	&bus_attr_ap_interrupts,
992 	&bus_attr_poll_timeout,
993 	NULL,
994 };
995 
996 /**
997  * ap_select_domain(): Select an AP domain.
998  *
999  * Pick one of the 16 AP domains.
1000  */
1001 static int ap_select_domain(void)
1002 {
1003 	int queue_depth, device_type, count, max_count, best_domain;
1004 	int rc, i, j;
1005 
1006 	/*
1007 	 * We want to use a single domain. Either the one specified with
1008 	 * the "domain=" parameter or the domain with the maximum number
1009 	 * of devices.
1010 	 */
1011 	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
1012 		/* Domain has already been selected. */
1013 		return 0;
1014 	best_domain = -1;
1015 	max_count = 0;
1016 	for (i = 0; i < AP_DOMAINS; i++) {
1017 		count = 0;
1018 		for (j = 0; j < AP_DEVICES; j++) {
1019 			ap_qid_t qid = AP_MKQID(j, i);
1020 			rc = ap_query_queue(qid, &queue_depth, &device_type);
1021 			if (rc)
1022 				continue;
1023 			count++;
1024 		}
1025 		if (count > max_count) {
1026 			max_count = count;
1027 			best_domain = i;
1028 		}
1029 	}
1030 	if (best_domain >= 0){
1031 		ap_domain_index = best_domain;
1032 		return 0;
1033 	}
1034 	return -ENODEV;
1035 }
1036 
1037 /**
1038  * ap_probe_device_type(): Find the device type of an AP.
1039  * @ap_dev: pointer to the AP device.
1040  *
1041  * Find the device type if query queue returned a device type of 0.
1042  */
1043 static int ap_probe_device_type(struct ap_device *ap_dev)
1044 {
1045 	static unsigned char msg[] = {
1046 		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
1047 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1048 		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
1049 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1050 		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
1051 		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
1052 		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
1053 		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
1054 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1055 		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
1056 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1057 		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
1058 		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
1059 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1060 		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
1061 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1062 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1063 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1064 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1065 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1066 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1067 		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
1068 		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1069 		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
1070 		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
1071 		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
1072 		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
1073 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1074 		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
1075 		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
1076 		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
1077 		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
1078 		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1079 		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
1080 		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
1081 		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
1082 		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
1083 		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1084 		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1085 		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1086 		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1087 		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1088 		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1089 		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1090 		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1091 	};
1092 	struct ap_queue_status status;
1093 	unsigned long long psmid;
1094 	char *reply;
1095 	int rc, i;
1096 
1097 	reply = (void *) get_zeroed_page(GFP_KERNEL);
1098 	if (!reply) {
1099 		rc = -ENOMEM;
1100 		goto out;
1101 	}
1102 
1103 	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1104 			   msg, sizeof(msg), 0);
1105 	if (status.response_code != AP_RESPONSE_NORMAL) {
1106 		rc = -ENODEV;
1107 		goto out_free;
1108 	}
1109 
1110 	/* Wait for the test message to complete. */
1111 	for (i = 0; i < 6; i++) {
1112 		mdelay(300);
1113 		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1114 		if (status.response_code == AP_RESPONSE_NORMAL &&
1115 		    psmid == 0x0102030405060708ULL)
1116 			break;
1117 	}
1118 	if (i < 6) {
1119 		/* Got an answer. */
1120 		if (reply[0] == 0x00 && reply[1] == 0x86)
1121 			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1122 		else
1123 			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1124 		rc = 0;
1125 	} else
1126 		rc = -ENODEV;
1127 
1128 out_free:
1129 	free_page((unsigned long) reply);
1130 out:
1131 	return rc;
1132 }
1133 
1134 static void ap_interrupt_handler(void *unused1, void *unused2)
1135 {
1136 	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
1137 	tasklet_schedule(&ap_tasklet);
1138 }
1139 
1140 /**
1141  * __ap_scan_bus(): Match helper for the AP bus scan.
1142  * @dev: Pointer to device
1143  * @data: Pointer to the AP queue id to look for
1144  *
1145  * Returns 1 if the qid of the device matches the qid passed in @data.
1146  */
1147 static int __ap_scan_bus(struct device *dev, void *data)
1148 {
1149 	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1150 }
1151 
1152 static void ap_device_release(struct device *dev)
1153 {
1154 	struct ap_device *ap_dev = to_ap_dev(dev);
1155 
1156 	kfree(ap_dev);
1157 }
1158 
1159 static void ap_scan_bus(struct work_struct *unused)
1160 {
1161 	struct ap_device *ap_dev;
1162 	struct device *dev;
1163 	ap_qid_t qid;
1164 	int queue_depth, device_type;
1165 	unsigned int device_functions;
1166 	int rc, i;
1167 
1168 	if (ap_select_domain() != 0)
1169 		return;
1170 	for (i = 0; i < AP_DEVICES; i++) {
1171 		qid = AP_MKQID(i, ap_domain_index);
1172 		dev = bus_find_device(&ap_bus_type, NULL,
1173 				      (void *)(unsigned long)qid,
1174 				      __ap_scan_bus);
1175 		rc = ap_query_queue(qid, &queue_depth, &device_type);
1176 		if (dev) {
1177 			if (rc == -EBUSY) {
1178 				set_current_state(TASK_UNINTERRUPTIBLE);
1179 				schedule_timeout(AP_RESET_TIMEOUT);
1180 				rc = ap_query_queue(qid, &queue_depth,
1181 						    &device_type);
1182 			}
1183 			ap_dev = to_ap_dev(dev);
1184 			spin_lock_bh(&ap_dev->lock);
1185 			if (rc || ap_dev->unregistered) {
1186 				spin_unlock_bh(&ap_dev->lock);
1187 				if (ap_dev->unregistered)
1188 					i--;
1189 				device_unregister(dev);
1190 				put_device(dev);
1191 				continue;
1192 			}
1193 			spin_unlock_bh(&ap_dev->lock);
1194 			put_device(dev);
1195 			continue;
1196 		}
1197 		if (rc)
1198 			continue;
1199 		rc = ap_init_queue(qid);
1200 		if (rc)
1201 			continue;
1202 		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1203 		if (!ap_dev)
1204 			break;
1205 		ap_dev->qid = qid;
1206 		ap_dev->queue_depth = queue_depth;
1207 		ap_dev->unregistered = 1;
1208 		spin_lock_init(&ap_dev->lock);
1209 		INIT_LIST_HEAD(&ap_dev->pendingq);
1210 		INIT_LIST_HEAD(&ap_dev->requestq);
1211 		INIT_LIST_HEAD(&ap_dev->list);
1212 		setup_timer(&ap_dev->timeout, ap_request_timeout,
1213 			    (unsigned long) ap_dev);
1214 		switch (device_type) {
1215 		case 0:
1216 			if (ap_probe_device_type(ap_dev)) {
1217 				kfree(ap_dev);
1218 				continue;
1219 			}
1220 			break;
1221 		case 10:
1222 			if (ap_query_functions(qid, &device_functions)) {
1223 				kfree(ap_dev);
1224 				continue;
1225 			}
1226 			if (test_ap_facility(device_functions, 3))
1227 				ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
1228 			else if (test_ap_facility(device_functions, 4))
1229 				ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
1230 			else {
1231 				kfree(ap_dev);
1232 				continue;
1233 			}
1234 			break;
1235 		default:
1236 			ap_dev->device_type = device_type;
1237 		}
1238 
1239 		ap_dev->device.bus = &ap_bus_type;
1240 		ap_dev->device.parent = ap_root_device;
1241 		if (dev_set_name(&ap_dev->device, "card%02x",
1242 				 AP_QID_DEVICE(ap_dev->qid))) {
1243 			kfree(ap_dev);
1244 			continue;
1245 		}
1246 		ap_dev->device.release = ap_device_release;
1247 		rc = device_register(&ap_dev->device);
1248 		if (rc) {
1249 			put_device(&ap_dev->device);
1250 			continue;
1251 		}
1252 		/* Add device attributes. */
1253 		rc = sysfs_create_group(&ap_dev->device.kobj,
1254 					&ap_dev_attr_group);
1255 		if (!rc) {
1256 			spin_lock_bh(&ap_dev->lock);
1257 			ap_dev->unregistered = 0;
1258 			spin_unlock_bh(&ap_dev->lock);
1259 		}
1260 		else
1261 			device_unregister(&ap_dev->device);
1262 	}
1263 }
1264 
1265 static void
1266 ap_config_timeout(unsigned long ptr)
1267 {
1268 	queue_work(ap_work_queue, &ap_config_work);
1269 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1270 	add_timer(&ap_config_timer);
1271 }
1272 
1273 /**
1274  * __ap_schedule_poll_timer(): Schedule poll timer.
1275  *
1276  * Set up the timer to run the poll tasklet
1277  */
1278 static inline void __ap_schedule_poll_timer(void)
1279 {
1280 	ktime_t hr_time;
1281 
1282 	spin_lock_bh(&ap_poll_timer_lock);
1283 	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1284 		goto out;
1285 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1286 		hr_time = ktime_set(0, poll_timeout);
1287 		hrtimer_forward_now(&ap_poll_timer, hr_time);
1288 		hrtimer_restart(&ap_poll_timer);
1289 	}
1290 out:
1291 	spin_unlock_bh(&ap_poll_timer_lock);
1292 }
1293 
1294 /**
1295  * ap_schedule_poll_timer(): Schedule poll timer.
1296  *
1297  * Set up the timer to run the poll tasklet
1298  */
1299 static inline void ap_schedule_poll_timer(void)
1300 {
1301 	if (ap_using_interrupts())
1302 		return;
1303 	__ap_schedule_poll_timer();
1304 }
1305 
1306 /**
1307  * ap_poll_read(): Receive pending reply messages from an AP device.
1308  * @ap_dev: pointer to the AP device
1309  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1310  *	   required, bit 2^1 is set if the poll timer needs to get armed
1311  *
1312  * Returns 0 if the device is still present, -ENODEV if not.
1313  */
1314 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1315 {
1316 	struct ap_queue_status status;
1317 	struct ap_message *ap_msg;
1318 
1319 	if (ap_dev->queue_count <= 0)
1320 		return 0;
1321 	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1322 			   ap_dev->reply->message, ap_dev->reply->length);
1323 	switch (status.response_code) {
1324 	case AP_RESPONSE_NORMAL:
1325 		atomic_dec(&ap_poll_requests);
1326 		ap_decrease_queue_count(ap_dev);
1327 		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1328 			if (ap_msg->psmid != ap_dev->reply->psmid)
1329 				continue;
1330 			list_del_init(&ap_msg->list);
1331 			ap_dev->pendingq_count--;
1332 			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
1333 			break;
1334 		}
1335 		if (ap_dev->queue_count > 0)
1336 			*flags |= 1;
1337 		break;
1338 	case AP_RESPONSE_NO_PENDING_REPLY:
1339 		if (status.queue_empty) {
1340 			/* The card shouldn't forget requests but who knows. */
1341 			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1342 			ap_dev->queue_count = 0;
1343 			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1344 			ap_dev->requestq_count += ap_dev->pendingq_count;
1345 			ap_dev->pendingq_count = 0;
1346 		} else
1347 			*flags |= 2;
1348 		break;
1349 	default:
1350 		return -ENODEV;
1351 	}
1352 	return 0;
1353 }
1354 
1355 /**
1356  * ap_poll_write(): Send messages from the request queue to an AP device.
1357  * @ap_dev: pointer to the AP device
1358  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1359  *	   required, bit 2^1 is set if the poll timer needs to get armed
1360  *
1361  * Returns 0 if the device is still present, -ENODEV if not.
1362  */
1363 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1364 {
1365 	struct ap_queue_status status;
1366 	struct ap_message *ap_msg;
1367 
1368 	if (ap_dev->requestq_count <= 0 ||
1369 	    ap_dev->queue_count >= ap_dev->queue_depth)
1370 		return 0;
1371 	/* Start the next request on the queue. */
1372 	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1373 	status = __ap_send(ap_dev->qid, ap_msg->psmid,
1374 			   ap_msg->message, ap_msg->length, ap_msg->special);
1375 	switch (status.response_code) {
1376 	case AP_RESPONSE_NORMAL:
1377 		atomic_inc(&ap_poll_requests);
1378 		ap_increase_queue_count(ap_dev);
1379 		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1380 		ap_dev->requestq_count--;
1381 		ap_dev->pendingq_count++;
1382 		if (ap_dev->queue_count < ap_dev->queue_depth &&
1383 		    ap_dev->requestq_count > 0)
1384 			*flags |= 1;
1385 		*flags |= 2;
1386 		break;
1387 	case AP_RESPONSE_RESET_IN_PROGRESS:
1388 		__ap_schedule_poll_timer();
1389 	case AP_RESPONSE_Q_FULL:
1390 		*flags |= 2;
1391 		break;
1392 	case AP_RESPONSE_MESSAGE_TOO_BIG:
1393 	case AP_RESPONSE_REQ_FAC_NOT_INST:
1394 		return -EINVAL;
1395 	default:
1396 		return -ENODEV;
1397 	}
1398 	return 0;
1399 }
1400 
1401 /**
1402  * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1403  * @ap_dev: pointer to the AP device
1404  * @flags: pointer to control flags, bit 2^0 is set if another poll is
1405  *	   required, bit 2^1 is set if the poll timer needs to get armed
1406  *
1407  * Poll AP device for pending replies and send new messages. If either
1408  * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1409  * Returns 0.
1410  */
1411 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1412 {
1413 	int rc;
1414 
1415 	rc = ap_poll_read(ap_dev, flags);
1416 	if (rc)
1417 		return rc;
1418 	return ap_poll_write(ap_dev, flags);
1419 }
1420 
1421 /**
1422  * __ap_queue_message(): Queue a message to a device.
1423  * @ap_dev: pointer to the AP device
1424  * @ap_msg: the message to be queued
1425  *
1426  * Queue a message to a device. Returns 0 if successful.
1427  */
1428 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1429 {
1430 	struct ap_queue_status status;
1431 
1432 	if (list_empty(&ap_dev->requestq) &&
1433 	    ap_dev->queue_count < ap_dev->queue_depth) {
1434 		status = __ap_send(ap_dev->qid, ap_msg->psmid,
1435 				   ap_msg->message, ap_msg->length,
1436 				   ap_msg->special);
1437 		switch (status.response_code) {
1438 		case AP_RESPONSE_NORMAL:
1439 			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1440 			atomic_inc(&ap_poll_requests);
1441 			ap_dev->pendingq_count++;
1442 			ap_increase_queue_count(ap_dev);
1443 			ap_dev->total_request_count++;
1444 			break;
1445 		case AP_RESPONSE_Q_FULL:
1446 		case AP_RESPONSE_RESET_IN_PROGRESS:
1447 			list_add_tail(&ap_msg->list, &ap_dev->requestq);
1448 			ap_dev->requestq_count++;
1449 			ap_dev->total_request_count++;
1450 			return -EBUSY;
1451 		case AP_RESPONSE_REQ_FAC_NOT_INST:
1452 		case AP_RESPONSE_MESSAGE_TOO_BIG:
1453 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1454 			return -EINVAL;
1455 		default:	/* Device is gone. */
1456 			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1457 			return -ENODEV;
1458 		}
1459 	} else {
1460 		list_add_tail(&ap_msg->list, &ap_dev->requestq);
1461 		ap_dev->requestq_count++;
1462 		ap_dev->total_request_count++;
1463 		return -EBUSY;
1464 	}
1465 	ap_schedule_poll_timer();
1466 	return 0;
1467 }
1468 
1469 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1470 {
1471 	unsigned long flags;
1472 	int rc;
1473 
1474 	spin_lock_bh(&ap_dev->lock);
1475 	if (!ap_dev->unregistered) {
1476 		/* Make room on the queue by polling for finished requests. */
1477 		rc = ap_poll_queue(ap_dev, &flags);
1478 		if (!rc)
1479 			rc = __ap_queue_message(ap_dev, ap_msg);
1480 		if (!rc)
1481 			wake_up(&ap_poll_wait);
1482 		if (rc == -ENODEV)
1483 			ap_dev->unregistered = 1;
1484 	} else {
1485 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1486 		rc = -ENODEV;
1487 	}
1488 	spin_unlock_bh(&ap_dev->lock);
1489 	if (rc == -ENODEV)
1490 		device_unregister(&ap_dev->device);
1491 }
1492 EXPORT_SYMBOL(ap_queue_message);
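
/*
 * A minimal sketch of queueing a request, based on the ap_message fields
 * used in this file (list, psmid, message, length, special). The buffer
 * handling is illustrative only; the reply is delivered asynchronously
 * through the driver's receive() callback, and a pending request can be
 * withdrawn with ap_cancel_message():
 *
 *	struct ap_message ap_msg;
 *
 *	INIT_LIST_HEAD(&ap_msg.list);
 *	ap_msg.psmid = psmid;
 *	ap_msg.message = msg_buf;
 *	ap_msg.length = msg_len;
 *	ap_msg.special = 0;
 *	ap_queue_message(ap_dev, &ap_msg);
 */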
1493 
1494 /**
1495  * ap_cancel_message(): Cancel a crypto request.
1496  * @ap_dev: The AP device that has the message queued
1497  * @ap_msg: The message that is to be removed
1498  *
1499  * Cancel a crypto request. This is done by removing the request
1500  * from the device pending or request queue. Note that the
1501  * request stays on the AP queue. When it finishes the message
1502  * reply will be discarded because the psmid can't be found.
1503  */
1504 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1505 {
1506 	struct ap_message *tmp;
1507 
1508 	spin_lock_bh(&ap_dev->lock);
1509 	if (!list_empty(&ap_msg->list)) {
1510 		list_for_each_entry(tmp, &ap_dev->pendingq, list)
1511 			if (tmp->psmid == ap_msg->psmid) {
1512 				ap_dev->pendingq_count--;
1513 				goto found;
1514 			}
1515 		ap_dev->requestq_count--;
1516 	found:
1517 		list_del_init(&ap_msg->list);
1518 	}
1519 	spin_unlock_bh(&ap_dev->lock);
1520 }
1521 EXPORT_SYMBOL(ap_cancel_message);
1522 
1523 /**
1524  * ap_poll_timeout(): AP receive polling for finished AP requests.
1525  * @unused: Unused pointer.
1526  *
1527  * Schedules the AP tasklet using a high resolution timer.
1528  */
1529 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1530 {
1531 	tasklet_schedule(&ap_tasklet);
1532 	return HRTIMER_NORESTART;
1533 }
1534 
1535 /**
1536  * ap_reset(): Reset a not responding AP device.
1537  * @ap_dev: Pointer to the AP device
1538  *
1539  * Reset a not responding AP device and move all requests from the
1540  * pending queue to the request queue.
1541  */
1542 static void ap_reset(struct ap_device *ap_dev)
1543 {
1544 	int rc;
1545 
1546 	ap_dev->reset = AP_RESET_IGNORE;
1547 	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1548 	ap_dev->queue_count = 0;
1549 	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1550 	ap_dev->requestq_count += ap_dev->pendingq_count;
1551 	ap_dev->pendingq_count = 0;
1552 	rc = ap_init_queue(ap_dev->qid);
1553 	if (rc == -ENODEV)
1554 		ap_dev->unregistered = 1;
1555 	else
1556 		__ap_schedule_poll_timer();
1557 }
1558 
1559 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1560 {
1561 	if (!ap_dev->unregistered) {
1562 		if (ap_poll_queue(ap_dev, flags))
1563 			ap_dev->unregistered = 1;
1564 		if (ap_dev->reset == AP_RESET_DO)
1565 			ap_reset(ap_dev);
1566 	}
1567 	return 0;
1568 }
1569 
1570 /**
1571  * ap_poll_all(): Poll all AP devices.
1572  * @dummy: Unused variable
1573  *
1574  * Poll all AP devices on the bus in a round robin fashion. Continue
1575  * polling until bit 2^0 of the control flags is not set. If bit 2^1
1576  * of the control flags has been set arm the poll timer.
1577  */
1578 static void ap_poll_all(unsigned long dummy)
1579 {
1580 	unsigned long flags;
1581 	struct ap_device *ap_dev;
1582 
1583 	/* Reset the indicator if interrupts are used, so that new interrupts
1584 	 * can be received. Doing this at the beginning of the tasklet is
1585 	 * important to make sure that no requests on any AP get lost.
1586 	 */
1587 	if (ap_using_interrupts())
1588 		xchg((u8 *)ap_interrupt_indicator, 0);
1589 	do {
1590 		flags = 0;
1591 		spin_lock(&ap_device_list_lock);
1592 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1593 			spin_lock(&ap_dev->lock);
1594 			__ap_poll_device(ap_dev, &flags);
1595 			spin_unlock(&ap_dev->lock);
1596 		}
1597 		spin_unlock(&ap_device_list_lock);
1598 	} while (flags & 1);
1599 	if (flags & 2)
1600 		ap_schedule_poll_timer();
1601 }
1602 
1603 /**
1604  * ap_poll_thread(): Thread that polls for finished requests.
1605  * @data: Unused pointer
1606  *
1607  * AP bus poll thread. The purpose of this thread is to poll for
1608  * finished requests in a loop if there is a "free" cpu - that is
1609  * a cpu that doesn't have anything better to do. The polling stops
1610  * as soon as there is another task or if all messages have been
1611  * delivered.
1612  */
1613 static int ap_poll_thread(void *data)
1614 {
1615 	DECLARE_WAITQUEUE(wait, current);
1616 	unsigned long flags;
1617 	int requests;
1618 	struct ap_device *ap_dev;
1619 
1620 	set_user_nice(current, 19);
1621 	while (1) {
1622 		if (ap_suspend_flag)
1623 			return 0;
1624 		if (need_resched()) {
1625 			schedule();
1626 			continue;
1627 		}
1628 		add_wait_queue(&ap_poll_wait, &wait);
1629 		set_current_state(TASK_INTERRUPTIBLE);
1630 		if (kthread_should_stop())
1631 			break;
1632 		requests = atomic_read(&ap_poll_requests);
1633 		if (requests <= 0)
1634 			schedule();
1635 		set_current_state(TASK_RUNNING);
1636 		remove_wait_queue(&ap_poll_wait, &wait);
1637 
1638 		flags = 0;
1639 		spin_lock_bh(&ap_device_list_lock);
1640 		list_for_each_entry(ap_dev, &ap_device_list, list) {
1641 			spin_lock(&ap_dev->lock);
1642 			__ap_poll_device(ap_dev, &flags);
1643 			spin_unlock(&ap_dev->lock);
1644 		}
1645 		spin_unlock_bh(&ap_device_list_lock);
1646 	}
1647 	set_current_state(TASK_RUNNING);
1648 	remove_wait_queue(&ap_poll_wait, &wait);
1649 	return 0;
1650 }
1651 
1652 static int ap_poll_thread_start(void)
1653 {
1654 	int rc;
1655 
1656 	if (ap_using_interrupts() || ap_suspend_flag)
1657 		return 0;
1658 	mutex_lock(&ap_poll_thread_mutex);
1659 	if (!ap_poll_kthread) {
1660 		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1661 		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1662 		if (rc)
1663 			ap_poll_kthread = NULL;
1664 	}
1665 	else
1666 		rc = 0;
1667 	mutex_unlock(&ap_poll_thread_mutex);
1668 	return rc;
1669 }
1670 
1671 static void ap_poll_thread_stop(void)
1672 {
1673 	mutex_lock(&ap_poll_thread_mutex);
1674 	if (ap_poll_kthread) {
1675 		kthread_stop(ap_poll_kthread);
1676 		ap_poll_kthread = NULL;
1677 	}
1678 	mutex_unlock(&ap_poll_thread_mutex);
1679 }
1680 
1681 /**
1682  * ap_request_timeout(): Handling of request timeouts
1683  * @data: Holds the AP device.
1684  *
1685  * Handles request timeouts.
1686  */
1687 static void ap_request_timeout(unsigned long data)
1688 {
1689 	struct ap_device *ap_dev = (struct ap_device *) data;
1690 
1691 	if (ap_dev->reset == AP_RESET_ARMED) {
1692 		ap_dev->reset = AP_RESET_DO;
1693 
1694 		if (ap_using_interrupts())
1695 			tasklet_schedule(&ap_tasklet);
1696 	}
1697 }
1698 
1699 static void ap_reset_domain(void)
1700 {
1701 	int i;
1702 
1703 	if (ap_domain_index != -1)
1704 		for (i = 0; i < AP_DEVICES; i++)
1705 			ap_reset_queue(AP_MKQID(i, ap_domain_index));
1706 }
1707 
1708 static void ap_reset_all(void)
1709 {
1710 	int i, j;
1711 
1712 	for (i = 0; i < AP_DOMAINS; i++)
1713 		for (j = 0; j < AP_DEVICES; j++)
1714 			ap_reset_queue(AP_MKQID(j, i));
1715 }
1716 
1717 static struct reset_call ap_reset_call = {
1718 	.fn = ap_reset_all,
1719 };
1720 
1721 /**
1722  * ap_module_init(): The module initialization code.
1723  *
1724  * Initializes the module.
1725  */
1726 int __init ap_module_init(void)
1727 {
1728 	int rc, i;
1729 
1730 	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1731 		pr_warning("%d is not a valid cryptographic domain\n",
1732 			   ap_domain_index);
1733 		return -EINVAL;
1734 	}
1735 	/* In the resume callback we need to know if the user had set the domain.
1736 	 * If so, we cannot just reset it.
1737 	 */
1738 	if (ap_domain_index >= 0)
1739 		user_set_domain = 1;
1740 
1741 	if (ap_instructions_available() != 0) {
1742 		pr_warning("The hardware system does not support "
1743 			   "AP instructions\n");
1744 		return -ENODEV;
1745 	}
1746 	if (ap_interrupts_available()) {
1747 		isc_register(AP_ISC);
1748 		ap_interrupt_indicator = s390_register_adapter_interrupt(
1749 			&ap_interrupt_handler, NULL, AP_ISC);
1750 		if (IS_ERR(ap_interrupt_indicator)) {
1751 			ap_interrupt_indicator = NULL;
1752 			isc_unregister(AP_ISC);
1753 		}
1754 	}
1755 
1756 	register_reset_call(&ap_reset_call);
1757 
1758 	/* Create /sys/bus/ap. */
1759 	rc = bus_register(&ap_bus_type);
1760 	if (rc)
1761 		goto out;
1762 	for (i = 0; ap_bus_attrs[i]; i++) {
1763 		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1764 		if (rc)
1765 			goto out_bus;
1766 	}
1767 
1768 	/* Create /sys/devices/ap. */
1769 	ap_root_device = root_device_register("ap");
1770 	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1771 	if (rc)
1772 		goto out_bus;
1773 
1774 	ap_work_queue = create_singlethread_workqueue("kapwork");
1775 	if (!ap_work_queue) {
1776 		rc = -ENOMEM;
1777 		goto out_root;
1778 	}
1779 
1780 	if (ap_select_domain() == 0)
1781 		ap_scan_bus(NULL);
1782 
1783 	/* Setup the AP bus rescan timer. */
1784 	init_timer(&ap_config_timer);
1785 	ap_config_timer.function = ap_config_timeout;
1786 	ap_config_timer.data = 0;
1787 	ap_config_timer.expires = jiffies + ap_config_time * HZ;
1788 	add_timer(&ap_config_timer);
1789 
1790 	/* Set up the high resolution poll timer.
1791 	 * If we are running under z/VM adjust polling to z/VM polling rate.
1792 	 */
1793 	if (MACHINE_IS_VM)
1794 		poll_timeout = 1500000;
1795 	spin_lock_init(&ap_poll_timer_lock);
1796 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1797 	ap_poll_timer.function = ap_poll_timeout;
1798 
1799 	/* Start the low priority AP bus poll thread. */
1800 	if (ap_thread_flag) {
1801 		rc = ap_poll_thread_start();
1802 		if (rc)
1803 			goto out_work;
1804 	}
1805 
1806 	return 0;
1807 
1808 out_work:
1809 	del_timer_sync(&ap_config_timer);
1810 	hrtimer_cancel(&ap_poll_timer);
1811 	destroy_workqueue(ap_work_queue);
1812 out_root:
1813 	root_device_unregister(ap_root_device);
1814 out_bus:
1815 	while (i--)
1816 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1817 	bus_unregister(&ap_bus_type);
1818 out:
1819 	unregister_reset_call(&ap_reset_call);
1820 	if (ap_using_interrupts()) {
1821 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1822 		isc_unregister(AP_ISC);
1823 	}
1824 	return rc;
1825 }
1826 
1827 static int __ap_match_all(struct device *dev, void *data)
1828 {
1829 	return 1;
1830 }
1831 
1832 /**
1833  * ap_module_exit(): The module termination code
1834  *
1835  * Terminates the module.
1836  */
1837 void ap_module_exit(void)
1838 {
1839 	int i;
1840 	struct device *dev;
1841 
1842 	ap_reset_domain();
1843 	ap_poll_thread_stop();
1844 	del_timer_sync(&ap_config_timer);
1845 	hrtimer_cancel(&ap_poll_timer);
1846 	destroy_workqueue(ap_work_queue);
1847 	tasklet_kill(&ap_tasklet);
1848 	root_device_unregister(ap_root_device);
1849 	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1850 		    __ap_match_all)))
1851 	{
1852 		device_unregister(dev);
1853 		put_device(dev);
1854 	}
1855 	for (i = 0; ap_bus_attrs[i]; i++)
1856 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1857 	bus_unregister(&ap_bus_type);
1858 	unregister_reset_call(&ap_reset_call);
1859 	if (ap_using_interrupts()) {
1860 		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
1861 		isc_unregister(AP_ISC);
1862 	}
1863 }
1864 
1865 module_init(ap_module_init);
1866 module_exit(ap_module_exit);
1867