/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
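/* Minimum number of iucv_tty_buffer elements kept preallocated in the mempool */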
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer		  */
	size_t			sndbuf_len;	/* length of send buffer  */
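/* delay (in jiffies) before buffered output is flushed by sndbuf_work */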
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
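/* Index of the HVC terminal registered as Linux console (see hvc_iucv_init()) */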
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending  = hvc_iucv_path_pending,
	.path_severed  = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending  = hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 *	 members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type    = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful	    */
			break;
		case 2:	/* No message found */
		case 9: /* Message purged   */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to the HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no buffered data, -EIO if there is no established
 * IUCV communication path, or -EPIPE if an existing IUCV communication path
 * has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

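	/* keep the buffer on the outqueue until hvc_iucv_msg_complete()
	 * confirms delivery and removes it (flush_sndbuf_sync() waits for
	 * the outqueue to drain) */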
	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 *	hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up an HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *			=> no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *			=> no delete notifier is called!
 * Finally, the back-end is not notified and, thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *		disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set the state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @id:		Additional data (originally passed to hvc_alloc):
 *		the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed.  The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static	int hvc_iucv_path_pending(struct iucv_path *path,
				  u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found
 * in msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 *		  purged messages   (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}

/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};

/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location (8 bytes) to store the parsed, blank-padded z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 *	 parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

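/* parameter ops to get/set the z/VM user ID filter via the hvc_iucv_allow=
 * kernel parameter (see core_param() at the end of this file) */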
static struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			   "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
			"kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* register IUCV HVC device driver */
	rc = driver_register(&hvc_iucv_driver);
	if (rc)
		goto out_error;

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
				"reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
				"z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
				"z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
				"failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
			rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	if (hvc_iucv_filter)
		kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static	int __init hvc_iucv_config(char *val)
{
	 return strict_strtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);