/*
 * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))

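/*
 * Editor's note on message framing (derived from the struct above): the
 * header consists of version, type, and datalen (4 bytes when packed), so
 * MSG_SIZE(n) = n + offsetof(struct iucv_tty_msg, data) = n + 4.  Sending
 * a 10-byte payload, for example, results in an IUCV message length of 14.
 */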

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending		= hvc_iucv_path_pending,
	.path_severed		= hvc_iucv_path_severed,
	.message_complete	= hvc_iucv_msg_complete,
	.message_pending	= hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}
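
/*
 * Editor's note (illustrative): hvc_iucv_alloc() below registers each
 * terminal with vtermno = HVC_IUCV_MAGIC + index, so the lookup above simply
 * reverses that mapping.  For example, the console instance at index
 * IUCV_HVC_CON_IDX (0) is found via hvc_iucv_get_private(0xc9e4c3e5), which
 * returns hvc_iucv_table[0].
 */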

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * Note: The total message size arises from the internal buffer size and the
 * members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0: /* Successful       */
			break;
		case 2: /* No message found */
		case 9: /* Message purged   */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

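/*
 * Editor's note on send batching (based on the definitions above): output is
 * collected in priv->sndbuf (at most SNDBUF_SIZE = PAGE_SIZE bytes) and the
 * delayed work runs after QUEUE_SNDBUF_DELAY = HZ / 25 jiffies, i.e. about
 * 40 ms, so several put_chars() calls are typically coalesced into a single
 * IUCV message.
 */
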
/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
	if (!priv)
		return;

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
 *	hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
 * instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outq is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1. After the IUCV path has been severed, the iucv_state is set to
 *    IUCV_SEVERED.
 * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *		   => no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *		   => no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPENED) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourself (from the iucv
	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
	 *	 our structure and to set state to TTY_CLOSED.
	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
	 *	 ignore this hangup and keep an established IUCV path open...
	 *	 (...the reason is that we are not able to connect back to the
	 *	 client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed.  The function calls hvc_iucv_cleanup() to clean up the struct
 * hvc_iucv_private instance.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;
	struct iucv_path	*path;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);

	/* sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
 * non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	size_t i;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++)
		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
			return 0;
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	struct hvc_iucv_private *priv;
	u8 nuser_data[16];
	u8 vm_user_id[9];
	int i, rc;

	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i] &&
		    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
			priv = hvc_iucv_table[i];
			break;
		}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found in
 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer	*ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

/**
 * hvc_iucv_pm_freeze() - Freeze PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Sever an established IUCV communication path and
 * trigger a hang-up of the underlying HVC terminal.
 */
static int hvc_iucv_pm_freeze(struct device *dev)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	local_bh_disable();
	hvc_iucv_hangup(priv);
	local_bh_enable();

	return 0;
}

/**
 * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	IUCV HVC terminal device
 *
 * Wake up the HVC thread to trigger hang-up and respective
 * HVC back-end notifier invocations.
 */
static int hvc_iucv_pm_restore_thaw(struct device *dev)
{
	hvc_kick();
	return 0;
}


/* HVC operations */
static struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
};

/* Suspend / resume device operations */
static const struct dev_pm_ops hvc_iucv_pm_ops = {
	.freeze	  = hvc_iucv_pm_freeze,
	.thaw	  = hvc_iucv_pm_restore_thaw,
	.restore  = hvc_iucv_pm_restore_thaw,
};

/* IUCV HVC device driver */
static struct device_driver hvc_iucv_driver = {
	.name = KMSG_COMPONENT,
	.bus  = &iucv_bus,
	.pm   = &hvc_iucv_pm_ops,
};

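/*
 * Editor's note: these hv_ops are invoked by the generic HVC console core
 * (see hvc_console.h above): get_chars/put_chars from the HVC kernel thread,
 * and the notifier_* callbacks on first open, last close, and hangup of the
 * tty, as described in the kernel-doc comments of the respective functions.
 */
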
/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*	PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->driver = &hvc_iucv_driver;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to release
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Buffer (8 bytes) to store the parsed, blank-padded z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}
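
/*
 * Editor's example (illustrative only): parsing "linux01,lnxsys02" takes two
 * calls to hvc_iucv_parse_filter().  The first call stores "LINUX01 "
 * (upper-cased and blank-padded to 8 bytes) in @dest and returns a pointer
 * to "lnxsys02"; the second call stores "LNXSYS02" and returns a pointer to
 * the terminating '\0', which ends the parsing loop in
 * hvc_iucv_setup_filter() below.
 */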

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kzalloc(size * 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end = memchr(start, ' ', 8);
		len = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			   "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
			"kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* register IUCV HVC device driver */
	rc = driver_register(&hvc_iucv_driver);
	if (rc)
		goto out_error;

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
				"reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
				"z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
				"z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
				"failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
			rc);
		goto out_error_iucv;
	}

	return 0;

out_error_iucv:
	iucv_unregister(&hvc_iucv_handler, 0);
out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	if (hvc_iucv_filter)
		kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return strict_strtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
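
/*
 * Editor's usage sketch (not part of the driver; the user IDs are examples):
 * booting a z/VM guest with
 *
 *	hvc_iucv=2 hvc_iucv_allow=linux01,linux02
 *
 * provides two IUCV HVC terminal devices and restricts incoming connection
 * requests to the z/VM user IDs LINUX01 and LINUX02 (filter entries are
 * upper-cased and blank-padded, see hvc_iucv_parse_filter()).  Because
 * hvc_iucv_allow is registered with core_param() and mode 0640, the filter
 * should also be readable and writable at runtime through
 * /sys/module/kernel/parameters/hvc_iucv_allow.
 */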