[linux-2.6-block.git] / drivers / char / virtio_console.c
a23ea924
RR
1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
5084f893
AS
3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
31610434
RR
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
fb08bd27 20#include <linux/cdev.h>
d99393ef 21#include <linux/debugfs.h>
5e38483b 22#include <linux/completion.h>
fb08bd27 23#include <linux/device.h>
31610434 24#include <linux/err.h>
a08fa92d 25#include <linux/freezer.h>
2030fa49 26#include <linux/fs.h>
eb5e89fc
MH
27#include <linux/splice.h>
28#include <linux/pagemap.h>
31610434 29#include <linux/init.h>
38edf58d 30#include <linux/list.h>
2030fa49
AS
31#include <linux/poll.h>
32#include <linux/sched.h>
5a0e3ad6 33#include <linux/slab.h>
38edf58d 34#include <linux/spinlock.h>
31610434
RR
35#include <linux/virtio.h>
36#include <linux/virtio_console.h>
2030fa49 37#include <linux/wait.h>
17634ba2 38#include <linux/workqueue.h>
c22405c9 39#include <linux/module.h>
51df0acc 40#include "../tty/hvc/hvc_console.h"
31610434 41
38edf58d
AS
42/*
43 * This is a global struct for storing common data for all the devices
44 * this driver handles.
45 *
46 * Mainly, it has a linked list for all the consoles in one place so
47 * that callbacks from hvc for get_chars(), put_chars() work properly
48 * across multiple devices and multiple ports per device.
49 */
50struct ports_driver_data {
fb08bd27
AS
51 /* Used for registering chardevs */
52 struct class *class;
53
d99393ef
AS
54 /* Used for exporting per-port information to debugfs */
55 struct dentry *debugfs_dir;
56
6bdf2afd
AS
57 /* List of all the devices we're handling */
58 struct list_head portdevs;
59
fb08bd27
AS
60 /* Number of devices this driver is handling */
61 unsigned int index;
62
d8a02bd5
RR
63 /*
64 * This is used to keep track of the number of hvc consoles
65 * spawned by this driver. This number is given as the first
66 * argument to hvc_alloc(). To correctly map an initial
67 * console spawned via hvc_instantiate to the console being
68 * hooked up via hvc_alloc, we need to pass the same vtermno.
69 *
70 * We also just assume the first console being initialised was
71 * the first one that got used as the initial console.
72 */
73 unsigned int next_vtermno;
74
38edf58d
AS
75 /* All the console devices handled by this driver */
76 struct list_head consoles;
77};
78static struct ports_driver_data pdrvdata;
79
80DEFINE_SPINLOCK(pdrvdata_lock);
5e38483b 81DECLARE_COMPLETION(early_console_added);
38edf58d 82
4f23c573
AS
83/* This struct holds information that's relevant only for console ports */
84struct console {
85 /* We'll place all consoles in a list in the pdrvdata struct */
86 struct list_head list;
87
88 /* The hvc device associated with this console port */
89 struct hvc_struct *hvc;
90
9778829c
AS
91 /* The size of the console */
92 struct winsize ws;
93
4f23c573
AS
94 /*
95 * This is the vtermno we used to register with hvc in
96 * hvc_instantiate() and hvc_alloc(); the hvc callbacks pass
97 * this number back to us so we can tell apart the console
98 * ports handled by this driver.
100 */
101 u32 vtermno;
102};
103
fdb9a054
AS
104struct port_buffer {
105 char *buf;
106
107 /* size of the buffer in *buf above */
108 size_t size;
109
110 /* used length of the buffer */
111 size_t len;
112 /* offset in the buf from which to consume data */
113 size_t offset;
114};
115
17634ba2
AS
116/*
117 * This is a per-device struct that stores data common to all the
118 * ports for that device (vdev->priv).
119 */
120struct ports_device {
6bdf2afd
AS
121 /* Next portdev in the list, head is in the pdrvdata struct */
122 struct list_head list;
123
17634ba2
AS
124 /*
125 * Workqueue handlers where we process deferred work after
126 * notification
127 */
128 struct work_struct control_work;
129
130 struct list_head ports;
131
132 /* To protect the list of ports */
133 spinlock_t ports_lock;
134
135 /* To protect the vq operations for the control channel */
136 spinlock_t cvq_lock;
137
138 /* The current config space is stored here */
b99fa815 139 struct virtio_console_config config;
17634ba2
AS
140
141 /* The virtio device we're associated with */
142 struct virtio_device *vdev;
143
144 /*
145 * A couple of virtqueues for the control channel: one for
146 * guest->host transfers, one for host->guest transfers
147 */
148 struct virtqueue *c_ivq, *c_ovq;
149
150 /* Array of per-port IO virtqueues */
151 struct virtqueue **in_vqs, **out_vqs;
fb08bd27
AS
152
153 /* Used for numbering devices for sysfs and debugfs */
154 unsigned int drv_index;
155
156 /* Major number for this device. Ports will be created as minors. */
157 int chr_major;
17634ba2
AS
158};
159
17e5b4f2
AS
160struct port_stats {
161 unsigned long bytes_sent, bytes_received, bytes_discarded;
162};
163
1c85bf35 164/* This struct holds the per-port data */
21206ede 165struct port {
17634ba2
AS
166 /* Next port in the list, head is in the ports_device */
167 struct list_head list;
168
1c85bf35
AS
169 /* Pointer to the parent virtio_console device */
170 struct ports_device *portdev;
fdb9a054
AS
171
172 /* The current buffer from which data has to be fed to readers */
173 struct port_buffer *inbuf;
21206ede 174
203baab8
AS
175 /*
176 * To protect the operations on the in_vq associated with this
177 * port. Has to be a spinlock because it can be called from
178 * interrupt context (get_char()).
179 */
180 spinlock_t inbuf_lock;
181
cdfadfc1
AS
182 /* Protect the operations on the out_vq. */
183 spinlock_t outvq_lock;
184
1c85bf35
AS
185 /* The IO vqs for this port */
186 struct virtqueue *in_vq, *out_vq;
187
d99393ef
AS
188 /* File in the debugfs directory that exposes this port's information */
189 struct dentry *debugfs_file;
190
17e5b4f2
AS
191 /*
192 * Keep count of the bytes sent, received and discarded for
193 * this port for accounting and debugging purposes. These
194 * counts are not reset across port open / close events.
195 */
196 struct port_stats stats;
197
4f23c573
AS
198 /*
199 * The entries in this struct will be valid if this port is
200 * hooked up to an hvc console
201 */
202 struct console cons;
17634ba2 203
fb08bd27 204 /* Each port associates with a separate char device */
d22a6989 205 struct cdev *cdev;
fb08bd27
AS
206 struct device *dev;
207
b353a6b8
AS
208 /* Reference-counting to handle port hot-unplugs and file operations */
209 struct kref kref;
210
2030fa49
AS
211 /* A waitqueue for poll() or blocking read operations */
212 wait_queue_head_t waitqueue;
213
431edb8a
AS
214 /* The 'name' of the port that we expose via sysfs properties */
215 char *name;
216
3eae0ade
AS
217 /* We can notify apps of host connect / disconnect events via SIGIO */
218 struct fasync_struct *async_queue;
219
17634ba2
AS
220 /* The 'id' to identify the port with the Host */
221 u32 id;
2030fa49 222
cdfadfc1
AS
223 bool outvq_full;
224
2030fa49
AS
225 /* Is the host device open */
226 bool host_connected;
3c7969cc
AS
227
228 /* We should allow only one process to open a port */
229 bool guest_connected;
21206ede 230};
31610434 231
eb5e89fc 232#define MAX_SPLICE_PAGES 32
971f3390
RR
233/* This is the very early arch-specified put chars function. */
234static int (*early_put_chars)(u32, const char *, int);
235
38edf58d
AS
236static struct port *find_port_by_vtermno(u32 vtermno)
237{
238 struct port *port;
4f23c573 239 struct console *cons;
38edf58d
AS
240 unsigned long flags;
241
242 spin_lock_irqsave(&pdrvdata_lock, flags);
4f23c573
AS
243 list_for_each_entry(cons, &pdrvdata.consoles, list) {
244 if (cons->vtermno == vtermno) {
245 port = container_of(cons, struct port, cons);
38edf58d 246 goto out;
4f23c573 247 }
38edf58d
AS
248 }
249 port = NULL;
250out:
251 spin_unlock_irqrestore(&pdrvdata_lock, flags);
252 return port;
253}
254
04950cdf
AS
255static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
256 dev_t dev)
257{
258 struct port *port;
259 unsigned long flags;
260
261 spin_lock_irqsave(&portdev->ports_lock, flags);
262 list_for_each_entry(port, &portdev->ports, list)
d22a6989 263 if (port->cdev->dev == dev)
04950cdf
AS
264 goto out;
265 port = NULL;
266out:
267 spin_unlock_irqrestore(&portdev->ports_lock, flags);
268
269 return port;
270}
271
272static struct port *find_port_by_devt(dev_t dev)
273{
274 struct ports_device *portdev;
275 struct port *port;
276 unsigned long flags;
277
278 spin_lock_irqsave(&pdrvdata_lock, flags);
279 list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
280 port = find_port_by_devt_in_portdev(portdev, dev);
281 if (port)
282 goto out;
283 }
284 port = NULL;
285out:
286 spin_unlock_irqrestore(&pdrvdata_lock, flags);
287 return port;
288}
289
17634ba2
AS
290static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
291{
292 struct port *port;
293 unsigned long flags;
294
295 spin_lock_irqsave(&portdev->ports_lock, flags);
296 list_for_each_entry(port, &portdev->ports, list)
297 if (port->id == id)
298 goto out;
299 port = NULL;
300out:
301 spin_unlock_irqrestore(&portdev->ports_lock, flags);
302
303 return port;
304}
305
203baab8
AS
306static struct port *find_port_by_vq(struct ports_device *portdev,
307 struct virtqueue *vq)
308{
309 struct port *port;
203baab8
AS
310 unsigned long flags;
311
17634ba2
AS
312 spin_lock_irqsave(&portdev->ports_lock, flags);
313 list_for_each_entry(port, &portdev->ports, list)
203baab8
AS
314 if (port->in_vq == vq || port->out_vq == vq)
315 goto out;
203baab8
AS
316 port = NULL;
317out:
17634ba2 318 spin_unlock_irqrestore(&portdev->ports_lock, flags);
203baab8
AS
319 return port;
320}
321
17634ba2
AS
322static bool is_console_port(struct port *port)
323{
324 if (port->cons.hvc)
325 return true;
326 return false;
327}
328
329static inline bool use_multiport(struct ports_device *portdev)
330{
331 /*
332 * This condition can be true when put_chars is called from
333 * early_init
334 */
335 if (!portdev->vdev)
336 return 0;
337 return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
338}
339
fdb9a054
AS
340static void free_buf(struct port_buffer *buf)
341{
342 kfree(buf->buf);
343 kfree(buf);
344}
345
346static struct port_buffer *alloc_buf(size_t buf_size)
347{
348 struct port_buffer *buf;
349
350 buf = kmalloc(sizeof(*buf), GFP_KERNEL);
351 if (!buf)
352 goto fail;
353 buf->buf = kzalloc(buf_size, GFP_KERNEL);
354 if (!buf->buf)
355 goto free_buf;
356 buf->len = 0;
357 buf->offset = 0;
358 buf->size = buf_size;
359 return buf;
360
361free_buf:
362 kfree(buf);
363fail:
364 return NULL;
365}
366
a3cde449 367/* Callers should take appropriate locks */
defde669 368static struct port_buffer *get_inbuf(struct port *port)
a3cde449
AS
369{
370 struct port_buffer *buf;
a3cde449
AS
371 unsigned int len;
372
d25a9dda
AS
373 if (port->inbuf)
374 return port->inbuf;
375
376 buf = virtqueue_get_buf(port->in_vq, &len);
a3cde449
AS
377 if (buf) {
378 buf->len = len;
379 buf->offset = 0;
17e5b4f2 380 port->stats.bytes_received += len;
a3cde449
AS
381 }
382 return buf;
383}
384
e27b5198
AS
385/*
386 * Create a scatter-gather list representing our input buffer and put
387 * it in the queue.
388 *
389 * Callers should take appropriate locks.
390 */
203baab8 391static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
e27b5198
AS
392{
393 struct scatterlist sg[1];
203baab8 394 int ret;
1c85bf35 395
e27b5198
AS
396 sg_init_one(sg, buf->buf, buf->size);
397
f96fde41 398 ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
505b0451 399 virtqueue_kick(vq);
203baab8
AS
400 return ret;
401}
402
88f251ac
AS
403/* Discard any unread data this port has. Callers must take appropriate locks. */
404static void discard_port_data(struct port *port)
405{
406 struct port_buffer *buf;
2d24cdaa 407 unsigned int err;
88f251ac 408
d7a62cd0
AS
409 if (!port->portdev) {
410 /* Device has been unplugged. vqs are already gone. */
411 return;
412 }
2d24cdaa 413 buf = get_inbuf(port);
88f251ac 414
ce072a0c 415 err = 0;
d6933561 416 while (buf) {
17e5b4f2 417 port->stats.bytes_discarded += buf->len - buf->offset;
2d24cdaa 418 if (add_inbuf(port->in_vq, buf) < 0) {
ce072a0c 419 err++;
d6933561
AS
420 free_buf(buf);
421 }
2d24cdaa
AS
422 port->inbuf = NULL;
423 buf = get_inbuf(port);
88f251ac 424 }
ce072a0c 425 if (err)
d6933561 426 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
ce072a0c 427 err);
88f251ac
AS
428}
429
203baab8
AS
430static bool port_has_data(struct port *port)
431{
432 unsigned long flags;
433 bool ret;
434
d25a9dda 435 ret = false;
203baab8 436 spin_lock_irqsave(&port->inbuf_lock, flags);
d6933561 437 port->inbuf = get_inbuf(port);
d25a9dda 438 if (port->inbuf)
d6933561 439 ret = true;
d25a9dda 440
203baab8 441 spin_unlock_irqrestore(&port->inbuf_lock, flags);
203baab8
AS
442 return ret;
443}
444
3425e706
AS
445static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
446 unsigned int event, unsigned int value)
17634ba2
AS
447{
448 struct scatterlist sg[1];
449 struct virtio_console_control cpkt;
450 struct virtqueue *vq;
604b2ad7 451 unsigned int len;
17634ba2 452
3425e706 453 if (!use_multiport(portdev))
17634ba2
AS
454 return 0;
455
3425e706 456 cpkt.id = port_id;
17634ba2
AS
457 cpkt.event = event;
458 cpkt.value = value;
459
3425e706 460 vq = portdev->c_ovq;
17634ba2
AS
461
462 sg_init_one(sg, &cpkt, sizeof(cpkt));
f96fde41 463 if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
505b0451
MT
464 virtqueue_kick(vq);
465 while (!virtqueue_get_buf(vq, &len))
17634ba2
AS
466 cpu_relax();
467 }
468 return 0;
469}
470
3425e706
AS
471static ssize_t send_control_msg(struct port *port, unsigned int event,
472 unsigned int value)
473{
84ec06c5
AS
474 /* Did the port get unplugged before userspace closed it? */
475 if (port->portdev)
476 return __send_control_msg(port->portdev, port->id, event, value);
477 return 0;
3425e706
AS
478}
479
eb5e89fc
MH
480struct buffer_token {
481 union {
482 void *buf;
483 struct scatterlist *sg;
484 } u;
485 bool sgpages;
486};
487
488static void reclaim_sg_pages(struct scatterlist *sg)
489{
490 int i;
491 struct page *page;
492
493 for (i = 0; i < MAX_SPLICE_PAGES; i++) {
494 page = sg_page(&sg[i]);
495 if (!page)
496 break;
497 put_page(page);
498 }
499 kfree(sg);
500}
501
cdfadfc1
AS
502/* Callers must take the port->outvq_lock */
503static void reclaim_consumed_buffers(struct port *port)
504{
eb5e89fc 505 struct buffer_token *tok;
cdfadfc1
AS
506 unsigned int len;
507
d7a62cd0
AS
508 if (!port->portdev) {
509 /* Device has been unplugged. vqs are already gone. */
510 return;
511 }
eb5e89fc
MH
512 while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
513 if (tok->sgpages)
514 reclaim_sg_pages(tok->u.sg);
515 else
516 kfree(tok->u.buf);
517 kfree(tok);
cdfadfc1
AS
518 port->outvq_full = false;
519 }
520}
521
eb5e89fc
MH
522static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
523 int nents, size_t in_count,
524 struct buffer_token *tok, bool nonblock)
f997f00b 525{
f997f00b
AS
526 struct virtqueue *out_vq;
527 ssize_t ret;
cdfadfc1 528 unsigned long flags;
f997f00b
AS
529 unsigned int len;
530
531 out_vq = port->out_vq;
532
cdfadfc1
AS
533 spin_lock_irqsave(&port->outvq_lock, flags);
534
535 reclaim_consumed_buffers(port);
536
eb5e89fc 537 ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
f997f00b
AS
538
539 /* Tell Host to go! */
505b0451 540 virtqueue_kick(out_vq);
f997f00b
AS
541
542 if (ret < 0) {
9ff4cfab 543 in_count = 0;
cdfadfc1 544 goto done;
f997f00b
AS
545 }
546
cdfadfc1
AS
547 if (ret == 0)
548 port->outvq_full = true;
549
550 if (nonblock)
551 goto done;
552
553 /*
554 * Wait till the host acknowledges it pushed out the data we
531295e6
AS
555 * sent. This is done for data from the hvc_console; the tty
556 * operations are performed with spinlocks held so we can't
557 * sleep here. An alternative would be to copy the data to a
558 * buffer and relax the spinning requirement. The downside is
559 * we need to kmalloc a GFP_ATOMIC buffer each time the
560 * console driver writes something out.
cdfadfc1 561 */
505b0451 562 while (!virtqueue_get_buf(out_vq, &len))
f997f00b 563 cpu_relax();
cdfadfc1
AS
564done:
565 spin_unlock_irqrestore(&port->outvq_lock, flags);
17e5b4f2
AS
566
567 port->stats.bytes_sent += in_count;
cdfadfc1
AS
568 /*
569 * We're expected to return the amount of data we wrote -- all
570 * of it
571 */
9ff4cfab 572 return in_count;
f997f00b
AS
573}
574
eb5e89fc
MH
575static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
576 bool nonblock)
577{
578 struct scatterlist sg[1];
579 struct buffer_token *tok;
580
581 tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
582 if (!tok)
583 return -ENOMEM;
584 tok->sgpages = false;
585 tok->u.buf = in_buf;
586
587 sg_init_one(sg, in_buf, in_count);
588
589 return __send_to_port(port, sg, 1, in_count, tok, nonblock);
590}
591
592static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
593 size_t in_count, bool nonblock)
594{
595 struct buffer_token *tok;
596
597 tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
598 if (!tok)
599 return -ENOMEM;
600 tok->sgpages = true;
601 tok->u.sg = sg;
602
603 return __send_to_port(port, sg, nents, in_count, tok, nonblock);
604}
605
203baab8
AS
606/*
607 * Give out the data that's requested from the buffer that we have
608 * queued up.
609 */
b766ceed
AS
610static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
611 bool to_user)
203baab8
AS
612{
613 struct port_buffer *buf;
614 unsigned long flags;
615
616 if (!out_count || !port_has_data(port))
617 return 0;
618
619 buf = port->inbuf;
b766ceed 620 out_count = min(out_count, buf->len - buf->offset);
203baab8 621
b766ceed
AS
622 if (to_user) {
623 ssize_t ret;
624
625 ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
626 if (ret)
627 return -EFAULT;
628 } else {
629 memcpy(out_buf, buf->buf + buf->offset, out_count);
630 }
203baab8 631
203baab8
AS
632 buf->offset += out_count;
633
634 if (buf->offset == buf->len) {
635 /*
636 * We're done using all the data in this buffer.
637 * Re-queue so that the Host can send us more data.
638 */
639 spin_lock_irqsave(&port->inbuf_lock, flags);
640 port->inbuf = NULL;
641
642 if (add_inbuf(port->in_vq, buf) < 0)
fb08bd27 643 dev_warn(port->dev, "failed add_buf\n");
203baab8
AS
644
645 spin_unlock_irqrestore(&port->inbuf_lock, flags);
646 }
b766ceed 647 /* Return the number of bytes actually copied */
203baab8 648 return out_count;
e27b5198
AS
649}
650
2030fa49 651/* The condition that must be true for polling to end */
60caacd3 652static bool will_read_block(struct port *port)
2030fa49 653{
3709ea7a
AS
654 if (!port->guest_connected) {
655 /* Port got hot-unplugged. Let's exit. */
656 return false;
657 }
60caacd3 658 return !port_has_data(port) && port->host_connected;
2030fa49
AS
659}
660
cdfadfc1
AS
661static bool will_write_block(struct port *port)
662{
663 bool ret;
664
60e5e0b8
AS
665 if (!port->guest_connected) {
666 /* Port got hot-unplugged. Let's exit. */
667 return false;
668 }
cdfadfc1
AS
669 if (!port->host_connected)
670 return true;
671
672 spin_lock_irq(&port->outvq_lock);
673 /*
674 * Check if the Host has consumed any buffers since we last
675 * sent data (this is only applicable for nonblocking ports).
676 */
677 reclaim_consumed_buffers(port);
678 ret = port->outvq_full;
679 spin_unlock_irq(&port->outvq_lock);
680
681 return ret;
682}
683
2030fa49
AS
684static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
685 size_t count, loff_t *offp)
686{
687 struct port *port;
688 ssize_t ret;
689
690 port = filp->private_data;
691
692 if (!port_has_data(port)) {
693 /*
694 * If nothing's connected on the host just return 0 in
695 * case of list_empty; this tells the userspace app
696 * that there's no connection
697 */
698 if (!port->host_connected)
699 return 0;
700 if (filp->f_flags & O_NONBLOCK)
701 return -EAGAIN;
702
a08fa92d
AS
703 ret = wait_event_freezable(port->waitqueue,
704 !will_read_block(port));
2030fa49
AS
705 if (ret < 0)
706 return ret;
707 }
b3dddb9e
AS
708 /* Port got hot-unplugged. */
709 if (!port->guest_connected)
710 return -ENODEV;
2030fa49
AS
711 /*
712 * We could've received a disconnection message while we were
713 * waiting for more data.
714 *
715 * This check is not folded into the if() statement above because
716 * we might have received some data, and the host could have
717 * disconnected after we were woken up from our wait. So we
718 * really want to hand over whatever data we have and only then
719 * check for host_connected.
720 */
721 if (!port_has_data(port) && !port->host_connected)
722 return 0;
723
724 return fill_readbuf(port, ubuf, count, true);
725}
726
efe75d24
MH
727static int wait_port_writable(struct port *port, bool nonblock)
728{
729 int ret;
730
731 if (will_write_block(port)) {
732 if (nonblock)
733 return -EAGAIN;
734
735 ret = wait_event_freezable(port->waitqueue,
736 !will_write_block(port));
737 if (ret < 0)
738 return ret;
739 }
740 /* Port got hot-unplugged. */
741 if (!port->guest_connected)
742 return -ENODEV;
743
744 return 0;
745}
746
2030fa49
AS
747static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
748 size_t count, loff_t *offp)
749{
750 struct port *port;
751 char *buf;
752 ssize_t ret;
cdfadfc1 753 bool nonblock;
2030fa49 754
65745422
AS
755 /* Userspace could be out to fool us */
756 if (!count)
757 return 0;
758
2030fa49
AS
759 port = filp->private_data;
760
cdfadfc1
AS
761 nonblock = filp->f_flags & O_NONBLOCK;
762
efe75d24
MH
763 ret = wait_port_writable(port, nonblock);
764 if (ret < 0)
765 return ret;
cdfadfc1 766
2030fa49
AS
767 count = min((size_t)(32 * 1024), count);
768
769 buf = kmalloc(count, GFP_KERNEL);
770 if (!buf)
771 return -ENOMEM;
772
773 ret = copy_from_user(buf, ubuf, count);
774 if (ret) {
775 ret = -EFAULT;
776 goto free_buf;
777 }
778
531295e6
AS
779 /*
780 * We now ask send_buf() to not spin for generic ports -- we
781 * can re-use the same code path that non-blocking file
782 * descriptors take for blocking file descriptors since the
783 * wait is already done and we're certain the write will go
784 * through to the host.
785 */
786 nonblock = true;
cdfadfc1
AS
787 ret = send_buf(port, buf, count, nonblock);
788
789 if (nonblock && ret > 0)
790 goto out;
791
2030fa49
AS
792free_buf:
793 kfree(buf);
cdfadfc1 794out:
2030fa49
AS
795 return ret;
796}
797
eb5e89fc
MH
798struct sg_list {
799 unsigned int n;
800 size_t len;
801 struct scatterlist *sg;
802};
803
804static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
805 struct splice_desc *sd)
806{
807 struct sg_list *sgl = sd->u.data;
ec8fc870 808 unsigned int offset, len;
eb5e89fc
MH
809
810 if (sgl->n == MAX_SPLICE_PAGES)
811 return 0;
812
813 /* Try to lock this page */
814 if (buf->ops->steal(pipe, buf) == 0) {
815 /* Get reference and unlock page for moving */
816 get_page(buf->page);
817 unlock_page(buf->page);
818
819 len = min(buf->len, sd->len);
820 sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
ec8fc870
MH
821 } else {
822 /* Fall back to copying a page */
823 struct page *page = alloc_page(GFP_KERNEL);
824 char *src = buf->ops->map(pipe, buf, 1);
825 char *dst;
826
827 if (!page)
828 return -ENOMEM;
829 dst = kmap(page);
830
831 offset = sd->pos & ~PAGE_MASK;
832
833 len = sd->len;
834 if (len + offset > PAGE_SIZE)
835 len = PAGE_SIZE - offset;
836
837 memcpy(dst + offset, src + buf->offset, len);
838
839 kunmap(page);
840 buf->ops->unmap(pipe, buf, src);
841
842 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
eb5e89fc 843 }
ec8fc870
MH
844 sgl->n++;
845 sgl->len += len;
eb5e89fc
MH
846
847 return len;
848}
849
850/* Faster zero-copy write by splicing */
851static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
852 struct file *filp, loff_t *ppos,
853 size_t len, unsigned int flags)
854{
855 struct port *port = filp->private_data;
856 struct sg_list sgl;
857 ssize_t ret;
858 struct splice_desc sd = {
859 .total_len = len,
860 .flags = flags,
861 .pos = *ppos,
862 .u.data = &sgl,
863 };
864
efe75d24
MH
865 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
866 if (ret < 0)
867 return ret;
868
eb5e89fc
MH
869 sgl.n = 0;
870 sgl.len = 0;
871 sgl.sg = kmalloc(sizeof(struct scatterlist) * MAX_SPLICE_PAGES,
872 GFP_KERNEL);
873 if (unlikely(!sgl.sg))
874 return -ENOMEM;
875
876 sg_init_table(sgl.sg, MAX_SPLICE_PAGES);
877 ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
878 if (likely(ret > 0))
879 ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
880
881 return ret;
882}
883
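The splice_write path above is the zero-copy route into the port: data that already sits in a pipe can be moved page by page (stolen where possible, copied otherwise) instead of going through write(). Below is a hedged userspace sketch of driving it; the /dev/vport0p1 path is an assumption for illustration only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Hedged sketch: stream a file into a virtio serial port without copying. */
static int send_file_zero_copy(const char *path)
{
	int pipefd[2];
	ssize_t n;
	int in = open(path, O_RDONLY);
	int out = open("/dev/vport0p1", O_WRONLY);	/* hypothetical port node */

	if (in < 0 || out < 0 || pipe(pipefd) < 0)
		return -1;

	/* file -> pipe, then pipe -> port; the second splice() lands in port_fops_splice_write() */
	while ((n = splice(in, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE)) > 0)
		if (splice(pipefd[0], NULL, out, NULL, n, SPLICE_F_MOVE) < 0)
			break;

	close(pipefd[0]);
	close(pipefd[1]);
	close(in);
	close(out);
	return n < 0 ? -1 : 0;
}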
2030fa49
AS
884static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
885{
886 struct port *port;
887 unsigned int ret;
888
889 port = filp->private_data;
890 poll_wait(filp, &port->waitqueue, wait);
891
8529a504
AS
892 if (!port->guest_connected) {
893 /* Port got unplugged */
894 return POLLHUP;
895 }
2030fa49 896 ret = 0;
6df7aadc 897 if (!will_read_block(port))
2030fa49 898 ret |= POLLIN | POLLRDNORM;
cdfadfc1 899 if (!will_write_block(port))
2030fa49
AS
900 ret |= POLLOUT;
901 if (!port->host_connected)
902 ret |= POLLHUP;
903
904 return ret;
905}
906
b353a6b8
AS
907static void remove_port(struct kref *kref);
908
2030fa49
AS
909static int port_fops_release(struct inode *inode, struct file *filp)
910{
911 struct port *port;
912
913 port = filp->private_data;
914
915 /* Notify host of port being closed */
916 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
917
88f251ac 918 spin_lock_irq(&port->inbuf_lock);
3c7969cc
AS
919 port->guest_connected = false;
920
88f251ac
AS
921 discard_port_data(port);
922
923 spin_unlock_irq(&port->inbuf_lock);
924
cdfadfc1
AS
925 spin_lock_irq(&port->outvq_lock);
926 reclaim_consumed_buffers(port);
927 spin_unlock_irq(&port->outvq_lock);
928
b353a6b8
AS
929 /*
930 * Locks aren't necessary here as a port can't be opened after
931 * unplug, and if a port isn't unplugged, a kref would already
932 * exist for the port. Plus, taking ports_lock here would
933 * create a dependency on other locks taken by functions
934 * inside remove_port if we're the last holder of the port,
935 * creating many problems.
936 */
937 kref_put(&port->kref, remove_port);
938
2030fa49
AS
939 return 0;
940}
941
942static int port_fops_open(struct inode *inode, struct file *filp)
943{
944 struct cdev *cdev = inode->i_cdev;
945 struct port *port;
8ad37e83 946 int ret;
2030fa49 947
04950cdf 948 port = find_port_by_devt(cdev->dev);
2030fa49
AS
949 filp->private_data = port;
950
b353a6b8
AS
951 /* Prevent against a port getting hot-unplugged at the same time */
952 spin_lock_irq(&port->portdev->ports_lock);
953 kref_get(&port->kref);
954 spin_unlock_irq(&port->portdev->ports_lock);
955
2030fa49
AS
956 /*
957 * Don't allow opening of console port devices -- that's done
958 * via /dev/hvc
959 */
8ad37e83
AS
960 if (is_console_port(port)) {
961 ret = -ENXIO;
962 goto out;
963 }
2030fa49 964
3c7969cc
AS
965 /* Allow only one process to open a particular port at a time */
966 spin_lock_irq(&port->inbuf_lock);
967 if (port->guest_connected) {
968 spin_unlock_irq(&port->inbuf_lock);
8ad37e83
AS
969 ret = -EMFILE;
970 goto out;
3c7969cc
AS
971 }
972
973 port->guest_connected = true;
974 spin_unlock_irq(&port->inbuf_lock);
975
cdfadfc1
AS
976 spin_lock_irq(&port->outvq_lock);
977 /*
978 * There might be a chance that we missed reclaiming a few
979 * buffers in the window of the port getting previously closed
980 * and opening now.
981 */
982 reclaim_consumed_buffers(port);
983 spin_unlock_irq(&port->outvq_lock);
984
299fb61c
AS
985 nonseekable_open(inode, filp);
986
2030fa49
AS
987 /* Notify host of port being opened */
988 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
989
990 return 0;
8ad37e83 991out:
b353a6b8 992 kref_put(&port->kref, remove_port);
8ad37e83 993 return ret;
2030fa49
AS
994}
995
3eae0ade
AS
996static int port_fops_fasync(int fd, struct file *filp, int mode)
997{
998 struct port *port;
999
1000 port = filp->private_data;
1001 return fasync_helper(fd, filp, mode, &port->async_queue);
1002}
1003
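Userspace has to opt in to those SIGIO notifications with the usual fcntl() sequence; a minimal, hedged sketch follows (the device path is hypothetical).

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void sigio_handler(int sig)
{
	/* the port signalled host connect/disconnect or new data; go poll()/read() it */
}

int main(void)
{
	int fd = open("/dev/vport0p1", O_RDWR | O_NONBLOCK);	/* hypothetical port node */

	if (fd < 0)
		return 1;

	signal(SIGIO, sigio_handler);
	fcntl(fd, F_SETOWN, getpid());				/* deliver SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* ends up in port_fops_fasync() */

	pause();						/* wait for a signal */
	close(fd);
	return 0;
}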
2030fa49
AS
1004/*
1005 * The file operations that we support: programs in the guest can open
1006 * a console device, read from it, write to it, poll for data and
1007 * close it. The devices are at
1008 * /dev/vport<device number>p<port number>
1009 */
1010static const struct file_operations port_fops = {
1011 .owner = THIS_MODULE,
1012 .open = port_fops_open,
1013 .read = port_fops_read,
1014 .write = port_fops_write,
eb5e89fc 1015 .splice_write = port_fops_splice_write,
2030fa49
AS
1016 .poll = port_fops_poll,
1017 .release = port_fops_release,
3eae0ade 1018 .fasync = port_fops_fasync,
299fb61c 1019 .llseek = no_llseek,
2030fa49
AS
1020};
1021
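To make the intended usage concrete, here is a hedged sketch of a guest application exercising these operations end to end; the /dev/vport0p1 path and buffer size are assumptions for the example, only the vport<device number>p<port number> naming comes from the driver.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	struct pollfd pfd;
	int fd = open("/dev/vport0p1", O_RDWR);	/* hypothetical port node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN | POLLOUT;
	/* POLLHUP in revents means the host side is not connected (see port_fops_poll) */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLOUT))
		write(fd, "hello", strlen("hello"));

	n = read(fd, buf, sizeof(buf));	/* 0 means the host is disconnected */
	if (n > 0)
		printf("got %zd bytes\n", n);

	close(fd);
	return 0;
}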
a23ea924
RR
1022/*
1023 * The put_chars() callback is pretty straightforward.
31610434 1024 *
a23ea924
RR
1025 * We turn the characters into a scatter-gather list, add it to the
1026 * output queue and then kick the Host. Then we sit here waiting for
1027 * it to finish: inefficient in theory, but in practice
1028 * implementations will do it immediately (lguest's Launcher does).
1029 */
31610434
RR
1030static int put_chars(u32 vtermno, const char *buf, int count)
1031{
21206ede 1032 struct port *port;
38edf58d 1033
162a689a
FD
1034 if (unlikely(early_put_chars))
1035 return early_put_chars(vtermno, buf, count);
1036
38edf58d
AS
1037 port = find_port_by_vtermno(vtermno);
1038 if (!port)
6dc69f97 1039 return -EPIPE;
31610434 1040
cdfadfc1 1041 return send_buf(port, (void *)buf, count, false);
31610434
RR
1042}
1043
a23ea924
RR
1044/*
1045 * get_chars() is the callback from the hvc_console infrastructure
1046 * when an interrupt is received.
31610434 1047 *
203baab8
AS
1048 * We call out to fill_readbuf that gets us the required data from the
1049 * buffers that are queued up.
a23ea924 1050 */
31610434
RR
1051static int get_chars(u32 vtermno, char *buf, int count)
1052{
21206ede
RR
1053 struct port *port;
1054
6dc69f97
AS
1055 /* If we've not set up the port yet, we have no input to give. */
1056 if (unlikely(early_put_chars))
1057 return 0;
1058
38edf58d
AS
1059 port = find_port_by_vtermno(vtermno);
1060 if (!port)
6dc69f97 1061 return -EPIPE;
21206ede 1062
31610434 1063 /* If we don't have an input queue yet, we can't get input. */
21206ede 1064 BUG_ON(!port->in_vq);
31610434 1065
b766ceed 1066 return fill_readbuf(port, buf, count, false);
31610434 1067}
31610434 1068
cb06e367 1069static void resize_console(struct port *port)
c2983458 1070{
cb06e367 1071 struct virtio_device *vdev;
c2983458 1072
2de16a49 1073 /* The port could have been hot-unplugged */
9778829c 1074 if (!port || !is_console_port(port))
2de16a49
AS
1075 return;
1076
cb06e367 1077 vdev = port->portdev->vdev;
9778829c
AS
1078 if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1079 hvc_resize(port->cons.hvc, port->cons.ws);
c2983458
CB
1080}
1081
38edf58d 1082/* We set the configuration at this point, since we now have a tty */
91fcad19
CB
1083static int notifier_add_vio(struct hvc_struct *hp, int data)
1084{
38edf58d
AS
1085 struct port *port;
1086
1087 port = find_port_by_vtermno(hp->vtermno);
1088 if (!port)
1089 return -EINVAL;
1090
91fcad19 1091 hp->irq_requested = 1;
cb06e367 1092 resize_console(port);
c2983458 1093
91fcad19
CB
1094 return 0;
1095}
1096
1097static void notifier_del_vio(struct hvc_struct *hp, int data)
1098{
1099 hp->irq_requested = 0;
1100}
1101
17634ba2 1102/* The operations for console ports. */
1dff3996 1103static const struct hv_ops hv_ops = {
971f3390
RR
1104 .get_chars = get_chars,
1105 .put_chars = put_chars,
1106 .notifier_add = notifier_add_vio,
1107 .notifier_del = notifier_del_vio,
1108 .notifier_hangup = notifier_del_vio,
1109};
1110
1111/*
1112 * Console drivers are initialized very early so boot messages can go
1113 * out, so we do things slightly differently from the generic virtio
1114 * initialization of the net and block drivers.
1115 *
1116 * At this stage, the console is output-only. It's too early to set
1117 * up a virtqueue, so we let the drivers do some boutique early-output
1118 * thing.
1119 */
1120int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
1121{
1122 early_put_chars = put_chars;
1123 return hvc_instantiate(0, 0, &hv_ops);
1124}
1125
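A hedged sketch of how that hook is meant to be wired up; my_early_put_chars() and the setup function are hypothetical stand-ins for whatever arch/platform code owns the early console.

/* Hypothetical early-boot glue; virtio_cons_early_init() above is the real entry point. */
static int my_early_put_chars(u32 vtermno, const char *buf, int count)
{
	/* push 'count' bytes out through some platform-specific channel */
	return count;
}

static int __init my_platform_console_setup(void)
{
	/* registers vtermno 0 with hvc and stashes the callback in early_put_chars */
	return virtio_cons_early_init(my_early_put_chars);
}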
17634ba2 1126int init_port_console(struct port *port)
cfa6d379
AS
1127{
1128 int ret;
1129
1130 /*
1131 * The Host's telling us this port is a console port. Hook it
1132 * up with an hvc console.
1133 *
1134 * To set up and manage our virtual console, we call
1135 * hvc_alloc().
1136 *
1137 * The first argument of hvc_alloc() is the virtual console
1138 * number. The second argument is the parameter for the
1139 * notification mechanism (like irq number). We currently
1140 * leave this as zero, virtqueues have implicit notifications.
1141 *
1142 * The third argument is a "struct hv_ops" containing the
1143 * put_chars() get_chars(), notifier_add() and notifier_del()
1144 * pointers. The final argument is the output buffer size: we
1145 * can do any size, so we put PAGE_SIZE here.
1146 */
1147 port->cons.vtermno = pdrvdata.next_vtermno;
1148
1149 port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1150 if (IS_ERR(port->cons.hvc)) {
1151 ret = PTR_ERR(port->cons.hvc);
298add72
AS
1152 dev_err(port->dev,
1153 "error %d allocating hvc for port\n", ret);
cfa6d379
AS
1154 port->cons.hvc = NULL;
1155 return ret;
1156 }
1157 spin_lock_irq(&pdrvdata_lock);
1158 pdrvdata.next_vtermno++;
1159 list_add_tail(&port->cons.list, &pdrvdata.consoles);
1160 spin_unlock_irq(&pdrvdata_lock);
3c7969cc 1161 port->guest_connected = true;
cfa6d379 1162
1d05160b
AS
1163 /*
1164 * Start using the new console output if this is the first
1165 * console to come up.
1166 */
1167 if (early_put_chars)
1168 early_put_chars = NULL;
1169
2030fa49
AS
1170 /* Notify host of port being opened */
1171 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1172
cfa6d379
AS
1173 return 0;
1174}
1175
431edb8a
AS
1176static ssize_t show_port_name(struct device *dev,
1177 struct device_attribute *attr, char *buffer)
1178{
1179 struct port *port;
1180
1181 port = dev_get_drvdata(dev);
1182
1183 return sprintf(buffer, "%s\n", port->name);
1184}
1185
1186static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1187
1188static struct attribute *port_sysfs_entries[] = {
1189 &dev_attr_name.attr,
1190 NULL
1191};
1192
1193static struct attribute_group port_attribute_group = {
1194 .name = NULL, /* put in device directory */
1195 .attrs = port_sysfs_entries,
1196};
1197
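The attribute ends up at /sys/class/virtio-ports/vport<device number>p<port number>/name; a hedged sketch of reading it from userspace (the concrete vport0p1 path is an assumption):

#include <stdio.h>

int main(void)
{
	char name[128];
	FILE *f = fopen("/sys/class/virtio-ports/vport0p1/name", "r");	/* hypothetical port */

	if (!f)
		return 1;
	if (fgets(name, sizeof(name), f))
		printf("port name: %s", name);	/* show_port_name() appends the '\n' */
	fclose(f);
	return 0;
}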
d99393ef
AS
1198static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1199 size_t count, loff_t *offp)
1200{
1201 struct port *port;
1202 char *buf;
1203 ssize_t ret, out_offset, out_count;
1204
1205 out_count = 1024;
1206 buf = kmalloc(out_count, GFP_KERNEL);
1207 if (!buf)
1208 return -ENOMEM;
1209
1210 port = filp->private_data;
1211 out_offset = 0;
1212 out_offset += snprintf(buf + out_offset, out_count,
1213 "name: %s\n", port->name ? port->name : "");
1214 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1215 "guest_connected: %d\n", port->guest_connected);
1216 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1217 "host_connected: %d\n", port->host_connected);
cdfadfc1
AS
1218 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1219 "outvq_full: %d\n", port->outvq_full);
17e5b4f2
AS
1220 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1221 "bytes_sent: %lu\n", port->stats.bytes_sent);
1222 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1223 "bytes_received: %lu\n",
1224 port->stats.bytes_received);
1225 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1226 "bytes_discarded: %lu\n",
1227 port->stats.bytes_discarded);
d99393ef
AS
1228 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1229 "is_console: %s\n",
1230 is_console_port(port) ? "yes" : "no");
1231 out_offset += snprintf(buf + out_offset, out_count - out_offset,
1232 "console_vtermno: %u\n", port->cons.vtermno);
1233
1234 ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1235 kfree(buf);
1236 return ret;
1237}
1238
1239static const struct file_operations port_debugfs_ops = {
1240 .owner = THIS_MODULE,
234e3405 1241 .open = simple_open,
d99393ef
AS
1242 .read = debugfs_read,
1243};
1244
9778829c
AS
1245static void set_console_size(struct port *port, u16 rows, u16 cols)
1246{
1247 if (!port || !is_console_port(port))
1248 return;
1249
1250 port->cons.ws.ws_row = rows;
1251 port->cons.ws.ws_col = cols;
1252}
1253
c446f8fc
AS
1254static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1255{
1256 struct port_buffer *buf;
1257 unsigned int nr_added_bufs;
1258 int ret;
1259
1260 nr_added_bufs = 0;
1261 do {
1262 buf = alloc_buf(PAGE_SIZE);
1263 if (!buf)
1264 break;
1265
1266 spin_lock_irq(lock);
1267 ret = add_inbuf(vq, buf);
1268 if (ret < 0) {
1269 spin_unlock_irq(lock);
1270 free_buf(buf);
1271 break;
1272 }
1273 nr_added_bufs++;
1274 spin_unlock_irq(lock);
1275 } while (ret > 0);
1276
1277 return nr_added_bufs;
1278}
1279
3eae0ade
AS
1280static void send_sigio_to_port(struct port *port)
1281{
1282 if (port->async_queue && port->guest_connected)
1283 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1284}
1285
c446f8fc
AS
1286static int add_port(struct ports_device *portdev, u32 id)
1287{
1288 char debugfs_name[16];
1289 struct port *port;
1290 struct port_buffer *buf;
1291 dev_t devt;
1292 unsigned int nr_added_bufs;
1293 int err;
1294
1295 port = kmalloc(sizeof(*port), GFP_KERNEL);
1296 if (!port) {
1297 err = -ENOMEM;
1298 goto fail;
1299 }
b353a6b8 1300 kref_init(&port->kref);
c446f8fc
AS
1301
1302 port->portdev = portdev;
1303 port->id = id;
1304
1305 port->name = NULL;
1306 port->inbuf = NULL;
1307 port->cons.hvc = NULL;
3eae0ade 1308 port->async_queue = NULL;
c446f8fc 1309
9778829c
AS
1310 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1311
c446f8fc 1312 port->host_connected = port->guest_connected = false;
17e5b4f2 1313 port->stats = (struct port_stats) { 0 };
c446f8fc 1314
cdfadfc1
AS
1315 port->outvq_full = false;
1316
c446f8fc
AS
1317 port->in_vq = portdev->in_vqs[port->id];
1318 port->out_vq = portdev->out_vqs[port->id];
1319
d22a6989
AS
1320 port->cdev = cdev_alloc();
1321 if (!port->cdev) {
1322 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1323 err = -ENOMEM;
1324 goto free_port;
1325 }
1326 port->cdev->ops = &port_fops;
c446f8fc
AS
1327
1328 devt = MKDEV(portdev->chr_major, id);
d22a6989 1329 err = cdev_add(port->cdev, devt, 1);
c446f8fc
AS
1330 if (err < 0) {
1331 dev_err(&port->portdev->vdev->dev,
1332 "Error %d adding cdev for port %u\n", err, id);
d22a6989 1333 goto free_cdev;
c446f8fc
AS
1334 }
1335 port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1336 devt, port, "vport%up%u",
1337 port->portdev->drv_index, id);
1338 if (IS_ERR(port->dev)) {
1339 err = PTR_ERR(port->dev);
1340 dev_err(&port->portdev->vdev->dev,
1341 "Error %d creating device for port %u\n",
1342 err, id);
1343 goto free_cdev;
1344 }
1345
1346 spin_lock_init(&port->inbuf_lock);
cdfadfc1 1347 spin_lock_init(&port->outvq_lock);
c446f8fc
AS
1348 init_waitqueue_head(&port->waitqueue);
1349
1350 /* Fill the in_vq with buffers so the host can send us data. */
1351 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1352 if (!nr_added_bufs) {
1353 dev_err(port->dev, "Error allocating inbufs\n");
1354 err = -ENOMEM;
1355 goto free_device;
1356 }
1357
1358 /*
1359 * If we're not using multiport support, this has to be a console port
1360 */
1361 if (!use_multiport(port->portdev)) {
1362 err = init_port_console(port);
1363 if (err)
1364 goto free_inbufs;
1365 }
1366
1367 spin_lock_irq(&portdev->ports_lock);
1368 list_add_tail(&port->list, &port->portdev->ports);
1369 spin_unlock_irq(&portdev->ports_lock);
1370
1371 /*
1372 * Tell the Host we're set so that it can send us various
1373 * configuration parameters for this port (eg, port name,
1374 * caching, whether this is a console port, etc.)
1375 */
1376 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1377
1378 if (pdrvdata.debugfs_dir) {
1379 /*
1380 * Finally, create the debugfs file that we can use to
1381 * inspect a port's state at any time
1382 */
1383 sprintf(debugfs_name, "vport%up%u",
1384 port->portdev->drv_index, id);
1385 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1386 pdrvdata.debugfs_dir,
1387 port,
1388 &port_debugfs_ops);
1389 }
1390 return 0;
1391
1392free_inbufs:
1393 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1394 free_buf(buf);
1395free_device:
1396 device_destroy(pdrvdata.class, port->dev->devt);
1397free_cdev:
d22a6989 1398 cdev_del(port->cdev);
c446f8fc
AS
1399free_port:
1400 kfree(port);
1401fail:
1402 /* The host might want to notify management sw about port add failure */
0643e4c6 1403 __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
c446f8fc
AS
1404 return err;
1405}
1406
b353a6b8
AS
1407/* No users remain, remove all port-specific data. */
1408static void remove_port(struct kref *kref)
1409{
1410 struct port *port;
1411
1412 port = container_of(kref, struct port, kref);
1413
1414 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1415 device_destroy(pdrvdata.class, port->dev->devt);
1416 cdev_del(port->cdev);
1417
1418 kfree(port->name);
1419
1420 debugfs_remove(port->debugfs_file);
1421
1422 kfree(port);
1423}
1424
a0e2dbfc
AS
1425static void remove_port_data(struct port *port)
1426{
1427 struct port_buffer *buf;
1428
1429 /* Remove unused data this port might have received. */
1430 discard_port_data(port);
1431
1432 reclaim_consumed_buffers(port);
1433
1434 /* Remove buffers we queued up for the Host to send us data in. */
1435 while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1436 free_buf(buf);
1437}
1438
b353a6b8
AS
1439/*
1440 * Port got unplugged. Remove port from portdev's list and drop the
1441 * kref reference. If no userspace has this port opened, it will
1442 * result in immediate removal of the port.
1443 */
1444static void unplug_port(struct port *port)
1f7aa42d 1445{
b353a6b8
AS
1446 spin_lock_irq(&port->portdev->ports_lock);
1447 list_del(&port->list);
1448 spin_unlock_irq(&port->portdev->ports_lock);
1449
0047634d
AS
1450 if (port->guest_connected) {
1451 port->guest_connected = false;
1452 port->host_connected = false;
1453 wake_up_interruptible(&port->waitqueue);
a461e11e
AS
1454
1455 /* Let the app know the port is going down. */
1456 send_sigio_to_port(port);
0047634d
AS
1457 }
1458
1f7aa42d
AS
1459 if (is_console_port(port)) {
1460 spin_lock_irq(&pdrvdata_lock);
1461 list_del(&port->cons.list);
1462 spin_unlock_irq(&pdrvdata_lock);
1463 hvc_remove(port->cons.hvc);
1464 }
1f7aa42d 1465
a0e2dbfc 1466 remove_port_data(port);
a9cdd485 1467
b353a6b8
AS
1468 /*
1469 * We should just assume the device itself has gone off --
1470 * else a close on an open port later will try to send out a
1471 * control message.
1472 */
1473 port->portdev = NULL;
d99393ef 1474
b353a6b8
AS
1475 /*
1476 * Locks around here are not necessary - a port can't be
1477 * opened after we removed the port struct from ports_list
1478 * above.
1479 */
1480 kref_put(&port->kref, remove_port);
1f7aa42d
AS
1481}
1482
17634ba2
AS
1483/* Any private messages that the Host and Guest want to share */
1484static void handle_control_message(struct ports_device *portdev,
1485 struct port_buffer *buf)
1486{
1487 struct virtio_console_control *cpkt;
1488 struct port *port;
431edb8a
AS
1489 size_t name_size;
1490 int err;
17634ba2
AS
1491
1492 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1493
1494 port = find_port_by_id(portdev, cpkt->id);
f909f850 1495 if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
17634ba2
AS
1496 /* No valid header at start of buffer. Drop it. */
1497 dev_dbg(&portdev->vdev->dev,
1498 "Invalid index %u in control packet\n", cpkt->id);
1499 return;
1500 }
1501
1502 switch (cpkt->event) {
f909f850
AS
1503 case VIRTIO_CONSOLE_PORT_ADD:
1504 if (port) {
1d05160b
AS
1505 dev_dbg(&portdev->vdev->dev,
1506 "Port %u already added\n", port->id);
f909f850
AS
1507 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1508 break;
1509 }
1510 if (cpkt->id >= portdev->config.max_nr_ports) {
1511 dev_warn(&portdev->vdev->dev,
1512 "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1513 cpkt->id, portdev->config.max_nr_ports - 1);
1514 break;
1515 }
1516 add_port(portdev, cpkt->id);
1517 break;
1518 case VIRTIO_CONSOLE_PORT_REMOVE:
b353a6b8 1519 unplug_port(port);
f909f850 1520 break;
17634ba2
AS
1521 case VIRTIO_CONSOLE_CONSOLE_PORT:
1522 if (!cpkt->value)
1523 break;
1524 if (is_console_port(port))
1525 break;
1526
1527 init_port_console(port);
5e38483b 1528 complete(&early_console_added);
17634ba2
AS
1529 /*
1530 * Could remove the port here in case init fails - but
1531 * have to notify the host first.
1532 */
1533 break;
8345adbf
AS
1534 case VIRTIO_CONSOLE_RESIZE: {
1535 struct {
1536 __u16 rows;
1537 __u16 cols;
1538 } size;
1539
17634ba2
AS
1540 if (!is_console_port(port))
1541 break;
8345adbf
AS
1542
1543 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1544 sizeof(size));
1545 set_console_size(port, size.rows, size.cols);
1546
17634ba2
AS
1547 port->cons.hvc->irq_requested = 1;
1548 resize_console(port);
1549 break;
8345adbf 1550 }
2030fa49
AS
1551 case VIRTIO_CONSOLE_PORT_OPEN:
1552 port->host_connected = cpkt->value;
1553 wake_up_interruptible(&port->waitqueue);
cdfadfc1
AS
1554 /*
1555 * If the host port got closed and the host had any
1556 * unconsumed buffers, we'll be able to reclaim them
1557 * now.
1558 */
1559 spin_lock_irq(&port->outvq_lock);
1560 reclaim_consumed_buffers(port);
1561 spin_unlock_irq(&port->outvq_lock);
3eae0ade
AS
1562
1563 /*
1564 * If the guest is connected, it'll be interested in
1565 * knowing the host connection state changed.
1566 */
1567 send_sigio_to_port(port);
2030fa49 1568 break;
431edb8a 1569 case VIRTIO_CONSOLE_PORT_NAME:
291024ef
AS
1570 /*
1571 * If we woke up after hibernation, we can get this
1572 * again. Skip it in that case.
1573 */
1574 if (port->name)
1575 break;
1576
431edb8a
AS
1577 /*
1578 * Skip the size of the header and the cpkt to get the size
1579 * of the name that was sent
1580 */
1581 name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1582
1583 port->name = kmalloc(name_size, GFP_KERNEL);
1584 if (!port->name) {
1585 dev_err(port->dev,
1586 "Not enough space to store port name\n");
1587 break;
1588 }
1589 strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1590 name_size - 1);
1591 port->name[name_size - 1] = 0;
1592
1593 /*
1594 * Since we only have one sysfs attribute, 'name',
1595 * create it only if we have a name for the port.
1596 */
1597 err = sysfs_create_group(&port->dev->kobj,
1598 &port_attribute_group);
ec64213c 1599 if (err) {
431edb8a
AS
1600 dev_err(port->dev,
1601 "Error %d creating sysfs device attributes\n",
1602 err);
ec64213c
AS
1603 } else {
1604 /*
1605 * Generate a udev event so that appropriate
1606 * symlinks can be created based on udev
1607 * rules.
1608 */
1609 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1610 }
431edb8a 1611 break;
17634ba2
AS
1612 }
1613}
1614
1615static void control_work_handler(struct work_struct *work)
1616{
1617 struct ports_device *portdev;
1618 struct virtqueue *vq;
1619 struct port_buffer *buf;
1620 unsigned int len;
1621
1622 portdev = container_of(work, struct ports_device, control_work);
1623 vq = portdev->c_ivq;
1624
1625 spin_lock(&portdev->cvq_lock);
505b0451 1626 while ((buf = virtqueue_get_buf(vq, &len))) {
17634ba2
AS
1627 spin_unlock(&portdev->cvq_lock);
1628
1629 buf->len = len;
1630 buf->offset = 0;
1631
1632 handle_control_message(portdev, buf);
1633
1634 spin_lock(&portdev->cvq_lock);
1635 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1636 dev_warn(&portdev->vdev->dev,
1637 "Error adding buffer to queue\n");
1638 free_buf(buf);
1639 }
1640 }
1641 spin_unlock(&portdev->cvq_lock);
1642}
1643
2770c5ea
AS
1644static void out_intr(struct virtqueue *vq)
1645{
1646 struct port *port;
1647
1648 port = find_port_by_vq(vq->vdev->priv, vq);
1649 if (!port)
1650 return;
1651
1652 wake_up_interruptible(&port->waitqueue);
1653}
1654
17634ba2
AS
1655static void in_intr(struct virtqueue *vq)
1656{
1657 struct port *port;
1658 unsigned long flags;
1659
1660 port = find_port_by_vq(vq->vdev->priv, vq);
1661 if (!port)
1662 return;
1663
1664 spin_lock_irqsave(&port->inbuf_lock, flags);
d25a9dda 1665 port->inbuf = get_inbuf(port);
17634ba2 1666
88f251ac
AS
1667 /*
1668 * Don't queue up data when port is closed. This condition
1669 * can be reached when a console port is not yet connected (no
1670 * tty is spawned) and the host sends out data to console
1671 * ports. For generic serial ports, the host won't
1672 * (shouldn't) send data till the guest is connected.
1673 */
1674 if (!port->guest_connected)
1675 discard_port_data(port);
1676
17634ba2
AS
1677 spin_unlock_irqrestore(&port->inbuf_lock, flags);
1678
2030fa49
AS
1679 wake_up_interruptible(&port->waitqueue);
1680
55f6bcce
AS
1681 /* Send a SIGIO indicating new data in case the process asked for it */
1682 send_sigio_to_port(port);
1683
17634ba2
AS
1684 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1685 hvc_kick();
1686}
1687
1688static void control_intr(struct virtqueue *vq)
1689{
1690 struct ports_device *portdev;
1691
1692 portdev = vq->vdev->priv;
1693 schedule_work(&portdev->control_work);
1694}
1695
7f5d810d
AS
1696static void config_intr(struct virtio_device *vdev)
1697{
1698 struct ports_device *portdev;
1699
1700 portdev = vdev->priv;
99f905f8 1701
4038f5b7 1702 if (!use_multiport(portdev)) {
9778829c
AS
1703 struct port *port;
1704 u16 rows, cols;
1705
1706 vdev->config->get(vdev,
1707 offsetof(struct virtio_console_config, cols),
1708 &cols, sizeof(u16));
1709 vdev->config->get(vdev,
1710 offsetof(struct virtio_console_config, rows),
1711 &rows, sizeof(u16));
1712
1713 port = find_port_by_id(portdev, 0);
1714 set_console_size(port, rows, cols);
1715
4038f5b7
AS
1716 /*
1717 * We'll use this way of resizing only for legacy
1718 * support. For newer userspace
1719 * (VIRTIO_CONSOLE_F_MULTPORT+), use control messages
1720 * to indicate console size changes so that it can be
1721 * done per-port.
1722 */
9778829c 1723 resize_console(port);
4038f5b7 1724 }
7f5d810d
AS
1725}
1726
2658a79a
AS
1727static int init_vqs(struct ports_device *portdev)
1728{
1729 vq_callback_t **io_callbacks;
1730 char **io_names;
1731 struct virtqueue **vqs;
17634ba2 1732 u32 i, j, nr_ports, nr_queues;
2658a79a
AS
1733 int err;
1734
17634ba2
AS
1735 nr_ports = portdev->config.max_nr_ports;
1736 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
2658a79a
AS
1737
1738 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
2658a79a 1739 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
2658a79a 1740 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
2658a79a
AS
1741 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1742 GFP_KERNEL);
2658a79a
AS
1743 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1744 GFP_KERNEL);
22e132ff 1745 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
286f9a22 1746 !portdev->out_vqs) {
2658a79a 1747 err = -ENOMEM;
22e132ff 1748 goto free;
2658a79a
AS
1749 }
1750
17634ba2
AS
1751 /*
1752 * For backward compat (newer host but older guest), the host
1753 * spawns a console port first and also inits the vqs for port
1754 * 0 before others.
1755 */
1756 j = 0;
1757 io_callbacks[j] = in_intr;
2770c5ea 1758 io_callbacks[j + 1] = out_intr;
17634ba2
AS
1759 io_names[j] = "input";
1760 io_names[j + 1] = "output";
1761 j += 2;
1762
1763 if (use_multiport(portdev)) {
1764 io_callbacks[j] = control_intr;
1765 io_callbacks[j + 1] = NULL;
1766 io_names[j] = "control-i";
1767 io_names[j + 1] = "control-o";
1768
1769 for (i = 1; i < nr_ports; i++) {
1770 j += 2;
1771 io_callbacks[j] = in_intr;
2770c5ea 1772 io_callbacks[j + 1] = out_intr;
17634ba2
AS
1773 io_names[j] = "input";
1774 io_names[j + 1] = "output";
1775 }
1776 }
2658a79a
AS
1777 /* Find the queues. */
1778 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1779 io_callbacks,
1780 (const char **)io_names);
1781 if (err)
22e132ff 1782 goto free;
2658a79a 1783
17634ba2 1784 j = 0;
2658a79a
AS
1785 portdev->in_vqs[0] = vqs[0];
1786 portdev->out_vqs[0] = vqs[1];
17634ba2
AS
1787 j += 2;
1788 if (use_multiport(portdev)) {
1789 portdev->c_ivq = vqs[j];
1790 portdev->c_ovq = vqs[j + 1];
1791
1792 for (i = 1; i < nr_ports; i++) {
1793 j += 2;
1794 portdev->in_vqs[i] = vqs[j];
1795 portdev->out_vqs[i] = vqs[j + 1];
1796 }
1797 }
2658a79a 1798 kfree(io_names);
22e132ff 1799 kfree(io_callbacks);
2658a79a
AS
1800 kfree(vqs);
1801
1802 return 0;
1803
22e132ff 1804free:
2658a79a 1805 kfree(portdev->out_vqs);
2658a79a 1806 kfree(portdev->in_vqs);
22e132ff
JS
1807 kfree(io_names);
1808 kfree(io_callbacks);
2658a79a 1809 kfree(vqs);
22e132ff 1810
2658a79a
AS
1811 return err;
1812}
1813
fb08bd27
AS
1814static const struct file_operations portdev_fops = {
1815 .owner = THIS_MODULE,
1816};
1817
a0e2dbfc
AS
1818static void remove_vqs(struct ports_device *portdev)
1819{
1820 portdev->vdev->config->del_vqs(portdev->vdev);
1821 kfree(portdev->in_vqs);
1822 kfree(portdev->out_vqs);
1823}
1824
1825static void remove_controlq_data(struct ports_device *portdev)
1826{
1827 struct port_buffer *buf;
1828 unsigned int len;
1829
1830 if (!use_multiport(portdev))
1831 return;
1832
1833 while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1834 free_buf(buf);
1835
1836 while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1837 free_buf(buf);
1838}
1839
1c85bf35
AS
1840/*
1841 * Once we're further in boot, we get probed like any other virtio
1842 * device.
17634ba2
AS
1843 *
1844 * If the host also supports multiple console ports, we check the
1845 * config space to see how many ports the host has spawned. We
1846 * initialize each port found.
1c85bf35
AS
1847 */
1848static int __devinit virtcons_probe(struct virtio_device *vdev)
1849{
1c85bf35
AS
1850 struct ports_device *portdev;
1851 int err;
17634ba2 1852 bool multiport;
5e38483b
CB
1853 bool early = early_put_chars != NULL;
1854
1855 /* Ensure to read early_put_chars now */
1856 barrier();
1c85bf35
AS
1857
1858 portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
1859 if (!portdev) {
1860 err = -ENOMEM;
1861 goto fail;
1862 }
1863
1864 /* Attach this portdev to this virtio_device, and vice-versa. */
1865 portdev->vdev = vdev;
1866 vdev->priv = portdev;
1867
fb08bd27
AS
1868 spin_lock_irq(&pdrvdata_lock);
1869 portdev->drv_index = pdrvdata.index++;
1870 spin_unlock_irq(&pdrvdata_lock);
1871
1872 portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1873 &portdev_fops);
1874 if (portdev->chr_major < 0) {
1875 dev_err(&vdev->dev,
1876 "Error %d registering chrdev for device %u\n",
1877 portdev->chr_major, portdev->drv_index);
1878 err = portdev->chr_major;
1879 goto free;
1880 }
1881
17634ba2 1882 multiport = false;
17634ba2 1883 portdev->config.max_nr_ports = 1;
51c6d61a
SL
1884 if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
1885 offsetof(struct virtio_console_config,
1886 max_nr_ports),
1887 &portdev->config.max_nr_ports) == 0)
17634ba2 1888 multiport = true;
17634ba2 1889
2658a79a
AS
1890 err = init_vqs(portdev);
1891 if (err < 0) {
1892 dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
fb08bd27 1893 goto free_chrdev;
2658a79a 1894 }
1c85bf35 1895
17634ba2
AS
1896 spin_lock_init(&portdev->ports_lock);
1897 INIT_LIST_HEAD(&portdev->ports);
1898
1899 if (multiport) {
335a64a5
AS
1900 unsigned int nr_added_bufs;
1901
17634ba2
AS
1902 spin_lock_init(&portdev->cvq_lock);
1903 INIT_WORK(&portdev->control_work, &control_work_handler);
1904
335a64a5
AS
1905 nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
1906 if (!nr_added_bufs) {
22a29eac
AS
1907 dev_err(&vdev->dev,
1908 "Error allocating buffers for control queue\n");
1909 err = -ENOMEM;
1910 goto free_vqs;
1911 }
1d05160b
AS
1912 } else {
1913 /*
1914 * For backward compatibility: Create a console port
1915 * if we're running on older host.
1916 */
1917 add_port(portdev, 0);
17634ba2
AS
1918 }
1919
6bdf2afd
AS
1920 spin_lock_irq(&pdrvdata_lock);
1921 list_add_tail(&portdev->list, &pdrvdata.portdevs);
1922 spin_unlock_irq(&pdrvdata_lock);
1923
f909f850
AS
1924 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1925 VIRTIO_CONSOLE_DEVICE_READY, 1);
5e38483b
CB
1926
1927 /*
1928 * If there was an early virtio console, assume that there are no
1929 * other consoles. We need to wait until the hvc_alloc matches the
1930 * hvc_instantiate, otherwise tty_open will complain, resulting in
1931 * a "Warning: unable to open an initial console" boot failure.
1932 * Without multiport this is done in add_port above. With multiport
1933 * this might take some host<->guest communication - thus we have to
1934 * wait.
1935 */
1936 if (multiport && early)
1937 wait_for_completion(&early_console_added);
1938
31610434
RR
1939 return 0;
1940
22a29eac 1941free_vqs:
0643e4c6
JL
1942 /* The host might want to notify mgmt sw about device add failure */
1943 __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1944 VIRTIO_CONSOLE_DEVICE_READY, 0);
a0e2dbfc 1945 remove_vqs(portdev);
fb08bd27
AS
1946free_chrdev:
1947 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
31610434 1948free:
1c85bf35 1949 kfree(portdev);
31610434
RR
1950fail:
1951 return err;
1952}
1953
7177876f
AS
1954static void virtcons_remove(struct virtio_device *vdev)
1955{
1956 struct ports_device *portdev;
1957 struct port *port, *port2;
7177876f
AS
1958
1959 portdev = vdev->priv;
1960
6bdf2afd
AS
1961 spin_lock_irq(&pdrvdata_lock);
1962 list_del(&portdev->list);
1963 spin_unlock_irq(&pdrvdata_lock);
1964
02238959
AS
1965 /* Disable interrupts for vqs */
1966 vdev->config->reset(vdev);
1967 /* Finish up work that's lined up */
7177876f 1968 cancel_work_sync(&portdev->control_work);
7177876f
AS
1969
1970 list_for_each_entry_safe(port, port2, &portdev->ports, list)
b353a6b8 1971 unplug_port(port);
7177876f
AS
1972
1973 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1974
e062013c
AS
1975 /*
1976 * When yanking out a device, we immediately lose the
1977 * (device-side) queues. So there's no point in keeping the
1978 * guest side around till we drop our final reference. This
1979 * also means that any ports which are in an open state will
1980 * have to just stop using the port, as the vqs are going
1981 * away.
1982 */
a0e2dbfc
AS
1983 remove_controlq_data(portdev);
1984 remove_vqs(portdev);
7177876f
AS
1985 kfree(portdev);
1986}
1987
31610434
RR
1988static struct virtio_device_id id_table[] = {
1989 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1990 { 0 },
1991};
1992
c2983458
CB
1993static unsigned int features[] = {
1994 VIRTIO_CONSOLE_F_SIZE,
b99fa815 1995 VIRTIO_CONSOLE_F_MULTIPORT,
c2983458
CB
1996};
1997
2b8f41d8
AS
1998#ifdef CONFIG_PM
1999static int virtcons_freeze(struct virtio_device *vdev)
2000{
2001 struct ports_device *portdev;
2002 struct port *port;
2003
2004 portdev = vdev->priv;
2005
2006 vdev->config->reset(vdev);
2007
c743d09d 2008 virtqueue_disable_cb(portdev->c_ivq);
2b8f41d8 2009 cancel_work_sync(&portdev->control_work);
c743d09d
AS
2010 /*
2011 * Once more: if control_work_handler() was running, it would
2012 * enable the cb as the last step.
2013 */
2014 virtqueue_disable_cb(portdev->c_ivq);
2b8f41d8
AS
2015 remove_controlq_data(portdev);
2016
2017 list_for_each_entry(port, &portdev->ports, list) {
c743d09d
AS
2018 virtqueue_disable_cb(port->in_vq);
2019 virtqueue_disable_cb(port->out_vq);
2b8f41d8
AS
2020 /*
2021 * We'll ask the host later if the new invocation has
2022 * the port opened or closed.
2023 */
2024 port->host_connected = false;
2025 remove_port_data(port);
2026 }
2027 remove_vqs(portdev);
2028
2029 return 0;
2030}
2031
2032static int virtcons_restore(struct virtio_device *vdev)
2033{
2034 struct ports_device *portdev;
2035 struct port *port;
2036 int ret;
2037
2038 portdev = vdev->priv;
2039
2040 ret = init_vqs(portdev);
2041 if (ret)
2042 return ret;
2043
2044 if (use_multiport(portdev))
2045 fill_queue(portdev->c_ivq, &portdev->cvq_lock);
2046
2047 list_for_each_entry(port, &portdev->ports, list) {
2048 port->in_vq = portdev->in_vqs[port->id];
2049 port->out_vq = portdev->out_vqs[port->id];
2050
2051 fill_queue(port->in_vq, &port->inbuf_lock);
2052
2053 /* Get port open/close status on the host */
2054 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
fa8b66cc
AS
2055
2056 /*
2057 * If a port was open at the time of suspending, we
2058 * have to let the host know that it's still open.
2059 */
2060 if (port->guest_connected)
2061 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
2b8f41d8
AS
2062 }
2063 return 0;
2064}
2065#endif
2066
31610434 2067static struct virtio_driver virtio_console = {
c2983458
CB
2068 .feature_table = features,
2069 .feature_table_size = ARRAY_SIZE(features),
31610434
RR
2070 .driver.name = KBUILD_MODNAME,
2071 .driver.owner = THIS_MODULE,
2072 .id_table = id_table,
2073 .probe = virtcons_probe,
7177876f 2074 .remove = virtcons_remove,
7f5d810d 2075 .config_changed = config_intr,
2b8f41d8
AS
2076#ifdef CONFIG_PM
2077 .freeze = virtcons_freeze,
2078 .restore = virtcons_restore,
2079#endif
31610434
RR
2080};
2081
2082static int __init init(void)
2083{
fb08bd27
AS
2084 int err;
2085
2086 pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
2087 if (IS_ERR(pdrvdata.class)) {
2088 err = PTR_ERR(pdrvdata.class);
2089 pr_err("Error %d creating virtio-ports class\n", err);
2090 return err;
2091 }
d99393ef
AS
2092
2093 pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
2094 if (!pdrvdata.debugfs_dir) {
2095 pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
2096 PTR_ERR(pdrvdata.debugfs_dir));
2097 }
38edf58d 2098 INIT_LIST_HEAD(&pdrvdata.consoles);
6bdf2afd 2099 INIT_LIST_HEAD(&pdrvdata.portdevs);
38edf58d 2100
31610434
RR
2101 return register_virtio_driver(&virtio_console);
2102}
7177876f
AS
2103
2104static void __exit fini(void)
2105{
2106 unregister_virtio_driver(&virtio_console);
2107
2108 class_destroy(pdrvdata.class);
2109 if (pdrvdata.debugfs_dir)
2110 debugfs_remove_recursive(pdrvdata.debugfs_dir);
2111}
31610434 2112module_init(init);
7177876f 2113module_exit(fini);
31610434
RR
2114
2115MODULE_DEVICE_TABLE(virtio, id_table);
2116MODULE_DESCRIPTION("Virtio console driver");
2117MODULE_LICENSE("GPL");