source: src/linux/universal/linux-3.18/drivers/char/virtio_console.c @ 31885

Last change on this file since 31885 was 31885, checked in by brainslayer, 3 months ago

update

File size: 55.8 KB
1/*
2 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
3 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
4 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19 */
20#include <linux/cdev.h>
21#include <linux/debugfs.h>
22#include <linux/completion.h>
23#include <linux/device.h>
24#include <linux/err.h>
25#include <linux/freezer.h>
26#include <linux/fs.h>
27#include <linux/splice.h>
28#include <linux/pagemap.h>
29#include <linux/init.h>
30#include <linux/list.h>
31#include <linux/poll.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35#include <linux/virtio.h>
36#include <linux/virtio_console.h>
37#include <linux/wait.h>
38#include <linux/workqueue.h>
39#include <linux/module.h>
40#include <linux/dma-mapping.h>
41#include <linux/kconfig.h>
42#include "../tty/hvc/hvc_console.h"
43
44#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
45
46/*
47 * This is a global struct for storing common data for all the devices
48 * this driver handles.
49 *
50 * Mainly, it has a linked list for all the consoles in one place so
51 * that callbacks from hvc for get_chars(), put_chars() work properly
52 * across multiple devices and multiple ports per device.
53 */
54struct ports_driver_data {
55        /* Used for registering chardevs */
56        struct class *class;
57
58        /* Used for exporting per-port information to debugfs */
59        struct dentry *debugfs_dir;
60
61        /* List of all the devices we're handling */
62        struct list_head portdevs;
63
64        /*
65         * This is used to keep track of the number of hvc consoles
66         * spawned by this driver.  This number is given as the first
67         * argument to hvc_alloc().  To correctly map an initial
68         * console spawned via hvc_instantiate to the console being
69         * hooked up via hvc_alloc, we need to pass the same vtermno.
70         *
71         * We also just assume the first console being initialised was
72         * the first one that got used as the initial console.
73         */
74        unsigned int next_vtermno;
75
76        /* All the console devices handled by this driver */
77        struct list_head consoles;
78};
79static struct ports_driver_data pdrvdata;
80
81static DEFINE_SPINLOCK(pdrvdata_lock);
82static DECLARE_COMPLETION(early_console_added);
83
84/* This struct holds information that's relevant only for console ports */
85struct console {
86        /* We'll place all consoles in a list in the pdrvdata struct */
87        struct list_head list;
88
89        /* The hvc device associated with this console port */
90        struct hvc_struct *hvc;
91
92        /* The size of the console */
93        struct winsize ws;
94
95        /*
96         * This is the number that we used to register with hvc in
97         * hvc_instantiate() and hvc_alloc(); it is the number passed
98         * back to us by the hvc callbacks so that we can
99         * differentiate between the console ports handled by
100         * this driver
101         */
102        u32 vtermno;
103};
104
105struct port_buffer {
106        char *buf;
107
108        /* size of the buffer in *buf above */
109        size_t size;
110
111        /* used length of the buffer */
112        size_t len;
113        /* offset in the buf from which to consume data */
114        size_t offset;
115
116        /* DMA address of buffer */
117        dma_addr_t dma;
118
119        /* Device we got DMA memory from */
120        struct device *dev;
121
122        /* List of pending dma buffers to free */
123        struct list_head list;
124
125        /* If sgpages == 0 then buf is used */
126        unsigned int sgpages;
127
128        /* sg is used if sgpages > 0. sg must be the last field in this struct */
129        struct scatterlist sg[0];
130};
131
132/*
133 * This is a per-device struct that stores data common to all the
134 * ports for that device (vdev->priv).
135 */
136struct ports_device {
137        /* Next portdev in the list, head is in the pdrvdata struct */
138        struct list_head list;
139
140        /*
141         * Workqueue handlers where we process deferred work after
142         * notification
143         */
144        struct work_struct control_work;
145        struct work_struct config_work;
146
147        struct list_head ports;
148
149        /* To protect the list of ports */
150        spinlock_t ports_lock;
151
152        /* To protect the vq operations for the control channel */
153        spinlock_t c_ivq_lock;
154        spinlock_t c_ovq_lock;
155
156        /* The current config space is stored here */
157        struct virtio_console_config config;
158
159        /* The virtio device we're associated with */
160        struct virtio_device *vdev;
161
162        /*
163         * A couple of virtqueues for the control channel: one for
164         * guest->host transfers, one for host->guest transfers
165         */
166        struct virtqueue *c_ivq, *c_ovq;
167
168        /* Array of per-port IO virtqueues */
169        struct virtqueue **in_vqs, **out_vqs;
170
171        /* Major number for this device.  Ports will be created as minors. */
172        int chr_major;
173};
174
175struct port_stats {
176        unsigned long bytes_sent, bytes_received, bytes_discarded;
177};
178
179/* This struct holds the per-port data */
180struct port {
181        /* Next port in the list, head is in the ports_device */
182        struct list_head list;
183
184        /* Pointer to the parent virtio_console device */
185        struct ports_device *portdev;
186
187        /* The current buffer from which data has to be fed to readers */
188        struct port_buffer *inbuf;
189
190        /*
191         * To protect the operations on the in_vq associated with this
192         * port.  Has to be a spinlock because it can be called from
193         * interrupt context (get_char()).
194         */
195        spinlock_t inbuf_lock;
196
197        /* Protect the operations on the out_vq. */
198        spinlock_t outvq_lock;
199
200        /* The IO vqs for this port */
201        struct virtqueue *in_vq, *out_vq;
202
203        /* File in the debugfs directory that exposes this port's information */
204        struct dentry *debugfs_file;
205
206        /*
207         * Keep count of the bytes sent, received and discarded for
208         * this port for accounting and debugging purposes.  These
209         * counts are not reset across port open / close events.
210         */
211        struct port_stats stats;
212
213        /*
214         * The entries in this struct will be valid if this port is
215         * hooked up to an hvc console
216         */
217        struct console cons;
218
219        /* Each port associates with a separate char device */
220        struct cdev *cdev;
221        struct device *dev;
222
223        /* Reference-counting to handle port hot-unplugs and file operations */
224        struct kref kref;
225
226        /* A waitqueue for poll() or blocking read operations */
227        wait_queue_head_t waitqueue;
228
229        /* The 'name' of the port that we expose via sysfs properties */
230        char *name;
231
232        /* We can notify apps of host connect / disconnect events via SIGIO */
233        struct fasync_struct *async_queue;
234
235        /* The 'id' to identify the port with the Host */
236        u32 id;
237
238        bool outvq_full;
239
240        /* Is the host device open? */
241        bool host_connected;
242
243        /* We should allow only one process to open a port */
244        bool guest_connected;
245};
246
247/* This is the very early arch-specified put chars function. */
248static int (*early_put_chars)(u32, const char *, int);
249
250static struct port *find_port_by_vtermno(u32 vtermno)
251{
252        struct port *port;
253        struct console *cons;
254        unsigned long flags;
255
256        spin_lock_irqsave(&pdrvdata_lock, flags);
257        list_for_each_entry(cons, &pdrvdata.consoles, list) {
258                if (cons->vtermno == vtermno) {
259                        port = container_of(cons, struct port, cons);
260                        goto out;
261                }
262        }
263        port = NULL;
264out:
265        spin_unlock_irqrestore(&pdrvdata_lock, flags);
266        return port;
267}
268
269static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
270                                                 dev_t dev)
271{
272        struct port *port;
273        unsigned long flags;
274
275        spin_lock_irqsave(&portdev->ports_lock, flags);
276        list_for_each_entry(port, &portdev->ports, list) {
277                if (port->cdev->dev == dev) {
278                        kref_get(&port->kref);
279                        goto out;
280                }
281        }
282        port = NULL;
283out:
284        spin_unlock_irqrestore(&portdev->ports_lock, flags);
285
286        return port;
287}
288
289static struct port *find_port_by_devt(dev_t dev)
290{
291        struct ports_device *portdev;
292        struct port *port;
293        unsigned long flags;
294
295        spin_lock_irqsave(&pdrvdata_lock, flags);
296        list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
297                port = find_port_by_devt_in_portdev(portdev, dev);
298                if (port)
299                        goto out;
300        }
301        port = NULL;
302out:
303        spin_unlock_irqrestore(&pdrvdata_lock, flags);
304        return port;
305}
306
307static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
308{
309        struct port *port;
310        unsigned long flags;
311
312        spin_lock_irqsave(&portdev->ports_lock, flags);
313        list_for_each_entry(port, &portdev->ports, list)
314                if (port->id == id)
315                        goto out;
316        port = NULL;
317out:
318        spin_unlock_irqrestore(&portdev->ports_lock, flags);
319
320        return port;
321}
322
323static struct port *find_port_by_vq(struct ports_device *portdev,
324                                    struct virtqueue *vq)
325{
326        struct port *port;
327        unsigned long flags;
328
329        spin_lock_irqsave(&portdev->ports_lock, flags);
330        list_for_each_entry(port, &portdev->ports, list)
331                if (port->in_vq == vq || port->out_vq == vq)
332                        goto out;
333        port = NULL;
334out:
335        spin_unlock_irqrestore(&portdev->ports_lock, flags);
336        return port;
337}
338
339static bool is_console_port(struct port *port)
340{
341        if (port->cons.hvc)
342                return true;
343        return false;
344}
345
346static bool is_rproc_serial(const struct virtio_device *vdev)
347{
348        return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
349}
350
351static inline bool use_multiport(struct ports_device *portdev)
352{
353        /*
354         * This condition can be true when put_chars is called from
355         * early_init
356         */
357        if (!portdev->vdev)
358                return 0;
359        return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
360}
361
362static DEFINE_SPINLOCK(dma_bufs_lock);
363static LIST_HEAD(pending_free_dma_bufs);
364
365static void free_buf(struct port_buffer *buf, bool can_sleep)
366{
367        unsigned int i;
368
369        for (i = 0; i < buf->sgpages; i++) {
370                struct page *page = sg_page(&buf->sg[i]);
371                if (!page)
372                        break;
373                put_page(page);
374        }
375
376        if (!buf->dev) {
377                kfree(buf->buf);
378        } else if (is_rproc_enabled) {
379                unsigned long flags;
380
381                /* dma_free_coherent requires interrupts to be enabled. */
382                if (!can_sleep) {
383                        /* queue up dma-buffers to be freed later */
384                        spin_lock_irqsave(&dma_bufs_lock, flags);
385                        list_add_tail(&buf->list, &pending_free_dma_bufs);
386                        spin_unlock_irqrestore(&dma_bufs_lock, flags);
387                        return;
388                }
389                dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
390
391                /* Release device refcnt and allow it to be freed */
392                put_device(buf->dev);
393        }
394
395        kfree(buf);
396}
397
398static void reclaim_dma_bufs(void)
399{
400        unsigned long flags;
401        struct port_buffer *buf, *tmp;
402        LIST_HEAD(tmp_list);
403
404        if (list_empty(&pending_free_dma_bufs))
405                return;
406
407        /* Create a copy of the pending_free_dma_bufs while holding the lock */
408        spin_lock_irqsave(&dma_bufs_lock, flags);
409        list_cut_position(&tmp_list, &pending_free_dma_bufs,
410                          pending_free_dma_bufs.prev);
411        spin_unlock_irqrestore(&dma_bufs_lock, flags);
412
413        /* Release the dma buffers with the lock dropped and irqs enabled */
414        list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
415                list_del(&buf->list);
416                free_buf(buf, true);
417        }
418}
419
420static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
421                                     int pages)
422{
423        struct port_buffer *buf;
424
425        reclaim_dma_bufs();
426
427        /*
428         * Allocate buffer and the sg list. The sg list array is allocated
429         * directly after the port_buffer struct.
430         */
431        buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
432                      GFP_KERNEL);
433        if (!buf)
434                goto fail;
435
436        buf->sgpages = pages;
437        if (pages > 0) {
438                buf->dev = NULL;
439                buf->buf = NULL;
440                return buf;
441        }
442
443        if (is_rproc_serial(vq->vdev)) {
444                /*
445                 * Allocate DMA memory from ancestor. When a virtio
446                 * device is created by remoteproc, the DMA memory is
447                 * associated with the grandparent device:
448                 * vdev => rproc => platform-dev.
449                 * The code here would have been less quirky if
450                 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
451                 * in dma-coherent.c
452                 */
453                if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
454                        goto free_buf;
455                buf->dev = vq->vdev->dev.parent->parent;
456
457                /* Increase device refcnt to avoid freeing it */
458                get_device(buf->dev);
459                buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
460                                              GFP_KERNEL);
461        } else {
462                buf->dev = NULL;
463                buf->buf = kmalloc(buf_size, GFP_KERNEL);
464        }
465
466        if (!buf->buf)
467                goto free_buf;
468        buf->len = 0;
469        buf->offset = 0;
470        buf->size = buf_size;
471        return buf;
472
473free_buf:
474        kfree(buf);
475fail:
476        return NULL;
477}
478
479/* Callers should take appropriate locks */
480static struct port_buffer *get_inbuf(struct port *port)
481{
482        struct port_buffer *buf;
483        unsigned int len;
484
485        if (port->inbuf)
486                return port->inbuf;
487
488        buf = virtqueue_get_buf(port->in_vq, &len);
489        if (buf) {
490                buf->len = len;
491                buf->offset = 0;
492                port->stats.bytes_received += len;
493        }
494        return buf;
495}
496
497/*
498 * Create a scatter-gather list representing our input buffer and put
499 * it in the queue.
500 *
501 * Callers should take appropriate locks.
502 */
503static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
504{
505        struct scatterlist sg[1];
506        int ret;
507
508        sg_init_one(sg, buf->buf, buf->size);
509
510        ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
511        virtqueue_kick(vq);
512        if (!ret)
513                ret = vq->num_free;
514        return ret;
515}
516
517/* Discard any unread data this port has. Callers must take appropriate locks. */
518static void discard_port_data(struct port *port)
519{
520        struct port_buffer *buf;
521        unsigned int err;
522
523        if (!port->portdev) {
524                /* Device has been unplugged.  vqs are already gone. */
525                return;
526        }
527        buf = get_inbuf(port);
528
529        err = 0;
530        while (buf) {
531                port->stats.bytes_discarded += buf->len - buf->offset;
532                if (add_inbuf(port->in_vq, buf) < 0) {
533                        err++;
534                        free_buf(buf, false);
535                }
536                port->inbuf = NULL;
537                buf = get_inbuf(port);
538        }
539        if (err)
540                dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
541                         err);
542}
543
544static bool port_has_data(struct port *port)
545{
546        unsigned long flags;
547        bool ret;
548
549        ret = false;
550        spin_lock_irqsave(&port->inbuf_lock, flags);
551        port->inbuf = get_inbuf(port);
552        if (port->inbuf)
553                ret = true;
554
555        spin_unlock_irqrestore(&port->inbuf_lock, flags);
556        return ret;
557}
558
559static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
560                                  unsigned int event, unsigned int value)
561{
562        struct scatterlist sg[1];
563        struct virtio_console_control cpkt;
564        struct virtqueue *vq;
565        unsigned int len;
566
567        if (!use_multiport(portdev))
568                return 0;
569
570        cpkt.id = port_id;
571        cpkt.event = event;
572        cpkt.value = value;
573
574        vq = portdev->c_ovq;
575
576        sg_init_one(sg, &cpkt, sizeof(cpkt));
577
578        spin_lock(&portdev->c_ovq_lock);
579        if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
580                virtqueue_kick(vq);
581                while (!virtqueue_get_buf(vq, &len)
582                        && !virtqueue_is_broken(vq))
583                        cpu_relax();
584        }
585        spin_unlock(&portdev->c_ovq_lock);
586        return 0;
587}
588
589static ssize_t send_control_msg(struct port *port, unsigned int event,
590                                unsigned int value)
591{
592        /* Did the port get unplugged before userspace closed it? */
593        if (port->portdev)
594                return __send_control_msg(port->portdev, port->id, event, value);
595        return 0;
596}
597
598
599/* Callers must take the port->outvq_lock */
600static void reclaim_consumed_buffers(struct port *port)
601{
602        struct port_buffer *buf;
603        unsigned int len;
604
605        if (!port->portdev) {
606                /* Device has been unplugged.  vqs are already gone. */
607                return;
608        }
609        while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
610                free_buf(buf, false);
611                port->outvq_full = false;
612        }
613}
614
615static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
616                              int nents, size_t in_count,
617                              void *data, bool nonblock)
618{
619        struct virtqueue *out_vq;
620        int err;
621        unsigned long flags;
622        unsigned int len;
623
624        out_vq = port->out_vq;
625
626        spin_lock_irqsave(&port->outvq_lock, flags);
627
628        reclaim_consumed_buffers(port);
629
630        err = virtqueue_add_outbuf(out_vq, sg, nents, data, GFP_ATOMIC);
631
632        /* Tell Host to go! */
633        virtqueue_kick(out_vq);
634
635        if (err) {
636                in_count = 0;
637                goto done;
638        }
639
640        if (out_vq->num_free == 0)
641                port->outvq_full = true;
642
643        if (nonblock)
644                goto done;
645
646        /*
647         * Wait till the host acknowledges it pushed out the data we
648         * sent.  This is done for data from the hvc_console; the tty
649         * operations are performed with spinlocks held so we can't
650         * sleep here.  An alternative would be to copy the data to a
651         * buffer and relax the spinning requirement.  The downside is
652         * we need to kmalloc a GFP_ATOMIC buffer each time the
653         * console driver writes something out.
654         */
655        while (!virtqueue_get_buf(out_vq, &len)
656                && !virtqueue_is_broken(out_vq))
657                cpu_relax();
658done:
659        spin_unlock_irqrestore(&port->outvq_lock, flags);
660
661        port->stats.bytes_sent += in_count;
662        /*
663         * We're expected to return the amount of data we wrote -- all
664         * of it
665         */
666        return in_count;
667}
668
669/*
670 * Give out the data that's requested from the buffer that we have
671 * queued up.
672 */
673static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
674                            bool to_user)
675{
676        struct port_buffer *buf;
677        unsigned long flags;
678
679        if (!out_count || !port_has_data(port))
680                return 0;
681
682        buf = port->inbuf;
683        out_count = min(out_count, buf->len - buf->offset);
684
685        if (to_user) {
686                ssize_t ret;
687
688                ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
689                if (ret)
690                        return -EFAULT;
691        } else {
692                memcpy(out_buf, buf->buf + buf->offset, out_count);
693        }
694
695        buf->offset += out_count;
696
697        if (buf->offset == buf->len) {
698                /*
699                 * We're done using all the data in this buffer.
700                 * Re-queue so that the Host can send us more data.
701                 */
702                spin_lock_irqsave(&port->inbuf_lock, flags);
703                port->inbuf = NULL;
704
705                if (add_inbuf(port->in_vq, buf) < 0)
706                        dev_warn(port->dev, "failed add_buf\n");
707
708                spin_unlock_irqrestore(&port->inbuf_lock, flags);
709        }
710        /* Return the number of bytes actually copied */
711        return out_count;
712}
713
714/* The condition that must be true for polling to end */
715static bool will_read_block(struct port *port)
716{
717        if (!port->guest_connected) {
718                /* Port got hot-unplugged. Let's exit. */
719                return false;
720        }
721        return !port_has_data(port) && port->host_connected;
722}
723
724static bool will_write_block(struct port *port)
725{
726        bool ret;
727
728        if (!port->guest_connected) {
729                /* Port got hot-unplugged. Let's exit. */
730                return false;
731        }
732        if (!port->host_connected)
733                return true;
734
735        spin_lock_irq(&port->outvq_lock);
736        /*
737         * Check if the Host has consumed any buffers since we last
738         * sent data (this is only applicable for nonblocking ports).
739         */
740        reclaim_consumed_buffers(port);
741        ret = port->outvq_full;
742        spin_unlock_irq(&port->outvq_lock);
743
744        return ret;
745}
746
747static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
748                              size_t count, loff_t *offp)
749{
750        struct port *port;
751        ssize_t ret;
752
753        port = filp->private_data;
754
755        /* Port is hot-unplugged. */
756        if (!port->guest_connected)
757                return -ENODEV;
758
759        if (!port_has_data(port)) {
760                /*
761                 * If nothing's connected on the host just return 0 in
762                 * case of list_empty; this tells the userspace app
763                 * that there's no connection
764                 */
765                if (!port->host_connected)
766                        return 0;
767                if (filp->f_flags & O_NONBLOCK)
768                        return -EAGAIN;
769
770                ret = wait_event_freezable(port->waitqueue,
771                                           !will_read_block(port));
772                if (ret < 0)
773                        return ret;
774        }
775        /* Port got hot-unplugged while we were waiting above. */
776        if (!port->guest_connected)
777                return -ENODEV;
778        /*
779         * We could've received a disconnection message while we were
780         * waiting for more data.
781         *
782         * This check is not combined with the if() statement above as we
783         * might receive some data as well as the host could get
784         * disconnected after we got woken up from our wait.  So we
785         * really want to give off whatever data we have and only then
786         * check for host_connected.
787         */
788        if (!port_has_data(port) && !port->host_connected)
789                return 0;
790
791        return fill_readbuf(port, ubuf, count, true);
792}
793
794static int wait_port_writable(struct port *port, bool nonblock)
795{
796        int ret;
797
798        if (will_write_block(port)) {
799                if (nonblock)
800                        return -EAGAIN;
801
802                ret = wait_event_freezable(port->waitqueue,
803                                           !will_write_block(port));
804                if (ret < 0)
805                        return ret;
806        }
807        /* Port got hot-unplugged. */
808        if (!port->guest_connected)
809                return -ENODEV;
810
811        return 0;
812}
813
814static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
815                               size_t count, loff_t *offp)
816{
817        struct port *port;
818        struct port_buffer *buf;
819        ssize_t ret;
820        bool nonblock;
821        struct scatterlist sg[1];
822
823        /* Userspace could be out to fool us */
824        if (!count)
825                return 0;
826
827        port = filp->private_data;
828
829        nonblock = filp->f_flags & O_NONBLOCK;
830
831        ret = wait_port_writable(port, nonblock);
832        if (ret < 0)
833                return ret;
834
835        count = min((size_t)(32 * 1024), count);
836
837        buf = alloc_buf(port->out_vq, count, 0);
838        if (!buf)
839                return -ENOMEM;
840
841        ret = copy_from_user(buf->buf, ubuf, count);
842        if (ret) {
843                ret = -EFAULT;
844                goto free_buf;
845        }
846
847        /*
848         * We now ask send_buf() to not spin for generic ports -- we
849         * can re-use the same code path that non-blocking file
850         * descriptors take for blocking file descriptors since the
851         * wait is already done and we're certain the write will go
852         * through to the host.
853         */
854        nonblock = true;
855        sg_init_one(sg, buf->buf, count);
856        ret = __send_to_port(port, sg, 1, count, buf, nonblock);
857
858        if (nonblock && ret > 0)
859                goto out;
860
861free_buf:
862        free_buf(buf, true);
863out:
864        return ret;
865}
866
867struct sg_list {
868        unsigned int n;
869        unsigned int size;
870        size_t len;
871        struct scatterlist *sg;
872};
873
874static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
875                        struct splice_desc *sd)
876{
877        struct sg_list *sgl = sd->u.data;
878        unsigned int offset, len;
879
880        if (sgl->n == sgl->size)
881                return 0;
882
883        /* Try to lock this page */
884        if (buf->ops->steal(pipe, buf) == 0) {
885                /* Get reference and unlock page for moving */
886                get_page(buf->page);
887                unlock_page(buf->page);
888
889                len = min(buf->len, sd->len);
890                sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
891        } else {
892                /* Fall back to copying the page */
893                struct page *page = alloc_page(GFP_KERNEL);
894                char *src;
895
896                if (!page)
897                        return -ENOMEM;
898
899                offset = sd->pos & ~PAGE_MASK;
900
901                len = sd->len;
902                if (len + offset > PAGE_SIZE)
903                        len = PAGE_SIZE - offset;
904
905                src = kmap_atomic(buf->page);
906                memcpy(page_address(page) + offset, src + buf->offset, len);
907                kunmap_atomic(src);
908
909                sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
910        }
911        sgl->n++;
912        sgl->len += len;
913
914        return len;
915}
916
917/* Faster zero-copy write by splicing */
918static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
919                                      struct file *filp, loff_t *ppos,
920                                      size_t len, unsigned int flags)
921{
922        struct port *port = filp->private_data;
923        struct sg_list sgl;
924        ssize_t ret;
925        struct port_buffer *buf;
926        struct splice_desc sd = {
927                .total_len = len,
928                .flags = flags,
929                .pos = *ppos,
930                .u.data = &sgl,
931        };
932
933        /*
934         * Rproc_serial does not yet support splice. To support splice
935         * pipe_to_sg() must allocate dma-buffers and copy content from
936         * regular pages to dma pages. And alloc_buf and free_buf must
937         * support allocating and freeing such a list of dma-buffers.
938         */
939        if (is_rproc_serial(port->out_vq->vdev))
940                return -EINVAL;
941
942        /*
943         * pipe->nrbufs == 0 means there is no data to transfer,
944         * so in that case just return 0.
945         */
946        pipe_lock(pipe);
947        if (!pipe->nrbufs) {
948                ret = 0;
949                goto error_out;
950        }
951
952        ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
953        if (ret < 0)
954                goto error_out;
955
956        buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
957        if (!buf) {
958                ret = -ENOMEM;
959                goto error_out;
960        }
961
962        sgl.n = 0;
963        sgl.len = 0;
964        sgl.size = pipe->nrbufs;
965        sgl.sg = buf->sg;
966        sg_init_table(sgl.sg, sgl.size);
967        ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
968        pipe_unlock(pipe);
969        if (likely(ret > 0))
970                ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
971
972        if (unlikely(ret <= 0))
973                free_buf(buf, true);
974        return ret;
975
976error_out:
977        pipe_unlock(pipe);
978        return ret;
979}
980
981static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
982{
983        struct port *port;
984        unsigned int ret;
985
986        port = filp->private_data;
987        poll_wait(filp, &port->waitqueue, wait);
988
989        if (!port->guest_connected) {
990                /* Port got unplugged */
991                return POLLHUP;
992        }
993        ret = 0;
994        if (!will_read_block(port))
995                ret |= POLLIN | POLLRDNORM;
996        if (!will_write_block(port))
997                ret |= POLLOUT;
998        if (!port->host_connected)
999                ret |= POLLHUP;
1000
1001        return ret;
1002}
1003
1004static void remove_port(struct kref *kref);
1005
1006static int port_fops_release(struct inode *inode, struct file *filp)
1007{
1008        struct port *port;
1009
1010        port = filp->private_data;
1011
1012        /* Notify host of port being closed */
1013        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
1014
1015        spin_lock_irq(&port->inbuf_lock);
1016        port->guest_connected = false;
1017
1018        discard_port_data(port);
1019
1020        spin_unlock_irq(&port->inbuf_lock);
1021
1022        spin_lock_irq(&port->outvq_lock);
1023        reclaim_consumed_buffers(port);
1024        spin_unlock_irq(&port->outvq_lock);
1025
1026        reclaim_dma_bufs();
1027        /*
1028         * Locks aren't necessary here as a port can't be opened after
1029         * unplug, and if a port isn't unplugged, a kref would already
1030         * exist for the port.  Plus, taking ports_lock here would
1031         * create a dependency on other locks taken by functions
1032         * inside remove_port if we're the last holder of the port,
1033         * creating many problems.
1034         */
1035        kref_put(&port->kref, remove_port);
1036
1037        return 0;
1038}
1039
1040static int port_fops_open(struct inode *inode, struct file *filp)
1041{
1042        struct cdev *cdev = inode->i_cdev;
1043        struct port *port;
1044        int ret;
1045
1046        /* We get the port with a kref here */
1047        port = find_port_by_devt(cdev->dev);
1048        if (!port) {
1049                /* Port was unplugged before we could proceed */
1050                return -ENXIO;
1051        }
1052        filp->private_data = port;
1053
1054        /*
1055         * Don't allow opening of console port devices -- that's done
1056         * via /dev/hvc
1057         */
1058        if (is_console_port(port)) {
1059                ret = -ENXIO;
1060                goto out;
1061        }
1062
1063        /* Allow only one process to open a particular port at a time */
1064        spin_lock_irq(&port->inbuf_lock);
1065        if (port->guest_connected) {
1066                spin_unlock_irq(&port->inbuf_lock);
1067                ret = -EBUSY;
1068                goto out;
1069        }
1070
1071        port->guest_connected = true;
1072        spin_unlock_irq(&port->inbuf_lock);
1073
1074        spin_lock_irq(&port->outvq_lock);
1075        /*
1076         * There is a chance that we missed reclaiming a few
1077         * buffers in the window between the port being closed
1078         * and it being opened again now.
1079         */
1080        reclaim_consumed_buffers(port);
1081        spin_unlock_irq(&port->outvq_lock);
1082
1083        nonseekable_open(inode, filp);
1084
1085        /* Notify host of port being opened */
1086        send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
1087
1088        return 0;
1089out:
1090        kref_put(&port->kref, remove_port);
1091        return ret;
1092}
1093
1094static int port_fops_fasync(int fd, struct file *filp, int mode)
1095{
1096        struct port *port;
1097
1098        port = filp->private_data;
1099        return fasync_helper(fd, filp, mode, &port->async_queue);
1100}
1101
1102/*
1103 * The file operations that we support: programs in the guest can open
1104 * a console device, read from it, write to it, poll for data and
1105 * close it.  The devices are at
1106 *   /dev/vport<device number>p<port number>
1107 */
1108static const struct file_operations port_fops = {
1109        .owner = THIS_MODULE,
1110        .open  = port_fops_open,
1111        .read  = port_fops_read,
1112        .write = port_fops_write,
1113        .splice_write = port_fops_splice_write,
1114        .poll  = port_fops_poll,
1115        .release = port_fops_release,
1116        .fasync = port_fops_fasync,
1117        .llseek = no_llseek,
1118};
1119
1120/*
1121 * The put_chars() callback is pretty straightforward.
1122 *
1123 * We turn the characters into a scatter-gather list, add it to the
1124 * output queue and then kick the Host.  Then we sit here waiting for
1125 * it to finish: inefficient in theory, but in practice
1126 * implementations will do it immediately (lguest's Launcher does).
1127 */
1128static int put_chars(u32 vtermno, const char *buf, int count)
1129{
1130        struct port *port;
1131        struct scatterlist sg[1];
1132        void *data;
1133        int ret;
1134
1135        if (unlikely(early_put_chars))
1136                return early_put_chars(vtermno, buf, count);
1137
1138        port = find_port_by_vtermno(vtermno);
1139        if (!port)
1140                return -EPIPE;
1141
1142        data = kmemdup(buf, count, GFP_ATOMIC);
1143        if (!data)
1144                return -ENOMEM;
1145
1146        sg_init_one(sg, data, count);
1147        ret = __send_to_port(port, sg, 1, count, data, false);
1148        kfree(data);
1149        return ret;
1150}
1151
1152/*
1153 * get_chars() is the callback from the hvc_console infrastructure
1154 * when an interrupt is received.
1155 *
1156 * We call out to fill_readbuf that gets us the required data from the
1157 * buffers that are queued up.
1158 */
1159static int get_chars(u32 vtermno, char *buf, int count)
1160{
1161        struct port *port;
1162
1163        /* If we've not set up the port yet, we have no input to give. */
1164        if (unlikely(early_put_chars))
1165                return 0;
1166
1167        port = find_port_by_vtermno(vtermno);
1168        if (!port)
1169                return -EPIPE;
1170
1171        /* If we don't have an input queue yet, we can't get input. */
1172        BUG_ON(!port->in_vq);
1173
1174        return fill_readbuf(port, buf, count, false);
1175}
1176
1177static void resize_console(struct port *port)
1178{
1179        struct virtio_device *vdev;
1180
1181        /* The port could have been hot-unplugged */
1182        if (!port || !is_console_port(port))
1183                return;
1184
1185        vdev = port->portdev->vdev;
1186
1187        /* Don't test F_SIZE at all if we're rproc: not a valid feature! */
1188        if (!is_rproc_serial(vdev) &&
1189            virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
1190                hvc_resize(port->cons.hvc, port->cons.ws);
1191}
1192
1193/* We set the configuration at this point, since we now have a tty */
1194static int notifier_add_vio(struct hvc_struct *hp, int data)
1195{
1196        struct port *port;
1197
1198        port = find_port_by_vtermno(hp->vtermno);
1199        if (!port)
1200                return -EINVAL;
1201
1202        hp->irq_requested = 1;
1203        resize_console(port);
1204
1205        return 0;
1206}
1207
1208static void notifier_del_vio(struct hvc_struct *hp, int data)
1209{
1210        hp->irq_requested = 0;
1211}
1212
1213/* The operations for console ports. */
1214static const struct hv_ops hv_ops = {
1215        .get_chars = get_chars,
1216        .put_chars = put_chars,
1217        .notifier_add = notifier_add_vio,
1218        .notifier_del = notifier_del_vio,
1219        .notifier_hangup = notifier_del_vio,
1220};
1221
1222/*
1223 * Console drivers are initialized very early so boot messages can go
1224 * out, so we do things slightly differently from the generic virtio
1225 * initialization of the net and block drivers.
1226 *
1227 * At this stage, the console is output-only.  It's too early to set
1228 * up a virtqueue, so we let the drivers do some boutique early-output
1229 * thing.
1230 */
1231int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
1232{
1233        early_put_chars = put_chars;
1234        return hvc_instantiate(0, 0, &hv_ops);
1235}
1236
1237static int init_port_console(struct port *port)
1238{
1239        int ret;
1240
1241        /*
1242         * The Host's telling us this port is a console port.  Hook it
1243         * up with an hvc console.
1244         *
1245         * To set up and manage our virtual console, we call
1246         * hvc_alloc().
1247         *
1248         * The first argument of hvc_alloc() is the virtual console
1249         * number.  The second argument is the parameter for the
1250         * notification mechanism (like irq number).  We currently
1251         * leave this as zero, virtqueues have implicit notifications.
1252         *
1253         * The third argument is a "struct hv_ops" containing the
1254         * put_chars(), get_chars(), notifier_add() and notifier_del()
1255         * pointers.  The final argument is the output buffer size: we
1256         * can do any size, so we put PAGE_SIZE here.
1257         */
1258        port->cons.vtermno = pdrvdata.next_vtermno;
1259
1260        port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1261        if (IS_ERR(port->cons.hvc)) {
1262                ret = PTR_ERR(port->cons.hvc);
1263                dev_err(port->dev,
1264                        "error %d allocating hvc for port\n", ret);
1265                port->cons.hvc = NULL;
1266                return ret;
1267        }
1268        spin_lock_irq(&pdrvdata_lock);
1269        pdrvdata.next_vtermno++;
1270        list_add_tail(&port->cons.list, &pdrvdata.consoles);
1271        spin_unlock_irq(&pdrvdata_lock);
1272        port->guest_connected = true;
1273
1274        /*
1275         * Start using the new console output if this is the first
1276         * console to come up.
1277         */
1278        if (early_put_chars)
1279                early_put_chars = NULL;
1280
1281        /* Notify host of port being opened */
1282        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
1283
1284        return 0;
1285}
1286
1287static ssize_t show_port_name(struct device *dev,
1288                              struct device_attribute *attr, char *buffer)
1289{
1290        struct port *port;
1291
1292        port = dev_get_drvdata(dev);
1293
1294        return sprintf(buffer, "%s\n", port->name);
1295}
1296
1297static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
1298
1299static struct attribute *port_sysfs_entries[] = {
1300        &dev_attr_name.attr,
1301        NULL
1302};
1303
1304static struct attribute_group port_attribute_group = {
1305        .name = NULL,           /* put in device directory */
1306        .attrs = port_sysfs_entries,
1307};
1308
1309static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
1310                            size_t count, loff_t *offp)
1311{
1312        struct port *port;
1313        char *buf;
1314        ssize_t ret, out_offset, out_count;
1315
1316        out_count = 1024;
1317        buf = kmalloc(out_count, GFP_KERNEL);
1318        if (!buf)
1319                return -ENOMEM;
1320
1321        port = filp->private_data;
1322        out_offset = 0;
1323        out_offset += snprintf(buf + out_offset, out_count,
1324                               "name: %s\n", port->name ? port->name : "");
1325        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1326                               "guest_connected: %d\n", port->guest_connected);
1327        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1328                               "host_connected: %d\n", port->host_connected);
1329        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1330                               "outvq_full: %d\n", port->outvq_full);
1331        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1332                               "bytes_sent: %lu\n", port->stats.bytes_sent);
1333        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1334                               "bytes_received: %lu\n",
1335                               port->stats.bytes_received);
1336        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1337                               "bytes_discarded: %lu\n",
1338                               port->stats.bytes_discarded);
1339        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1340                               "is_console: %s\n",
1341                               is_console_port(port) ? "yes" : "no");
1342        out_offset += snprintf(buf + out_offset, out_count - out_offset,
1343                               "console_vtermno: %u\n", port->cons.vtermno);
1344
1345        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
1346        kfree(buf);
1347        return ret;
1348}
1349
1350static const struct file_operations port_debugfs_ops = {
1351        .owner = THIS_MODULE,
1352        .open  = simple_open,
1353        .read  = debugfs_read,
1354};
1355
1356static void set_console_size(struct port *port, u16 rows, u16 cols)
1357{
1358        if (!port || !is_console_port(port))
1359                return;
1360
1361        port->cons.ws.ws_row = rows;
1362        port->cons.ws.ws_col = cols;
1363}
1364
1365static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1366{
1367        struct port_buffer *buf;
1368        unsigned int nr_added_bufs;
1369        int ret;
1370
1371        nr_added_bufs = 0;
1372        do {
1373                buf = alloc_buf(vq, PAGE_SIZE, 0);
1374                if (!buf)
1375                        break;
1376
1377                spin_lock_irq(lock);
1378                ret = add_inbuf(vq, buf);
1379                if (ret < 0) {
1380                        spin_unlock_irq(lock);
1381                        free_buf(buf, true);
1382                        break;
1383                }
1384                nr_added_bufs++;
1385                spin_unlock_irq(lock);
1386        } while (ret > 0);
1387
1388        return nr_added_bufs;
1389}
1390
1391static void send_sigio_to_port(struct port *port)
1392{
1393        if (port->async_queue && port->guest_connected)
1394                kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
1395}
1396
1397static int add_port(struct ports_device *portdev, u32 id)
1398{
1399        char debugfs_name[16];
1400        struct port *port;
1401        struct port_buffer *buf;
1402        dev_t devt;
1403        unsigned int nr_added_bufs;
1404        int err;
1405
1406        port = kmalloc(sizeof(*port), GFP_KERNEL);
1407        if (!port) {
1408                err = -ENOMEM;
1409                goto fail;
1410        }
1411        kref_init(&port->kref);
1412
1413        port->portdev = portdev;
1414        port->id = id;
1415
1416        port->name = NULL;
1417        port->inbuf = NULL;
1418        port->cons.hvc = NULL;
1419        port->async_queue = NULL;
1420
1421        port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1422
1423        port->host_connected = port->guest_connected = false;
1424        port->stats = (struct port_stats) { 0 };
1425
1426        port->outvq_full = false;
1427
1428        port->in_vq = portdev->in_vqs[port->id];
1429        port->out_vq = portdev->out_vqs[port->id];
1430
1431        port->cdev = cdev_alloc();
1432        if (!port->cdev) {
1433                dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1434                err = -ENOMEM;
1435                goto free_port;
1436        }
1437        port->cdev->ops = &port_fops;
1438
1439        devt = MKDEV(portdev->chr_major, id);
1440        err = cdev_add(port->cdev, devt, 1);
1441        if (err < 0) {
1442                dev_err(&port->portdev->vdev->dev,
1443                        "Error %d adding cdev for port %u\n", err, id);
1444                goto free_cdev;
1445        }
1446        port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
1447                                  devt, port, "vport%up%u",
1448                                  port->portdev->vdev->index, id);
1449        if (IS_ERR(port->dev)) {
1450                err = PTR_ERR(port->dev);
1451                dev_err(&port->portdev->vdev->dev,
1452                        "Error %d creating device for port %u\n",
1453                        err, id);
1454                goto free_cdev;
1455        }
1456
1457        spin_lock_init(&port->inbuf_lock);
1458        spin_lock_init(&port->outvq_lock);
1459        init_waitqueue_head(&port->waitqueue);
1460
1461        /* Fill the in_vq with buffers so the host can send us data. */
1462        nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1463        if (!nr_added_bufs) {
1464                dev_err(port->dev, "Error allocating inbufs\n");
1465                err = -ENOMEM;
1466                goto free_device;
1467        }
1468
1469        if (is_rproc_serial(port->portdev->vdev))
1470                /*
1471                 * For rproc_serial assume remote processor is connected.
1472                 * rproc_serial does not want the console port, only
1473                 * the generic port implementation.
1474                 */
1475                port->host_connected = true;
1476        else if (!use_multiport(port->portdev)) {
1477                /*
1478                 * If we're not using multiport support,
1479                 * this has to be a console port.
1480                 */
1481                err = init_port_console(port);
1482                if (err)
1483                        goto free_inbufs;
1484        }
1485
1486        spin_lock_irq(&portdev->ports_lock);
1487        list_add_tail(&port->list, &port->portdev->ports);
1488        spin_unlock_irq(&portdev->ports_lock);
1489
1490        /*
1491         * Tell the Host we're set so that it can send us various
1492         * configuration parameters for this port (eg, port name,
1493         * caching, whether this is a console port, etc.)
1494         */
1495        send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1496
1497        if (pdrvdata.debugfs_dir) {
1498                /*
1499                 * Finally, create the debugfs file that we can use to
1500                 * inspect a port's state at any time
1501                 */
1502                sprintf(debugfs_name, "vport%up%u",
1503                        port->portdev->vdev->index, id);
1504                port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1505                                                         pdrvdata.debugfs_dir,
1506                                                         port,
1507                                                         &port_debugfs_ops);
1508        }
1509        return 0;
1510
1511free_inbufs:
1512        while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
1513                free_buf(buf, true);
1514free_device:
1515        device_destroy(pdrvdata.class, port->dev->devt);
1516free_cdev:
1517        cdev_del(port->cdev);
1518free_port:
1519        kfree(port);
1520fail:
1521        /* The host might want to notify management sw about port add failure */
1522        __send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
1523        return err;
1524}
1525
1526/* No users remain, remove all port-specific data. */
1527static void remove_port(struct kref *kref)
1528{
1529        struct port *port;
1530
1531        port = container_of(kref, struct port, kref);
1532
1533        kfree(port);
1534}
1535
1536static void remove_port_data(struct port *port)
1537{
1538        struct port_buffer *buf;
1539
1540        spin_lock_irq(&port->inbuf_lock);
1541        /* Remove unused data this port might have received. */
1542        discard_port_data(port);
1543        spin_unlock_irq(&port->inbuf_lock);
1544
1545        /* Remove buffers we queued up for the Host to send us data in. */
1546        do {
1547                spin_lock_irq(&port->inbuf_lock);
1548                buf = virtqueue_detach_unused_buf(port->in_vq);
1549                spin_unlock_irq(&port->inbuf_lock);
1550                if (buf)
1551                        free_buf(buf, true);
1552        } while (buf);
1553
1554        spin_lock_irq(&port->outvq_lock);
1555        reclaim_consumed_buffers(port);
1556        spin_unlock_irq(&port->outvq_lock);
1557
1558        /* Free pending buffers from the out-queue. */
1559        do {
1560                spin_lock_irq(&port->outvq_lock);
1561                buf = virtqueue_detach_unused_buf(port->out_vq);
1562                spin_unlock_irq(&port->outvq_lock);
1563                if (buf)
1564                        free_buf(buf, true);
1565        } while (buf);
1566}
1567
1568/*
1569 * Port got unplugged.  Remove port from portdev's list and drop the
1570 * kref reference.  If no userspace has this port opened, it will
1571 * result in immediate removal of the port.
1572 */
1573static void unplug_port(struct port *port)
1574{
1575        spin_lock_irq(&port->portdev->ports_lock);
1576        list_del(&port->list);
1577        spin_unlock_irq(&port->portdev->ports_lock);
1578
1579        spin_lock_irq(&port->inbuf_lock);
1580        if (port->guest_connected) {
1581                /* Let the app know the port is going down. */
1582                send_sigio_to_port(port);
1583
1584                /* Do this after sigio is actually sent */
1585                port->guest_connected = false;
1586                port->host_connected = false;
1587
1588                wake_up_interruptible(&port->waitqueue);
1589        }
1590        spin_unlock_irq(&port->inbuf_lock);
1591
1592        if (is_console_port(port)) {
1593                spin_lock_irq(&pdrvdata_lock);
1594                list_del(&port->cons.list);
1595                spin_unlock_irq(&pdrvdata_lock);
1596                hvc_remove(port->cons.hvc);
1597        }
1598
1599        remove_port_data(port);
1600
1601        /*
1602         * We should just assume the device itself has gone off --
1603         * else a close on an open port later will try to send out a
1604         * control message.
1605         */
1606        port->portdev = NULL;
1607
1608        sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1609        device_destroy(pdrvdata.class, port->dev->devt);
1610        cdev_del(port->cdev);
1611
1612        debugfs_remove(port->debugfs_file);
1613        kfree(port->name);
1614
1615        /*
1616         * Locks around here are not necessary - a port can't be
1617         * opened after we removed the port struct from ports_list
1618         * above.
1619         */
1620        kref_put(&port->kref, remove_port);
1621}
1622
1623/* Any private messages that the Host and Guest want to share */
1624static void handle_control_message(struct ports_device *portdev,
1625                                   struct port_buffer *buf)
1626{
1627        struct virtio_console_control *cpkt;
1628        struct port *port;
1629        size_t name_size;
1630        int err;
1631
1632        cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1633
1634        port = find_port_by_id(portdev, cpkt->id);
1635        if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
1636                /* No valid header at start of buffer.  Drop it. */
1637                dev_dbg(&portdev->vdev->dev,
1638                        "Invalid index %u in control packet\n", cpkt->id);
1639                return;
1640        }
1641
1642        switch (cpkt->event) {
1643        case VIRTIO_CONSOLE_PORT_ADD:
1644                if (port) {
1645                        dev_dbg(&portdev->vdev->dev,
1646                                "Port %u already added\n", port->id);
1647                        send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1648                        break;
1649                }
1650                if (cpkt->id >= portdev->config.max_nr_ports) {
1651                        dev_warn(&portdev->vdev->dev,
1652                                "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
1653                                cpkt->id, portdev->config.max_nr_ports - 1);
1654                        break;
1655                }
1656                add_port(portdev, cpkt->id);
1657                break;
1658        case VIRTIO_CONSOLE_PORT_REMOVE:
1659                unplug_port(port);
1660                break;
1661        case VIRTIO_CONSOLE_CONSOLE_PORT:
1662                if (!cpkt->value)
1663                        break;
1664                if (is_console_port(port))
1665                        break;
1666
1667                init_port_console(port);
1668                complete(&early_console_added);
1669                /*
1670                 * Could remove the port here in case init fails - but
1671                 * have to notify the host first.
1672                 */
1673                break;
1674        case VIRTIO_CONSOLE_RESIZE: {
1675                struct {
1676                        __u16 rows;
1677                        __u16 cols;
1678                } size;
1679
1680                if (!is_console_port(port))
1681                        break;
1682
1683                memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1684                       sizeof(size));
1685                set_console_size(port, size.rows, size.cols);
1686
1687                port->cons.hvc->irq_requested = 1;
1688                resize_console(port);
1689                break;
1690        }
1691        case VIRTIO_CONSOLE_PORT_OPEN:
1692                port->host_connected = cpkt->value;
1693                wake_up_interruptible(&port->waitqueue);
1694                /*
1695                 * If the host port got closed and the host had any
1696                 * unconsumed buffers, we'll be able to reclaim them
1697                 * now.
1698                 */
1699                spin_lock_irq(&port->outvq_lock);
1700                reclaim_consumed_buffers(port);
1701                spin_unlock_irq(&port->outvq_lock);
1702
1703                /*
1704                 * If the guest is connected, it'll be interested in
1705                 * knowing the host connection state changed.
1706                 */
1707                spin_lock_irq(&port->inbuf_lock);
1708                send_sigio_to_port(port);
1709                spin_unlock_irq(&port->inbuf_lock);
1710                break;
1711        case VIRTIO_CONSOLE_PORT_NAME:
1712                /*
1713                 * If we woke up after hibernation, we can get this
1714                 * again.  Skip it in that case.
1715                 */
1716                if (port->name)
1717                        break;
1718
1719                /*
1720                 * Skip the size of the header and the cpkt to get the size
1721                 * of the name that was sent
1722                 */
1723                name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
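                /* the +1 leaves room for the terminating NUL added below */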
1724
1725                port->name = kmalloc(name_size, GFP_KERNEL);
1726                if (!port->name) {
1727                        dev_err(port->dev,
1728                                "Not enough space to store port name\n");
1729                        break;
1730                }
1731                strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1732                        name_size - 1);
1733                port->name[name_size - 1] = 0;
1734
1735                /*
1736                 * Since we only have one sysfs attribute, 'name',
1737                 * create it only if we have a name for the port.
1738                 */
1739                err = sysfs_create_group(&port->dev->kobj,
1740                                         &port_attribute_group);
1741                if (err) {
1742                        dev_err(port->dev,
1743                                "Error %d creating sysfs device attributes\n",
1744                                err);
1745                } else {
1746                        /*
1747                         * Generate a udev event so that appropriate
1748                         * symlinks can be created based on udev
1749                         * rules.
1750                         */
1751                        kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
1752                }
1753                break;
1754        }
1755}
1756
1757static void control_work_handler(struct work_struct *work)
1758{
1759        struct ports_device *portdev;
1760        struct virtqueue *vq;
1761        struct port_buffer *buf;
1762        unsigned int len;
1763
1764        portdev = container_of(work, struct ports_device, control_work);
1765        vq = portdev->c_ivq;
1766
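        /*
         * The c_ivq lock is held only around the virtqueue operations; it
         * is dropped while each message is handled, since
         * handle_control_message() may sleep (e.g. add_port() allocates
         * memory and registers devices).
         */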
1767        spin_lock(&portdev->c_ivq_lock);
1768        while ((buf = virtqueue_get_buf(vq, &len))) {
1769                spin_unlock(&portdev->c_ivq_lock);
1770
1771                buf->len = len;
1772                buf->offset = 0;
1773
1774                handle_control_message(portdev, buf);
1775
1776                spin_lock(&portdev->c_ivq_lock);
1777                if (add_inbuf(portdev->c_ivq, buf) < 0) {
1778                        dev_warn(&portdev->vdev->dev,
1779                                 "Error adding buffer to queue\n");
1780                        free_buf(buf, false);
1781                }
1782        }
1783        spin_unlock(&portdev->c_ivq_lock);
1784}
1785
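/* The host consumed buffers from an output vq; wake up waiters on the port */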
1786static void out_intr(struct virtqueue *vq)
1787{
1788        struct port *port;
1789
1790        port = find_port_by_vq(vq->vdev->priv, vq);
1791        if (!port)
1792                return;
1793
1794        wake_up_interruptible(&port->waitqueue);
1795}
1796
1797static void in_intr(struct virtqueue *vq)
1798{
1799        struct port *port;
1800        unsigned long flags;
1801
1802        port = find_port_by_vq(vq->vdev->priv, vq);
1803        if (!port)
1804                return;
1805
1806        spin_lock_irqsave(&port->inbuf_lock, flags);
1807        port->inbuf = get_inbuf(port);
1808
1809        /*
1810         * Normally the port should not accept data when the port is
1811         * closed. For generic serial ports, the host won't (shouldn't)
1812         * send data till the guest is connected. But this condition
1813         * can be reached when a console port is not yet connected (no
1814         * tty is spawned) and the other side sends out data over the
1815          * vring, or when a remote device starts sending data before
1816         * the ports are opened.
1817         *
1818         * A generic serial port will discard data if not connected,
1819          * while console ports and rproc-serial ports accept data at
1820          * any time. rproc-serial is initialized with guest_connected
1821          * set to false because port_fops_open expects this. Console
1822          * ports are hooked up with an HVC console and are initialized
1823          * with guest_connected set to true.
1824         */
1825
1826        if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
1827                discard_port_data(port);
1828
1829        /* Send a SIGIO indicating new data in case the process asked for it */
1830        send_sigio_to_port(port);
1831
1832        spin_unlock_irqrestore(&port->inbuf_lock, flags);
1833
1834        wake_up_interruptible(&port->waitqueue);
1835
1836        if (is_console_port(port) && hvc_poll(port->cons.hvc))
1837                hvc_kick();
1838}
1839
1840static void control_intr(struct virtqueue *vq)
1841{
1842        struct ports_device *portdev;
1843
1844        portdev = vq->vdev->priv;
1845        schedule_work(&portdev->control_work);
1846}
1847
1848static void config_intr(struct virtio_device *vdev)
1849{
1850        struct ports_device *portdev;
1851
1852        portdev = vdev->priv;
1853
1854        if (!use_multiport(portdev))
1855                schedule_work(&portdev->config_work);
1856}
1857
1858static void config_work_handler(struct work_struct *work)
1859{
1860        struct ports_device *portdev;
1861
1862        portdev = container_of(work, struct ports_device, config_work);
1863        if (!use_multiport(portdev)) {
1864                struct virtio_device *vdev;
1865                struct port *port;
1866                u16 rows, cols;
1867
1868                vdev = portdev->vdev;
1869                virtio_cread(vdev, struct virtio_console_config, cols, &cols);
1870                virtio_cread(vdev, struct virtio_console_config, rows, &rows);
1871
1872                port = find_port_by_id(portdev, 0);
1873                set_console_size(port, rows, cols);
1874
1875                /*
1876                 * We'll use this way of resizing only for legacy
1877                 * support.  For newer userspace
1878          * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
1879                 * to indicate console size changes so that it can be
1880                 * done per-port.
1881                 */
1882                resize_console(port);
1883        }
1884}
1885
1886static int init_vqs(struct ports_device *portdev)
1887{
1888        vq_callback_t **io_callbacks;
1889        char **io_names;
1890        struct virtqueue **vqs;
1891        u32 i, j, nr_ports, nr_queues;
1892        int err;
1893
1894        nr_ports = portdev->config.max_nr_ports;
1895        nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
1896
1897        vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
1898        io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
1899        io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
1900        portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1901                                  GFP_KERNEL);
1902        portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1903                                   GFP_KERNEL);
1904        if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1905            !portdev->out_vqs) {
1906                err = -ENOMEM;
1907                goto free;
1908        }
1909
1910        /*
1911         * For backward compat (newer host but older guest), the host
1912         * spawns a console port first and also inits the vqs for port
1913         * 0 before others.
1914         */
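        /*
         * Resulting virtqueue order: in/out for port 0, then (if multiport)
         * the control in/out pair, then in/out pairs for ports 1..n-1.
         */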
1915        j = 0;
1916        io_callbacks[j] = in_intr;
1917        io_callbacks[j + 1] = out_intr;
1918        io_names[j] = "input";
1919        io_names[j + 1] = "output";
1920        j += 2;
1921
1922        if (use_multiport(portdev)) {
1923                io_callbacks[j] = control_intr;
1924                io_callbacks[j + 1] = NULL;
1925                io_names[j] = "control-i";
1926                io_names[j + 1] = "control-o";
1927
1928                for (i = 1; i < nr_ports; i++) {
1929                        j += 2;
1930                        io_callbacks[j] = in_intr;
1931                        io_callbacks[j + 1] = out_intr;
1932                        io_names[j] = "input";
1933                        io_names[j + 1] = "output";
1934                }
1935        }
1936        /* Find the queues. */
1937        err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
1938                                              io_callbacks,
1939                                              (const char **)io_names);
1940        if (err)
1941                goto free;
1942
1943        j = 0;
1944        portdev->in_vqs[0] = vqs[0];
1945        portdev->out_vqs[0] = vqs[1];
1946        j += 2;
1947        if (use_multiport(portdev)) {
1948                portdev->c_ivq = vqs[j];
1949                portdev->c_ovq = vqs[j + 1];
1950
1951                for (i = 1; i < nr_ports; i++) {
1952                        j += 2;
1953                        portdev->in_vqs[i] = vqs[j];
1954                        portdev->out_vqs[i] = vqs[j + 1];
1955                }
1956        }
1957        kfree(io_names);
1958        kfree(io_callbacks);
1959        kfree(vqs);
1960
1961        return 0;
1962
1963free:
1964        kfree(portdev->out_vqs);
1965        kfree(portdev->in_vqs);
1966        kfree(io_names);
1967        kfree(io_callbacks);
1968        kfree(vqs);
1969
1970        return err;
1971}
1972
1973static const struct file_operations portdev_fops = {
1974        .owner = THIS_MODULE,
1975};
1976
1977static void remove_vqs(struct ports_device *portdev)
1978{
1979        portdev->vdev->config->del_vqs(portdev->vdev);
1980        kfree(portdev->in_vqs);
1981        kfree(portdev->out_vqs);
1982}
1983
1984static void remove_controlq_data(struct ports_device *portdev)
1985{
1986        struct port_buffer *buf;
1987        unsigned int len;
1988
1989        if (!use_multiport(portdev))
1990                return;
1991
1992        while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1993                free_buf(buf, true);
1994
1995        while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1996                free_buf(buf, true);
1997}
1998
1999/*
2000 * Once we're further in boot, we get probed like any other virtio
2001 * device.
2002 *
2003 * If the host also supports multiple console ports, we check the
2004 * config space to see how many ports the host has spawned.  We
2005 * initialize each port found.
2006 */
2007static int virtcons_probe(struct virtio_device *vdev)
2008{
2009        struct ports_device *portdev;
2010        int err;
2011        bool multiport;
2012        bool early = early_put_chars != NULL;
2013
2014        /* Ensure early_put_chars is read now, not reordered past this point */
2015        barrier();
2016
2017        portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
2018        if (!portdev) {
2019                err = -ENOMEM;
2020                goto fail;
2021        }
2022
2023        /* Attach this portdev to this virtio_device, and vice-versa. */
2024        portdev->vdev = vdev;
2025        vdev->priv = portdev;
2026
2027        portdev->chr_major = register_chrdev(0, "virtio-portsdev",
2028                                             &portdev_fops);
2029        if (portdev->chr_major < 0) {
2030                dev_err(&vdev->dev,
2031                        "Error %d registering chrdev for device %u\n",
2032                        portdev->chr_major, vdev->index);
2033                err = portdev->chr_major;
2034                goto free;
2035        }
2036
2037        multiport = false;
2038        portdev->config.max_nr_ports = 1;
2039
2040        /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
2041        if (!is_rproc_serial(vdev) &&
2042            virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
2043                                 struct virtio_console_config, max_nr_ports,
2044                                 &portdev->config.max_nr_ports) == 0) {
2045                multiport = true;
2046        }
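        /*
         * Without VIRTIO_CONSOLE_F_MULTIPORT we fall back to a single
         * console: max_nr_ports stays at 1, init_vqs() sets up only the
         * port 0 in/out queues, and add_port(portdev, 0) below creates
         * the console port.
         */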
2047
2048        err = init_vqs(portdev);
2049        if (err < 0) {
2050                dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
2051                goto free_chrdev;
2052        }
2053
2054        spin_lock_init(&portdev->ports_lock);
2055        INIT_LIST_HEAD(&portdev->ports);
2056
2057        virtio_device_ready(portdev->vdev);
2058
2059        INIT_WORK(&portdev->config_work, &config_work_handler);
2060        INIT_WORK(&portdev->control_work, &control_work_handler);
2061
2062        if (multiport) {
2063                unsigned int nr_added_bufs;
2064
2065                spin_lock_init(&portdev->c_ivq_lock);
2066                spin_lock_init(&portdev->c_ovq_lock);
2067
2068                nr_added_bufs = fill_queue(portdev->c_ivq,
2069                                           &portdev->c_ivq_lock);
2070                if (!nr_added_bufs) {
2071                        dev_err(&vdev->dev,
2072                                "Error allocating buffers for control queue\n");
2073                        err = -ENOMEM;
2074                        goto free_vqs;
2075                }
2076        } else {
2077                /*
2078                 * For backward compatibility: Create a console port
2079                 * if we're running on older host.
2080                 */
2081                add_port(portdev, 0);
2082        }
2083
2084        spin_lock_irq(&pdrvdata_lock);
2085        list_add_tail(&portdev->list, &pdrvdata.portdevs);
2086        spin_unlock_irq(&pdrvdata_lock);
2087
2088        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2089                           VIRTIO_CONSOLE_DEVICE_READY, 1);
2090
2091        /*
2092         * If there was an early virtio console, assume that there are no
2093         * other consoles. We need to wait until the hvc_alloc matches the
2094         * hvc_instantiate, otherwise tty_open will complain, resulting in
2095         * a "Warning: unable to open an initial console" boot failure.
2096         * Without multiport this is done in add_port above. With multiport
2097         * this might take some host<->guest communication - thus we have to
2098         * wait.
2099         */
2100        if (multiport && early)
2101                wait_for_completion(&early_console_added);
2102
2103        return 0;
2104
2105free_vqs:
2106        /* The host might want to notify management software about device add failure */
2107        __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
2108                           VIRTIO_CONSOLE_DEVICE_READY, 0);
2109        remove_vqs(portdev);
2110free_chrdev:
2111        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2112free:
2113        kfree(portdev);
2114fail:
2115        return err;
2116}
2117
2118static void virtcons_remove(struct virtio_device *vdev)
2119{
2120        struct ports_device *portdev;
2121        struct port *port, *port2;
2122
2123        portdev = vdev->priv;
2124
2125        spin_lock_irq(&pdrvdata_lock);
2126        list_del(&portdev->list);
2127        spin_unlock_irq(&pdrvdata_lock);
2128
2129        /* Disable interrupts for vqs */
2130        vdev->config->reset(vdev);
2131        /* Finish up work that's lined up */
2132        if (use_multiport(portdev))
2133                cancel_work_sync(&portdev->control_work);
2134        else
2135                cancel_work_sync(&portdev->config_work);
2136
2137        list_for_each_entry_safe(port, port2, &portdev->ports, list)
2138                unplug_port(port);
2139
2140        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2141
2142        /*
2143         * When yanking out a device, we immediately lose the
2144         * (device-side) queues.  So there's no point in keeping the
2145         * guest side around till we drop our final reference.  This
2146         * also means that any ports which are in an open state will
2147         * have to just stop using the port, as the vqs are going
2148         * away.
2149         */
2150        remove_controlq_data(portdev);
2151        remove_vqs(portdev);
2152        kfree(portdev);
2153}
2154
2155static struct virtio_device_id id_table[] = {
2156        { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
2157        { 0 },
2158};
2159
2160static unsigned int features[] = {
2161        VIRTIO_CONSOLE_F_SIZE,
2162        VIRTIO_CONSOLE_F_MULTIPORT,
2163};
2164
2165static struct virtio_device_id rproc_serial_id_table[] = {
2166#if IS_ENABLED(CONFIG_REMOTEPROC)
2167        { VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
2168#endif
2169        { 0 },
2170};
2171
2172static unsigned int rproc_serial_features[] = {
2173};
2174
2175#ifdef CONFIG_PM_SLEEP
2176static int virtcons_freeze(struct virtio_device *vdev)
2177{
2178        struct ports_device *portdev;
2179        struct port *port;
2180
2181        portdev = vdev->priv;
2182
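        /*
         * Resetting the device stops further vq interrupts; the
         * disable_cb/cancel_work calls below ensure no callback or queued
         * work still touches the vqs before they are removed.
         */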
2183        vdev->config->reset(vdev);
2184
2185        virtqueue_disable_cb(portdev->c_ivq);
2186        cancel_work_sync(&portdev->control_work);
2187        cancel_work_sync(&portdev->config_work);
2188        /*
2189         * Once more: if control_work_handler() was running, it would
2190         * enable the cb as the last step.
2191         */
2192        virtqueue_disable_cb(portdev->c_ivq);
2193        remove_controlq_data(portdev);
2194
2195        list_for_each_entry(port, &portdev->ports, list) {
2196                virtqueue_disable_cb(port->in_vq);
2197                virtqueue_disable_cb(port->out_vq);
2198                /*
2199                 * We'll ask the host later if the new invocation has
2200                 * the port opened or closed.
2201                 */
2202                port->host_connected = false;
2203                remove_port_data(port);
2204        }
2205        remove_vqs(portdev);
2206
2207        return 0;
2208}
2209
2210static int virtcons_restore(struct virtio_device *vdev)
2211{
2212        struct ports_device *portdev;
2213        struct port *port;
2214        int ret;
2215
2216        portdev = vdev->priv;
2217
2218        ret = init_vqs(portdev);
2219        if (ret)
2220                return ret;
2221
2222        virtio_device_ready(portdev->vdev);
2223
2224        if (use_multiport(portdev))
2225                fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
2226
2227        list_for_each_entry(port, &portdev->ports, list) {
2228                port->in_vq = portdev->in_vqs[port->id];
2229                port->out_vq = portdev->out_vqs[port->id];
2230
2231                fill_queue(port->in_vq, &port->inbuf_lock);
2232
2233                /* Get port open/close status on the host */
2234                send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
2235
2236                /*
2237                 * If a port was open at the time of suspending, we
2238                 * have to let the host know that it's still open.
2239                 */
2240                if (port->guest_connected)
2241                        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
2242        }
2243        return 0;
2244}
2245#endif
2246
2247static struct virtio_driver virtio_console = {
2248        .feature_table = features,
2249        .feature_table_size = ARRAY_SIZE(features),
2250        .driver.name =  KBUILD_MODNAME,
2251        .driver.owner = THIS_MODULE,
2252        .id_table =     id_table,
2253        .probe =        virtcons_probe,
2254        .remove =       virtcons_remove,
2255        .config_changed = config_intr,
2256#ifdef CONFIG_PM_SLEEP
2257        .freeze =       virtcons_freeze,
2258        .restore =      virtcons_restore,
2259#endif
2260};
2261
2262static struct virtio_driver virtio_rproc_serial = {
2263        .feature_table = rproc_serial_features,
2264        .feature_table_size = ARRAY_SIZE(rproc_serial_features),
2265        .driver.name =  "virtio_rproc_serial",
2266        .driver.owner = THIS_MODULE,
2267        .id_table =     rproc_serial_id_table,
2268        .probe =        virtcons_probe,
2269        .remove =       virtcons_remove,
2270};
2271
2272static int __init init(void)
2273{
2274        int err;
2275
2276        pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
2277        if (IS_ERR(pdrvdata.class)) {
2278                err = PTR_ERR(pdrvdata.class);
2279                pr_err("Error %d creating virtio-ports class\n", err);
2280                return err;
2281        }
2282
2283        pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
2284        if (!pdrvdata.debugfs_dir)
2285                pr_warning("Error creating debugfs dir for virtio-ports\n");
2286        INIT_LIST_HEAD(&pdrvdata.consoles);
2287        INIT_LIST_HEAD(&pdrvdata.portdevs);
2288
2289        err = register_virtio_driver(&virtio_console);
2290        if (err < 0) {
2291                pr_err("Error %d registering virtio driver\n", err);
2292                goto free;
2293        }
2294        err = register_virtio_driver(&virtio_rproc_serial);
2295        if (err < 0) {
2296                pr_err("Error %d registering virtio rproc serial driver\n",
2297                       err);
2298                goto unregister;
2299        }
2300        return 0;
2301unregister:
2302        unregister_virtio_driver(&virtio_console);
2303free:
2304        debugfs_remove_recursive(pdrvdata.debugfs_dir);
2305        class_destroy(pdrvdata.class);
2306        return err;
2307}
2308
2309static void __exit fini(void)
2310{
2311        reclaim_dma_bufs();
2312
2313        unregister_virtio_driver(&virtio_console);
2314        unregister_virtio_driver(&virtio_rproc_serial);
2315
2316        class_destroy(pdrvdata.class);
2317        debugfs_remove_recursive(pdrvdata.debugfs_dir);
2318}
2319module_init(init);
2320module_exit(fini);
2321
2322MODULE_DEVICE_TABLE(virtio, id_table);
2323MODULE_DESCRIPTION("Virtio console driver");
2324MODULE_LICENSE("GPL");