xref: /onnv-gate/usr/src/uts/common/xen/io/xencons.c (revision 5084)
1*5084Sjohnlev /*
2*5084Sjohnlev  * CDDL HEADER START
3*5084Sjohnlev  *
4*5084Sjohnlev  * The contents of this file are subject to the terms of the
5*5084Sjohnlev  * Common Development and Distribution License (the "License").
6*5084Sjohnlev  * You may not use this file except in compliance with the License.
7*5084Sjohnlev  *
8*5084Sjohnlev  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*5084Sjohnlev  * or http://www.opensolaris.org/os/licensing.
10*5084Sjohnlev  * See the License for the specific language governing permissions
11*5084Sjohnlev  * and limitations under the License.
12*5084Sjohnlev  *
13*5084Sjohnlev  * When distributing Covered Code, include this CDDL HEADER in each
14*5084Sjohnlev  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*5084Sjohnlev  * If applicable, add the following below this CDDL HEADER, with the
16*5084Sjohnlev  * fields enclosed by brackets "[]" replaced with your own identifying
17*5084Sjohnlev  * information: Portions Copyright [yyyy] [name of copyright owner]
18*5084Sjohnlev  *
19*5084Sjohnlev  * CDDL HEADER END
20*5084Sjohnlev  */
21*5084Sjohnlev 
22*5084Sjohnlev /*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
23*5084Sjohnlev /*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
24*5084Sjohnlev /*	  All Rights Reserved					*/
25*5084Sjohnlev 
26*5084Sjohnlev /*
27*5084Sjohnlev  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
28*5084Sjohnlev  * Use is subject to license terms.
29*5084Sjohnlev  */
30*5084Sjohnlev 
31*5084Sjohnlev #pragma ident	"%Z%%M%	%I%	%E% SMI"
32*5084Sjohnlev 
33*5084Sjohnlev /*
34*5084Sjohnlev  *
35*5084Sjohnlev  * Copyright (c) 2004 Christian Limpach.
36*5084Sjohnlev  * All rights reserved.
37*5084Sjohnlev  *
38*5084Sjohnlev  * Redistribution and use in source and binary forms, with or without
39*5084Sjohnlev  * modification, are permitted provided that the following conditions
40*5084Sjohnlev  * are met:
41*5084Sjohnlev  * 1. Redistributions of source code must retain the above copyright
42*5084Sjohnlev  *    notice, this list of conditions and the following disclaimer.
43*5084Sjohnlev  * 2. Redistributions in binary form must reproduce the above copyright
44*5084Sjohnlev  *    notice, this list of conditions and the following disclaimer in the
45*5084Sjohnlev  *    documentation and/or other materials provided with the distribution.
46*5084Sjohnlev  * 3. This section intentionally left blank.
47*5084Sjohnlev  * 4. The name of the author may not be used to endorse or promote products
48*5084Sjohnlev  *    derived from this software without specific prior written permission.
49*5084Sjohnlev  *
50*5084Sjohnlev  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51*5084Sjohnlev  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52*5084Sjohnlev  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53*5084Sjohnlev  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54*5084Sjohnlev  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55*5084Sjohnlev  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56*5084Sjohnlev  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57*5084Sjohnlev  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58*5084Sjohnlev  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59*5084Sjohnlev  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60*5084Sjohnlev  */
61*5084Sjohnlev /*
62*5084Sjohnlev  * Section 3 of the above license was updated in response to bug 6379571.
63*5084Sjohnlev  */
64*5084Sjohnlev 
65*5084Sjohnlev /*
66*5084Sjohnlev  * Hypervisor virtual console driver
67*5084Sjohnlev  */
68*5084Sjohnlev 
69*5084Sjohnlev #include <sys/param.h>
70*5084Sjohnlev #include <sys/types.h>
71*5084Sjohnlev #include <sys/signal.h>
72*5084Sjohnlev #include <sys/stream.h>
73*5084Sjohnlev #include <sys/termio.h>
74*5084Sjohnlev #include <sys/errno.h>
75*5084Sjohnlev #include <sys/file.h>
76*5084Sjohnlev #include <sys/cmn_err.h>
77*5084Sjohnlev #include <sys/stropts.h>
78*5084Sjohnlev #include <sys/strsubr.h>
79*5084Sjohnlev #include <sys/strtty.h>
80*5084Sjohnlev #include <sys/debug.h>
81*5084Sjohnlev #include <sys/kbio.h>
82*5084Sjohnlev #include <sys/cred.h>
83*5084Sjohnlev #include <sys/stat.h>
84*5084Sjohnlev #include <sys/consdev.h>
85*5084Sjohnlev #include <sys/mkdev.h>
86*5084Sjohnlev #include <sys/kmem.h>
87*5084Sjohnlev #include <sys/cred.h>
88*5084Sjohnlev #include <sys/strsun.h>
89*5084Sjohnlev #ifdef DEBUG
90*5084Sjohnlev #include <sys/promif.h>
91*5084Sjohnlev #endif
92*5084Sjohnlev #include <sys/modctl.h>
93*5084Sjohnlev #include <sys/ddi.h>
94*5084Sjohnlev #include <sys/sunddi.h>
95*5084Sjohnlev #include <sys/sunndi.h>
96*5084Sjohnlev #include <sys/policy.h>
97*5084Sjohnlev #include <sys/atomic.h>
98*5084Sjohnlev #include <sys/psm.h>
99*5084Sjohnlev #include <xen/public/io/console.h>
100*5084Sjohnlev 
101*5084Sjohnlev #include "xencons.h"
102*5084Sjohnlev 
103*5084Sjohnlev #include <sys/hypervisor.h>
104*5084Sjohnlev #include <sys/evtchn_impl.h>
105*5084Sjohnlev #include <xen/sys/xenbus_impl.h>
106*5084Sjohnlev #include <xen/sys/xendev.h>
107*5084Sjohnlev 
108*5084Sjohnlev #ifdef DEBUG
109*5084Sjohnlev #define	XENCONS_DEBUG_INIT	0x0001	/* msgs during driver initialization. */
110*5084Sjohnlev #define	XENCONS_DEBUG_INPUT	0x0002	/* characters received during int. */
111*5084Sjohnlev #define	XENCONS_DEBUG_EOT	0x0004	/* msgs when waiting for xmit to finish. */
112*5084Sjohnlev #define	XENCONS_DEBUG_CLOSE	0x0008	/* msgs when driver open/close called */
113*5084Sjohnlev #define	XENCONS_DEBUG_PROCS	0x0020	/* each proc name as it is entered. */
114*5084Sjohnlev #define	XENCONS_DEBUG_OUT	0x0100	/* msgs about output events. */
115*5084Sjohnlev #define	XENCONS_DEBUG_BUSY	0x0200	/* msgs when xmit is enabled/disabled */
116*5084Sjohnlev #define	XENCONS_DEBUG_MODEM	0x0400	/* msgs about modem status & control. */
117*5084Sjohnlev #define	XENCONS_DEBUG_MODM2	0x0800	/* msgs about modem status & control. */
118*5084Sjohnlev #define	XENCONS_DEBUG_IOCTL	0x1000	/* Output msgs about ioctl messages. */
119*5084Sjohnlev #define	XENCONS_DEBUG_CHIP	0x2000	/* msgs about chip identification. */
120*5084Sjohnlev #define	XENCONS_DEBUG_SFLOW	0x4000	/* msgs when S/W flowcontrol active */
121*5084Sjohnlev #define	XENCONS_DEBUG(x) (debug & (x))
122*5084Sjohnlev static int debug = 0;
123*5084Sjohnlev #else
124*5084Sjohnlev #define	XENCONS_DEBUG(x) B_FALSE
125*5084Sjohnlev #endif
126*5084Sjohnlev 
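/*
 * Size of the scratch buffer (xencons_wbuf) that dom0 uses to batch console
 * output into a single HYPERVISOR_console_io(CONSOLEIO_write, ...) call;
 * see xcasync_start().
 */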
127*5084Sjohnlev #define	XENCONS_WBUFSIZE	4096
128*5084Sjohnlev 
129*5084Sjohnlev static boolean_t abort_charseq_recognize(uchar_t);
130*5084Sjohnlev 
131*5084Sjohnlev /* The async interrupt entry points */
132*5084Sjohnlev static void	xcasync_ioctl(struct asyncline *, queue_t *, mblk_t *);
133*5084Sjohnlev static void	xcasync_reioctl(void *);
134*5084Sjohnlev static void	xcasync_start(struct asyncline *);
135*5084Sjohnlev static void	xenconsputchar(cons_polledio_arg_t, uchar_t);
136*5084Sjohnlev static int	xenconsgetchar(cons_polledio_arg_t);
137*5084Sjohnlev static boolean_t	xenconsischar(cons_polledio_arg_t);
138*5084Sjohnlev 
139*5084Sjohnlev static uint_t	xenconsintr(caddr_t);
140*5084Sjohnlev static uint_t	xenconsintr_priv(caddr_t);
141*5084Sjohnlev /*PRINTFLIKE2*/
142*5084Sjohnlev static void	xenconserror(int, const char *, ...) __KPRINTFLIKE(2);
143*5084Sjohnlev static void	xencons_soft_state_free(struct xencons *);
144*5084Sjohnlev static boolean_t
145*5084Sjohnlev xcasync_flowcontrol_sw_input(struct xencons *, async_flowc_action, int);
146*5084Sjohnlev static void
147*5084Sjohnlev xcasync_flowcontrol_sw_output(struct xencons *, async_flowc_action);
148*5084Sjohnlev 
149*5084Sjohnlev void		*xencons_soft_state;
150*5084Sjohnlev char		*xencons_wbuf;
151*5084Sjohnlev struct xencons	*xencons_console;
152*5084Sjohnlev 
153*5084Sjohnlev static void
154*5084Sjohnlev xenconssetup_avintr(struct xencons *xcp, int attach)
155*5084Sjohnlev {
156*5084Sjohnlev 	/*
157*5084Sjohnlev 	 * On xen, CPU 0 always exists and can't be taken offline,
158*5084Sjohnlev 	 * so binding this thread to it should always succeed.
159*5084Sjohnlev 	 */
160*5084Sjohnlev 	mutex_enter(&cpu_lock);
161*5084Sjohnlev 	thread_affinity_set(curthread, 0);
162*5084Sjohnlev 	mutex_exit(&cpu_lock);
163*5084Sjohnlev 
164*5084Sjohnlev 	if (attach) {
165*5084Sjohnlev 		/* Setup our interrupt binding. */
166*5084Sjohnlev 		(void) add_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
167*5084Sjohnlev 		    "xencons", xcp->console_irq, (caddr_t)xcp, NULL, NULL,
168*5084Sjohnlev 		    xcp->dip);
169*5084Sjohnlev 	} else {
170*5084Sjohnlev 		/*
171*5084Sjohnlev 		 * Cleanup interrupt configuration.  Note that the framework
172*5084Sjohnlev 		 * _should_ ensure that when rem_avintr() returns the interrupt
173*5084Sjohnlev 		 * service routine is not currently executing and that it won't
174*5084Sjohnlev 		 * be invoked again.
175*5084Sjohnlev 		 */
176*5084Sjohnlev 		(void) rem_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
177*5084Sjohnlev 		    xcp->console_irq);
178*5084Sjohnlev 	}
179*5084Sjohnlev 
180*5084Sjohnlev 	/* Notify our caller that we're done. */
181*5084Sjohnlev 	mutex_enter(&xcp->excl);
182*5084Sjohnlev 	cv_signal(&xcp->excl_cv);
183*5084Sjohnlev 	mutex_exit(&xcp->excl);
184*5084Sjohnlev 
185*5084Sjohnlev 	/* Clear our binding to CPU 0 */
186*5084Sjohnlev 	thread_affinity_clear(curthread);
188*5084Sjohnlev }
189*5084Sjohnlev 
190*5084Sjohnlev static void
191*5084Sjohnlev xenconssetup_add_avintr(struct xencons *xcp)
192*5084Sjohnlev {
193*5084Sjohnlev 	xenconssetup_avintr(xcp, B_TRUE);
194*5084Sjohnlev }
195*5084Sjohnlev 
196*5084Sjohnlev static void
197*5084Sjohnlev xenconssetup_rem_avintr(struct xencons *xcp)
198*5084Sjohnlev {
199*5084Sjohnlev 	xenconssetup_avintr(xcp, B_FALSE);
200*5084Sjohnlev }
201*5084Sjohnlev 
202*5084Sjohnlev static int
203*5084Sjohnlev xenconsdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
204*5084Sjohnlev {
205*5084Sjohnlev 	int instance;
206*5084Sjohnlev 	struct xencons *xcp;
207*5084Sjohnlev 
208*5084Sjohnlev 	if (cmd != DDI_DETACH && cmd != DDI_SUSPEND)
209*5084Sjohnlev 		return (DDI_FAILURE);
210*5084Sjohnlev 
211*5084Sjohnlev 	if (cmd == DDI_SUSPEND) {
212*5084Sjohnlev 		ddi_remove_intr(devi, 0, NULL);
213*5084Sjohnlev 		return (DDI_SUCCESS);
214*5084Sjohnlev 	}
215*5084Sjohnlev 
216*5084Sjohnlev 	/*
217*5084Sjohnlev 	 * We should never try to detach the console driver on a domU
218*5084Sjohnlev 	 * because it should always be held open
219*5084Sjohnlev 	 */
220*5084Sjohnlev 	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
221*5084Sjohnlev 	if (!DOMAIN_IS_INITDOMAIN(xen_info))
222*5084Sjohnlev 		return (DDI_FAILURE);
223*5084Sjohnlev 
224*5084Sjohnlev 	instance = ddi_get_instance(devi);	/* find out which unit */
225*5084Sjohnlev 
226*5084Sjohnlev 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
227*5084Sjohnlev 	if (xcp == NULL)
228*5084Sjohnlev 		return (DDI_FAILURE);
229*5084Sjohnlev 
230*5084Sjohnlev 	/*
231*5084Sjohnlev 	 * Clean up our interrupt bindings.  For more info on why we
232*5084Sjohnlev 	 * do this in a separate thread, see the comments for when we
233*5084Sjohnlev 	 * set up the interrupt bindings.
234*5084Sjohnlev 	 */
235*5084Sjohnlev 	xencons_console = NULL;
236*5084Sjohnlev 	mutex_enter(&xcp->excl);
237*5084Sjohnlev 	(void) taskq_dispatch(system_taskq,
238*5084Sjohnlev 	    (void (*)(void *))xenconssetup_rem_avintr, xcp, TQ_SLEEP);
239*5084Sjohnlev 	cv_wait(&xcp->excl_cv, &xcp->excl);
240*5084Sjohnlev 	mutex_exit(&xcp->excl);
241*5084Sjohnlev 
242*5084Sjohnlev 	/* remove all minor device node(s) for this device */
243*5084Sjohnlev 	ddi_remove_minor_node(devi, NULL);
244*5084Sjohnlev 
245*5084Sjohnlev 	/* free up state */
246*5084Sjohnlev 	xencons_soft_state_free(xcp);
247*5084Sjohnlev 	kmem_free(xencons_wbuf, XENCONS_WBUFSIZE);
248*5084Sjohnlev 
249*5084Sjohnlev 	DEBUGNOTE1(XENCONS_DEBUG_INIT, "xencons%d: shutdown complete",
250*5084Sjohnlev 	    instance);
251*5084Sjohnlev 	return (DDI_SUCCESS);
252*5084Sjohnlev }
253*5084Sjohnlev 
254*5084Sjohnlev static void
255*5084Sjohnlev xenconssetup(struct xencons *xcp)
256*5084Sjohnlev {
257*5084Sjohnlev 	xcp->ifp = (volatile struct xencons_interface *)HYPERVISOR_console_page;
258*5084Sjohnlev 
259*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
260*5084Sjohnlev 		xencons_wbuf = kmem_alloc(XENCONS_WBUFSIZE, KM_SLEEP);
261*5084Sjohnlev 
262*5084Sjohnlev 		/*
263*5084Sjohnlev 		 * Activate the xen console virq.  Note that xen requires
264*5084Sjohnlev 		 * that VIRQs be bound to CPU 0 when first created.
265*5084Sjohnlev 		 */
266*5084Sjohnlev 		xcp->console_irq = ec_bind_virq_to_irq(VIRQ_CONSOLE, 0);
267*5084Sjohnlev 
268*5084Sjohnlev 		/*
269*5084Sjohnlev 		 * Ok.  This is kinda ugly.  We want to register an
270*5084Sjohnlev 		 * interrupt handler for the xen console virq, but
271*5084Sjohnlev 		 * VIRQs are xen specific and currently the DDI doesn't
272*5084Sjohnlev 		 * support binding to them.  So instead we need to use
273*5084Sjohnlev 		 * add_avintr().  So to make things more complicated,
274*5084Sjohnlev 		 * we already had to bind the xen console VIRQ to CPU 0,
275*5084Sjohnlev 		 * and add_avintr() needs to be invoked on the same CPU
276*5084Sjohnlev 		 * where the VIRQ is bound, in this case on CPU 0.  We
277*5084Sjohnlev 		 * could just temporarily bind ourselves to CPU 0, but
278*5084Sjohnlev 		 * we don't want to do that since this attach thread
279*5084Sjohnlev 		 * could have been invoked in a user thread context,
280*5084Sjohnlev 		 * in which case this thread could already have some
281*5084Sjohnlev 		 * pre-existing cpu binding.  So to avoid changing our
282*5084Sjohnlev 		 * cpu binding we're going to use a taskq thread that
283*5084Sjohnlev 		 * will bind to CPU 0 and register our interrupts
284*5084Sjohnlev 		 * handler for us.
285*5084Sjohnlev 		 */
286*5084Sjohnlev 		mutex_enter(&xcp->excl);
287*5084Sjohnlev 		(void) taskq_dispatch(system_taskq,
288*5084Sjohnlev 		    (void (*)(void *))xenconssetup_add_avintr, xcp, TQ_SLEEP);
289*5084Sjohnlev 		cv_wait(&xcp->excl_cv, &xcp->excl);
290*5084Sjohnlev 		mutex_exit(&xcp->excl);
291*5084Sjohnlev 	} else {
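		/*
		 * domU: allocate an event channel for the console through
		 * xvdi and register our handler via the normal DDI
		 * interrupt path.
		 */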
292*5084Sjohnlev 		(void) xvdi_alloc_evtchn(xcp->dip);
293*5084Sjohnlev 		(void) ddi_add_intr(xcp->dip, 0, NULL, NULL, xenconsintr,
294*5084Sjohnlev 		    (caddr_t)xcp);
295*5084Sjohnlev 		xcp->evtchn = xvdi_get_evtchn(xcp->dip);
296*5084Sjohnlev 	}
297*5084Sjohnlev }
298*5084Sjohnlev 
299*5084Sjohnlev static int
300*5084Sjohnlev xenconsattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
301*5084Sjohnlev {
302*5084Sjohnlev 	int instance = ddi_get_instance(devi);
303*5084Sjohnlev 	struct xencons *xcp;
304*5084Sjohnlev 	int ret;
305*5084Sjohnlev 
306*5084Sjohnlev 	/* There can be only one. */
307*5084Sjohnlev 	if (instance != 0)
308*5084Sjohnlev 		return (DDI_FAILURE);
309*5084Sjohnlev 
310*5084Sjohnlev 	switch (cmd) {
311*5084Sjohnlev 	case DDI_RESUME:
312*5084Sjohnlev 		xcp = xencons_console;
313*5084Sjohnlev 		xenconssetup(xcp);
314*5084Sjohnlev 		return (DDI_SUCCESS);
315*5084Sjohnlev 	case DDI_ATTACH:
316*5084Sjohnlev 		break;
317*5084Sjohnlev 	default:
318*5084Sjohnlev 		return (DDI_FAILURE);
319*5084Sjohnlev 	}
320*5084Sjohnlev 
321*5084Sjohnlev 	ret = ddi_soft_state_zalloc(xencons_soft_state, instance);
322*5084Sjohnlev 	if (ret != DDI_SUCCESS)
323*5084Sjohnlev 		return (DDI_FAILURE);
324*5084Sjohnlev 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
325*5084Sjohnlev 	ASSERT(xcp != NULL);	/* can't fail - we only just allocated it */
326*5084Sjohnlev 
327*5084Sjohnlev 	/*
328*5084Sjohnlev 	 * Set up the other components of the xencons structure for this port.
329*5084Sjohnlev 	 */
330*5084Sjohnlev 	xcp->unit = instance;
331*5084Sjohnlev 	xcp->dip = devi;
332*5084Sjohnlev 
333*5084Sjohnlev 	/* Fill in the polled I/O structure. */
334*5084Sjohnlev 	xcp->polledio.cons_polledio_version = CONSPOLLEDIO_V0;
335*5084Sjohnlev 	xcp->polledio.cons_polledio_argument = (cons_polledio_arg_t)xcp;
336*5084Sjohnlev 	xcp->polledio.cons_polledio_putchar = xenconsputchar;
337*5084Sjohnlev 	xcp->polledio.cons_polledio_getchar = xenconsgetchar;
338*5084Sjohnlev 	xcp->polledio.cons_polledio_ischar = xenconsischar;
339*5084Sjohnlev 	xcp->polledio.cons_polledio_enter = NULL;
340*5084Sjohnlev 	xcp->polledio.cons_polledio_exit = NULL;
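	/*
	 * Sketch of how a polled-I/O consumer (for example, the kernel
	 * debugger) might use this structure once it has been handed out
	 * via the CONSOPENPOLLEDIO ioctl (hypothetical caller, not part of
	 * this driver):
	 *
	 *	cons_polledio_t *pio;	(obtained via CONSOPENPOLLEDIO)
	 *
	 *	pio->cons_polledio_putchar(pio->cons_polledio_argument, 'x');
	 *	if (pio->cons_polledio_ischar(pio->cons_polledio_argument))
	 *		c = pio->cons_polledio_getchar(
	 *		    pio->cons_polledio_argument);
	 */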
341*5084Sjohnlev 
342*5084Sjohnlev 	/*
343*5084Sjohnlev 	 * Initialize the asyncline structure, which holds the TTY
344*5084Sjohnlev 	 * protocol-private data, before enabling interrupts.
345*5084Sjohnlev 	 */
346*5084Sjohnlev 	xcp->priv = kmem_zalloc(sizeof (struct asyncline), KM_SLEEP);
347*5084Sjohnlev 	xcp->priv->async_common = xcp;
348*5084Sjohnlev 	cv_init(&xcp->priv->async_flags_cv, NULL, CV_DRIVER, NULL);
349*5084Sjohnlev 
350*5084Sjohnlev 	/* Initialize mutexes before accessing the interface. */
351*5084Sjohnlev 	mutex_init(&xcp->excl, NULL, MUTEX_DRIVER, NULL);
352*5084Sjohnlev 	cv_init(&xcp->excl_cv, NULL, CV_DEFAULT, NULL);
353*5084Sjohnlev 
354*5084Sjohnlev 	/* create minor device node for this device */
355*5084Sjohnlev 	ret = ddi_create_minor_node(devi, "xencons", S_IFCHR, instance,
356*5084Sjohnlev 	    DDI_NT_SERIAL, NULL);
357*5084Sjohnlev 	if (ret != DDI_SUCCESS) {
358*5084Sjohnlev 		ddi_remove_minor_node(devi, NULL);
359*5084Sjohnlev 		xencons_soft_state_free(xcp);
360*5084Sjohnlev 		return (DDI_FAILURE);
361*5084Sjohnlev 	}
362*5084Sjohnlev 
363*5084Sjohnlev 	ddi_report_dev(devi);
364*5084Sjohnlev 	xencons_console = xcp;
365*5084Sjohnlev 	xenconssetup(xcp);
366*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dattach: done\n", instance);
367*5084Sjohnlev 	return (DDI_SUCCESS);
368*5084Sjohnlev }
369*5084Sjohnlev 
370*5084Sjohnlev /*ARGSUSED*/
371*5084Sjohnlev static int
372*5084Sjohnlev xenconsinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
373*5084Sjohnlev 	void **result)
374*5084Sjohnlev {
375*5084Sjohnlev 	dev_t dev = (dev_t)arg;
376*5084Sjohnlev 	int instance, error;
377*5084Sjohnlev 	struct xencons *xcp;
378*5084Sjohnlev 
379*5084Sjohnlev 	instance = getminor(dev);
380*5084Sjohnlev 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
381*5084Sjohnlev 	if (xcp == NULL)
382*5084Sjohnlev 		return (DDI_FAILURE);
383*5084Sjohnlev 
384*5084Sjohnlev 	switch (infocmd) {
385*5084Sjohnlev 	case DDI_INFO_DEVT2DEVINFO:
386*5084Sjohnlev 		if (xcp->dip == NULL)
387*5084Sjohnlev 			error = DDI_FAILURE;
388*5084Sjohnlev 		else {
389*5084Sjohnlev 			*result = (void *) xcp->dip;
390*5084Sjohnlev 			error = DDI_SUCCESS;
391*5084Sjohnlev 		}
392*5084Sjohnlev 		break;
393*5084Sjohnlev 	case DDI_INFO_DEVT2INSTANCE:
394*5084Sjohnlev 		*result = (void *)(intptr_t)instance;
395*5084Sjohnlev 		error = DDI_SUCCESS;
396*5084Sjohnlev 		break;
397*5084Sjohnlev 	default:
398*5084Sjohnlev 		error = DDI_FAILURE;
399*5084Sjohnlev 	}
400*5084Sjohnlev 	return (error);
401*5084Sjohnlev }
402*5084Sjohnlev 
403*5084Sjohnlev /* xencons_soft_state_free - local wrapper for ddi_soft_state_free(9F) */
404*5084Sjohnlev 
405*5084Sjohnlev static void
406*5084Sjohnlev xencons_soft_state_free(struct xencons *xcp)
407*5084Sjohnlev {
408*5084Sjohnlev 	mutex_destroy(&xcp->excl);
409*5084Sjohnlev 	cv_destroy(&xcp->excl_cv);
410*5084Sjohnlev 	kmem_free(xcp->priv, sizeof (struct asyncline));
411*5084Sjohnlev 	ddi_soft_state_free(xencons_soft_state, xcp->unit);
412*5084Sjohnlev }
413*5084Sjohnlev 
414*5084Sjohnlev /*ARGSUSED*/
415*5084Sjohnlev static int
416*5084Sjohnlev xenconsopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
417*5084Sjohnlev {
418*5084Sjohnlev 	struct xencons	*xcp;
419*5084Sjohnlev 	struct asyncline *async;
420*5084Sjohnlev 	int		unit;
421*5084Sjohnlev 
422*5084Sjohnlev 	unit = getminor(*dev);
423*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dopen\n", unit);
424*5084Sjohnlev 	xcp = ddi_get_soft_state(xencons_soft_state, unit);
425*5084Sjohnlev 	if (xcp == NULL)
426*5084Sjohnlev 		return (ENXIO);		/* unit not configured */
427*5084Sjohnlev 	async = xcp->priv;
428*5084Sjohnlev 	mutex_enter(&xcp->excl);
429*5084Sjohnlev 
430*5084Sjohnlev again:
431*5084Sjohnlev 
432*5084Sjohnlev 	if ((async->async_flags & ASYNC_ISOPEN) == 0) {
433*5084Sjohnlev 		async->async_ttycommon.t_iflag = 0;
434*5084Sjohnlev 		async->async_ttycommon.t_iocpending = NULL;
435*5084Sjohnlev 		async->async_ttycommon.t_size.ws_row = 0;
436*5084Sjohnlev 		async->async_ttycommon.t_size.ws_col = 0;
437*5084Sjohnlev 		async->async_ttycommon.t_size.ws_xpixel = 0;
438*5084Sjohnlev 		async->async_ttycommon.t_size.ws_ypixel = 0;
439*5084Sjohnlev 		async->async_dev = *dev;
440*5084Sjohnlev 		async->async_wbufcid = 0;
441*5084Sjohnlev 
442*5084Sjohnlev 		async->async_startc = CSTART;
443*5084Sjohnlev 		async->async_stopc = CSTOP;
444*5084Sjohnlev 	} else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
445*5084Sjohnlev 	    secpolicy_excl_open(cr) != 0) {
446*5084Sjohnlev 		mutex_exit(&xcp->excl);
447*5084Sjohnlev 		return (EBUSY);
448*5084Sjohnlev 	}
449*5084Sjohnlev 
450*5084Sjohnlev 	async->async_ttycommon.t_flags |= TS_SOFTCAR;
451*5084Sjohnlev 
452*5084Sjohnlev 	async->async_ttycommon.t_readq = rq;
453*5084Sjohnlev 	async->async_ttycommon.t_writeq = WR(rq);
454*5084Sjohnlev 	rq->q_ptr = WR(rq)->q_ptr = (caddr_t)async;
455*5084Sjohnlev 	mutex_exit(&xcp->excl);
456*5084Sjohnlev 	/*
457*5084Sjohnlev 	 * Caution here -- qprocson sets the pointers that are used by canput
458*5084Sjohnlev 	 * called by xencons_rxint.  ASYNC_ISOPEN must *not* be set until those
459*5084Sjohnlev 	 * pointers are valid.
460*5084Sjohnlev 	 */
461*5084Sjohnlev 	qprocson(rq);
462*5084Sjohnlev 	async->async_flags |= ASYNC_ISOPEN;
463*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_INIT, "asy%dopen: done\n", unit);
464*5084Sjohnlev 	return (0);
465*5084Sjohnlev }
466*5084Sjohnlev 
467*5084Sjohnlev 
468*5084Sjohnlev /*
469*5084Sjohnlev  * Close routine.
470*5084Sjohnlev  */
471*5084Sjohnlev /*ARGSUSED*/
472*5084Sjohnlev static int
473*5084Sjohnlev xenconsclose(queue_t *q, int flag, cred_t *credp)
474*5084Sjohnlev {
475*5084Sjohnlev 	struct asyncline *async;
476*5084Sjohnlev 	struct xencons	 *xcp;
477*5084Sjohnlev #ifdef DEBUG
478*5084Sjohnlev 	int instance;
479*5084Sjohnlev #endif
480*5084Sjohnlev 
481*5084Sjohnlev 	async = (struct asyncline *)q->q_ptr;
482*5084Sjohnlev 	ASSERT(async != NULL);
483*5084Sjohnlev 	xcp = async->async_common;
484*5084Sjohnlev #ifdef DEBUG
485*5084Sjohnlev 	instance = xcp->unit;
486*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose\n", instance);
487*5084Sjohnlev #endif
488*5084Sjohnlev 
489*5084Sjohnlev 	mutex_enter(&xcp->excl);
490*5084Sjohnlev 	async->async_flags |= ASYNC_CLOSING;
491*5084Sjohnlev 
492*5084Sjohnlev 	async->async_ocnt = 0;
493*5084Sjohnlev 	if (async->async_xmitblk != NULL)
494*5084Sjohnlev 		freeb(async->async_xmitblk);
495*5084Sjohnlev 	async->async_xmitblk = NULL;
496*5084Sjohnlev 
497*5084Sjohnlev out:
498*5084Sjohnlev 	ttycommon_close(&async->async_ttycommon);
499*5084Sjohnlev 
500*5084Sjohnlev 	/*
501*5084Sjohnlev 	 * Cancel outstanding "bufcall" request.
502*5084Sjohnlev 	 */
503*5084Sjohnlev 	if (async->async_wbufcid != 0) {
504*5084Sjohnlev 		unbufcall(async->async_wbufcid);
505*5084Sjohnlev 		async->async_wbufcid = 0;
506*5084Sjohnlev 	}
507*5084Sjohnlev 
508*5084Sjohnlev 	/* Note that qprocsoff can't be done until after interrupts are off */
509*5084Sjohnlev 	qprocsoff(q);
510*5084Sjohnlev 	q->q_ptr = WR(q)->q_ptr = NULL;
511*5084Sjohnlev 	async->async_ttycommon.t_readq = NULL;
512*5084Sjohnlev 	async->async_ttycommon.t_writeq = NULL;
513*5084Sjohnlev 
514*5084Sjohnlev 	/*
515*5084Sjohnlev 	 * Clear out device state, except persistent device property flags.
516*5084Sjohnlev 	 */
517*5084Sjohnlev 	async->async_flags = 0;
518*5084Sjohnlev 	cv_broadcast(&async->async_flags_cv);
519*5084Sjohnlev 	mutex_exit(&xcp->excl);
520*5084Sjohnlev 
521*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose: done\n", instance);
522*5084Sjohnlev 	return (0);
523*5084Sjohnlev }
524*5084Sjohnlev 
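/*
 * INBUF_IX() maps a logical input index onto the buffer that the rx
 * interrupt routine is reading from: in dom0 the characters come from a
 * flat local buffer filled by HYPERVISOR_console_io(CONSOLEIO_read), so
 * the index is used as-is; in a domU they come from the shared ring, so
 * the index must be wrapped with MASK_XENCONS_IDX().
 */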
525*5084Sjohnlev #define	INBUF_IX(ix, ifp)	(DOMAIN_IS_INITDOMAIN(xen_info) ? \
526*5084Sjohnlev 	(ix) : MASK_XENCONS_IDX((ix), (ifp)->in))
527*5084Sjohnlev 
528*5084Sjohnlev /*
529*5084Sjohnlev  * Handle a xen console rx interrupt.
530*5084Sjohnlev  */
531*5084Sjohnlev /*ARGSUSED*/
532*5084Sjohnlev static void
533*5084Sjohnlev xencons_rxint(struct xencons *xcp)
534*5084Sjohnlev {
535*5084Sjohnlev 	struct asyncline *async;
536*5084Sjohnlev 	short	cc;
537*5084Sjohnlev 	mblk_t	*bp;
538*5084Sjohnlev 	queue_t	*q;
539*5084Sjohnlev 	uchar_t	c, buf[16];
540*5084Sjohnlev 	uchar_t	*cp;
541*5084Sjohnlev 	tty_common_t	*tp;
542*5084Sjohnlev 	int instance;
543*5084Sjohnlev 	volatile struct xencons_interface *ifp;
544*5084Sjohnlev 	XENCONS_RING_IDX cons, prod;
545*5084Sjohnlev 
546*5084Sjohnlev 	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_rxint\n");
547*5084Sjohnlev 
548*5084Sjohnlev loop:
549*5084Sjohnlev 	mutex_enter(&xcp->excl);
550*5084Sjohnlev 
551*5084Sjohnlev 	/* sanity check if we should bail */
552*5084Sjohnlev 	if (xencons_console == NULL) {
553*5084Sjohnlev 		mutex_exit(&xcp->excl);
554*5084Sjohnlev 		goto out;
555*5084Sjohnlev 	}
556*5084Sjohnlev 
557*5084Sjohnlev 	async = xcp->priv;
558*5084Sjohnlev 	instance = xcp->unit;
559*5084Sjohnlev 	ifp = xcp->ifp;
560*5084Sjohnlev 	tp = &async->async_ttycommon;
561*5084Sjohnlev 	q = tp->t_readq;
562*5084Sjohnlev 
563*5084Sjohnlev 	if (async->async_flags & ASYNC_OUT_FLW_RESUME) {
564*5084Sjohnlev 		xcasync_start(async);
565*5084Sjohnlev 		async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
566*5084Sjohnlev 	}
567*5084Sjohnlev 
568*5084Sjohnlev 	/*
569*5084Sjohnlev 	 * If data is available, send it up the stream if there's
570*5084Sjohnlev 	 * somebody listening.
571*5084Sjohnlev 	 */
572*5084Sjohnlev 	if (!(async->async_flags & ASYNC_ISOPEN)) {
573*5084Sjohnlev 		mutex_exit(&xcp->excl);
574*5084Sjohnlev 		goto out;
575*5084Sjohnlev 	}
576*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
577*5084Sjohnlev 		cc = HYPERVISOR_console_io(CONSOLEIO_read, 16, (char *)buf);
578*5084Sjohnlev 		cp = buf;
579*5084Sjohnlev 		cons = 0;
580*5084Sjohnlev 	} else {
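		/*
		 * domU: snapshot the shared ring indices.  The difference
		 * between the producer and consumer indices is the number
		 * of bytes available; individual bytes are fetched below
		 * via INBUF_IX(), which wraps the index into the ring.
		 */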
581*5084Sjohnlev 		cons = ifp->in_cons;
582*5084Sjohnlev 		prod = ifp->in_prod;
583*5084Sjohnlev 
584*5084Sjohnlev 		cc = prod - cons;
585*5084Sjohnlev 		cp = (uchar_t *)ifp->in;
586*5084Sjohnlev 	}
587*5084Sjohnlev 	if (cc <= 0) {
588*5084Sjohnlev 		mutex_exit(&xcp->excl);
589*5084Sjohnlev 		goto out;
590*5084Sjohnlev 	}
591*5084Sjohnlev 
592*5084Sjohnlev 	/*
593*5084Sjohnlev 	 * Check for character break sequence.
594*5084Sjohnlev 	 *
595*5084Sjohnlev 	 * Note that normally asy drivers only check for a character sequence
596*5084Sjohnlev 	 * if abort_enable == KIOCABORTALTERNATE and otherwise use a break
597*5084Sjohnlev 	 * sensed on the line to do an abort_sequence_enter.  Since the
598*5084Sjohnlev 	 * hypervisor does not use a real chip for the console we default to
599*5084Sjohnlev 	 * using the alternate sequence.
600*5084Sjohnlev 	 */
601*5084Sjohnlev 	if ((abort_enable == KIOCABORTENABLE) && (xcp->flags & ASY_CONSOLE)) {
602*5084Sjohnlev 		XENCONS_RING_IDX i;
603*5084Sjohnlev 
604*5084Sjohnlev 		for (i = 0; i < cc; i++) {
605*5084Sjohnlev 			c = cp[INBUF_IX(cons + i, ifp)];
606*5084Sjohnlev 			if (abort_charseq_recognize(c)) {
607*5084Sjohnlev 				/*
608*5084Sjohnlev 				 * Eat the abort sequence; it's not a valid
609*5084Sjohnlev 				 * debugger command.
610*5084Sjohnlev 				 */
611*5084Sjohnlev 				if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
612*5084Sjohnlev 					membar_producer();
613*5084Sjohnlev 					ifp->in_cons = cons + i;
614*5084Sjohnlev 				} else {
615*5084Sjohnlev 					cons += i;
616*5084Sjohnlev 				}
617*5084Sjohnlev 				abort_sequence_enter((char *)NULL);
618*5084Sjohnlev 				/*
619*5084Sjohnlev 				 * Back from debugger, resume normal processing
620*5084Sjohnlev 				 */
621*5084Sjohnlev 				mutex_exit(&xcp->excl);
622*5084Sjohnlev 				goto loop;
623*5084Sjohnlev 			}
624*5084Sjohnlev 		}
625*5084Sjohnlev 	}
626*5084Sjohnlev 
627*5084Sjohnlev 	if (!canput(q)) {
628*5084Sjohnlev 		if (!(async->async_inflow_source & IN_FLOW_STREAMS)) {
629*5084Sjohnlev 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
630*5084Sjohnlev 			    IN_FLOW_STREAMS);
631*5084Sjohnlev 		}
632*5084Sjohnlev 		mutex_exit(&xcp->excl);
633*5084Sjohnlev 		goto out;
634*5084Sjohnlev 	}
635*5084Sjohnlev 	if (async->async_inflow_source & IN_FLOW_STREAMS) {
636*5084Sjohnlev 		(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
637*5084Sjohnlev 		    IN_FLOW_STREAMS);
638*5084Sjohnlev 	}
639*5084Sjohnlev 	DEBUGCONT2(XENCONS_DEBUG_INPUT,
640*5084Sjohnlev 	    "xencons%d_rxint: %d char(s) in queue.\n", instance, cc);
641*5084Sjohnlev 	if (!(bp = allocb(cc, BPRI_MED))) {
642*5084Sjohnlev 		mutex_exit(&xcp->excl);
643*5084Sjohnlev 		ttycommon_qfull(&async->async_ttycommon, q);
644*5084Sjohnlev 		goto out;
645*5084Sjohnlev 	}
646*5084Sjohnlev 	do {
647*5084Sjohnlev 		c = cp[INBUF_IX(cons++, ifp)];
648*5084Sjohnlev 		/*
649*5084Sjohnlev 		 * We handle XON/XOFF char if IXON is set,
650*5084Sjohnlev 		 * but if received char is _POSIX_VDISABLE,
651*5084Sjohnlev 		 * we leave it to the upper-level module.
652*5084Sjohnlev 		 */
653*5084Sjohnlev 		if (tp->t_iflag & IXON) {
654*5084Sjohnlev 			if ((c == async->async_stopc) &&
655*5084Sjohnlev 			    (c != _POSIX_VDISABLE)) {
656*5084Sjohnlev 				xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
657*5084Sjohnlev 				continue;
658*5084Sjohnlev 			} else if ((c == async->async_startc) &&
659*5084Sjohnlev 			    (c != _POSIX_VDISABLE)) {
660*5084Sjohnlev 				xcasync_flowcontrol_sw_output(xcp, FLOW_START);
661*5084Sjohnlev 				continue;
662*5084Sjohnlev 			}
663*5084Sjohnlev 			if ((tp->t_iflag & IXANY) &&
664*5084Sjohnlev 			    (async->async_flags & ASYNC_SW_OUT_FLW)) {
665*5084Sjohnlev 				xcasync_flowcontrol_sw_output(xcp, FLOW_START);
666*5084Sjohnlev 			}
667*5084Sjohnlev 		}
668*5084Sjohnlev 		*bp->b_wptr++ = c;
669*5084Sjohnlev 	} while (--cc);
670*5084Sjohnlev 	membar_producer();
671*5084Sjohnlev 	if (!DOMAIN_IS_INITDOMAIN(xen_info))
672*5084Sjohnlev 		ifp->in_cons = cons;
673*5084Sjohnlev 	mutex_exit(&xcp->excl);
674*5084Sjohnlev 	if (bp->b_wptr > bp->b_rptr) {
675*5084Sjohnlev 		if (!canput(q)) {
676*5084Sjohnlev 			xenconserror(CE_NOTE, "xencons%d: local queue full",
677*5084Sjohnlev 			    instance);
678*5084Sjohnlev 			freemsg(bp);
679*5084Sjohnlev 		} else
680*5084Sjohnlev 			(void) putq(q, bp);
681*5084Sjohnlev 	} else
682*5084Sjohnlev 		freemsg(bp);
683*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info))
684*5084Sjohnlev 		goto loop;
685*5084Sjohnlev out:
686*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_PROCS, "xencons%d_rxint: done\n", instance);
687*5084Sjohnlev 	if (!DOMAIN_IS_INITDOMAIN(xen_info))
688*5084Sjohnlev 		ec_notify_via_evtchn(xcp->evtchn);
689*5084Sjohnlev }
690*5084Sjohnlev 
691*5084Sjohnlev 
692*5084Sjohnlev /*
693*5084Sjohnlev  * Handle a xen console tx interrupt.
694*5084Sjohnlev  */
695*5084Sjohnlev /*ARGSUSED*/
696*5084Sjohnlev static void
697*5084Sjohnlev xencons_txint(struct xencons *xcp)
698*5084Sjohnlev {
699*5084Sjohnlev 	struct asyncline *async;
700*5084Sjohnlev 
701*5084Sjohnlev 	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint\n");
702*5084Sjohnlev 
703*5084Sjohnlev 	/*
704*5084Sjohnlev 	 * prevent recursive entry
705*5084Sjohnlev 	 */
706*5084Sjohnlev 	if (mutex_owner(&xcp->excl) == curthread) {
707*5084Sjohnlev 		goto out;
708*5084Sjohnlev 	}
709*5084Sjohnlev 
710*5084Sjohnlev 	mutex_enter(&xcp->excl);
711*5084Sjohnlev 	if (xencons_console == NULL) {
712*5084Sjohnlev 		mutex_exit(&xcp->excl);
713*5084Sjohnlev 		goto out;
714*5084Sjohnlev 	}
715*5084Sjohnlev 
716*5084Sjohnlev 	/* make sure the device is open */
717*5084Sjohnlev 	async = xcp->priv;
718*5084Sjohnlev 	if ((async->async_flags & ASYNC_ISOPEN) != 0)
719*5084Sjohnlev 		xcasync_start(async);
720*5084Sjohnlev 
721*5084Sjohnlev 	mutex_exit(&xcp->excl);
722*5084Sjohnlev out:
723*5084Sjohnlev 	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint: done\n");
724*5084Sjohnlev }
725*5084Sjohnlev 
726*5084Sjohnlev 
727*5084Sjohnlev /*
728*5084Sjohnlev  * Handle an event indicating that the input ring has become non-empty or
729*5084Sjohnlev  * that the output ring has become non-full.
730*5084Sjohnlev  */
731*5084Sjohnlev static uint_t
732*5084Sjohnlev xenconsintr(caddr_t arg)
733*5084Sjohnlev {
734*5084Sjohnlev 	struct xencons *xcp = (struct xencons *)arg;
735*5084Sjohnlev 	volatile struct xencons_interface *ifp = xcp->ifp;
736*5084Sjohnlev 
737*5084Sjohnlev 	if (ifp->in_prod != ifp->in_cons)
738*5084Sjohnlev 		xencons_rxint(xcp);
739*5084Sjohnlev 	if (ifp->out_prod - ifp->out_cons < sizeof (ifp->out))
740*5084Sjohnlev 		xencons_txint(xcp);
741*5084Sjohnlev 	return (DDI_INTR_CLAIMED);
742*5084Sjohnlev }
743*5084Sjohnlev 
744*5084Sjohnlev /*
745*5084Sjohnlev  * Console interrupt routine for privileged domains
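 * (dom0 reads and writes through HYPERVISOR_console_io() rather than a
 * shared ring, so there is no ring state to test here; just run both the
 * rx and tx handlers whenever the console VIRQ fires).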
746*5084Sjohnlev  */
747*5084Sjohnlev static uint_t
748*5084Sjohnlev xenconsintr_priv(caddr_t arg)
749*5084Sjohnlev {
750*5084Sjohnlev 	struct xencons *xcp = (struct xencons *)arg;
751*5084Sjohnlev 
752*5084Sjohnlev 	xencons_rxint(xcp);
753*5084Sjohnlev 	xencons_txint(xcp);
754*5084Sjohnlev 	return (DDI_INTR_CLAIMED);
755*5084Sjohnlev }
756*5084Sjohnlev 
757*5084Sjohnlev /*
758*5084Sjohnlev  * Start output on a line, unless it's busy, frozen, or otherwise.
759*5084Sjohnlev  */
760*5084Sjohnlev /*ARGSUSED*/
761*5084Sjohnlev static void
762*5084Sjohnlev xcasync_start(struct asyncline *async)
763*5084Sjohnlev {
764*5084Sjohnlev 	struct xencons *xcp = async->async_common;
765*5084Sjohnlev 	int cc;
766*5084Sjohnlev 	queue_t *q;
767*5084Sjohnlev 	mblk_t *bp;
768*5084Sjohnlev 	int	len, space, blen;
769*5084Sjohnlev 	mblk_t *nbp;
770*5084Sjohnlev 
771*5084Sjohnlev #ifdef DEBUG
772*5084Sjohnlev 	int instance = xcp->unit;
773*5084Sjohnlev 
774*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_nstart\n", instance);
775*5084Sjohnlev #endif
776*5084Sjohnlev 	ASSERT(mutex_owned(&xcp->excl));
777*5084Sjohnlev 
778*5084Sjohnlev 	/*
779*5084Sjohnlev 	 * Check only pended sw input flow control.
780*5084Sjohnlev 	 */
781*5084Sjohnlev domore:
782*5084Sjohnlev 	(void) xcasync_flowcontrol_sw_input(xcp, FLOW_CHECK, IN_FLOW_NULL);
783*5084Sjohnlev 
784*5084Sjohnlev 	if ((q = async->async_ttycommon.t_writeq) == NULL) {
785*5084Sjohnlev 		return;	/* not attached to a stream */
786*5084Sjohnlev 	}
787*5084Sjohnlev 
788*5084Sjohnlev 	for (;;) {
789*5084Sjohnlev 		if ((bp = getq(q)) == NULL)
790*5084Sjohnlev 			return;	/* no data to transmit */
791*5084Sjohnlev 
792*5084Sjohnlev 		/*
793*5084Sjohnlev 		 * We have a message block to work on.
794*5084Sjohnlev 		 * Check whether it's a break, a delay, or an ioctl (the latter
795*5084Sjohnlev 		 * occurs if the ioctl in question was waiting for the output
796*5084Sjohnlev 		 * to drain).  If it's one of those, process it immediately.
797*5084Sjohnlev 		 */
798*5084Sjohnlev 		switch (bp->b_datap->db_type) {
799*5084Sjohnlev 
800*5084Sjohnlev 		case M_IOCTL:
801*5084Sjohnlev 			/*
802*5084Sjohnlev 			 * This ioctl was waiting for the output ahead of
803*5084Sjohnlev 			 * it to drain; obviously, it has.  Do it, and
804*5084Sjohnlev 			 * then grab the next message after it.
805*5084Sjohnlev 			 */
806*5084Sjohnlev 			mutex_exit(&xcp->excl);
807*5084Sjohnlev 			xcasync_ioctl(async, q, bp);
808*5084Sjohnlev 			mutex_enter(&xcp->excl);
809*5084Sjohnlev 			continue;
810*5084Sjohnlev 		}
811*5084Sjohnlev 
812*5084Sjohnlev 		while (bp != NULL && (cc = bp->b_wptr - bp->b_rptr) == 0) {
813*5084Sjohnlev 			nbp = bp->b_cont;
814*5084Sjohnlev 			freeb(bp);
815*5084Sjohnlev 			bp = nbp;
816*5084Sjohnlev 		}
817*5084Sjohnlev 		if (bp != NULL)
818*5084Sjohnlev 			break;
819*5084Sjohnlev 	}
820*5084Sjohnlev 
821*5084Sjohnlev 	/*
822*5084Sjohnlev 	 * We have data to transmit.  If output is stopped, put
823*5084Sjohnlev 	 * it back and try again later.
824*5084Sjohnlev 	 */
825*5084Sjohnlev 	if (async->async_flags & (ASYNC_SW_OUT_FLW | ASYNC_STOPPED)) {
826*5084Sjohnlev 		(void) putbq(q, bp);
827*5084Sjohnlev 		return;
828*5084Sjohnlev 	}
829*5084Sjohnlev 
830*5084Sjohnlev 
831*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
832*5084Sjohnlev 		len = 0;
833*5084Sjohnlev 		space = XENCONS_WBUFSIZE;
834*5084Sjohnlev 		while (bp != NULL && space) {
835*5084Sjohnlev 			blen = bp->b_wptr - bp->b_rptr;
836*5084Sjohnlev 			cc = min(blen, space);
837*5084Sjohnlev 			bcopy(bp->b_rptr, &xencons_wbuf[len], cc);
838*5084Sjohnlev 			bp->b_rptr += cc;
839*5084Sjohnlev 			if (cc == blen) {
840*5084Sjohnlev 				nbp = bp->b_cont;
841*5084Sjohnlev 				freeb(bp);
842*5084Sjohnlev 				bp = nbp;
843*5084Sjohnlev 			}
844*5084Sjohnlev 			space -= cc;
845*5084Sjohnlev 			len += cc;
846*5084Sjohnlev 		}
847*5084Sjohnlev 		mutex_exit(&xcp->excl);
848*5084Sjohnlev 		(void) HYPERVISOR_console_io(CONSOLEIO_write, len,
849*5084Sjohnlev 		    xencons_wbuf);
850*5084Sjohnlev 		mutex_enter(&xcp->excl);
851*5084Sjohnlev 		if (bp != NULL)
852*5084Sjohnlev 			(void) putbq(q, bp); /* not done with this msg yet */
853*5084Sjohnlev 		/*
854*5084Sjohnlev 		 * There are no completion interrupts when using the
855*5084Sjohnlev 		 * HYPERVISOR_console_io call to write console data
856*5084Sjohnlev 		 * so we loop here till we have sent all the data to the
857*5084Sjohnlev 		 * hypervisor.
858*5084Sjohnlev 		 */
859*5084Sjohnlev 		goto domore;
860*5084Sjohnlev 	} else {
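		/*
		 * domU: copy bytes into the shared output ring.  The ring
		 * is full when (prod - cons) reaches sizeof (ifp->out); the
		 * new producer index is published only after the data is in
		 * place (membar_producer()), and dom0 is then notified via
		 * the event channel.
		 */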
861*5084Sjohnlev 		volatile struct xencons_interface *ifp = xcp->ifp;
862*5084Sjohnlev 		XENCONS_RING_IDX cons, prod;
863*5084Sjohnlev 
864*5084Sjohnlev 		cons = ifp->out_cons;
865*5084Sjohnlev 		prod = ifp->out_prod;
866*5084Sjohnlev 		membar_enter();
867*5084Sjohnlev 		while (bp != NULL && ((prod - cons) < sizeof (ifp->out))) {
868*5084Sjohnlev 			ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] =
869*5084Sjohnlev 			    *bp->b_rptr++;
870*5084Sjohnlev 			if (bp->b_rptr == bp->b_wptr) {
871*5084Sjohnlev 				nbp = bp->b_cont;
872*5084Sjohnlev 				freeb(bp);
873*5084Sjohnlev 				bp = nbp;
874*5084Sjohnlev 			}
875*5084Sjohnlev 		}
876*5084Sjohnlev 		membar_producer();
877*5084Sjohnlev 		ifp->out_prod = prod;
878*5084Sjohnlev 		ec_notify_via_evtchn(xcp->evtchn);
879*5084Sjohnlev 		if (bp != NULL)
880*5084Sjohnlev 			(void) putbq(q, bp); /* not done with this msg yet */
881*5084Sjohnlev 	}
882*5084Sjohnlev }
883*5084Sjohnlev 
884*5084Sjohnlev 
885*5084Sjohnlev /*
886*5084Sjohnlev  * Process an "ioctl" message sent down to us.
887*5084Sjohnlev  * Note that we don't need to get any locks until we are ready to access
888*5084Sjohnlev  * the hardware.  Nothing we access until then is going to be altered
889*5084Sjohnlev  * outside of the STREAMS framework, so we should be safe.
890*5084Sjohnlev  */
891*5084Sjohnlev static void
892*5084Sjohnlev xcasync_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
893*5084Sjohnlev {
894*5084Sjohnlev 	struct xencons *xcp = async->async_common;
895*5084Sjohnlev 	tty_common_t  *tp = &async->async_ttycommon;
896*5084Sjohnlev 	struct iocblk *iocp;
897*5084Sjohnlev 	unsigned datasize;
898*5084Sjohnlev 	int error = 0;
899*5084Sjohnlev 
900*5084Sjohnlev #ifdef DEBUG
901*5084Sjohnlev 	int instance = xcp->unit;
902*5084Sjohnlev 
903*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl\n", instance);
904*5084Sjohnlev #endif
905*5084Sjohnlev 
906*5084Sjohnlev 	if (tp->t_iocpending != NULL) {
907*5084Sjohnlev 		/*
908*5084Sjohnlev 		 * We were holding an "ioctl" response pending the
909*5084Sjohnlev 		 * availability of an "mblk" to hold data to be passed up;
910*5084Sjohnlev 		 * another "ioctl" came through, which means that "ioctl"
911*5084Sjohnlev 		 * must have timed out or been aborted.
912*5084Sjohnlev 		 */
913*5084Sjohnlev 		freemsg(async->async_ttycommon.t_iocpending);
914*5084Sjohnlev 		async->async_ttycommon.t_iocpending = NULL;
915*5084Sjohnlev 	}
916*5084Sjohnlev 
917*5084Sjohnlev 	iocp = (struct iocblk *)mp->b_rptr;
918*5084Sjohnlev 
919*5084Sjohnlev 	/*
920*5084Sjohnlev 	 * For TIOCMGET and the PPS ioctls, do NOT call ttycommon_ioctl()
921*5084Sjohnlev 	 * because this function frees up the message block (mp->b_cont) that
922*5084Sjohnlev 	 * contains the user location where we pass back the results.
923*5084Sjohnlev 	 *
924*5084Sjohnlev 	 * Similarly, CONSOPENPOLLEDIO needs ioc_count, which ttycommon_ioctl
925*5084Sjohnlev 	 * zaps.  We know that ttycommon_ioctl doesn't know any CONS*
926*5084Sjohnlev 	 * ioctls, so keep the others safe too.
927*5084Sjohnlev 	 */
928*5084Sjohnlev 	DEBUGCONT2(XENCONS_DEBUG_IOCTL, "async%d_ioctl: %s\n",
929*5084Sjohnlev 	    instance,
930*5084Sjohnlev 	    iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
931*5084Sjohnlev 	    iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
932*5084Sjohnlev 	    iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
933*5084Sjohnlev 	    iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" : "other");
934*5084Sjohnlev 
935*5084Sjohnlev 	switch (iocp->ioc_cmd) {
936*5084Sjohnlev 	case TIOCMGET:
937*5084Sjohnlev 	case TIOCGPPS:
938*5084Sjohnlev 	case TIOCSPPS:
939*5084Sjohnlev 	case TIOCGPPSEV:
940*5084Sjohnlev 	case CONSOPENPOLLEDIO:
941*5084Sjohnlev 	case CONSCLOSEPOLLEDIO:
942*5084Sjohnlev 	case CONSSETABORTENABLE:
943*5084Sjohnlev 	case CONSGETABORTENABLE:
944*5084Sjohnlev 		error = -1; /* Do Nothing */
945*5084Sjohnlev 		break;
946*5084Sjohnlev 	default:
947*5084Sjohnlev 
948*5084Sjohnlev 		/*
949*5084Sjohnlev 		 * The only way in which "ttycommon_ioctl" can fail is if the
950*5084Sjohnlev 		 * "ioctl" requires a response containing data to be returned
951*5084Sjohnlev 		 * to the user, and no mblk could be allocated for the data.
952*5084Sjohnlev 		 * No such "ioctl" alters our state.  Thus, we always go ahead
953*5084Sjohnlev 		 * and do any state-changes the "ioctl" calls for.  If we
954*5084Sjohnlev 		 * couldn't allocate the data, "ttycommon_ioctl" has stashed
955*5084Sjohnlev 		 * the "ioctl" away safely, so we just call "bufcall" to
956*5084Sjohnlev 		 * request that we be called back when we stand a better
957*5084Sjohnlev 		 * chance of allocating the data.
958*5084Sjohnlev 		 */
959*5084Sjohnlev 		if ((datasize = ttycommon_ioctl(tp, wq, mp, &error)) != 0) {
960*5084Sjohnlev 			if (async->async_wbufcid)
961*5084Sjohnlev 				unbufcall(async->async_wbufcid);
962*5084Sjohnlev 			async->async_wbufcid = bufcall(datasize, BPRI_HI,
963*5084Sjohnlev 			    (void (*)(void *)) xcasync_reioctl,
964*5084Sjohnlev 			    (void *)(intptr_t)async->async_common->unit);
965*5084Sjohnlev 			return;
966*5084Sjohnlev 		}
967*5084Sjohnlev 	}
968*5084Sjohnlev 
969*5084Sjohnlev 	mutex_enter(&xcp->excl);
970*5084Sjohnlev 
971*5084Sjohnlev 	if (error == 0) {
972*5084Sjohnlev 		/*
973*5084Sjohnlev 		 * "ttycommon_ioctl" did most of the work; we just use the
974*5084Sjohnlev 		 * data it set up.
975*5084Sjohnlev 		 */
976*5084Sjohnlev 		switch (iocp->ioc_cmd) {
977*5084Sjohnlev 
978*5084Sjohnlev 		case TCSETS:
979*5084Sjohnlev 		case TCSETSF:
980*5084Sjohnlev 		case TCSETSW:
981*5084Sjohnlev 		case TCSETA:
982*5084Sjohnlev 		case TCSETAW:
983*5084Sjohnlev 		case TCSETAF:
984*5084Sjohnlev 			break;
985*5084Sjohnlev 		}
986*5084Sjohnlev 	} else if (error < 0) {
987*5084Sjohnlev 		/*
988*5084Sjohnlev 		 * "ttycommon_ioctl" didn't do anything; we process it here.
989*5084Sjohnlev 		 */
990*5084Sjohnlev 		error = 0;
991*5084Sjohnlev 		switch (iocp->ioc_cmd) {
992*5084Sjohnlev 
993*5084Sjohnlev 		case TCSBRK:
994*5084Sjohnlev 			error = miocpullup(mp, sizeof (int));
995*5084Sjohnlev 			break;
996*5084Sjohnlev 
997*5084Sjohnlev 		case TIOCSBRK:
998*5084Sjohnlev 			mioc2ack(mp, NULL, 0, 0);
999*5084Sjohnlev 			break;
1000*5084Sjohnlev 
1001*5084Sjohnlev 		case TIOCCBRK:
1002*5084Sjohnlev 			mioc2ack(mp, NULL, 0, 0);
1003*5084Sjohnlev 			break;
1004*5084Sjohnlev 
1005*5084Sjohnlev 		case CONSOPENPOLLEDIO:
1006*5084Sjohnlev 			error = miocpullup(mp, sizeof (cons_polledio_arg_t));
1007*5084Sjohnlev 			if (error != 0)
1008*5084Sjohnlev 				break;
1009*5084Sjohnlev 
1010*5084Sjohnlev 			*(cons_polledio_arg_t *)mp->b_cont->b_rptr =
1011*5084Sjohnlev 			    (cons_polledio_arg_t)&xcp->polledio;
1012*5084Sjohnlev 
1013*5084Sjohnlev 			mp->b_datap->db_type = M_IOCACK;
1014*5084Sjohnlev 			break;
1015*5084Sjohnlev 
1016*5084Sjohnlev 		case CONSCLOSEPOLLEDIO:
1017*5084Sjohnlev 			mp->b_datap->db_type = M_IOCACK;
1018*5084Sjohnlev 			iocp->ioc_error = 0;
1019*5084Sjohnlev 			iocp->ioc_rval = 0;
1020*5084Sjohnlev 			break;
1021*5084Sjohnlev 
1022*5084Sjohnlev 		case CONSSETABORTENABLE:
1023*5084Sjohnlev 			error = secpolicy_console(iocp->ioc_cr);
1024*5084Sjohnlev 			if (error != 0)
1025*5084Sjohnlev 				break;
1026*5084Sjohnlev 
1027*5084Sjohnlev 			if (iocp->ioc_count != TRANSPARENT) {
1028*5084Sjohnlev 				error = EINVAL;
1029*5084Sjohnlev 				break;
1030*5084Sjohnlev 			}
1031*5084Sjohnlev 
1032*5084Sjohnlev 			if (*(intptr_t *)mp->b_cont->b_rptr)
1033*5084Sjohnlev 				xcp->flags |= ASY_CONSOLE;
1034*5084Sjohnlev 			else
1035*5084Sjohnlev 				xcp->flags &= ~ASY_CONSOLE;
1036*5084Sjohnlev 
1037*5084Sjohnlev 			mp->b_datap->db_type = M_IOCACK;
1038*5084Sjohnlev 			iocp->ioc_error = 0;
1039*5084Sjohnlev 			iocp->ioc_rval = 0;
1040*5084Sjohnlev 			break;
1041*5084Sjohnlev 
1042*5084Sjohnlev 		case CONSGETABORTENABLE:
1043*5084Sjohnlev 			/*CONSTANTCONDITION*/
1044*5084Sjohnlev 			ASSERT(sizeof (boolean_t) <= sizeof (boolean_t *));
1045*5084Sjohnlev 			/*
1046*5084Sjohnlev 			 * Store the return value right in the payload
1047*5084Sjohnlev 			 * we were passed.  Crude.
1048*5084Sjohnlev 			 */
1049*5084Sjohnlev 			mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
1050*5084Sjohnlev 			*(boolean_t *)mp->b_cont->b_rptr =
1051*5084Sjohnlev 			    (xcp->flags & ASY_CONSOLE) != 0;
1052*5084Sjohnlev 			break;
1053*5084Sjohnlev 
1054*5084Sjohnlev 		default:
1055*5084Sjohnlev 			/*
1056*5084Sjohnlev 			 * If we don't understand it, it's an error.  NAK it.
1057*5084Sjohnlev 			 */
1058*5084Sjohnlev 			error = EINVAL;
1059*5084Sjohnlev 			break;
1060*5084Sjohnlev 		}
1061*5084Sjohnlev 	}
1062*5084Sjohnlev 	if (error != 0) {
1063*5084Sjohnlev 		iocp->ioc_error = error;
1064*5084Sjohnlev 		mp->b_datap->db_type = M_IOCNAK;
1065*5084Sjohnlev 	}
1066*5084Sjohnlev 	mutex_exit(&xcp->excl);
1067*5084Sjohnlev 	qreply(wq, mp);
1068*5084Sjohnlev 	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl: done\n", instance);
1069*5084Sjohnlev }
1070*5084Sjohnlev 
1071*5084Sjohnlev static int
1072*5084Sjohnlev xenconsrsrv(queue_t *q)
1073*5084Sjohnlev {
1074*5084Sjohnlev 	mblk_t *bp;
1075*5084Sjohnlev 
1076*5084Sjohnlev 	while (canputnext(q) && (bp = getq(q)))
1077*5084Sjohnlev 		putnext(q, bp);
1078*5084Sjohnlev 	return (0);
1079*5084Sjohnlev }
1080*5084Sjohnlev 
1081*5084Sjohnlev /*
1082*5084Sjohnlev  * Put procedure for write queue.
1083*5084Sjohnlev  * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
1084*5084Sjohnlev  * set the flow control character for M_STOPI and M_STARTI messages;
1085*5084Sjohnlev  * queue up M_BREAK, M_DELAY, and M_DATA messages for processing
1086*5084Sjohnlev  * by the start routine, and then call the start routine; discard
1087*5084Sjohnlev  * everything else.  Note that this driver does not incorporate any
1088*5084Sjohnlev  * mechanism to negotiate to handle the canonicalization process.
1089*5084Sjohnlev  * It expects that these functions are handled in upper module(s),
1090*5084Sjohnlev  * as we do in ldterm.
1091*5084Sjohnlev  */
1092*5084Sjohnlev static int
1093*5084Sjohnlev xenconswput(queue_t *q, mblk_t *mp)
1094*5084Sjohnlev {
1095*5084Sjohnlev 	struct asyncline *async;
1096*5084Sjohnlev 	struct xencons *xcp;
1097*5084Sjohnlev 
1098*5084Sjohnlev 	async = (struct asyncline *)q->q_ptr;
1099*5084Sjohnlev 	xcp = async->async_common;
1100*5084Sjohnlev 
1101*5084Sjohnlev 	switch (mp->b_datap->db_type) {
1102*5084Sjohnlev 
1103*5084Sjohnlev 	case M_STOP:
1104*5084Sjohnlev 		mutex_enter(&xcp->excl);
1105*5084Sjohnlev 		async->async_flags |= ASYNC_STOPPED;
1106*5084Sjohnlev 		mutex_exit(&xcp->excl);
1107*5084Sjohnlev 		freemsg(mp);
1108*5084Sjohnlev 		break;
1109*5084Sjohnlev 
1110*5084Sjohnlev 	case M_START:
1111*5084Sjohnlev 		mutex_enter(&xcp->excl);
1112*5084Sjohnlev 		if (async->async_flags & ASYNC_STOPPED) {
1113*5084Sjohnlev 			async->async_flags &= ~ASYNC_STOPPED;
1114*5084Sjohnlev 			xcasync_start(async);
1115*5084Sjohnlev 		}
1116*5084Sjohnlev 		mutex_exit(&xcp->excl);
1117*5084Sjohnlev 		freemsg(mp);
1118*5084Sjohnlev 		break;
1119*5084Sjohnlev 
1120*5084Sjohnlev 	case M_IOCTL:
1121*5084Sjohnlev 		switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1122*5084Sjohnlev 
1123*5084Sjohnlev 		case TCSETSW:
1124*5084Sjohnlev 		case TCSETSF:
1125*5084Sjohnlev 		case TCSETAW:
1126*5084Sjohnlev 		case TCSETAF:
1127*5084Sjohnlev 			/*
1128*5084Sjohnlev 			 * The changes do not take effect until all
1129*5084Sjohnlev 			 * output queued before them is drained.
1130*5084Sjohnlev 			 * Put this message on the queue, so that
1131*5084Sjohnlev 			 * "xcasync_start" will see it when it's done
1132*5084Sjohnlev 			 * with the output before it.  Poke the
1133*5084Sjohnlev 			 * start routine, just in case.
1134*5084Sjohnlev 			 */
1135*5084Sjohnlev 			(void) putq(q, mp);
1136*5084Sjohnlev 			mutex_enter(&xcp->excl);
1137*5084Sjohnlev 			xcasync_start(async);
1138*5084Sjohnlev 			mutex_exit(&xcp->excl);
1139*5084Sjohnlev 			break;
1140*5084Sjohnlev 
1141*5084Sjohnlev 		default:
1142*5084Sjohnlev 			/*
1143*5084Sjohnlev 			 * Do it now.
1144*5084Sjohnlev 			 */
1145*5084Sjohnlev 			xcasync_ioctl(async, q, mp);
1146*5084Sjohnlev 			break;
1147*5084Sjohnlev 		}
1148*5084Sjohnlev 		break;
1149*5084Sjohnlev 
1150*5084Sjohnlev 	case M_FLUSH:
1151*5084Sjohnlev 		if (*mp->b_rptr & FLUSHW) {
1152*5084Sjohnlev 			mutex_enter(&xcp->excl);
1153*5084Sjohnlev 			/*
1154*5084Sjohnlev 			 * Flush our write queue.
1155*5084Sjohnlev 			 */
1156*5084Sjohnlev 			flushq(q, FLUSHDATA);	/* XXX doesn't flush M_DELAY */
1157*5084Sjohnlev 			if (async->async_xmitblk != NULL) {
1158*5084Sjohnlev 				freeb(async->async_xmitblk);
1159*5084Sjohnlev 				async->async_xmitblk = NULL;
1160*5084Sjohnlev 			}
1161*5084Sjohnlev 			mutex_exit(&xcp->excl);
1162*5084Sjohnlev 			*mp->b_rptr &= ~FLUSHW;	/* it has been flushed */
1163*5084Sjohnlev 		}
1164*5084Sjohnlev 		if (*mp->b_rptr & FLUSHR) {
1165*5084Sjohnlev 			flushq(RD(q), FLUSHDATA);
1166*5084Sjohnlev 			qreply(q, mp);	/* give the read queues a crack at it */
1167*5084Sjohnlev 		} else {
1168*5084Sjohnlev 			freemsg(mp);
1169*5084Sjohnlev 		}
1170*5084Sjohnlev 
1171*5084Sjohnlev 		/*
1172*5084Sjohnlev 		 * We must make sure we process messages that survive the
1173*5084Sjohnlev 		 * write-side flush.
1174*5084Sjohnlev 		 */
1175*5084Sjohnlev 		mutex_enter(&xcp->excl);
1176*5084Sjohnlev 		xcasync_start(async);
1177*5084Sjohnlev 		mutex_exit(&xcp->excl);
1178*5084Sjohnlev 		break;
1179*5084Sjohnlev 
1180*5084Sjohnlev 	case M_BREAK:
1181*5084Sjohnlev 	case M_DELAY:
1182*5084Sjohnlev 	case M_DATA:
1183*5084Sjohnlev 		/*
1184*5084Sjohnlev 		 * Queue the message up to be transmitted,
1185*5084Sjohnlev 		 * and poke the start routine.
1186*5084Sjohnlev 		 */
1187*5084Sjohnlev 		(void) putq(q, mp);
1188*5084Sjohnlev 		mutex_enter(&xcp->excl);
1189*5084Sjohnlev 		xcasync_start(async);
1190*5084Sjohnlev 		mutex_exit(&xcp->excl);
1191*5084Sjohnlev 		break;
1192*5084Sjohnlev 
1193*5084Sjohnlev 	case M_STOPI:
1194*5084Sjohnlev 		mutex_enter(&xcp->excl);
1196*5084Sjohnlev 		if (!(async->async_inflow_source & IN_FLOW_USER)) {
1197*5084Sjohnlev 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
1198*5084Sjohnlev 			    IN_FLOW_USER);
1199*5084Sjohnlev 		}
1200*5084Sjohnlev 		mutex_exit(&xcp->excl);
1202*5084Sjohnlev 		freemsg(mp);
1203*5084Sjohnlev 		break;
1204*5084Sjohnlev 
1205*5084Sjohnlev 	case M_STARTI:
1206*5084Sjohnlev 		mutex_enter(&xcp->excl);
1208*5084Sjohnlev 		if (async->async_inflow_source & IN_FLOW_USER) {
1209*5084Sjohnlev 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
1210*5084Sjohnlev 			    IN_FLOW_USER);
1211*5084Sjohnlev 		}
1212*5084Sjohnlev 		mutex_exit(&xcp->excl);
1214*5084Sjohnlev 		freemsg(mp);
1215*5084Sjohnlev 		break;
1216*5084Sjohnlev 
1217*5084Sjohnlev 	case M_CTL:
1218*5084Sjohnlev 		if (MBLKL(mp) >= sizeof (struct iocblk) &&
1219*5084Sjohnlev 		    ((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
1220*5084Sjohnlev 			((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
1221*5084Sjohnlev 			qreply(q, mp);
1222*5084Sjohnlev 		} else {
1223*5084Sjohnlev 			freemsg(mp);
1224*5084Sjohnlev 		}
1225*5084Sjohnlev 		break;
1226*5084Sjohnlev 
1227*5084Sjohnlev 	default:
1228*5084Sjohnlev 		freemsg(mp);
1229*5084Sjohnlev 		break;
1230*5084Sjohnlev 	}
1231*5084Sjohnlev 	return (0);
1232*5084Sjohnlev }
1233*5084Sjohnlev 
1234*5084Sjohnlev /*
1235*5084Sjohnlev  * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
1236*5084Sjohnlev  * the buffer we need.
1237*5084Sjohnlev  */
1238*5084Sjohnlev static void
1239*5084Sjohnlev xcasync_reioctl(void *unit)
1240*5084Sjohnlev {
1241*5084Sjohnlev 	int instance = (uintptr_t)unit;
1242*5084Sjohnlev 	struct asyncline *async;
1243*5084Sjohnlev 	struct xencons *xcp;
1244*5084Sjohnlev 	queue_t	*q;
1245*5084Sjohnlev 	mblk_t	*mp;
1246*5084Sjohnlev 
1247*5084Sjohnlev 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
1248*5084Sjohnlev 	ASSERT(xcp != NULL);
1249*5084Sjohnlev 	async = xcp->priv;
1250*5084Sjohnlev 
1251*5084Sjohnlev 	/*
1252*5084Sjohnlev 	 * The bufcall is no longer pending.
1253*5084Sjohnlev 	 */
1254*5084Sjohnlev 	mutex_enter(&xcp->excl);
1255*5084Sjohnlev 	async->async_wbufcid = 0;
1256*5084Sjohnlev 	if ((q = async->async_ttycommon.t_writeq) == NULL) {
1257*5084Sjohnlev 		mutex_exit(&xcp->excl);
1258*5084Sjohnlev 		return;
1259*5084Sjohnlev 	}
1260*5084Sjohnlev 	if ((mp = async->async_ttycommon.t_iocpending) != NULL) {
1261*5084Sjohnlev 		/* not pending any more */
1262*5084Sjohnlev 		async->async_ttycommon.t_iocpending = NULL;
1263*5084Sjohnlev 		mutex_exit(&xcp->excl);
1264*5084Sjohnlev 		xcasync_ioctl(async, q, mp);
1265*5084Sjohnlev 	} else
1266*5084Sjohnlev 		mutex_exit(&xcp->excl);
1267*5084Sjohnlev }
1268*5084Sjohnlev 
1269*5084Sjohnlev 
1270*5084Sjohnlev /*
1271*5084Sjohnlev  * debugger/console support routines.
1272*5084Sjohnlev  */
1273*5084Sjohnlev 
1274*5084Sjohnlev /*
1275*5084Sjohnlev  * put a character out
1276*5084Sjohnlev  * Do not use interrupts.  If char is LF, put out CR, LF.
1277*5084Sjohnlev  */
1278*5084Sjohnlev /*ARGSUSED*/
1279*5084Sjohnlev static void
1280*5084Sjohnlev xenconsputchar(cons_polledio_arg_t arg, uchar_t c)
1281*5084Sjohnlev {
1282*5084Sjohnlev 	struct xencons *xcp = xencons_console;
1283*5084Sjohnlev 	volatile struct xencons_interface *ifp = xcp->ifp;
1284*5084Sjohnlev 	XENCONS_RING_IDX prod;
1285*5084Sjohnlev 
1286*5084Sjohnlev 	if (c == '\n')
1287*5084Sjohnlev 		xenconsputchar(arg, '\r');
1288*5084Sjohnlev 
1289*5084Sjohnlev 	/*
1290*5084Sjohnlev 	 * domain 0 can use the console I/O...
1291*5084Sjohnlev 	 */
1292*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1293*5084Sjohnlev 		char	buffer[1];
1294*5084Sjohnlev 
1295*5084Sjohnlev 		buffer[0] = c;
1296*5084Sjohnlev 		(void) HYPERVISOR_console_io(CONSOLEIO_write, 1, buffer);
1297*5084Sjohnlev 		return;
1298*5084Sjohnlev 	}
1299*5084Sjohnlev 
1300*5084Sjohnlev 	/*
1301*5084Sjohnlev 	 * domU has to go through dom0 virtual console.
1302*5084Sjohnlev 	 */
1303*5084Sjohnlev 	while (ifp->out_prod - ifp->out_cons == sizeof (ifp->out))
1304*5084Sjohnlev 		(void) HYPERVISOR_yield();
1305*5084Sjohnlev 
1306*5084Sjohnlev 	prod = ifp->out_prod;
1307*5084Sjohnlev 	ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] = c;
1308*5084Sjohnlev 	membar_producer();
1309*5084Sjohnlev 	ifp->out_prod = prod;
1310*5084Sjohnlev 	ec_notify_via_evtchn(xcp->evtchn);
1311*5084Sjohnlev }
1312*5084Sjohnlev 
1313*5084Sjohnlev /*
1314*5084Sjohnlev  * See if there's a character available. If no character is
1315*5084Sjohnlev  * available, return B_FALSE. Run in polled mode, no interrupts.
1316*5084Sjohnlev  */
1317*5084Sjohnlev static boolean_t
1318*5084Sjohnlev xenconsischar(cons_polledio_arg_t arg)
1319*5084Sjohnlev {
1320*5084Sjohnlev 	struct xencons *xcp = (struct xencons *)arg;
1321*5084Sjohnlev 	volatile struct xencons_interface *ifp = xcp->ifp;
1322*5084Sjohnlev 
1323*5084Sjohnlev 	if (xcp->polldix < xcp->polllen)
1324*5084Sjohnlev 		return (B_TRUE);
1325*5084Sjohnlev 	/*
1326*5084Sjohnlev 	 * domain 0 can use the console I/O...
1327*5084Sjohnlev 	 */
1328*5084Sjohnlev 	xcp->polldix = 0;
1329*5084Sjohnlev 	xcp->polllen = 0;
1330*5084Sjohnlev 	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1331*5084Sjohnlev 		xcp->polllen = HYPERVISOR_console_io(CONSOLEIO_read, 1,
1332*5084Sjohnlev 		    (char *)xcp->pollbuf);
1333*5084Sjohnlev 		return (xcp->polllen != 0);
1334*5084Sjohnlev 	}
1335*5084Sjohnlev 
1336*5084Sjohnlev 	/*
1337*5084Sjohnlev 	 * domU has to go through virtual console device.
1338*5084Sjohnlev 	 */
1339*5084Sjohnlev 	if (ifp->in_prod != ifp->in_cons) {
1340*5084Sjohnlev 		XENCONS_RING_IDX cons;
1341*5084Sjohnlev 
1342*5084Sjohnlev 		cons = ifp->in_cons;
1343*5084Sjohnlev 		membar_enter();
1344*5084Sjohnlev 		xcp->pollbuf[0] = ifp->in[MASK_XENCONS_IDX(cons++, ifp->in)];
1345*5084Sjohnlev 		membar_producer();
1346*5084Sjohnlev 		ifp->in_cons = cons;
1347*5084Sjohnlev 		xcp->polllen = 1;
1348*5084Sjohnlev 	}
1349*5084Sjohnlev 	return (xcp->polllen != 0);
1350*5084Sjohnlev }
1351*5084Sjohnlev 
1352*5084Sjohnlev /*
1353*5084Sjohnlev  * Get a character. Run in polled mode, no interrupts.
1354*5084Sjohnlev  */
1355*5084Sjohnlev static int
1356*5084Sjohnlev xenconsgetchar(cons_polledio_arg_t arg)
1357*5084Sjohnlev {
1358*5084Sjohnlev 	struct xencons *xcp = (struct xencons *)arg;
1359*5084Sjohnlev 
1360*5084Sjohnlev 	ec_wait_on_evtchn(xcp->evtchn, (int (*)(void *))xenconsischar, arg);
1361*5084Sjohnlev 
1362*5084Sjohnlev 	return (xcp->pollbuf[xcp->polldix++]);
1363*5084Sjohnlev }
1364*5084Sjohnlev 
1365*5084Sjohnlev static void
1366*5084Sjohnlev xenconserror(int level, const char *fmt, ...)
1367*5084Sjohnlev {
1368*5084Sjohnlev 	va_list adx;
1369*5084Sjohnlev 	static time_t	last;
1370*5084Sjohnlev 	static const char *lastfmt;
1371*5084Sjohnlev 	time_t now;
1372*5084Sjohnlev 
1373*5084Sjohnlev 	/*
1374*5084Sjohnlev 	 * Don't print the same error message too often.
1375*5084Sjohnlev 	 * Print the message only if we have not printed the
1376*5084Sjohnlev 	 * message within the last second.
1377*5084Sjohnlev 	 * Note that fmt cannot be a pointer to a string
1378*5084Sjohnlev 	 * stored on the stack; the fmt pointer must be in
1379*5084Sjohnlev 	 * the data segment, otherwise lastfmt would be left
1380*5084Sjohnlev 	 * pointing at nonsense.
1381*5084Sjohnlev 	 */
1382*5084Sjohnlev 	now = gethrestime_sec();
1383*5084Sjohnlev 	if (last == now && lastfmt == fmt)
1384*5084Sjohnlev 		return;
1385*5084Sjohnlev 
1386*5084Sjohnlev 	last = now;
1387*5084Sjohnlev 	lastfmt = fmt;
1388*5084Sjohnlev 
1389*5084Sjohnlev 	va_start(adx, fmt);
1390*5084Sjohnlev 	vcmn_err(level, fmt, adx);
1391*5084Sjohnlev 	va_end(adx);
1392*5084Sjohnlev }
1393*5084Sjohnlev 
1394*5084Sjohnlev 
1395*5084Sjohnlev /*
1396*5084Sjohnlev  * Check for abort character sequence
1397*5084Sjohnlev  */
1398*5084Sjohnlev static boolean_t
1399*5084Sjohnlev abort_charseq_recognize(uchar_t ch)
1400*5084Sjohnlev {
1401*5084Sjohnlev 	static int state = 0;
1402*5084Sjohnlev #define	CNTRL(c) ((c)&037)
1403*5084Sjohnlev 	static char sequence[] = { '\r', '~', CNTRL('b') };
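	/*
	 * The abort sequence is <CR> '~' Ctrl-B; "state" records how many
	 * characters of the sequence have matched so far across calls.
	 */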
1404*5084Sjohnlev 
1405*5084Sjohnlev 	if (ch == sequence[state]) {
1406*5084Sjohnlev 		if (++state >= sizeof (sequence)) {
1407*5084Sjohnlev 			state = 0;
1408*5084Sjohnlev 			return (B_TRUE);
1409*5084Sjohnlev 		}
1410*5084Sjohnlev 	} else {
1411*5084Sjohnlev 		state = (ch == sequence[0]) ? 1 : 0;
1412*5084Sjohnlev 	}
1413*5084Sjohnlev 	return (B_FALSE);
1414*5084Sjohnlev }
1415*5084Sjohnlev 
1416*5084Sjohnlev /*
1417*5084Sjohnlev  * Flow control functions
1418*5084Sjohnlev  */
1419*5084Sjohnlev 
1420*5084Sjohnlev /*
1421*5084Sjohnlev  * Software output flow control
1422*5084Sjohnlev  * This function can be executed successfully in any situation.
1423*5084Sjohnlev  * It does not handle HW; it only changes the SW output flow control flag.
1424*5084Sjohnlev  * INPUT VALUE of onoff:
1425*5084Sjohnlev  *                 FLOW_START means to clear SW output flow control flag,
1426*5084Sjohnlev  *			also set ASYNC_OUT_FLW_RESUME.
1427*5084Sjohnlev  *                 FLOW_STOP means to set SW output flow control flag,
1428*5084Sjohnlev  *			also clear ASYNC_OUT_FLW_RESUME.
1429*5084Sjohnlev  */
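/*
 * Illustrative sketch (hypothetical caller, for illustration only): a
 * routine that sees an XOFF arrive from the peer might, with xcp->excl
 * held, call
 *
 *	xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
 *
 * and call it again with FLOW_START once the matching XON arrives, so the
 * transmit path can consult ASYNC_SW_OUT_FLW / ASYNC_OUT_FLW_RESUME.
 */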
1430*5084Sjohnlev static void
1431*5084Sjohnlev xcasync_flowcontrol_sw_output(struct xencons *xcp, async_flowc_action onoff)
1432*5084Sjohnlev {
1433*5084Sjohnlev 	struct asyncline *async = xcp->priv;
1434*5084Sjohnlev 	int instance = xcp->unit;
1435*5084Sjohnlev 
1436*5084Sjohnlev 	ASSERT(mutex_owned(&xcp->excl));
1437*5084Sjohnlev 
1438*5084Sjohnlev 	if (!(async->async_ttycommon.t_iflag & IXON))
1439*5084Sjohnlev 		return;
1440*5084Sjohnlev 
1441*5084Sjohnlev 	switch (onoff) {
1442*5084Sjohnlev 	case FLOW_STOP:
1443*5084Sjohnlev 		async->async_flags |= ASYNC_SW_OUT_FLW;
1444*5084Sjohnlev 		async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
1445*5084Sjohnlev 		DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1446*5084Sjohnlev 		    "xencons%d: output sflow stop\n", instance);
1447*5084Sjohnlev 		break;
1448*5084Sjohnlev 	case FLOW_START:
1449*5084Sjohnlev 		async->async_flags &= ~ASYNC_SW_OUT_FLW;
1450*5084Sjohnlev 		async->async_flags |= ASYNC_OUT_FLW_RESUME;
1451*5084Sjohnlev 		DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1452*5084Sjohnlev 		    "xencons%d: output sflow start\n", instance);
1453*5084Sjohnlev 		break;
1454*5084Sjohnlev 	default:
1455*5084Sjohnlev 		break;
1456*5084Sjohnlev 	}
1457*5084Sjohnlev }
1458*5084Sjohnlev 
1459*5084Sjohnlev /*
1460*5084Sjohnlev  * Software input flow control
1461*5084Sjohnlev  * This function implements software input flow control.
1462*5084Sjohnlev  * INPUT VALUE of onoff:
1463*5084Sjohnlev  *               FLOW_START means to send out an XON char
1464*5084Sjohnlev  *                          and clear the SW input flow control flag.
1465*5084Sjohnlev  *               FLOW_STOP means to send out an XOFF char
1466*5084Sjohnlev  *                          and set the SW input flow control flag.
1467*5084Sjohnlev  *               FLOW_CHECK means to check whether an XON/XOFF is pending
1468*5084Sjohnlev  *                          and, if so, send it out.
1469*5084Sjohnlev  * INPUT VALUE of type:
1470*5084Sjohnlev  *		 IN_FLOW_STREAMS means flow control is due to STREAMS
1471*5084Sjohnlev  *		 IN_FLOW_USER means flow control is due to user's commands
1472*5084Sjohnlev  * RETURN VALUE: B_FALSE means no flow control char is sent
1473*5084Sjohnlev  *               B_TRUE means one flow control char is sent
1474*5084Sjohnlev  */
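/*
 * Illustrative sketch (hypothetical caller, for illustration only): a
 * read-side routine whose queue is backing up might, with xcp->excl held,
 * call
 *
 *	(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP, IN_FLOW_STREAMS);
 *
 * to send an XOFF to the peer, and
 *
 *	(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START, IN_FLOW_STREAMS);
 *
 * once the queue drains, to send an XON and resume input.
 */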
1475*5084Sjohnlev static boolean_t
1476*5084Sjohnlev xcasync_flowcontrol_sw_input(struct xencons *xcp, async_flowc_action onoff,
1477*5084Sjohnlev     int type)
1478*5084Sjohnlev {
1479*5084Sjohnlev 	struct asyncline *async = xcp->priv;
1480*5084Sjohnlev 	int instance = xcp->unit;
1481*5084Sjohnlev 	int rval = B_FALSE;
1482*5084Sjohnlev 
1483*5084Sjohnlev 	ASSERT(mutex_owned(&xcp->excl));
1484*5084Sjohnlev 
1485*5084Sjohnlev 	if (!(async->async_ttycommon.t_iflag & IXOFF))
1486*5084Sjohnlev 		return (rval);
1487*5084Sjohnlev 
1488*5084Sjohnlev 	/*
1489*5084Sjohnlev 	 * If we get this far, then we know IXOFF is set.
1490*5084Sjohnlev 	 */
1491*5084Sjohnlev 	switch (onoff) {
1492*5084Sjohnlev 	case FLOW_STOP:
1493*5084Sjohnlev 		async->async_inflow_source |= type;
1494*5084Sjohnlev 
1495*5084Sjohnlev 		/*
1496*5084Sjohnlev 		 * We'll send an XOFF character for each distinct input
1497*5084Sjohnlev 		 * flow control source that asks us to stop input.
1498*5084Sjohnlev 		 * If we have already sent one XOFF but FLOW_STOP comes
1499*5084Sjohnlev 		 * again, input flow control has become more pressing,
1500*5084Sjohnlev 		 * so we send XOFF again.
1501*5084Sjohnlev 		 */
1502*5084Sjohnlev 		if (async->async_inflow_source & (IN_FLOW_STREAMS |
1503*5084Sjohnlev 		    IN_FLOW_USER))
1504*5084Sjohnlev 			async->async_flags |= ASYNC_SW_IN_FLOW |
1505*5084Sjohnlev 			    ASYNC_SW_IN_NEEDED;
1506*5084Sjohnlev 		DEBUGCONT2(XENCONS_DEBUG_SFLOW, "xencons%d: input sflow stop, "
1507*5084Sjohnlev 		    "type = %x\n", instance, async->async_inflow_source);
1508*5084Sjohnlev 		break;
1509*5084Sjohnlev 	case FLOW_START:
1510*5084Sjohnlev 		async->async_inflow_source &= ~type;
1511*5084Sjohnlev 		if (async->async_inflow_source == 0) {
1512*5084Sjohnlev 			async->async_flags = (async->async_flags &
1513*5084Sjohnlev 			    ~ASYNC_SW_IN_FLOW) | ASYNC_SW_IN_NEEDED;
1514*5084Sjohnlev 			DEBUGCONT1(XENCONS_DEBUG_SFLOW, "xencons%d: "
1515*5084Sjohnlev 			    "input sflow start\n", instance);
1516*5084Sjohnlev 		}
1517*5084Sjohnlev 		break;
1518*5084Sjohnlev 	default:
1519*5084Sjohnlev 		break;
1520*5084Sjohnlev 	}
1521*5084Sjohnlev 
1522*5084Sjohnlev 	if (async->async_flags & ASYNC_SW_IN_NEEDED) {
1523*5084Sjohnlev 		/*
1524*5084Sjohnlev 		 * If we get this far, then we know we need to send out
1525*5084Sjohnlev 		 * XON or XOFF char.
1526*5084Sjohnlev 		 */
1527*5084Sjohnlev 		char c;
1528*5084Sjohnlev 
1529*5084Sjohnlev 		rval = B_TRUE;
1530*5084Sjohnlev 		c = (async->async_flags & ASYNC_SW_IN_FLOW) ?
1531*5084Sjohnlev 		    async->async_stopc : async->async_startc;
1532*5084Sjohnlev 		if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1533*5084Sjohnlev 			(void) HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
1534*5084Sjohnlev 			async->async_flags &= ~ASYNC_SW_IN_NEEDED;
1535*5084Sjohnlev 			return (rval);
1536*5084Sjohnlev 		} else {
1537*5084Sjohnlev 			xenconsputchar(NULL, c);
1538*5084Sjohnlev 		}
1539*5084Sjohnlev 	}
1540*5084Sjohnlev 	return (rval);
1541*5084Sjohnlev }
1542*5084Sjohnlev 
1543*5084Sjohnlev struct module_info xencons_info = {
1544*5084Sjohnlev 	0,		/* mi_idnum */
1545*5084Sjohnlev 	"xencons",	/* mi_idname */
1546*5084Sjohnlev 	0,		/* mi_minpsz */
1547*5084Sjohnlev 	INFPSZ,		/* mi_maxpsz */
1548*5084Sjohnlev 	4096,		/* mi_hiwat */
1549*5084Sjohnlev 	128		/* mi_lowat */
1550*5084Sjohnlev };
1551*5084Sjohnlev 
1552*5084Sjohnlev static struct qinit xencons_rint = {
1553*5084Sjohnlev 	putq,		/* qi_putp */
1554*5084Sjohnlev 	xenconsrsrv,	/* qi_srvp */
1555*5084Sjohnlev 	xenconsopen,	/* qi_qopen */
1556*5084Sjohnlev 	xenconsclose,	/* qi_qclose */
1557*5084Sjohnlev 	NULL,		/* qi_qadmin */
1558*5084Sjohnlev 	&xencons_info,	/* qi_minfo */
1559*5084Sjohnlev 	NULL		/* qi_mstat */
1560*5084Sjohnlev };
1561*5084Sjohnlev 
1562*5084Sjohnlev static struct qinit xencons_wint = {
1563*5084Sjohnlev 	xenconswput,	/* qi_putp */
1564*5084Sjohnlev 	NULL,		/* qi_srvp */
1565*5084Sjohnlev 	NULL,		/* qi_qopen */
1566*5084Sjohnlev 	NULL,		/* qi_qclose */
1567*5084Sjohnlev 	NULL,		/* qi_qadmin */
1568*5084Sjohnlev 	&xencons_info,	/* qi_minfo */
1569*5084Sjohnlev 	NULL		/* qi_mstat */
1570*5084Sjohnlev };
1571*5084Sjohnlev 
1572*5084Sjohnlev struct streamtab xencons_str_info = {
1573*5084Sjohnlev 	&xencons_rint,	/* st_rdinit */
1574*5084Sjohnlev 	&xencons_wint,	/* st_wrinit */
1575*5084Sjohnlev 	NULL,		/* st_muxrinit */
1576*5084Sjohnlev 	NULL		/* st_muxwinit */
1577*5084Sjohnlev };
1578*5084Sjohnlev 
1579*5084Sjohnlev static struct cb_ops cb_xencons_ops = {
1580*5084Sjohnlev 	nodev,			/* cb_open */
1581*5084Sjohnlev 	nodev,			/* cb_close */
1582*5084Sjohnlev 	nodev,			/* cb_strategy */
1583*5084Sjohnlev 	nodev,			/* cb_print */
1584*5084Sjohnlev 	nodev,			/* cb_dump */
1585*5084Sjohnlev 	nodev,			/* cb_read */
1586*5084Sjohnlev 	nodev,			/* cb_write */
1587*5084Sjohnlev 	nodev,			/* cb_ioctl */
1588*5084Sjohnlev 	nodev,			/* cb_devmap */
1589*5084Sjohnlev 	nodev,			/* cb_mmap */
1590*5084Sjohnlev 	nodev,			/* cb_segmap */
1591*5084Sjohnlev 	nochpoll,		/* cb_chpoll */
1592*5084Sjohnlev 	ddi_prop_op,		/* cb_prop_op */
1593*5084Sjohnlev 	&xencons_str_info,		/* cb_stream */
1594*5084Sjohnlev 	D_MP			/* cb_flag */
1595*5084Sjohnlev };
1596*5084Sjohnlev 
1597*5084Sjohnlev struct dev_ops xencons_ops = {
1598*5084Sjohnlev 	DEVO_REV,		/* devo_rev */
1599*5084Sjohnlev 	0,			/* devo_refcnt */
1600*5084Sjohnlev 	xenconsinfo,		/* devo_getinfo */
1601*5084Sjohnlev 	nulldev,		/* devo_identify */
1602*5084Sjohnlev 	nulldev,		/* devo_probe */
1603*5084Sjohnlev 	xenconsattach,		/* devo_attach */
1604*5084Sjohnlev 	xenconsdetach,		/* devo_detach */
1605*5084Sjohnlev 	nodev,			/* devo_reset */
1606*5084Sjohnlev 	&cb_xencons_ops,		/* devo_cb_ops */
1607*5084Sjohnlev };
1608*5084Sjohnlev 
1609*5084Sjohnlev static struct modldrv modldrv = {
1610*5084Sjohnlev 	&mod_driverops, /* Type of module.  This one is a driver */
1611*5084Sjohnlev 	"virtual console driver %I%",
1612*5084Sjohnlev 	&xencons_ops,	/* driver ops */
1613*5084Sjohnlev };
1614*5084Sjohnlev 
1615*5084Sjohnlev static struct modlinkage modlinkage = {
1616*5084Sjohnlev 	MODREV_1,
1617*5084Sjohnlev 	(void *)&modldrv,
1618*5084Sjohnlev 	NULL
1619*5084Sjohnlev };
1620*5084Sjohnlev 
1621*5084Sjohnlev int
1622*5084Sjohnlev _init(void)
1623*5084Sjohnlev {
1624*5084Sjohnlev 	int rv;
1625*5084Sjohnlev 
1626*5084Sjohnlev 	if ((rv = ddi_soft_state_init(&xencons_soft_state,
1627*5084Sjohnlev 	    sizeof (struct xencons), 1)) != 0)
1628*5084Sjohnlev 		return (rv);
1629*5084Sjohnlev 	if ((rv = mod_install(&modlinkage)) != 0) {
1630*5084Sjohnlev 		ddi_soft_state_fini(&xencons_soft_state);
1631*5084Sjohnlev 		return (rv);
1632*5084Sjohnlev 	}
1633*5084Sjohnlev 	DEBUGCONT2(XENCONS_DEBUG_INIT, "%s, debug = %x\n",
1634*5084Sjohnlev 	    modldrv.drv_linkinfo, debug);
1635*5084Sjohnlev 	return (0);
1636*5084Sjohnlev }
1637*5084Sjohnlev 
1638*5084Sjohnlev int
1639*5084Sjohnlev _fini(void)
1640*5084Sjohnlev {
1641*5084Sjohnlev 	int rv;
1642*5084Sjohnlev 
1643*5084Sjohnlev 	if ((rv = mod_remove(&modlinkage)) != 0)
1644*5084Sjohnlev 		return (rv);
1645*5084Sjohnlev 
1646*5084Sjohnlev 	ddi_soft_state_fini(&xencons_soft_state);
1647*5084Sjohnlev 	return (0);
1648*5084Sjohnlev }
1649*5084Sjohnlev 
1650*5084Sjohnlev int
1651*5084Sjohnlev _info(struct modinfo *modinfop)
1652*5084Sjohnlev {
1653*5084Sjohnlev 	return (mod_info(&modlinkage, modinfop));
1654*5084Sjohnlev }
1655