/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * IEEE 1284 Parallel Port Device Driver
 *
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>		/* for modldrv */
#include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
#include <sys/open.h>
#include <sys/ddi_impldefs.h>
#include <sys/kstat.h>

#include <sys/prnio.h>
#include <sys/ecppreg.h>	/* hw description */
#include <sys/ecppio.h>		/* ioctl description */
#include <sys/ecppvar.h>	/* driver description */
#include <sys/dma_engine.h>
#include <sys/dma_i8237A.h>

/*
 * Background
 * ==========
 * The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
 * fully interlocked, bidirectional parallel communications between hosts
 * and printers or other peripherals." (1.1) The standard defines 5 modes
 * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
 * in direction, bandwidth, pin assignment, DMA capability, etc.
 *
 * Negotiation is the mechanism for moving between modes. Compatibility mode
 * is the default mode, from which negotiations to other modes occur and
 * to which both host and peripheral break in case of interface errors.
 * Compatibility mode provides a unidirectional (forward) channel for
 * communicating with old pre-1284 peripherals.
 *
 * Each mode has a number of phases. A [mode, phase] pair represents the
 * interface state. The host initiates all transfers, though the peripheral
 * can request a backchannel transfer by asserting the nErr pin.
 *
 * The ecpp driver implements an IEEE 1284-compliant host using a combination
 * of hardware and software. The hardware part is represented by a controller,
 * which is a part of the SuperIO chip. Ecpp supports the following SuperIOs:
 * PC82332/PC82336 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
 * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
 *
 * Negotiation is performed in software. A transfer may be performed either
 * in software by driving output pins for each byte (PIO method), or with
 * hardware assistance - the SuperIO has a 16-byte FIFO, which is filled by
 * the driver (normally using DMA), while the chip performs the actual xfer.
 * PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
 *
 * The driver currently supports the following modes:
 *
 * - Compatibility mode: byte-wide forward channel ~50KB/sec;
 *   pp->io_mode defines PIO or DMA method of transfer;
 * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
 * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
 *
 * Theory of operation
 * ===================
 * The manner in which ecpp drives the 1284 interface is that of a state
 * machine. State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase
 * {ECPP_PHASE_*} and transfer method {PIO, DMA}. State is a function of
 * application actions {write(2), ioctl(2)} and peripheral reaction.
 *
 * The 1284 interface state is described by the following variables:
 *   pp->current_mode  -- 1284 mode used for forward transfers;
 *   pp->backchannel   -- 1284 mode used for backward transfers;
 *   pp->current_phase -- 1284 phase;
 *
 * Bidirectional operation in Compatibility mode is provided by a combination:
 * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
 * ECPP_CENTRONICS means no backchannel.
 *
 * Driver internal state is defined by pp->e_busy as follows:
 *   ECPP_IDLE	-- idle, no active transfers;
 *   ECPP_BUSY	-- transfer is in progress;
 *   ECPP_ERR	-- have data to transfer, but peripheral can't receive data;
 *   ECPP_FLUSH	-- flushing the queues;
 *
 * When opened, the driver is in the ECPP_IDLE state and the current mode is
 * ECPP_CENTRONICS. Default negotiation tries to negotiate to the best mode
 * supported by the printer and sets pp->current_mode and pp->backchannel
 * accordingly.
 *
 * When output data arrives in M_DATA mblks, ecpp_wput() puts them on the
 * queue to let ecpp_wsrv() concatenate small blocks into one big transfer
 * by copying them into pp->ioblock. If the first mblk's data is bigger than
 * pp->ioblock, then it is used instead of the i/o block (pointed to by
 * pp->msg).
 *
 * Before starting the transfer the driver checks if the peripheral is ready
 * by calling ecpp_check_status(); if it is not, the driver goes to the
 * ECPP_ERR state and schedules ecpp_wsrv_timer(), which will qenable() the
 * wq, effectively rechecking the peripheral readiness and restarting itself
 * until it is ready. The transfer is then started by calling ecpp_start(),
 * and the driver goes ECPP_BUSY.
 *
 * While a transfer is in progress all arriving messages will be queued up.
 * A transfer can end in either of two ways:
 * - an interrupt occurs and ecpp_isr() checks if all the data was
 *   transferred; if so, clean up and go ECPP_IDLE, otherwise put back the
 *   untransferred data and qenable();
 * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred
 *   data;
 *
 * The PIO transfer method is very CPU intensive: for each sent byte the
 * peripheral state is checked, then the byte is transferred and the driver
 * waits for an nAck interrupt; ecpp_isr() will then look if there is more
 * data and if so triggers the soft interrupt, which transfers the next byte.
 * The PIO method is needed only for legacy printers which are sensitive to
 * the strobe problem (Bugid 4192788).
 *
 * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
 * going idle (ecpp_idle_phase()). Many routines qenable() the write queue,
 * meaning "check if there are pending requests, process them and go idle".
 *
 * In its idle state the driver will always try to listen to the backchannel
 * (as advised by 1284).
 *
 * The mechanism for handling backchannel requests is as follows:
 * - when the peripheral has data to send it asserts the nErr pin
 *   (and also nAck in Nibble Mode) which results in an interrupt on the host;
 * - the ISR creates an M_CTL message containing an ECPP_BACKCHANNEL byte and
 *   puts it back on the write queue;
 * - ecpp_wsrv() gets the M_CTL and calls ecpp_peripheral2host(), which kicks
 *   off the transfer;
 *
 * This is how the Nibble and ECP mode backchannels are implemented.
 * If the read queue gets full, the backchannel request is rejected.
 * As the application reads data and the queue size falls below the low
 * watermark, ecpp_rsrv() gets called and enables the backchannel again.
 *
 * Future enhancements
 * ===================
 *
 * Support new modes: Byte and EPP.
 */
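
/*
 * Illustrative sketch only -- never compiled into the driver.  It shows how
 * a user-level application might exercise this interface through the generic
 * prnio(7I) ioctls declared in <sys/prnio.h> (which ecpp supports, see
 * ecpp_srvioc_prnif() and prn_ifinfo below).  The device path
 * "/dev/printers/0" is an assumption for the example; the driver itself only
 * creates an "ecpp<instance>" minor node of type DDI_NT_PRINTER in
 * ecpp_attach().
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/prnio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint_t	status;
	int	fd = open("/dev/printers/0", O_RDWR);	/* assumed path */

	if (fd < 0)
		return (1);

	/* query the port/peripheral status before writing */
	if (ioctl(fd, PRNIOC_GET_STATUS, &status) == 0 &&
	    !(status & PRN_READY))
		(void) fprintf(stderr, "peripheral not ready\n");

	/*
	 * forward transfer: ecpp_wput() queues the data and ecpp_wsrv()
	 * negotiates a mode and starts the transfer
	 */
	if (write(fd, "hello, printer\f", 15) < 0)
		perror("write");

	(void) close(fd);
	return (0);
}
#endif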

#ifndef ECPP_DEBUG
#define	ECPP_DEBUG 0
#endif	/* ECPP_DEBUG */
int ecpp_debug = ECPP_DEBUG;

int noecp = 0;	/* flag not to use ECP mode */

/* driver entry point fn definitions */
static int	ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	ecpp_close(queue_t *, int, cred_t *);
static uint_t	ecpp_isr(caddr_t);
static uint_t	ecpp_softintr(caddr_t);

/* configuration entry point fn definitions */
static int	ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int	ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
static int	ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);

/* isr support routines */
static uint_t	ecpp_nErr_ihdlr(struct ecppunit *);
static uint_t	ecpp_pio_ihdlr(struct ecppunit *);
static uint_t	ecpp_dma_ihdlr(struct ecppunit *);
static uint_t	ecpp_M1553_intr(struct ecppunit *);

/* configuration support routines */
static void	ecpp_get_props(struct ecppunit *);

/* Streams Routines */
static int	ecpp_wput(queue_t *, mblk_t *);
static int	ecpp_wsrv(queue_t *);
static int	ecpp_rsrv(queue_t *);
static void	ecpp_flush(struct ecppunit *, int);
static void	ecpp_start(struct ecppunit *, caddr_t, size_t);

/* ioctl handling */
static void	ecpp_putioc(queue_t *, mblk_t *);
static void	ecpp_srvioc(queue_t *, mblk_t *);
static void	ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
static void	ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
static void	ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
static void	ecpp_srvioc_devid(queue_t *, mblk_t *,
				struct ecpp_device_id *, int *);
static void	ecpp_srvioc_prnif(queue_t *, mblk_t *);
static void	ecpp_ack_ioctl(queue_t *, mblk_t *);
static void	ecpp_nack_ioctl(queue_t *, mblk_t *, int);

/* kstat routines */
static void	ecpp_kstat_init(struct ecppunit *);
static int	ecpp_kstat_update(kstat_t *, int);
static int	ecpp_kstatintr_update(kstat_t *, int);

/* dma routines */
static void	ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
static uint8_t	ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
static uint8_t	ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);

/* pio routines */
static void	ecpp_pio_writeb(struct ecppunit *);
static void	ecpp_xfer_cleanup(struct ecppunit *);
static uint8_t	ecpp_prep_pio_xfer(struct ecppunit *, caddr_t, size_t);

/* misc */
static uchar_t	ecpp_reset_port_regs(struct ecppunit *);
static void	ecpp_xfer_timeout(void *);
static void	ecpp_fifo_timer(void *);
static void	ecpp_wsrv_timer(void *);
static uchar_t	dcr_write(struct ecppunit *, uint8_t);
static uchar_t	ecr_write(struct ecppunit *, uint8_t);
static uchar_t	ecpp_check_status(struct ecppunit *);
static int	ecpp_backchan_req(struct ecppunit *);
static void	ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *);
static uint_t	ecpp_get_prn_ifcap(struct ecppunit *);

/* stubs */
static void	empty_config_mode(struct ecppunit *);
static void	empty_mask_intr(struct ecppunit *);

/* PC87332 support */
static int	pc87332_map_regs(struct ecppunit *);
static void	pc87332_unmap_regs(struct ecppunit *);
static int	pc87332_config_chip(struct ecppunit *);
static void	pc87332_config_mode(struct ecppunit *);
static uint8_t	pc87332_read_config_reg(struct ecppunit *, uint8_t);
static void	pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
static void	cheerio_mask_intr(struct ecppunit *);
static void	cheerio_unmask_intr(struct ecppunit *);
static int	cheerio_dma_start(struct ecppunit *);
static int	cheerio_dma_stop(struct ecppunit *, size_t *);
static size_t	cheerio_getcnt(struct ecppunit *);
static void	cheerio_reset_dcsr(struct ecppunit *);

/* PC97317 support */
static int	pc97317_map_regs(struct ecppunit *);
static void	pc97317_unmap_regs(struct ecppunit *);
static int	pc97317_config_chip(struct ecppunit *);
static void	pc97317_config_mode(struct ecppunit *);

/* M1553 Southbridge support */
static int	m1553_map_regs(struct ecppunit *pp);
static void	m1553_unmap_regs(struct ecppunit *pp);
static int	m1553_config_chip(struct ecppunit *);
static uint8_t	m1553_read_config_reg(struct ecppunit *, uint8_t);
static void	m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t);

/* M1553 Southbridge DMAC 8237 support routines */
static int	dma8237_dma_start(struct ecppunit *);
static int	dma8237_dma_stop(struct ecppunit *, size_t *);
static size_t	dma8237_getcnt(struct ecppunit *);
static void	dma8237_write_addr(struct ecppunit *, uint32_t);
static void	dma8237_write_count(struct ecppunit *, uint32_t);
static uint32_t	dma8237_read_count(struct ecppunit *);
static void	dma8237_write(struct ecppunit *, int, uint8_t);
static uint8_t	dma8237_read(struct ecppunit *, int);
#ifdef INCLUDE_DMA8237_READ_ADDR
static uint32_t	dma8237_read_addr(struct ecppunit *);
#endif

/* i86 PC support routines */

#if defined(__x86)
static int	x86_dma_start(struct ecppunit *);
static int	x86_dma_stop(struct ecppunit *, size_t *);
static int	x86_map_regs(struct ecppunit *);
static void	x86_unmap_regs(struct ecppunit *);
static int	x86_config_chip(struct ecppunit *);
static size_t	x86_getcnt(struct ecppunit *);
#endif

/* IEEE 1284 phase transitions */
static void	ecpp_1284_init_interface(struct ecppunit *);
static int	ecpp_1284_termination(struct ecppunit *);
static uchar_t	ecpp_idle_phase(struct ecppunit *);
static int	ecp_forward2reverse(struct ecppunit *);
static int	ecp_reverse2forward(struct ecppunit *);
static int	read_nibble_backchan(struct ecppunit *);

/* reverse transfers */
static uint_t	ecpp_peripheral2host(struct ecppunit *);
static uchar_t	ecp_peripheral2host(struct ecppunit *);
static uchar_t	nibble_peripheral2host(struct ecppunit *pp, uint8_t *);
static int	ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int);
static void	ecpp_ecp_read_timeout(void *);
static void	ecpp_ecp_read_completion(struct ecppunit *);

/* IEEE 1284 mode transitions */
static void	ecpp_default_negotiation(struct ecppunit *);
static int	ecpp_mode_negotiation(struct ecppunit *, uchar_t);
static int	ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *);
static int	ecp_negotiation(struct ecppunit *);
static int	nibble_negotiation(struct ecppunit *);
static int	devidnib_negotiation(struct ecppunit *);

/* IEEE 1284 utility routines */
static int	wait_dsr(struct ecppunit *, uint8_t, uint8_t, int);

/* debugging functions */
static void	ecpp_error(dev_info_t *, char *, ...);
static uchar_t	ecpp_get_error_status(uchar_t);

/*
 * Chip-dependent structures
 */
static ddi_dma_attr_t cheerio_dma_attr = {
	DMA_ATTR_VERSION,	/* version */
	0x00000000ull,		/* dlim_addr_lo */
	0xfffffffeull,		/* dlim_addr_hi */
	0xffffff,		/* DMA counter register */
	1,			/* DMA address alignment */
	0x74,			/* burst sizes */
	0x0001,			/* min effective DMA size */
	0xffff,			/* maximum transfer size */
	0xffff,			/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA flags */
};

static struct ecpp_hw pc87332 = {
	pc87332_map_regs,
	pc87332_unmap_regs,
	pc87332_config_chip,
	pc87332_config_mode,
	cheerio_mask_intr,
	cheerio_unmask_intr,
	cheerio_dma_start,
	cheerio_dma_stop,
	cheerio_getcnt,
	&cheerio_dma_attr
};

static struct ecpp_hw pc97317 = {
	pc97317_map_regs,
	pc97317_unmap_regs,
	pc97317_config_chip,
	pc97317_config_mode,
	cheerio_mask_intr,
	cheerio_unmask_intr,
	cheerio_dma_start,
	cheerio_dma_stop,
	cheerio_getcnt,
	&cheerio_dma_attr
};

static ddi_dma_attr_t i8237_dma_attr = {
	DMA_ATTR_VERSION,	/* version */
	0x00000000ull,		/* dlim_addr_lo */
	0xfffffffeull,		/* dlim_addr_hi */
	0xffff,			/* DMA counter register */
	1,			/* DMA address alignment */
	0x01,			/* burst sizes */
	0x0001,			/* min effective DMA size */
	0xffff,			/* maximum transfer size */
	0x7fff,			/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA flags */
};

static struct ecpp_hw m1553 = {
	m1553_map_regs,
	m1553_unmap_regs,
	m1553_config_chip,
	empty_config_mode,	/* no config_mode */
	empty_mask_intr,	/* no mask_intr */
	empty_mask_intr,	/* no unmask_intr */
	dma8237_dma_start,
	dma8237_dma_stop,
	dma8237_getcnt,
	&i8237_dma_attr
};

#if defined(__x86)
static ddi_dma_attr_t sb_dma_attr = {
	DMA_ATTR_VERSION,	/* version */
	0x00000000ull,		/* dlim_addr_lo */
	0xffffff,		/* dlim_addr_hi */
	0xffff,			/* DMA counter register */
	1,			/* DMA address alignment */
	0x01,			/* burst sizes */
	0x0001,			/* min effective DMA size */
	0xffffffff,		/* maximum transfer size */
	0xffff,			/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA flags */
};

static struct ecpp_hw x86 = {
	x86_map_regs,
	x86_unmap_regs,
	x86_config_chip,
	empty_config_mode,	/* no config_mode */
	empty_mask_intr,	/* no mask_intr */
	empty_mask_intr,	/* no unmask_intr */
	x86_dma_start,
	x86_dma_stop,
	x86_getcnt,
	&sb_dma_attr
};
#endif

/*
 * list of supported devices
 */
struct ecpp_hw_bind ecpp_hw_bind[] = {
	{ "ns87317-ecpp",	&pc97317,	"PC97317" },
	{ "pnpALI,1533,3",	&m1553,		"M1553" },
	{ "ecpp",		&pc87332,	"PC87332" },
#if defined(__x86)
	{ "lp",			&x86,		"i86pc"},
#endif
};

static ddi_device_acc_attr_t acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static struct ecpp_transfer_parms default_xfer_parms = {
	FWD_TIMEOUT_DEFAULT,	/* write timeout in seconds */
	ECPP_CENTRONICS		/* supported mode */
};

/* prnio interface info string */
static const char prn_ifinfo[] = PRN_PARALLEL;

/* prnio timeouts */
static const struct prn_timeouts prn_timeouts_default = {
	FWD_TIMEOUT_DEFAULT,	/* forward timeout */
	REV_TIMEOUT_DEFAULT	/* reverse timeout */
};

static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY;
static int ecpp_def_timeout = 90;  /* left in for 2.7 compatibility */

static void    *ecppsoft_statep;

/*
 * STREAMS framework manages locks for these structures
 */
_NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", datab))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", queue))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions))

struct module_info ecppinfo = {
	/* id, name, min pkt siz, max pkt siz, hi water, low water */
	42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT
};

static struct qinit ecpp_rinit = {
	putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};

static struct qinit ecpp_wint = {
	ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};

struct streamtab ecpp_str_info = {
	&ecpp_rinit, &ecpp_wint, NULL, NULL
};

static struct cb_ops ecpp_cb_ops = {
	nodev,			/* cb_open */
	nodev,			/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	&ecpp_str_info,		/* cb_stream */
	(D_NEW | D_MP | D_MTPERQ)	/* cb_flag */
};

/*
 * Declare ops vectors for auto configuration.
 */
struct dev_ops  ecpp_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	ecpp_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ecpp_attach,		/* devo_attach */
	ecpp_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ecpp_cb_ops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	nulldev,		/* devo_power */
	ddi_quiesce_not_needed,	/* devo_quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv ecppmodldrv = {
	&mod_driverops,		/* type of module - driver */
	"parallel port driver",
	&ecpp_ops,
};

static struct modlinkage ecppmodlinkage = {
	MODREV_1,
	&ecppmodldrv,
	0
};


/*
 *
 * DDI/DKI entry points and supplementary routines
 *
 */


int
_init(void)
{
	int    error;

	if ((error = mod_install(&ecppmodlinkage)) == 0) {
		(void) ddi_soft_state_init(&ecppsoft_statep,
		    sizeof (struct ecppunit), 1);
	}

	return (error);
}

int
_fini(void)
{
	int    error;

	if ((error = mod_remove(&ecppmodlinkage)) == 0) {
		ddi_soft_state_fini(&ecppsoft_statep);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&ecppmodlinkage, modinfop));
}

static int
ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int			instance;
	char			name[16];
	struct ecppunit		*pp;
	struct ecpp_hw_bind	*hw_bind;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
			return (DDI_FAILURE);
		}

		mutex_enter(&pp->umutex);

		pp->suspended = FALSE;

		/*
		 * Initialize the chip and restore current mode if needed
		 */
		(void) ECPP_CONFIG_CHIP(pp);
		(void) ecpp_reset_port_regs(pp);

		if (pp->oflag == TRUE) {
			int current_mode = pp->current_mode;

			(void) ecpp_1284_termination(pp);
			(void) ecpp_mode_negotiation(pp, current_mode);
		}

		mutex_exit(&pp->umutex);

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) {
		ecpp_error(dip, "ddi_soft_state_zalloc failed\n");
		goto fail;
	}

	pp = ddi_get_soft_state(ecppsoft_statep, instance);

	pp->dip = dip;
	pp->suspended = FALSE;

	/*
	 * Determine SuperIO type and set chip-dependent variables
	 */
	hw_bind = ecpp_determine_sio_type(pp);

	if (hw_bind == NULL) {
		cmn_err(CE_NOTE, "parallel port controller not supported");
		goto fail_sio;
	} else {
		pp->hw = hw_bind->hw;
		ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info);
	}

	/*
	 * Map registers
	 */
	if (ECPP_MAP_REGS(pp) != SUCCESS) {
		goto fail_map;
	}

	if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT,
	    NULL, &pp->dma_handle) != DDI_SUCCESS) {
		ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n");
		goto fail_dma;
	}

	if (ddi_get_iblock_cookie(dip, 0,
	    &pp->ecpp_trap_cookie) != DDI_SUCCESS) {
		ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n");
		goto fail_ibc;
	}

	mutex_init(&pp->umutex, NULL, MUTEX_DRIVER,
	    (void *)pp->ecpp_trap_cookie);

	cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL);

	if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, ecpp_isr,
	    (caddr_t)pp) != DDI_SUCCESS) {
		ecpp_error(dip, "ecpp_attach: failed to add hard intr\n");
		goto fail_intr;
	}

	if (ddi_add_softintr(dip, DDI_SOFTINT_LOW,
	    &pp->softintr_id, 0, 0, ecpp_softintr,
	    (caddr_t)pp) != DDI_SUCCESS) {
		ecpp_error(dip, "ecpp_attach: failed to add soft intr\n");
		goto fail_softintr;
	}

	(void) sprintf(name, "ecpp%d", instance);

	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_NT_PRINTER, NULL) == DDI_FAILURE) {
		ecpp_error(dip, "ecpp_attach: create_minor_node failed\n");
		goto fail_minor;
	}

	pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP);
	if (pp->ioblock == NULL) {
		ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n");
		goto fail_iob;
	} else {
		ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock);
	}

	ecpp_get_props(pp);
#if defined(__x86)
	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) {
		if (ddi_dmae_alloc(dip, pp->uh.x86.chn,
		    DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS)
			ecpp_error(pp->dip, "dmae_alloc success!\n");
	}
#endif
	if (ECPP_CONFIG_CHIP(pp) == FAILURE) {
		ecpp_error(pp->dip, "config_chip failed.\n");
		goto fail_config;
	}

	ecpp_kstat_init(pp);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

fail_config:
	ddi_prop_remove_all(dip);
	kmem_free(pp->ioblock, IO_BLOCK_SZ);
fail_iob:
	ddi_remove_minor_node(dip, NULL);
fail_minor:
	ddi_remove_softintr(pp->softintr_id);
fail_softintr:
	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
fail_intr:
	mutex_destroy(&pp->umutex);
	cv_destroy(&pp->pport_cv);
fail_ibc:
	ddi_dma_free_handle(&pp->dma_handle);
fail_dma:
	ECPP_UNMAP_REGS(pp);
fail_map:
fail_sio:
	ddi_soft_state_free(ecppsoft_statep, instance);
fail:
	ecpp_error(dip, "ecpp_attach: failed.\n");

	return (DDI_FAILURE);
}

static int
ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance;
	struct ecppunit *pp;

	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
			return (DDI_FAILURE);
		}

		mutex_enter(&pp->umutex);
		ASSERT(pp->suspended == FALSE);

		pp->suspended = TRUE;	/* prevent new transfers */

		/*
		 * Wait if there's any activity on the port
		 */
		if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
			(void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
			    SUSPEND_TOUT * drv_usectohz(1000000),
			    TR_CLOCK_TICK);
			if ((pp->e_busy == ECPP_BUSY) ||
			    (pp->e_busy == ECPP_FLUSH)) {
				pp->suspended = FALSE;
				mutex_exit(&pp->umutex);
				ecpp_error(pp->dip,
				    "ecpp_detach: suspend timeout\n");
				return (DDI_FAILURE);
			}
		}

		mutex_exit(&pp->umutex);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	pp = ddi_get_soft_state(ecppsoft_statep, instance);
#if defined(__x86)
	if (pp->hw == &x86 && pp->uh.x86.chn != 0xff)
		(void) ddi_dmae_release(pp->dip, pp->uh.x86.chn);
#endif
	if (pp->dma_handle != NULL)
		ddi_dma_free_handle(&pp->dma_handle);

	ddi_remove_minor_node(dip, NULL);

	ddi_remove_softintr(pp->softintr_id);

	ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);

	if (pp->ksp) {
		kstat_delete(pp->ksp);
	}
	if (pp->intrstats) {
		kstat_delete(pp->intrstats);
	}

	cv_destroy(&pp->pport_cv);

	mutex_destroy(&pp->umutex);

	ECPP_UNMAP_REGS(pp);

	kmem_free(pp->ioblock, IO_BLOCK_SZ);

	ddi_prop_remove_all(dip);

	ddi_soft_state_free(ecppsoft_statep, instance);

	return (DDI_SUCCESS);

}

/*
 * ecpp_get_props() reads ecpp.conf for user-definable tunables.
 * If the file or a particular variable is not there, a default value
 * is assigned.
 */
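/*
 * For illustration only -- a hypothetical ecpp.conf fragment using the
 * property names looked up in this routine (the values shown are examples,
 * not the built-in defaults):
 *
 *	fast-1284-compatible="true";
 *	fast-centronics="false";
 *	centronics-init-seq="false";
 *	centronics-retry=750;
 *	centronics-wait-for-busy=1000;
 *	centronics-data-setup-time=2;
 *	centronics-strobe-pulse-width=2;
 *	ecpp-transfer-timeout=90;
 */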

static void
ecpp_get_props(struct ecppunit *pp)
{
	char	*prop;
#if defined(__x86)
	int	len;
	int	value;
#endif
	/*
	 * If fast_centronics is TRUE, non-compliant IEEE 1284
	 * peripherals (Centronics peripherals) will operate in DMA mode.
	 * Transfers between main memory and the device will be via DMA;
	 * peripheral handshaking will be conducted by superio logic.
	 * If ecpp can not read the variable correctly, fast_centronics will
	 * be set to FALSE.  In this case, transfers and handshaking
	 * will be conducted by PIO for Centronics devices.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-centronics", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_centronics =
		    (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_centronics = FALSE;
	}

	/*
	 * If fast-1284-compatible is set to TRUE, when ecpp communicates
	 * with IEEE 1284 compliant peripherals, data transfers between
	 * main memory and the parallel port will be conducted by DMA.
	 * Handshaking between the port and peripheral will be conducted
	 * by superio logic.  This is the default characteristic.  If
	 * fast-1284-compatible is set to FALSE, transfers and handshaking
	 * will be conducted by PIO.
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_compat = TRUE;
	}

	/*
	 * Some centronics peripherals require the nInit signal to be
	 * toggled to reset the device.  If centronics_init_seq is set
	 * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
	 * Applications have the opportunity to toggle the nInit signal
	 * with ioctl(2) calls as well.  The default is to set it to FALSE.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
		pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->init_seq = FALSE;
	}

	/*
	 * If one of the centronics status signals is in an erroneous
	 * state, ecpp_wsrv() will be reinvoked every centronics-retry ms
	 * to check if the status is ok to transfer.  If the property is not
	 * found, wsrv_retry will be set to CENTRONICS_RETRY ms.
	 */
	pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-retry", CENTRONICS_RETRY);

	/*
	 * In PIO mode, ecpp_isr() will loop waiting for the busy signal
	 * to be deasserted before transferring the next byte. wait_for_busy
	 * is specified in microseconds.  If the property is not found
	 * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
	 */
	pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-wait-for-busy", WAIT_FOR_BUSY);

	/*
	 * In PIO mode, centronics transfers must hold the data signals
	 * for data_setup_time milliseconds before the strobe is asserted.
	 */
	pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-data-setup-time", DATA_SETUP_TIME);

	/*
	 * In PIO mode, centronics transfers assert the strobe signal
	 * for a period of strobe_pulse_width milliseconds.
	 */
	pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);

	/*
	 * Upon a transfer to the peripheral, ecpp waits write_timeout seconds
	 * for the transmission to complete.
	 */
	default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
	    pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);

	pp->xfer_parms = default_xfer_parms;

	/*
	 * Get dma channel for M1553
	 */
	if (pp->hw == &m1553) {
		pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
		    pp->dip, 0, "dma-channel", 0x1);
		ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
	}
#if defined(__x86)
	len = sizeof (value);
	/* Get dma channel for i86 pc */
	if (pp->hw == &x86) {
		if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
		    DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
		    != DDI_PROP_SUCCESS) {
			ecpp_error(pp->dip, "No dma channel found\n");
			pp->uh.x86.chn = 0xff;
			pp->fast_compat = FALSE;
			pp->noecpregs = TRUE;
		} else
			pp->uh.x86.chn = (uint8_t)value;
	}
#endif
	/*
	 * these properties are not yet public
	 */
	pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "ecp-rev-speed", ECP_REV_SPEED);

	pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "rev-watchdog", REV_WATCHDOG);

	ecpp_error(pp->dip,
	    "ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
	    "ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
	    "ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
	    "ecpp_get_prop: transfer-timeout=%d\n",
	    pp->fast_centronics, pp->fast_compat,
	    pp->wsrv_retry, pp->wait_for_busy,
	    pp->data_setup_time, pp->strobe_pulse_width,
	    pp->xfer_parms.write_timeout);
}

/*ARGSUSED*/
int
ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	struct ecppunit *pp;
	int	instance, ret;

	instance = getminor(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		pp = ddi_get_soft_state(ecppsoft_statep, instance);
		if (pp != NULL) {
			*result = pp->dip;
			ret = DDI_SUCCESS;
		} else {
			ret = DDI_FAILURE;
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		ret = DDI_SUCCESS;
		break;

	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}

10170Sstevel@tonic-gate /*ARGSUSED2*/
10180Sstevel@tonic-gate static int
ecpp_open(queue_t * q,dev_t * dev,int flag,int sflag,cred_t * credp)10190Sstevel@tonic-gate ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
10200Sstevel@tonic-gate {
10210Sstevel@tonic-gate 	struct ecppunit *pp;
10220Sstevel@tonic-gate 	int		instance;
10230Sstevel@tonic-gate 	struct stroptions *sop;
10240Sstevel@tonic-gate 	mblk_t		*mop;
10250Sstevel@tonic-gate 
10260Sstevel@tonic-gate 	instance = getminor(*dev);
10270Sstevel@tonic-gate 
10280Sstevel@tonic-gate 	if (instance < 0) {
10290Sstevel@tonic-gate 		return (ENXIO);
10300Sstevel@tonic-gate 	}
10310Sstevel@tonic-gate 
10320Sstevel@tonic-gate 	pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);
10330Sstevel@tonic-gate 
10340Sstevel@tonic-gate 	if (pp == NULL) {
10350Sstevel@tonic-gate 		return (ENXIO);
10360Sstevel@tonic-gate 	}
10370Sstevel@tonic-gate 
10380Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
10390Sstevel@tonic-gate 
10400Sstevel@tonic-gate 	/*
10410Sstevel@tonic-gate 	 * Parallel port is an exclusive-use device
10420Sstevel@tonic-gate 	 * thus providing print job integrity
10430Sstevel@tonic-gate 	 */
10440Sstevel@tonic-gate 	if (pp->oflag == TRUE) {
10450Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp open failed");
10460Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
10470Sstevel@tonic-gate 		return (EBUSY);
10480Sstevel@tonic-gate 	}
10490Sstevel@tonic-gate 
10500Sstevel@tonic-gate 	pp->oflag = TRUE;
10510Sstevel@tonic-gate 
10520Sstevel@tonic-gate 	/* initialize state variables */
10530Sstevel@tonic-gate 	pp->prn_timeouts = prn_timeouts_default;
10540Sstevel@tonic-gate 	pp->xfer_parms = default_xfer_parms;
10550Sstevel@tonic-gate 	pp->current_mode = ECPP_CENTRONICS;
10560Sstevel@tonic-gate 	pp->backchannel = ECPP_CENTRONICS;
10570Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_PO;
10580Sstevel@tonic-gate 	pp->port = ECPP_PORT_DMA;
10590Sstevel@tonic-gate 	pp->instance = instance;
10600Sstevel@tonic-gate 	pp->timeout_error = 0;
10610Sstevel@tonic-gate 	pp->saved_dsr = DSR_READ(pp);
10620Sstevel@tonic-gate 	pp->ecpp_drain_counter = 0;
10630Sstevel@tonic-gate 	pp->dma_cancelled = FALSE;
10640Sstevel@tonic-gate 	pp->io_mode = ECPP_DMA;
10650Sstevel@tonic-gate 	pp->joblen = 0;
10660Sstevel@tonic-gate 	pp->tfifo_intr = 0;
10670Sstevel@tonic-gate 	pp->softintr_pending = 0;
10680Sstevel@tonic-gate 	pp->nread = 0;
10690Sstevel@tonic-gate 
10700Sstevel@tonic-gate 	/* clear the state flag */
10710Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
10720Sstevel@tonic-gate 
10730Sstevel@tonic-gate 	pp->readq = RD(q);
10740Sstevel@tonic-gate 	pp->writeq = WR(q);
10750Sstevel@tonic-gate 	pp->msg = NULL;
10760Sstevel@tonic-gate 
10770Sstevel@tonic-gate 	RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp;
10780Sstevel@tonic-gate 
10790Sstevel@tonic-gate 	/*
10800Sstevel@tonic-gate 	 * Get ready: check host/peripheral, negotiate into default mode
10810Sstevel@tonic-gate 	 */
10820Sstevel@tonic-gate 	if (ecpp_reset_port_regs(pp) == FAILURE) {
10830Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
10840Sstevel@tonic-gate 		return (EIO);
10850Sstevel@tonic-gate 	}
10860Sstevel@tonic-gate 
10870Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
10880Sstevel@tonic-gate 
10890Sstevel@tonic-gate 	/*
10900Sstevel@tonic-gate 	 * Configure the Stream head and enable the Stream
10910Sstevel@tonic-gate 	 */
10920Sstevel@tonic-gate 	if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) {
10930Sstevel@tonic-gate 		return (EAGAIN);
10940Sstevel@tonic-gate 	}
10950Sstevel@tonic-gate 
10960Sstevel@tonic-gate 	mop->b_datap->db_type = M_SETOPTS;
10970Sstevel@tonic-gate 	mop->b_wptr += sizeof (struct stroptions);
10980Sstevel@tonic-gate 
10990Sstevel@tonic-gate 	/*
11000Sstevel@tonic-gate 	 * If the device is opened with the O_NONBLOCK flag set, let read(2) return 0
11010Sstevel@tonic-gate 	 * if no data is waiting to be read.  Writes will block on flow control.
11020Sstevel@tonic-gate 	 */
11030Sstevel@tonic-gate 	sop = (struct stroptions *)mop->b_rptr;
11040Sstevel@tonic-gate 	sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON;
11050Sstevel@tonic-gate 	sop->so_hiwat = ECPPHIWAT;
11060Sstevel@tonic-gate 	sop->so_lowat = ECPPLOWAT;
11070Sstevel@tonic-gate 
11080Sstevel@tonic-gate 	/* enable the stream */
11090Sstevel@tonic-gate 	qprocson(q);
11100Sstevel@tonic-gate 
11110Sstevel@tonic-gate 	putnext(q, mop);
11120Sstevel@tonic-gate 
11130Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
11140Sstevel@tonic-gate 
11150Sstevel@tonic-gate 	ecpp_default_negotiation(pp);
11160Sstevel@tonic-gate 
11170Sstevel@tonic-gate 	/* go revidle */
11180Sstevel@tonic-gate 	(void) ecpp_idle_phase(pp);
11190Sstevel@tonic-gate 
11200Sstevel@tonic-gate 	ecpp_error(pp->dip,
11217656SSherry.Moore@Sun.COM 	    "ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n",
11227656SSherry.Moore@Sun.COM 	    pp->current_mode, pp->current_phase,
11237656SSherry.Moore@Sun.COM 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
11240Sstevel@tonic-gate 
11250Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
11260Sstevel@tonic-gate 
11270Sstevel@tonic-gate 	return (0);
11280Sstevel@tonic-gate }
11290Sstevel@tonic-gate 
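/*
 * ecpp_close() - STREAMS close routine.
 *
 * Waits (interruptibly) for queued output to drain, cancels the
 * transfer/FIFO/wsrv timeouts outside the mutex, masks interrupts,
 * terminates the 1284 session back to Compatible mode and clears
 * the per-unit state.
 */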
11300Sstevel@tonic-gate /*ARGSUSED1*/
11310Sstevel@tonic-gate static int
11320Sstevel@tonic-gate ecpp_close(queue_t *q, int flag, cred_t *cred_p)
11330Sstevel@tonic-gate {
11340Sstevel@tonic-gate 	struct ecppunit *pp;
11350Sstevel@tonic-gate 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
11360Sstevel@tonic-gate 
11370Sstevel@tonic-gate 	pp = (struct ecppunit *)q->q_ptr;
11380Sstevel@tonic-gate 
11390Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_close: entering ...\n");
11400Sstevel@tonic-gate 
11410Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
11420Sstevel@tonic-gate 
11430Sstevel@tonic-gate 	/*
11440Sstevel@tonic-gate 	 * ecpp_close() will continue to loop until the
11450Sstevel@tonic-gate 	 * queue has been drained or the thread
11460Sstevel@tonic-gate 	 * has received a signal.  Typically, when the queue
11470Sstevel@tonic-gate 	 * has data, the port will be ECPP_BUSY.  However,
11480Sstevel@tonic-gate 	 * after a DMA transfer completes and before ecpp_wsrv()
11490Sstevel@tonic-gate 	 * starts the next transfer, the port may be IDLE.
11500Sstevel@tonic-gate 	 * In this case, ecpp_close() will loop within this
11510Sstevel@tonic-gate 	 * while (qsize) segment.  Since ecpp_wsrv() runs
11520Sstevel@tonic-gate 	 * at software interrupt level, this shouldn't loop
11530Sstevel@tonic-gate 	 * for very long.
11540Sstevel@tonic-gate 	 */
11550Sstevel@tonic-gate 	while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) {
11560Sstevel@tonic-gate 		if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) {
11570Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_close:B: received SIG\n");
11580Sstevel@tonic-gate 			/*
11590Sstevel@tonic-gate 			 * Returning from a signal such as
11600Sstevel@tonic-gate 			 * SIGTERM or SIGKILL
11610Sstevel@tonic-gate 			 */
11620Sstevel@tonic-gate 			ecpp_flush(pp, FWRITE);
11630Sstevel@tonic-gate 			break;
11640Sstevel@tonic-gate 		} else {
11650Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n");
11660Sstevel@tonic-gate 		}
11670Sstevel@tonic-gate 	}
11680Sstevel@tonic-gate 
11690Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, "
11707656SSherry.Moore@Sun.COM 	    "qsize(WR(q))=%d, qsize(RD(q))=%d\n",
11717656SSherry.Moore@Sun.COM 	    pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q));
11720Sstevel@tonic-gate 
11730Sstevel@tonic-gate 	/*
11740Sstevel@tonic-gate 	 * Cancel all timeouts, disable interrupts
11750Sstevel@tonic-gate 	 *
11760Sstevel@tonic-gate 	 * Note that we can't call untimeout(9F) with the mutex held:
11770Sstevel@tonic-gate 	 * the callout may be blocked on the same mutex, and untimeout() will
11780Sstevel@tonic-gate 	 * cv_wait() while the callout is executing, thus creating a deadlock.
11790Sstevel@tonic-gate 	 * So we zero the timeout IDs under the mutex and call untimeout() later.
11800Sstevel@tonic-gate 	 */
11810Sstevel@tonic-gate 	timeout_id = pp->timeout_id;
11820Sstevel@tonic-gate 	fifo_timer_id = pp->fifo_timer_id;
11830Sstevel@tonic-gate 	wsrv_timer_id = pp->wsrv_timer_id;
11840Sstevel@tonic-gate 
11850Sstevel@tonic-gate 	pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0;
11860Sstevel@tonic-gate 
11870Sstevel@tonic-gate 	pp->softintr_pending = 0;
11880Sstevel@tonic-gate 	pp->dma_cancelled = TRUE;
11890Sstevel@tonic-gate 	ECPP_MASK_INTR(pp);
11900Sstevel@tonic-gate 
11910Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
11920Sstevel@tonic-gate 
11930Sstevel@tonic-gate 	qprocsoff(q);
11940Sstevel@tonic-gate 
11950Sstevel@tonic-gate 	if (timeout_id) {
11960Sstevel@tonic-gate 		(void) untimeout(timeout_id);
11970Sstevel@tonic-gate 	}
11980Sstevel@tonic-gate 	if (fifo_timer_id) {
11990Sstevel@tonic-gate 		(void) untimeout(fifo_timer_id);
12000Sstevel@tonic-gate 	}
12010Sstevel@tonic-gate 	if (wsrv_timer_id) {
12020Sstevel@tonic-gate 		(void) untimeout(wsrv_timer_id);
12030Sstevel@tonic-gate 	}
12040Sstevel@tonic-gate 
12050Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
12060Sstevel@tonic-gate 
12070Sstevel@tonic-gate 	/* set link to Compatible mode */
12080Sstevel@tonic-gate 	if ((pp->current_mode == ECPP_ECP_MODE) &&
12090Sstevel@tonic-gate 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
12100Sstevel@tonic-gate 		(void) ecp_reverse2forward(pp);
12110Sstevel@tonic-gate 	}
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate 	(void) ecpp_1284_termination(pp);
12140Sstevel@tonic-gate 
12150Sstevel@tonic-gate 	pp->oflag = FALSE;
12160Sstevel@tonic-gate 	q->q_ptr = WR(q)->q_ptr = NULL;
12170Sstevel@tonic-gate 	pp->readq = pp->writeq = NULL;
12180Sstevel@tonic-gate 	pp->msg = NULL;
12190Sstevel@tonic-gate 
12200Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n",
12217656SSherry.Moore@Sun.COM 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
12220Sstevel@tonic-gate 
12230Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
12240Sstevel@tonic-gate 
12250Sstevel@tonic-gate 	return (0);
12260Sstevel@tonic-gate }
12270Sstevel@tonic-gate 
12280Sstevel@tonic-gate /*
12290Sstevel@tonic-gate  * standard put procedure for ecpp
12300Sstevel@tonic-gate  */
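/*
 * M_DATA and M_CTL messages are queued for ecpp_wsrv() to process;
 * transparent M_IOCTLs are handled immediately by ecpp_putioc() unless
 * a transfer is in progress; M_IOCDATA copy completions are either
 * queued or acknowledged; M_FLUSH and M_READ are handled in place.
 */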
12310Sstevel@tonic-gate static int
12320Sstevel@tonic-gate ecpp_wput(queue_t *q, mblk_t *mp)
12330Sstevel@tonic-gate {
12340Sstevel@tonic-gate 	struct msgb *nmp;
12350Sstevel@tonic-gate 	struct ecppunit *pp;
12360Sstevel@tonic-gate 
12370Sstevel@tonic-gate 	pp = (struct ecppunit *)q->q_ptr;
12380Sstevel@tonic-gate 
12390Sstevel@tonic-gate 	if (!mp) {
12400Sstevel@tonic-gate 		return (0);
12410Sstevel@tonic-gate 	}
12420Sstevel@tonic-gate 
12430Sstevel@tonic-gate 	if ((mp->b_wptr - mp->b_rptr) <= 0) {
12440Sstevel@tonic-gate 		ecpp_error(pp->dip,
12457656SSherry.Moore@Sun.COM 		    "ecpp_wput:bogus packet received mp=%x\n", mp);
12460Sstevel@tonic-gate 		freemsg(mp);
12470Sstevel@tonic-gate 		return (0);
12480Sstevel@tonic-gate 	}
12490Sstevel@tonic-gate 
12500Sstevel@tonic-gate 	switch (DB_TYPE(mp)) {
12510Sstevel@tonic-gate 	case M_DATA:
12520Sstevel@tonic-gate 		/*
12530Sstevel@tonic-gate 		 * This is a quick fix for multiple message block problem,
12540Sstevel@tonic-gate 		 * it will be changed later with better performance code.
12550Sstevel@tonic-gate 		 */
12560Sstevel@tonic-gate 		if (mp->b_cont) {
12570Sstevel@tonic-gate 			/*
12580Sstevel@tonic-gate 			 * mblk has scattered data ... do msgpullup
12590Sstevel@tonic-gate 			 * if it fails, continue with the current mblk
12600Sstevel@tonic-gate 			 */
12610Sstevel@tonic-gate 			if ((nmp = msgpullup(mp, -1)) != NULL) {
12620Sstevel@tonic-gate 				freemsg(mp);
12630Sstevel@tonic-gate 				mp = nmp;
12640Sstevel@tonic-gate 				ecpp_error(pp->dip,
12650Sstevel@tonic-gate 				    "ecpp_wput:msgpullup: mp=%p len=%d\n",
12660Sstevel@tonic-gate 				    mp, mp->b_wptr - mp->b_rptr);
12670Sstevel@tonic-gate 			}
12680Sstevel@tonic-gate 		}
12690Sstevel@tonic-gate 
12700Sstevel@tonic-gate 		/* let ecpp_wsrv() concatenate small blocks */
12710Sstevel@tonic-gate 		(void) putq(q, mp);
12720Sstevel@tonic-gate 
12730Sstevel@tonic-gate 		break;
12740Sstevel@tonic-gate 
12750Sstevel@tonic-gate 	case M_CTL:
12760Sstevel@tonic-gate 		(void) putq(q, mp);
12770Sstevel@tonic-gate 
12780Sstevel@tonic-gate 		break;
12790Sstevel@tonic-gate 
12800Sstevel@tonic-gate 	case M_IOCTL: {
12810Sstevel@tonic-gate 		struct iocblk *iocbp;
12820Sstevel@tonic-gate 
12830Sstevel@tonic-gate 		iocbp = (struct iocblk *)mp->b_rptr;
12840Sstevel@tonic-gate 
12850Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd);
12860Sstevel@tonic-gate 
12870Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
12880Sstevel@tonic-gate 
12890Sstevel@tonic-gate 		/* TESTIO and GET_STATUS can be used during transfer */
12900Sstevel@tonic-gate 		if ((pp->e_busy == ECPP_BUSY) &&
12910Sstevel@tonic-gate 		    (iocbp->ioc_cmd != BPPIOC_TESTIO) &&
12920Sstevel@tonic-gate 		    (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) {
12930Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
12940Sstevel@tonic-gate 			(void) putq(q, mp);
12950Sstevel@tonic-gate 		} else {
12960Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
12970Sstevel@tonic-gate 			ecpp_putioc(q, mp);
12980Sstevel@tonic-gate 		}
12990Sstevel@tonic-gate 
13000Sstevel@tonic-gate 		break;
13010Sstevel@tonic-gate 	}
13020Sstevel@tonic-gate 
13030Sstevel@tonic-gate 	case M_IOCDATA: {
13040Sstevel@tonic-gate 		struct copyresp *csp;
13050Sstevel@tonic-gate 
13060Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n");
13070Sstevel@tonic-gate 
13080Sstevel@tonic-gate 		csp = (struct copyresp *)mp->b_rptr;
13090Sstevel@tonic-gate 
13100Sstevel@tonic-gate 		/*
13110Sstevel@tonic-gate 		 * If copy request failed, quit now
13120Sstevel@tonic-gate 		 */
13130Sstevel@tonic-gate 		if (csp->cp_rval != 0) {
13140Sstevel@tonic-gate 			freemsg(mp);
13150Sstevel@tonic-gate 			return (0);
13160Sstevel@tonic-gate 		}
13170Sstevel@tonic-gate 
13180Sstevel@tonic-gate 		switch (csp->cp_cmd) {
13190Sstevel@tonic-gate 		case ECPPIOC_SETPARMS:
13200Sstevel@tonic-gate 		case ECPPIOC_SETREGS:
13210Sstevel@tonic-gate 		case ECPPIOC_SETPORT:
13220Sstevel@tonic-gate 		case ECPPIOC_SETDATA:
13230Sstevel@tonic-gate 		case PRNIOC_SET_IFCAP:
13240Sstevel@tonic-gate 		case PRNIOC_SET_TIMEOUTS:
13250Sstevel@tonic-gate 			/*
13260Sstevel@tonic-gate 			 * need to retrieve and use the data, but if the
13270Sstevel@tonic-gate 			 * device is busy, wait.
13280Sstevel@tonic-gate 			 */
13290Sstevel@tonic-gate 			(void) putq(q, mp);
13300Sstevel@tonic-gate 			break;
13310Sstevel@tonic-gate 
13320Sstevel@tonic-gate 		case ECPPIOC_GETPARMS:
13330Sstevel@tonic-gate 		case ECPPIOC_GETREGS:
13340Sstevel@tonic-gate 		case ECPPIOC_GETPORT:
13350Sstevel@tonic-gate 		case ECPPIOC_GETDATA:
13360Sstevel@tonic-gate 		case BPPIOC_GETERR:
13370Sstevel@tonic-gate 		case BPPIOC_TESTIO:
13380Sstevel@tonic-gate 		case PRNIOC_GET_IFCAP:
13390Sstevel@tonic-gate 		case PRNIOC_GET_STATUS:
13400Sstevel@tonic-gate 		case PRNIOC_GET_1284_STATUS:
13410Sstevel@tonic-gate 		case PRNIOC_GET_TIMEOUTS:
13420Sstevel@tonic-gate 			/* data transferred to user space okay */
13430Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
13440Sstevel@tonic-gate 			break;
13450Sstevel@tonic-gate 
13460Sstevel@tonic-gate 		case ECPPIOC_GETDEVID:
13470Sstevel@tonic-gate 			ecpp_wput_iocdata_devid(q, mp,
13487656SSherry.Moore@Sun.COM 			    offsetof(struct ecpp_device_id, rlen));
13490Sstevel@tonic-gate 			break;
13500Sstevel@tonic-gate 
13510Sstevel@tonic-gate 		case PRNIOC_GET_1284_DEVID:
13520Sstevel@tonic-gate 			ecpp_wput_iocdata_devid(q, mp,
13537656SSherry.Moore@Sun.COM 			    offsetof(struct prn_1284_device_id, id_rlen));
13540Sstevel@tonic-gate 			break;
13550Sstevel@tonic-gate 
13560Sstevel@tonic-gate 		case PRNIOC_GET_IFINFO:
13570Sstevel@tonic-gate 			ecpp_wput_iocdata_devid(q, mp,
13587656SSherry.Moore@Sun.COM 			    offsetof(struct prn_interface_info, if_rlen));
13590Sstevel@tonic-gate 			break;
13600Sstevel@tonic-gate 
13610Sstevel@tonic-gate 		default:
13620Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
13630Sstevel@tonic-gate 			break;
13640Sstevel@tonic-gate 		}
13650Sstevel@tonic-gate 
13660Sstevel@tonic-gate 		break;
13670Sstevel@tonic-gate 	}
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate 	case M_FLUSH:
13700Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n");
13710Sstevel@tonic-gate 
13720Sstevel@tonic-gate 		if (*mp->b_rptr & FLUSHW) {
13730Sstevel@tonic-gate 			mutex_enter(&pp->umutex);
13740Sstevel@tonic-gate 			ecpp_flush(pp, FWRITE);
13750Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
13760Sstevel@tonic-gate 		}
13770Sstevel@tonic-gate 
13780Sstevel@tonic-gate 		if (*mp->b_rptr & FLUSHR) {
13790Sstevel@tonic-gate 			mutex_enter(&pp->umutex);
13800Sstevel@tonic-gate 			ecpp_flush(pp, FREAD);
13810Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
13820Sstevel@tonic-gate 			qreply(q, mp);
13830Sstevel@tonic-gate 		} else {
13840Sstevel@tonic-gate 			freemsg(mp);
13850Sstevel@tonic-gate 		}
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 		break;
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	case M_READ:
13900Sstevel@tonic-gate 		/*
13910Sstevel@tonic-gate 		 * When the user calls read(2), an M_READ message is sent to us
13920Sstevel@tonic-gate 		 * carrying the number of requested bytes.
13930Sstevel@tonic-gate 		 * We add up user requests and use the resulting number
13940Sstevel@tonic-gate 		 * to calculate the reverse transfer block size.
13950Sstevel@tonic-gate 		 */
13960Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
13970Sstevel@tonic-gate 		if (pp->e_busy == ECPP_IDLE) {
13980Sstevel@tonic-gate 			pp->nread += *(size_t *)mp->b_rptr;
13990Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread);
14000Sstevel@tonic-gate 			freemsg(mp);
14010Sstevel@tonic-gate 		} else {
14020Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_wput: M_READ queueing");
14030Sstevel@tonic-gate 			(void) putq(q, mp);
14040Sstevel@tonic-gate 		}
14050Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
14060Sstevel@tonic-gate 		break;
14070Sstevel@tonic-gate 
14080Sstevel@tonic-gate 	default:
14090Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_wput: bad messagetype 0x%x\n",
14100Sstevel@tonic-gate 		    DB_TYPE(mp));
14110Sstevel@tonic-gate 		freemsg(mp);
14120Sstevel@tonic-gate 		break;
14130Sstevel@tonic-gate 	}
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 	return (0);
14160Sstevel@tonic-gate }
14170Sstevel@tonic-gate 
14180Sstevel@tonic-gate /*
14190Sstevel@tonic-gate  * Process ECPPIOC_GETDEVID-like ioctls
14200Sstevel@tonic-gate  */
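/*
 * The copy state is kept in a struct ecpp_copystate hanging off
 * csp->cp_private and advances ECPP_STRUCTIN -> ECPP_ADDROUT ->
 * ECPP_STRUCTOUT: the user structure arrives, the ID data is copied
 * out, then the actual length is written back into the user structure.
 */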
14210Sstevel@tonic-gate static void
14220Sstevel@tonic-gate ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset)
14230Sstevel@tonic-gate {
14240Sstevel@tonic-gate 	struct copyresp		*csp;
14250Sstevel@tonic-gate 	struct ecpp_copystate	*stp;
14260Sstevel@tonic-gate 	mblk_t			*datamp;
14270Sstevel@tonic-gate 
14280Sstevel@tonic-gate 	csp = (struct copyresp *)mp->b_rptr;
14290Sstevel@tonic-gate 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
14300Sstevel@tonic-gate 
14310Sstevel@tonic-gate 	/* determine the state of the copyin/copyout process */
14320Sstevel@tonic-gate 	switch (stp->state) {
14330Sstevel@tonic-gate 	case ECPP_STRUCTIN:
14340Sstevel@tonic-gate 		/* user structure has arrived */
14350Sstevel@tonic-gate 		(void) putq(q, mp);
14360Sstevel@tonic-gate 		break;
14370Sstevel@tonic-gate 
14380Sstevel@tonic-gate 	case ECPP_ADDROUT:
14390Sstevel@tonic-gate 		/*
14400Sstevel@tonic-gate 		 * data transferred to user space okay;
14410Sstevel@tonic-gate 		 * now update the user structure
14420Sstevel@tonic-gate 		 */
14430Sstevel@tonic-gate 		datamp = allocb(sizeof (int), BPRI_MED);
14440Sstevel@tonic-gate 		if (datamp == NULL) {
14450Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, ENOSR);
14460Sstevel@tonic-gate 			break;
14470Sstevel@tonic-gate 		}
14480Sstevel@tonic-gate 
14490Sstevel@tonic-gate 		*(int *)datamp->b_rptr =
14507656SSherry.Moore@Sun.COM 		    *(int *)((char *)&stp->un + rlen_offset);
14510Sstevel@tonic-gate 		stp->state = ECPP_STRUCTOUT;
14520Sstevel@tonic-gate 
14530Sstevel@tonic-gate 		mcopyout(mp, csp->cp_private, sizeof (int),
14547656SSherry.Moore@Sun.COM 		    (char *)stp->uaddr + rlen_offset, datamp);
14550Sstevel@tonic-gate 		qreply(q, mp);
14560Sstevel@tonic-gate 		break;
14570Sstevel@tonic-gate 
14580Sstevel@tonic-gate 	case ECPP_STRUCTOUT:
14590Sstevel@tonic-gate 		/* user structure was updated okay */
14600Sstevel@tonic-gate 		freemsg(csp->cp_private);
14610Sstevel@tonic-gate 		ecpp_ack_ioctl(q, mp);
14620Sstevel@tonic-gate 		break;
14630Sstevel@tonic-gate 
14640Sstevel@tonic-gate 	default:
14650Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EINVAL);
14660Sstevel@tonic-gate 		break;
14670Sstevel@tonic-gate 	}
14680Sstevel@tonic-gate }
14690Sstevel@tonic-gate 
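/*
 * Translate the saved DSR value into the BPP-style pin_status bits
 * reported by BPPIOC_GETERR.
 */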
14700Sstevel@tonic-gate static uchar_t
14710Sstevel@tonic-gate ecpp_get_error_status(uchar_t status)
14720Sstevel@tonic-gate {
14730Sstevel@tonic-gate 	uchar_t pin_status = 0;
14740Sstevel@tonic-gate 
14750Sstevel@tonic-gate 	if (!(status & ECPP_nERR)) {
14760Sstevel@tonic-gate 		pin_status |= BPP_ERR_ERR;
14770Sstevel@tonic-gate 	}
14780Sstevel@tonic-gate 
14790Sstevel@tonic-gate 	if (status & ECPP_PE) {
14800Sstevel@tonic-gate 		pin_status |= BPP_PE_ERR;
14810Sstevel@tonic-gate 	}
14820Sstevel@tonic-gate 
14830Sstevel@tonic-gate 	if (!(status & ECPP_SLCT)) {
14840Sstevel@tonic-gate 		pin_status |= BPP_SLCT_ERR;
14850Sstevel@tonic-gate 	}
14860Sstevel@tonic-gate 
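	/*
	 * Note: a busy peripheral is also folded into BPP_SLCT_ERR here;
	 * a separate busy indication may have been intended.
	 */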
14870Sstevel@tonic-gate 	if (!(status & ECPP_nBUSY)) {
14880Sstevel@tonic-gate 		pin_status |= BPP_SLCT_ERR;
14890Sstevel@tonic-gate 	}
14900Sstevel@tonic-gate 
14910Sstevel@tonic-gate 	return (pin_status);
14920Sstevel@tonic-gate }
14930Sstevel@tonic-gate 
14940Sstevel@tonic-gate /*
14950Sstevel@tonic-gate  * ioctl handler for output PUT procedure.
14960Sstevel@tonic-gate  */
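/*
 * All ioctls below are transparent; I_STR (struct strioctl) requests
 * are rejected.  An illustrative (not normative) user-level call
 * sequence, assuming an open descriptor fd on an ecpp minor node:
 *
 *	struct ecpp_transfer_parms xp;
 *	uint_t status;
 *
 *	ioctl(fd, ECPPIOC_GETPARMS, &xp);	current mode and timeout
 *	ioctl(fd, PRNIOC_GET_STATUS, &status);	PRN_ONLINE / PRN_READY
 */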
14970Sstevel@tonic-gate static void
14980Sstevel@tonic-gate ecpp_putioc(queue_t *q, mblk_t *mp)
14990Sstevel@tonic-gate {
15000Sstevel@tonic-gate 	struct iocblk	*iocbp;
15010Sstevel@tonic-gate 	struct ecppunit *pp;
15020Sstevel@tonic-gate 
15030Sstevel@tonic-gate 	pp = (struct ecppunit *)q->q_ptr;
15040Sstevel@tonic-gate 
15050Sstevel@tonic-gate 	iocbp = (struct iocblk *)mp->b_rptr;
15060Sstevel@tonic-gate 
15070Sstevel@tonic-gate 	/* I_STR ioctls are invalid */
15080Sstevel@tonic-gate 	if (iocbp->ioc_count != TRANSPARENT) {
15090Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EINVAL);
15100Sstevel@tonic-gate 		return;
15110Sstevel@tonic-gate 	}
15120Sstevel@tonic-gate 
15130Sstevel@tonic-gate 	switch (iocbp->ioc_cmd) {
15140Sstevel@tonic-gate 	case ECPPIOC_SETPARMS: {
15150Sstevel@tonic-gate 		mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL);
15160Sstevel@tonic-gate 		qreply(q, mp);
15170Sstevel@tonic-gate 		break;
15180Sstevel@tonic-gate 	}
15190Sstevel@tonic-gate 
15200Sstevel@tonic-gate 	case ECPPIOC_GETPARMS: {
15210Sstevel@tonic-gate 		struct ecpp_transfer_parms xfer_parms;
15220Sstevel@tonic-gate 
15230Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
15240Sstevel@tonic-gate 
15250Sstevel@tonic-gate 		pp->xfer_parms.mode = pp->current_mode;
15260Sstevel@tonic-gate 		xfer_parms = pp->xfer_parms;
15270Sstevel@tonic-gate 
15280Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
15290Sstevel@tonic-gate 
15300Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms));
15310Sstevel@tonic-gate 		break;
15320Sstevel@tonic-gate 	}
15330Sstevel@tonic-gate 
15340Sstevel@tonic-gate 	case ECPPIOC_SETREGS: {
15350Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
15360Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
15370Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
15380Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
15390Sstevel@tonic-gate 			break;
15400Sstevel@tonic-gate 		}
15410Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
15420Sstevel@tonic-gate 
15430Sstevel@tonic-gate 		mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL);
15440Sstevel@tonic-gate 		qreply(q, mp);
15450Sstevel@tonic-gate 		break;
15460Sstevel@tonic-gate 	}
15470Sstevel@tonic-gate 
15480Sstevel@tonic-gate 	case ECPPIOC_GETREGS: {
15490Sstevel@tonic-gate 		struct ecpp_regs rg;
15500Sstevel@tonic-gate 
15510Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
15520Sstevel@tonic-gate 
15530Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
15540Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
15550Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
15560Sstevel@tonic-gate 			break;
15570Sstevel@tonic-gate 		}
15580Sstevel@tonic-gate 
15590Sstevel@tonic-gate 		rg.dsr = DSR_READ(pp);
15600Sstevel@tonic-gate 		rg.dcr = DCR_READ(pp);
15610Sstevel@tonic-gate 
15620Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
15630Sstevel@tonic-gate 
15640Sstevel@tonic-gate 		ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n",
15657656SSherry.Moore@Sun.COM 		    rg.dsr, rg.dcr);
15660Sstevel@tonic-gate 
15670Sstevel@tonic-gate 		/* these bits must be 1 */
15680Sstevel@tonic-gate 		rg.dsr |= ECPP_SETREGS_DSR_MASK;
15690Sstevel@tonic-gate 		rg.dcr |= ECPP_SETREGS_DCR_MASK;
15700Sstevel@tonic-gate 
15710Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &rg, sizeof (rg));
15720Sstevel@tonic-gate 		break;
15730Sstevel@tonic-gate 	}
15740Sstevel@tonic-gate 
15750Sstevel@tonic-gate 	case ECPPIOC_SETPORT:
15760Sstevel@tonic-gate 	case ECPPIOC_SETDATA: {
15770Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
15780Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
15790Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
15800Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
15810Sstevel@tonic-gate 			break;
15820Sstevel@tonic-gate 		}
15830Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
15840Sstevel@tonic-gate 
15850Sstevel@tonic-gate 		/*
15860Sstevel@tonic-gate 		 * Each of these commands fetches a single byte.
15870Sstevel@tonic-gate 		 */
15880Sstevel@tonic-gate 		mcopyin(mp, NULL, sizeof (uchar_t), NULL);
15890Sstevel@tonic-gate 		qreply(q, mp);
15900Sstevel@tonic-gate 		break;
15910Sstevel@tonic-gate 	}
15920Sstevel@tonic-gate 
15930Sstevel@tonic-gate 	case ECPPIOC_GETDATA:
15940Sstevel@tonic-gate 	case ECPPIOC_GETPORT: {
15950Sstevel@tonic-gate 		uchar_t	byte;
15960Sstevel@tonic-gate 
15970Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
15980Sstevel@tonic-gate 
15990Sstevel@tonic-gate 		/* must be in diagnostic mode for these commands to work */
16000Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
16010Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
16020Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
16030Sstevel@tonic-gate 			break;
16040Sstevel@tonic-gate 		}
16050Sstevel@tonic-gate 
16060Sstevel@tonic-gate 		if (iocbp->ioc_cmd == ECPPIOC_GETPORT) {
16070Sstevel@tonic-gate 			byte = pp->port;
16080Sstevel@tonic-gate 		} else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) {
16090Sstevel@tonic-gate 			switch (pp->port) {
16100Sstevel@tonic-gate 			case ECPP_PORT_PIO:
16110Sstevel@tonic-gate 				byte = DATAR_READ(pp);
16120Sstevel@tonic-gate 				break;
16130Sstevel@tonic-gate 			case ECPP_PORT_TDMA:
16140Sstevel@tonic-gate 				byte = TFIFO_READ(pp);
16150Sstevel@tonic-gate 				ecpp_error(pp->dip, "GETDATA=0x%x\n", byte);
16160Sstevel@tonic-gate 				break;
16170Sstevel@tonic-gate 			default:
16180Sstevel@tonic-gate 				ecpp_nack_ioctl(q, mp, EINVAL);
16190Sstevel@tonic-gate 				break;
16200Sstevel@tonic-gate 			}
16210Sstevel@tonic-gate 		} else {
16220Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
16230Sstevel@tonic-gate 			ecpp_error(pp->dip, "weird command");
16240Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
16250Sstevel@tonic-gate 			break;
16260Sstevel@tonic-gate 		}
16270Sstevel@tonic-gate 
16280Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
16290Sstevel@tonic-gate 
16300Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &byte, sizeof (byte));
16310Sstevel@tonic-gate 
16320Sstevel@tonic-gate 		break;
16330Sstevel@tonic-gate 	}
16340Sstevel@tonic-gate 
16350Sstevel@tonic-gate 	case BPPIOC_GETERR: {
16360Sstevel@tonic-gate 		struct bpp_error_status bpp_status;
16370Sstevel@tonic-gate 
16380Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
16390Sstevel@tonic-gate 
16400Sstevel@tonic-gate 		bpp_status.timeout_occurred = pp->timeout_error;
16410Sstevel@tonic-gate 		bpp_status.bus_error = 0;	/* not used */
16420Sstevel@tonic-gate 		bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr);
16430Sstevel@tonic-gate 
16440Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
16450Sstevel@tonic-gate 
16460Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status));
16470Sstevel@tonic-gate 
16480Sstevel@tonic-gate 		break;
16490Sstevel@tonic-gate 	}
16500Sstevel@tonic-gate 
16510Sstevel@tonic-gate 	case BPPIOC_TESTIO: {
16520Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 		if (!((pp->current_mode == ECPP_CENTRONICS) ||
16557656SSherry.Moore@Sun.COM 		    (pp->current_mode == ECPP_COMPAT_MODE))) {
16560Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
16570Sstevel@tonic-gate 		} else {
16580Sstevel@tonic-gate 			pp->saved_dsr = DSR_READ(pp);
16590Sstevel@tonic-gate 
16600Sstevel@tonic-gate 			if ((pp->saved_dsr & ECPP_PE) ||
16610Sstevel@tonic-gate 			    !(pp->saved_dsr & ECPP_SLCT) ||
16620Sstevel@tonic-gate 			    !(pp->saved_dsr & ECPP_nERR)) {
16630Sstevel@tonic-gate 				ecpp_nack_ioctl(q, mp, EIO);
16640Sstevel@tonic-gate 			} else {
16650Sstevel@tonic-gate 				ecpp_ack_ioctl(q, mp);
16660Sstevel@tonic-gate 			}
16670Sstevel@tonic-gate 		}
16680Sstevel@tonic-gate 
16690Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate 		break;
16720Sstevel@tonic-gate 	}
16730Sstevel@tonic-gate 
16740Sstevel@tonic-gate 	case PRNIOC_RESET:
16750Sstevel@tonic-gate 		/*
16760Sstevel@tonic-gate 		 * Initialize the interface only if no transfer is in progress.
16770Sstevel@tonic-gate 		 */
16780Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
16790Sstevel@tonic-gate 		if (pp->e_busy == ECPP_BUSY) {
16800Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
16810Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EIO);
16820Sstevel@tonic-gate 		} else {
16830Sstevel@tonic-gate 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
16840Sstevel@tonic-gate 
16850Sstevel@tonic-gate 			DCR_WRITE(pp, ECPP_SLCTIN);
16860Sstevel@tonic-gate 			drv_usecwait(2);
16870Sstevel@tonic-gate 			DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
16880Sstevel@tonic-gate 
16890Sstevel@tonic-gate 			ecpp_default_negotiation(pp);
16900Sstevel@tonic-gate 
16910Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
16920Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
16930Sstevel@tonic-gate 		}
16940Sstevel@tonic-gate 		break;
16950Sstevel@tonic-gate 
16960Sstevel@tonic-gate 	case PRNIOC_GET_IFCAP: {
16970Sstevel@tonic-gate 		uint_t		ifcap;
16980Sstevel@tonic-gate 
16990Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
17000Sstevel@tonic-gate 
17010Sstevel@tonic-gate 		ifcap = ecpp_get_prn_ifcap(pp);
17020Sstevel@tonic-gate 
17030Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
17040Sstevel@tonic-gate 
17050Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap));
17060Sstevel@tonic-gate 		break;
17070Sstevel@tonic-gate 	}
17080Sstevel@tonic-gate 
17090Sstevel@tonic-gate 	case PRNIOC_SET_IFCAP: {
17100Sstevel@tonic-gate 		mcopyin(mp, NULL, sizeof (uint_t), NULL);
17110Sstevel@tonic-gate 		qreply(q, mp);
17120Sstevel@tonic-gate 		break;
17130Sstevel@tonic-gate 	}
17140Sstevel@tonic-gate 
17150Sstevel@tonic-gate 	case PRNIOC_GET_TIMEOUTS: {
17160Sstevel@tonic-gate 		struct prn_timeouts timeouts;
17170Sstevel@tonic-gate 
17180Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
17190Sstevel@tonic-gate 		timeouts = pp->prn_timeouts;
17200Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
17210Sstevel@tonic-gate 
17220Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts));
17230Sstevel@tonic-gate 
17240Sstevel@tonic-gate 		break;
17250Sstevel@tonic-gate 	}
17260Sstevel@tonic-gate 
17270Sstevel@tonic-gate 	case PRNIOC_SET_TIMEOUTS:
17280Sstevel@tonic-gate 		mcopyin(mp, NULL, sizeof (struct prn_timeouts),
17297656SSherry.Moore@Sun.COM 		    *(caddr_t *)(void *)mp->b_cont->b_rptr);
17300Sstevel@tonic-gate 		qreply(q, mp);
17310Sstevel@tonic-gate 		break;
17320Sstevel@tonic-gate 
17330Sstevel@tonic-gate 	case PRNIOC_GET_STATUS: {
17340Sstevel@tonic-gate 		uint8_t	dsr;
17350Sstevel@tonic-gate 		uint_t	status;
17360Sstevel@tonic-gate 
17370Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
17380Sstevel@tonic-gate 
17390Sstevel@tonic-gate 		/* DSR only makes sense in Centronics & Compat mode */
17400Sstevel@tonic-gate 		if (pp->current_mode == ECPP_CENTRONICS ||
17410Sstevel@tonic-gate 		    pp->current_mode == ECPP_COMPAT_MODE) {
17420Sstevel@tonic-gate 			dsr = DSR_READ(pp);
17430Sstevel@tonic-gate 			if ((dsr & ECPP_PE) ||
17440Sstevel@tonic-gate 			    !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) {
17450Sstevel@tonic-gate 				status = PRN_ONLINE;
17460Sstevel@tonic-gate 			} else {
17470Sstevel@tonic-gate 				status = PRN_ONLINE | PRN_READY;
17480Sstevel@tonic-gate 			}
17490Sstevel@tonic-gate 		} else {
17500Sstevel@tonic-gate 			status = PRN_ONLINE | PRN_READY;
17510Sstevel@tonic-gate 		}
17520Sstevel@tonic-gate 
17530Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
17540Sstevel@tonic-gate 
17550Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
17560Sstevel@tonic-gate 		break;
17570Sstevel@tonic-gate 	}
17580Sstevel@tonic-gate 
17590Sstevel@tonic-gate 	case PRNIOC_GET_1284_STATUS: {
17600Sstevel@tonic-gate 		uint8_t	dsr;
17610Sstevel@tonic-gate 		uchar_t	status;
17620Sstevel@tonic-gate 
17630Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate 		/* status only makes sense in Centronics & Compat mode */
17660Sstevel@tonic-gate 		if (pp->current_mode != ECPP_COMPAT_MODE &&
17670Sstevel@tonic-gate 		    pp->current_mode != ECPP_CENTRONICS) {
17680Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
17690Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
17700Sstevel@tonic-gate 			break;
17710Sstevel@tonic-gate 		}
17720Sstevel@tonic-gate 
17730Sstevel@tonic-gate 		dsr = DSR_READ(pp);		/* read status */
17740Sstevel@tonic-gate 
17750Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
17760Sstevel@tonic-gate 
17770Sstevel@tonic-gate 		ecpp_error(pp->dip, "PRNIOC_GET_1284_STATUS: %x\n", dsr);
17780Sstevel@tonic-gate 
17790Sstevel@tonic-gate 		status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) |
17807656SSherry.Moore@Sun.COM 		    (~dsr & ECPP_nBUSY);
17810Sstevel@tonic-gate 
17820Sstevel@tonic-gate 		ecpp_putioc_copyout(q, mp, &status, sizeof (status));
17830Sstevel@tonic-gate 		break;
17840Sstevel@tonic-gate 	}
17850Sstevel@tonic-gate 
17860Sstevel@tonic-gate 	case ECPPIOC_GETDEVID:
17870Sstevel@tonic-gate 		ecpp_putioc_stateful_copyin(q, mp,
17887656SSherry.Moore@Sun.COM 		    sizeof (struct ecpp_device_id));
17890Sstevel@tonic-gate 		break;
17900Sstevel@tonic-gate 
17910Sstevel@tonic-gate 	case PRNIOC_GET_1284_DEVID:
17920Sstevel@tonic-gate 		ecpp_putioc_stateful_copyin(q, mp,
17937656SSherry.Moore@Sun.COM 		    sizeof (struct prn_1284_device_id));
17940Sstevel@tonic-gate 		break;
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate 	case PRNIOC_GET_IFINFO:
17970Sstevel@tonic-gate 		ecpp_putioc_stateful_copyin(q, mp,
17987656SSherry.Moore@Sun.COM 		    sizeof (struct prn_interface_info));
17990Sstevel@tonic-gate 		break;
18000Sstevel@tonic-gate 
18010Sstevel@tonic-gate 	default:
18020Sstevel@tonic-gate 		ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n",
18037656SSherry.Moore@Sun.COM 		    iocbp->ioc_cmd);
18040Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EINVAL);
18050Sstevel@tonic-gate 		break;
18060Sstevel@tonic-gate 	}
18070Sstevel@tonic-gate }
18080Sstevel@tonic-gate 
18090Sstevel@tonic-gate /*
18100Sstevel@tonic-gate  * allocate mblk and copyout the requested number of bytes
18110Sstevel@tonic-gate  */
18120Sstevel@tonic-gate static void
18130Sstevel@tonic-gate ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len)
18140Sstevel@tonic-gate {
18150Sstevel@tonic-gate 	mblk_t	*tmp;
18160Sstevel@tonic-gate 
18170Sstevel@tonic-gate 	if ((tmp = allocb(len, BPRI_MED)) == NULL) {
18180Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, ENOSR);
18190Sstevel@tonic-gate 		return;
18200Sstevel@tonic-gate 	}
18210Sstevel@tonic-gate 
18220Sstevel@tonic-gate 	bcopy(buf, tmp->b_wptr, len);
18230Sstevel@tonic-gate 
18240Sstevel@tonic-gate 	mcopyout(mp, NULL, len, NULL, tmp);
18250Sstevel@tonic-gate 	qreply(q, mp);
18260Sstevel@tonic-gate }
18270Sstevel@tonic-gate 
18280Sstevel@tonic-gate /*
18290Sstevel@tonic-gate  * copyin the structure using struct ecpp_copystate
18300Sstevel@tonic-gate  */
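/*
 * The allocated ecpp_copystate mblk is passed as cp_private and rides
 * along with the subsequent M_IOCDATA messages, allowing
 * ecpp_wput_iocdata_devid() to track the copyin/copyout sequence.
 */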
18310Sstevel@tonic-gate static void
18320Sstevel@tonic-gate ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size)
18330Sstevel@tonic-gate {
18340Sstevel@tonic-gate 	mblk_t *tmp;
18350Sstevel@tonic-gate 	struct ecpp_copystate *stp;
18360Sstevel@tonic-gate 
18370Sstevel@tonic-gate 	if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) {
18380Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EAGAIN);
18390Sstevel@tonic-gate 		return;
18400Sstevel@tonic-gate 	}
18410Sstevel@tonic-gate 
18420Sstevel@tonic-gate 	stp = (struct ecpp_copystate *)tmp->b_rptr;
18430Sstevel@tonic-gate 	stp->state = ECPP_STRUCTIN;
18440Sstevel@tonic-gate 	stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr;
18450Sstevel@tonic-gate 
18460Sstevel@tonic-gate 	tmp->b_wptr += sizeof (struct ecpp_copystate);
18470Sstevel@tonic-gate 
18480Sstevel@tonic-gate 	mcopyin(mp, tmp, size, stp->uaddr);
18490Sstevel@tonic-gate 	qreply(q, mp);
18500Sstevel@tonic-gate }
18510Sstevel@tonic-gate 
18520Sstevel@tonic-gate /*
18530Sstevel@tonic-gate  * The read queue is only used when the peripheral sends data faster
18540Sstevel@tonic-gate  * than the application consumes it;
18550Sstevel@tonic-gate  * once the low-water mark is reached, this routine will be scheduled.
18560Sstevel@tonic-gate  */
18570Sstevel@tonic-gate static int
18580Sstevel@tonic-gate ecpp_rsrv(queue_t *q)
18590Sstevel@tonic-gate {
18600Sstevel@tonic-gate 	struct msgb	*mp;
18610Sstevel@tonic-gate 
18620Sstevel@tonic-gate 	/*
18630Sstevel@tonic-gate 	 * send data upstream until the next queue is full or this queue is empty
18640Sstevel@tonic-gate 	 */
18650Sstevel@tonic-gate 	while (canputnext(q) && (mp = getq(q))) {
18660Sstevel@tonic-gate 		putnext(q, mp);
18670Sstevel@tonic-gate 	}
18680Sstevel@tonic-gate 
18690Sstevel@tonic-gate 	/*
18700Sstevel@tonic-gate 	 * if there is still space on the queue, enable backchannel
18710Sstevel@tonic-gate 	 */
18720Sstevel@tonic-gate 	if (canputnext(q)) {
18730Sstevel@tonic-gate 		struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
18740Sstevel@tonic-gate 
18750Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
18760Sstevel@tonic-gate 
18770Sstevel@tonic-gate 		if (pp->e_busy == ECPP_IDLE) {
18780Sstevel@tonic-gate 			(void) ecpp_idle_phase(pp);
18790Sstevel@tonic-gate 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
18800Sstevel@tonic-gate 		}
18810Sstevel@tonic-gate 
18820Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
18830Sstevel@tonic-gate 	}
18840Sstevel@tonic-gate 
18850Sstevel@tonic-gate 	return (0);
18860Sstevel@tonic-gate }
18870Sstevel@tonic-gate 
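/*
 * ecpp_wsrv() - write-side service routine.
 *
 * Gathers queued M_DATA into the pre-allocated ioblock buffer (up to
 * IO_BLOCK_SZ bytes) and starts a single transfer via ecpp_start();
 * an oversized first block is sent directly from its mblk.  Queued
 * ioctls, backchannel (M_CTL) and M_READ requests are processed here
 * as well.
 */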
18880Sstevel@tonic-gate static int
18890Sstevel@tonic-gate ecpp_wsrv(queue_t *q)
18900Sstevel@tonic-gate {
18910Sstevel@tonic-gate 	struct ecppunit	*pp = (struct ecppunit *)q->q_ptr;
18920Sstevel@tonic-gate 	struct msgb	*mp;
18930Sstevel@tonic-gate 	size_t		len, total_len;
18940Sstevel@tonic-gate 	size_t		my_ioblock_sz;
18950Sstevel@tonic-gate 	caddr_t		my_ioblock;
18960Sstevel@tonic-gate 	caddr_t		start_addr;
18970Sstevel@tonic-gate 
18980Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
18990Sstevel@tonic-gate 
19000Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy);
19010Sstevel@tonic-gate 
19020Sstevel@tonic-gate 	/* if channel is actively doing work, wait till completed */
19030Sstevel@tonic-gate 	if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) {
19040Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
19050Sstevel@tonic-gate 		return (0);
19060Sstevel@tonic-gate 	} else if (pp->suspended == TRUE) {
19070Sstevel@tonic-gate 		/*
19080Sstevel@tonic-gate 		 * if the system is about to suspend and ecpp_detach()
19090Sstevel@tonic-gate 		 * is blocked due to active transfers, wake it up and exit
19100Sstevel@tonic-gate 		 */
19110Sstevel@tonic-gate 		cv_signal(&pp->pport_cv);
19120Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
19130Sstevel@tonic-gate 		return (0);
19140Sstevel@tonic-gate 	}
19150Sstevel@tonic-gate 
19160Sstevel@tonic-gate 	/* peripheral status should be okay before starting transfer */
19170Sstevel@tonic-gate 	if (pp->e_busy == ECPP_ERR) {
19180Sstevel@tonic-gate 		if (ecpp_check_status(pp) == FAILURE) {
19190Sstevel@tonic-gate 			if (pp->wsrv_timer_id == 0) {
19200Sstevel@tonic-gate 				ecpp_error(pp->dip, "wsrv: start wsrv_timer\n");
19210Sstevel@tonic-gate 				pp->wsrv_timer_id = timeout(ecpp_wsrv_timer,
19227656SSherry.Moore@Sun.COM 				    (caddr_t)pp,
19237656SSherry.Moore@Sun.COM 				    drv_usectohz(pp->wsrv_retry * 1000));
19240Sstevel@tonic-gate 			} else {
19250Sstevel@tonic-gate 				ecpp_error(pp->dip,
19267656SSherry.Moore@Sun.COM 				    "ecpp_wsrv: wsrv_timer is active\n");
19270Sstevel@tonic-gate 			}
19280Sstevel@tonic-gate 
19290Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
19300Sstevel@tonic-gate 			return (0);
19310Sstevel@tonic-gate 		} else {
19320Sstevel@tonic-gate 			pp->e_busy = ECPP_IDLE;
19330Sstevel@tonic-gate 		}
19340Sstevel@tonic-gate 	}
19350Sstevel@tonic-gate 
19360Sstevel@tonic-gate 	my_ioblock = pp->ioblock;
19370Sstevel@tonic-gate 	my_ioblock_sz = IO_BLOCK_SZ;
19380Sstevel@tonic-gate 
19390Sstevel@tonic-gate 	/*
19400Sstevel@tonic-gate 	 * It's important to NULL pp->msg here,
19410Sstevel@tonic-gate 	 * cleaning up from previous transfer attempts.
19420Sstevel@tonic-gate 	 */
19430Sstevel@tonic-gate 	pp->msg = NULL;
19440Sstevel@tonic-gate 
19450Sstevel@tonic-gate 	start_addr = NULL;
19460Sstevel@tonic-gate 	len = total_len = 0;
19470Sstevel@tonic-gate 	/*
19480Sstevel@tonic-gate 	 * The following loop gathers the
19490Sstevel@tonic-gate 	 * many small writes that the lp subsystem makes and
19500Sstevel@tonic-gate 	 * combines them into one large DMA transfer. The len and
19510Sstevel@tonic-gate 	 * total_len variables are a running count of the number of
19520Sstevel@tonic-gate 	 * bytes that have been gathered. They are bcopied into the
19530Sstevel@tonic-gate 	 * ioblock buffer. pp->e_busy is set to ECPP_BUSY as soon as
19540Sstevel@tonic-gate 	 * we start gathering data to indicate that a transfer is pending.
19550Sstevel@tonic-gate 	 */
19560Sstevel@tonic-gate 	while (mp = getq(q)) {
19570Sstevel@tonic-gate 		switch (DB_TYPE(mp)) {
19580Sstevel@tonic-gate 		case M_DATA:
19590Sstevel@tonic-gate 			pp->e_busy = ECPP_BUSY;
19600Sstevel@tonic-gate 			len = mp->b_wptr - mp->b_rptr;
19610Sstevel@tonic-gate 
19620Sstevel@tonic-gate 			if ((total_len == 0) && (len >= my_ioblock_sz)) {
19630Sstevel@tonic-gate 				/*
19640Sstevel@tonic-gate 				 * if the first M_DATA is bigger than ioblock,
19650Sstevel@tonic-gate 				 * just use this mblk and start the transfer
19660Sstevel@tonic-gate 				 */
19670Sstevel@tonic-gate 				total_len = len;
19680Sstevel@tonic-gate 				start_addr = (caddr_t)mp->b_rptr;
19690Sstevel@tonic-gate 				pp->msg = mp;
19700Sstevel@tonic-gate 				goto breakout;
19710Sstevel@tonic-gate 			} else if (total_len + len > my_ioblock_sz) {
19720Sstevel@tonic-gate 				/*
19730Sstevel@tonic-gate 				 * current M_DATA does not fit in ioblock,
19740Sstevel@tonic-gate 				 * put it back and start the transfer
19750Sstevel@tonic-gate 				 */
19760Sstevel@tonic-gate 				(void) putbq(q, mp);
19770Sstevel@tonic-gate 				goto breakout;
19780Sstevel@tonic-gate 			} else {
19790Sstevel@tonic-gate 				/*
19800Sstevel@tonic-gate 				 * otherwise add data to ioblock and free mblk
19810Sstevel@tonic-gate 				 */
19820Sstevel@tonic-gate 				bcopy(mp->b_rptr, my_ioblock, len);
19830Sstevel@tonic-gate 				my_ioblock += len;
19840Sstevel@tonic-gate 				total_len += len;
19850Sstevel@tonic-gate 				start_addr = (caddr_t)pp->ioblock;
19860Sstevel@tonic-gate 				freemsg(mp);
19870Sstevel@tonic-gate 			}
19880Sstevel@tonic-gate 			break;
19890Sstevel@tonic-gate 
19900Sstevel@tonic-gate 		case M_IOCTL:
19910Sstevel@tonic-gate 			/*
19920Sstevel@tonic-gate 			 * Assume a simple loopback test: an application
19930Sstevel@tonic-gate 			 * writes data into the TFIFO, reads it using
19940Sstevel@tonic-gate 			 * ECPPIOC_GETDATA and compares. If the transfer
19950Sstevel@tonic-gate 			 * times out (which is only possible on Grover),
19960Sstevel@tonic-gate 			 * the ioctl might be processed before the data
19970Sstevel@tonic-gate 			 * got to the TFIFO, which leads to miscompare.
19980Sstevel@tonic-gate 			 * So if we encounter an ioctl, postpone it until after the transfer.
19990Sstevel@tonic-gate 			 */
20000Sstevel@tonic-gate 			if (total_len > 0) {
20010Sstevel@tonic-gate 				(void) putbq(q, mp);
20020Sstevel@tonic-gate 				goto breakout;
20030Sstevel@tonic-gate 			}
20040Sstevel@tonic-gate 
20050Sstevel@tonic-gate 			ecpp_error(pp->dip, "M_IOCTL.\n");
20060Sstevel@tonic-gate 
20070Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
20080Sstevel@tonic-gate 
20090Sstevel@tonic-gate 			ecpp_putioc(q, mp);
20100Sstevel@tonic-gate 
20110Sstevel@tonic-gate 			mutex_enter(&pp->umutex);
20120Sstevel@tonic-gate 
20130Sstevel@tonic-gate 			break;
20140Sstevel@tonic-gate 
20150Sstevel@tonic-gate 		case M_IOCDATA: {
20160Sstevel@tonic-gate 			struct copyresp *csp = (struct copyresp *)mp->b_rptr;
20170Sstevel@tonic-gate 
20180Sstevel@tonic-gate 			ecpp_error(pp->dip, "M_IOCDATA\n");
20190Sstevel@tonic-gate 
20200Sstevel@tonic-gate 			/*
20210Sstevel@tonic-gate 			 * If copy request failed, quit now
20220Sstevel@tonic-gate 			 */
20230Sstevel@tonic-gate 			if (csp->cp_rval != 0) {
20240Sstevel@tonic-gate 				freemsg(mp);
20250Sstevel@tonic-gate 				break;
20260Sstevel@tonic-gate 			}
20270Sstevel@tonic-gate 
20280Sstevel@tonic-gate 			switch (csp->cp_cmd) {
20290Sstevel@tonic-gate 			case ECPPIOC_SETPARMS:
20300Sstevel@tonic-gate 			case ECPPIOC_SETREGS:
20310Sstevel@tonic-gate 			case ECPPIOC_SETPORT:
20320Sstevel@tonic-gate 			case ECPPIOC_SETDATA:
20330Sstevel@tonic-gate 			case ECPPIOC_GETDEVID:
20340Sstevel@tonic-gate 			case PRNIOC_SET_IFCAP:
20350Sstevel@tonic-gate 			case PRNIOC_GET_1284_DEVID:
20360Sstevel@tonic-gate 			case PRNIOC_SET_TIMEOUTS:
20370Sstevel@tonic-gate 			case PRNIOC_GET_IFINFO:
20380Sstevel@tonic-gate 				ecpp_srvioc(q, mp);
20390Sstevel@tonic-gate 				break;
20400Sstevel@tonic-gate 
20410Sstevel@tonic-gate 			default:
20420Sstevel@tonic-gate 				ecpp_nack_ioctl(q, mp, EINVAL);
20430Sstevel@tonic-gate 				break;
20440Sstevel@tonic-gate 			}
20450Sstevel@tonic-gate 
20460Sstevel@tonic-gate 			break;
20470Sstevel@tonic-gate 		}
20480Sstevel@tonic-gate 
20490Sstevel@tonic-gate 		case M_CTL:
20500Sstevel@tonic-gate 			if (pp->e_busy != ECPP_IDLE) {
20510Sstevel@tonic-gate 				ecpp_error(pp->dip, "wsrv: M_CTL postponed\n");
20520Sstevel@tonic-gate 				(void) putbq(q, mp);
20530Sstevel@tonic-gate 				goto breakout;
20540Sstevel@tonic-gate 			} else {
20550Sstevel@tonic-gate 				ecpp_error(pp->dip, "wsrv: M_CTL\n");
20560Sstevel@tonic-gate 			}
20570Sstevel@tonic-gate 
20580Sstevel@tonic-gate 			/* sanity check */
20590Sstevel@tonic-gate 			if ((mp->b_wptr - mp->b_rptr != sizeof (int)) ||
20600Sstevel@tonic-gate 			    (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) {
20610Sstevel@tonic-gate 				ecpp_error(pp->dip, "wsrv: bogus M_CTL");
20620Sstevel@tonic-gate 				freemsg(mp);
20630Sstevel@tonic-gate 				break;
20640Sstevel@tonic-gate 			} else {
20650Sstevel@tonic-gate 				freemsg(mp);
20660Sstevel@tonic-gate 			}
20670Sstevel@tonic-gate 
20680Sstevel@tonic-gate 			/* This was a backchannel request */
20690Sstevel@tonic-gate 			(void) ecpp_peripheral2host(pp);
20700Sstevel@tonic-gate 
20710Sstevel@tonic-gate 			/* exit if a transfer has been initiated */
20720Sstevel@tonic-gate 			if (pp->e_busy == ECPP_BUSY) {
20730Sstevel@tonic-gate 				goto breakout;
20740Sstevel@tonic-gate 			}
20750Sstevel@tonic-gate 			break;
20760Sstevel@tonic-gate 
20770Sstevel@tonic-gate 		case M_READ:
20780Sstevel@tonic-gate 			pp->nread += *(size_t *)mp->b_rptr;
20790Sstevel@tonic-gate 			freemsg(mp);
20800Sstevel@tonic-gate 			ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread);
20810Sstevel@tonic-gate 			break;
20820Sstevel@tonic-gate 
20830Sstevel@tonic-gate 		default:
20840Sstevel@tonic-gate 			ecpp_error(pp->dip, "wsrv: should never get here\n");
20850Sstevel@tonic-gate 			freemsg(mp);
20860Sstevel@tonic-gate 			break;
20870Sstevel@tonic-gate 		}
20880Sstevel@tonic-gate 	}
20890Sstevel@tonic-gate breakout:
20900Sstevel@tonic-gate 	/*
20910Sstevel@tonic-gate 	 * If total_len > 0 then start the transfer, otherwise go to the idle state
20920Sstevel@tonic-gate 	 */
20930Sstevel@tonic-gate 	if (total_len > 0) {
20940Sstevel@tonic-gate 		ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len);
20950Sstevel@tonic-gate 		pp->e_busy = ECPP_BUSY;
20960Sstevel@tonic-gate 		ecpp_start(pp, start_addr, total_len);
20970Sstevel@tonic-gate 	} else {
20980Sstevel@tonic-gate 		ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy);
20990Sstevel@tonic-gate 
21000Sstevel@tonic-gate 		/* IDLE if xfer_timeout, or FIFO_EMPTY */
21010Sstevel@tonic-gate 		if (pp->e_busy == ECPP_IDLE) {
21020Sstevel@tonic-gate 			(void) ecpp_idle_phase(pp);
21030Sstevel@tonic-gate 			cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
21040Sstevel@tonic-gate 		}
21050Sstevel@tonic-gate 	}
21060Sstevel@tonic-gate 
21070Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
21080Sstevel@tonic-gate 	return (1);
21090Sstevel@tonic-gate }
21100Sstevel@tonic-gate 
21110Sstevel@tonic-gate /*
21120Sstevel@tonic-gate  * Ioctl processor for queued ioctl data transfer messages.
21130Sstevel@tonic-gate  */
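/*
 * Called from ecpp_wsrv() with pp->umutex held once the copied-in
 * ioctl data has arrived in an M_IOCDATA message.
 */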
21140Sstevel@tonic-gate static void
21150Sstevel@tonic-gate ecpp_srvioc(queue_t *q, mblk_t *mp)
21160Sstevel@tonic-gate {
21170Sstevel@tonic-gate 	struct iocblk	*iocbp;
21180Sstevel@tonic-gate 	struct ecppunit *pp;
21190Sstevel@tonic-gate 
21200Sstevel@tonic-gate 	iocbp = (struct iocblk *)mp->b_rptr;
21210Sstevel@tonic-gate 	pp = (struct ecppunit *)q->q_ptr;
21220Sstevel@tonic-gate 
21230Sstevel@tonic-gate 	switch (iocbp->ioc_cmd) {
21240Sstevel@tonic-gate 	case ECPPIOC_SETPARMS: {
21250Sstevel@tonic-gate 		struct ecpp_transfer_parms *xferp;
21260Sstevel@tonic-gate 
21270Sstevel@tonic-gate 		xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr;
21280Sstevel@tonic-gate 
21290Sstevel@tonic-gate 		if (xferp->write_timeout <= 0 ||
21307656SSherry.Moore@Sun.COM 		    xferp->write_timeout >= ECPP_MAX_TIMEOUT) {
21310Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
21320Sstevel@tonic-gate 			break;
21330Sstevel@tonic-gate 		}
21340Sstevel@tonic-gate 
21350Sstevel@tonic-gate 		if (!((xferp->mode == ECPP_CENTRONICS) ||
21367656SSherry.Moore@Sun.COM 		    (xferp->mode == ECPP_COMPAT_MODE) ||
21377656SSherry.Moore@Sun.COM 		    (xferp->mode == ECPP_NIBBLE_MODE) ||
21387656SSherry.Moore@Sun.COM 		    (xferp->mode == ECPP_ECP_MODE) ||
21397656SSherry.Moore@Sun.COM 		    (xferp->mode == ECPP_DIAG_MODE))) {
21400Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
21410Sstevel@tonic-gate 			break;
21420Sstevel@tonic-gate 		}
21430Sstevel@tonic-gate 
21440Sstevel@tonic-gate 		pp->xfer_parms = *xferp;
21450Sstevel@tonic-gate 		pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout;
21460Sstevel@tonic-gate 
21470Sstevel@tonic-gate 		ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n",
21487656SSherry.Moore@Sun.COM 		    pp->current_mode, pp->xfer_parms.mode);
21490Sstevel@tonic-gate 
21500Sstevel@tonic-gate 		if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) {
21510Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
21520Sstevel@tonic-gate 		} else {
21530Sstevel@tonic-gate 			/*
21540Sstevel@tonic-gate 			 * Mode negotiation was a success.  If in Nibble mode,
21550Sstevel@tonic-gate 			 * check the backchannel and set the link into REVIDLE.
21560Sstevel@tonic-gate 			 */
21570Sstevel@tonic-gate 			if ((pp->current_mode == ECPP_NIBBLE_MODE) &&
21580Sstevel@tonic-gate 			    (read_nibble_backchan(pp) == FAILURE)) {
21590Sstevel@tonic-gate 				/*
21600Sstevel@tonic-gate 				 * problems reading the backchannel;
21610Sstevel@tonic-gate 				 * we have returned to Centronics and
21620Sstevel@tonic-gate 				 * the ioctl fails.
21630Sstevel@tonic-gate 				 */
21640Sstevel@tonic-gate 				ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
21650Sstevel@tonic-gate 				break;
21660Sstevel@tonic-gate 			}
21670Sstevel@tonic-gate 
21680Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
21690Sstevel@tonic-gate 		}
21700Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
21710Sstevel@tonic-gate 			pp->port = ECPP_PORT_DMA;
21720Sstevel@tonic-gate 		} else {
21730Sstevel@tonic-gate 			pp->port = ECPP_PORT_PIO;
21740Sstevel@tonic-gate 		}
21750Sstevel@tonic-gate 
21760Sstevel@tonic-gate 		pp->xfer_parms.mode = pp->current_mode;
21770Sstevel@tonic-gate 
21780Sstevel@tonic-gate 		break;
21790Sstevel@tonic-gate 	}
21800Sstevel@tonic-gate 
21810Sstevel@tonic-gate 	case ECPPIOC_SETREGS: {
21820Sstevel@tonic-gate 		struct ecpp_regs *rg;
21830Sstevel@tonic-gate 		uint8_t dcr;
21840Sstevel@tonic-gate 
21850Sstevel@tonic-gate 		rg = (struct ecpp_regs *)mp->b_cont->b_rptr;
21860Sstevel@tonic-gate 
21870Sstevel@tonic-gate 		/* must be in diagnostic mode for these commands to work */
21880Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
21890Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
21900Sstevel@tonic-gate 			break;
21910Sstevel@tonic-gate 		}
21920Sstevel@tonic-gate 
21930Sstevel@tonic-gate 		/* bits 4-7 must be 1 or return EINVAL */
21940Sstevel@tonic-gate 		if ((rg->dcr & ECPP_SETREGS_DCR_MASK) !=
21957656SSherry.Moore@Sun.COM 		    ECPP_SETREGS_DCR_MASK) {
21960Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
21970Sstevel@tonic-gate 			break;
21980Sstevel@tonic-gate 		}
21990Sstevel@tonic-gate 
22000Sstevel@tonic-gate 		/* get the old dcr */
22010Sstevel@tonic-gate 		dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
22020Sstevel@tonic-gate 		/* get the new dcr */
22030Sstevel@tonic-gate 		dcr = (dcr & ECPP_SETREGS_DCR_MASK) |
22047656SSherry.Moore@Sun.COM 		    (rg->dcr & ~ECPP_SETREGS_DCR_MASK);
22050Sstevel@tonic-gate 		DCR_WRITE(pp, dcr);
22060Sstevel@tonic-gate 		ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr);
22070Sstevel@tonic-gate 		ecpp_ack_ioctl(q, mp);
22080Sstevel@tonic-gate 		break;
22090Sstevel@tonic-gate 	}
22100Sstevel@tonic-gate 
22110Sstevel@tonic-gate 	case ECPPIOC_SETPORT: {
22120Sstevel@tonic-gate 		uchar_t *port;
22130Sstevel@tonic-gate 
22140Sstevel@tonic-gate 		port = (uchar_t *)mp->b_cont->b_rptr;
22150Sstevel@tonic-gate 
22160Sstevel@tonic-gate 		/* must be in diagnostic mode for these commands to work */
22170Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
22180Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
22190Sstevel@tonic-gate 			break;
22200Sstevel@tonic-gate 		}
22210Sstevel@tonic-gate 
22220Sstevel@tonic-gate 		switch (*port) {
22230Sstevel@tonic-gate 		case ECPP_PORT_PIO:
22240Sstevel@tonic-gate 			/* put superio into PIO mode */
22250Sstevel@tonic-gate 			ECR_WRITE(pp,
22267656SSherry.Moore@Sun.COM 			    ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
22270Sstevel@tonic-gate 			pp->port = *port;
22280Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
22290Sstevel@tonic-gate 			break;
22300Sstevel@tonic-gate 
22310Sstevel@tonic-gate 		case ECPP_PORT_TDMA:
22320Sstevel@tonic-gate 			ecpp_error(pp->dip, "SETPORT: to TDMA\n");
22330Sstevel@tonic-gate 			pp->tfifo_intr = 1;
22340Sstevel@tonic-gate 			/* change to mode 110 */
22350Sstevel@tonic-gate 			ECR_WRITE(pp,
22367656SSherry.Moore@Sun.COM 			    ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV);
22370Sstevel@tonic-gate 			pp->port = *port;
22380Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
22390Sstevel@tonic-gate 			break;
22400Sstevel@tonic-gate 
22410Sstevel@tonic-gate 		default:
22420Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
22430Sstevel@tonic-gate 		}
22440Sstevel@tonic-gate 
22450Sstevel@tonic-gate 		break;
22460Sstevel@tonic-gate 	}
22470Sstevel@tonic-gate 
22480Sstevel@tonic-gate 	case ECPPIOC_SETDATA: {
22490Sstevel@tonic-gate 		uchar_t *data;
22500Sstevel@tonic-gate 
22510Sstevel@tonic-gate 		data = (uchar_t *)mp->b_cont->b_rptr;
22520Sstevel@tonic-gate 
22530Sstevel@tonic-gate 		/* must be in diagnostic mode for these commands to work */
22540Sstevel@tonic-gate 		if (pp->current_mode != ECPP_DIAG_MODE) {
22550Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
22560Sstevel@tonic-gate 			break;
22570Sstevel@tonic-gate 		}
22580Sstevel@tonic-gate 
22590Sstevel@tonic-gate 		switch (pp->port) {
22600Sstevel@tonic-gate 		case ECPP_PORT_PIO:
22610Sstevel@tonic-gate 			DATAR_WRITE(pp, *data);
22620Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
22630Sstevel@tonic-gate 			break;
22640Sstevel@tonic-gate 
22650Sstevel@tonic-gate 		case ECPP_PORT_TDMA:
22660Sstevel@tonic-gate 			TFIFO_WRITE(pp, *data);
22670Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
22680Sstevel@tonic-gate 			break;
22690Sstevel@tonic-gate 
22700Sstevel@tonic-gate 		default:
22710Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
22720Sstevel@tonic-gate 		}
22730Sstevel@tonic-gate 
22740Sstevel@tonic-gate 		break;
22750Sstevel@tonic-gate 	}
22760Sstevel@tonic-gate 
22770Sstevel@tonic-gate 	case ECPPIOC_GETDEVID: {
22780Sstevel@tonic-gate 		struct copyresp		*csp;
22790Sstevel@tonic-gate 		struct ecpp_copystate	*stp;
22800Sstevel@tonic-gate 		struct ecpp_device_id	*dp;
22810Sstevel@tonic-gate 		struct ecpp_device_id	id;
22820Sstevel@tonic-gate 
22830Sstevel@tonic-gate 		csp = (struct copyresp *)mp->b_rptr;
22840Sstevel@tonic-gate 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
22850Sstevel@tonic-gate 		dp = (struct ecpp_device_id *)mp->b_cont->b_rptr;
22860Sstevel@tonic-gate 
22870Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
22880Sstevel@tonic-gate 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
22890Sstevel@tonic-gate 			struct ecpp_device_id32 *dp32;
22900Sstevel@tonic-gate 
22910Sstevel@tonic-gate 			dp32 = (struct ecpp_device_id32 *)dp;
22920Sstevel@tonic-gate 			id.mode = dp32->mode;
22930Sstevel@tonic-gate 			id.len = dp32->len;
22940Sstevel@tonic-gate 			id.addr = (char *)(uintptr_t)dp32->addr;
22950Sstevel@tonic-gate 		} else {
22960Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
22970Sstevel@tonic-gate 			id = *dp;
22980Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
22990Sstevel@tonic-gate 		}
23000Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
23010Sstevel@tonic-gate 
23020Sstevel@tonic-gate 		ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen);
23030Sstevel@tonic-gate 		break;
23040Sstevel@tonic-gate 	}
23050Sstevel@tonic-gate 
23060Sstevel@tonic-gate 	case PRNIOC_GET_1284_DEVID: {
23070Sstevel@tonic-gate 		struct copyresp			*csp;
23080Sstevel@tonic-gate 		struct ecpp_copystate		*stp;
23090Sstevel@tonic-gate 		struct prn_1284_device_id	*dp;
23100Sstevel@tonic-gate 		struct ecpp_device_id		id;
23110Sstevel@tonic-gate 
23120Sstevel@tonic-gate 		csp = (struct copyresp *)mp->b_rptr;
23130Sstevel@tonic-gate 		stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
23140Sstevel@tonic-gate 		dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr;
23150Sstevel@tonic-gate 
23160Sstevel@tonic-gate 		/* imitate struct ecpp_device_id */
23170Sstevel@tonic-gate 		id.mode = ECPP_NIBBLE_MODE;
23180Sstevel@tonic-gate 
23190Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
23200Sstevel@tonic-gate 		if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
23210Sstevel@tonic-gate 			struct prn_1284_device_id32 *dp32;
23220Sstevel@tonic-gate 
23230Sstevel@tonic-gate 			dp32 = (struct prn_1284_device_id32 *)dp;
23240Sstevel@tonic-gate 			id.len = dp32->id_len;
23250Sstevel@tonic-gate 			id.addr = (char *)(uintptr_t)dp32->id_data;
23260Sstevel@tonic-gate 		} else {
23270Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
23280Sstevel@tonic-gate 			id.len = dp->id_len;
23290Sstevel@tonic-gate 			id.addr = (char *)dp->id_data;
23300Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
23310Sstevel@tonic-gate 		}
23320Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
23330Sstevel@tonic-gate 
23340Sstevel@tonic-gate 		ecpp_srvioc_devid(q, mp, &id,
23357656SSherry.Moore@Sun.COM 		    (int *)&stp->un.prn_devid.id_rlen);
23360Sstevel@tonic-gate 		break;
23370Sstevel@tonic-gate 	}
23380Sstevel@tonic-gate 
23390Sstevel@tonic-gate 	case PRNIOC_SET_IFCAP: {
23400Sstevel@tonic-gate 		uint_t	ifcap, new_ifcap;
23410Sstevel@tonic-gate 
23420Sstevel@tonic-gate 		ifcap = ecpp_get_prn_ifcap(pp);
23430Sstevel@tonic-gate 		new_ifcap = *(uint_t *)mp->b_cont->b_rptr;
23440Sstevel@tonic-gate 
23450Sstevel@tonic-gate 		if (ifcap == new_ifcap) {
23460Sstevel@tonic-gate 			ecpp_ack_ioctl(q, mp);
23470Sstevel@tonic-gate 			break;
23480Sstevel@tonic-gate 		}
23490Sstevel@tonic-gate 
23500Sstevel@tonic-gate 		/* only changing PRN_BIDI is supported */
23510Sstevel@tonic-gate 		if ((ifcap ^ new_ifcap) & ~PRN_BIDI) {
23520Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
23530Sstevel@tonic-gate 			break;
23540Sstevel@tonic-gate 		}
23550Sstevel@tonic-gate 
23560Sstevel@tonic-gate 		if (new_ifcap & PRN_BIDI) { 	/* go bidirectional */
23570Sstevel@tonic-gate 			ecpp_default_negotiation(pp);
23580Sstevel@tonic-gate 		} else {			/* go unidirectional */
23590Sstevel@tonic-gate 			(void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
23600Sstevel@tonic-gate 		}
23610Sstevel@tonic-gate 
23620Sstevel@tonic-gate 		ecpp_ack_ioctl(q, mp);
23630Sstevel@tonic-gate 		break;
23640Sstevel@tonic-gate 	}
23650Sstevel@tonic-gate 
23660Sstevel@tonic-gate 	case PRNIOC_SET_TIMEOUTS: {
23670Sstevel@tonic-gate 		struct prn_timeouts	*prn_timeouts;
23680Sstevel@tonic-gate 
23690Sstevel@tonic-gate 		prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr;
23700Sstevel@tonic-gate 
23710Sstevel@tonic-gate 		if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) {
23720Sstevel@tonic-gate 			ecpp_nack_ioctl(q, mp, EINVAL);
23730Sstevel@tonic-gate 			break;
23740Sstevel@tonic-gate 		}
23750Sstevel@tonic-gate 
23760Sstevel@tonic-gate 		pp->prn_timeouts = *prn_timeouts;
23770Sstevel@tonic-gate 		pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward;
23780Sstevel@tonic-gate 
23790Sstevel@tonic-gate 		ecpp_ack_ioctl(q, mp);
23800Sstevel@tonic-gate 		break;
23810Sstevel@tonic-gate 	}
23820Sstevel@tonic-gate 
23830Sstevel@tonic-gate 	case PRNIOC_GET_IFINFO:
23840Sstevel@tonic-gate 		ecpp_srvioc_prnif(q, mp);
23850Sstevel@tonic-gate 		break;
23860Sstevel@tonic-gate 
23870Sstevel@tonic-gate 	default:		/* unexpected ioctl type */
23880Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EINVAL);
23890Sstevel@tonic-gate 		break;
23900Sstevel@tonic-gate 	}
23910Sstevel@tonic-gate }
23920Sstevel@tonic-gate 
23930Sstevel@tonic-gate static void
23940Sstevel@tonic-gate ecpp_srvioc_devid(queue_t *q, mblk_t *mp, struct ecpp_device_id *id, int *rlen)
23950Sstevel@tonic-gate {
23960Sstevel@tonic-gate 	struct ecppunit 	*pp;
23970Sstevel@tonic-gate 	struct copyresp		*csp;
23980Sstevel@tonic-gate 	struct ecpp_copystate	*stp;
23990Sstevel@tonic-gate 	int			error;
24000Sstevel@tonic-gate 	int			len;
24010Sstevel@tonic-gate 	int			mode;
24020Sstevel@tonic-gate 	mblk_t			*datamp;
24030Sstevel@tonic-gate 
24040Sstevel@tonic-gate 	pp = (struct ecppunit *)q->q_ptr;
24050Sstevel@tonic-gate 	csp = (struct copyresp *)mp->b_rptr;
24060Sstevel@tonic-gate 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
24070Sstevel@tonic-gate 	mode = id->mode;
24080Sstevel@tonic-gate 
24090Sstevel@tonic-gate 	/* check arguments */
24100Sstevel@tonic-gate 	if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) {
24110Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n",
24127656SSherry.Moore@Sun.COM 		    mode, id->len);
24130Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EINVAL);
24140Sstevel@tonic-gate 		return;
24150Sstevel@tonic-gate 	}
24160Sstevel@tonic-gate 
24170Sstevel@tonic-gate 	/* Currently only Nibble mode is supported */
24180Sstevel@tonic-gate 	if (mode != ECPP_NIBBLE_MODE) {
24190Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
24200Sstevel@tonic-gate 		return;
24210Sstevel@tonic-gate 	}
24220Sstevel@tonic-gate 
24230Sstevel@tonic-gate 	if ((id->addr == NULL) && (id->len != 0)) {
24240Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EFAULT);
24250Sstevel@tonic-gate 		return;
24260Sstevel@tonic-gate 	}
24270Sstevel@tonic-gate 
24280Sstevel@tonic-gate 	/* read device ID length */
24290Sstevel@tonic-gate 	if (error = ecpp_getdevid(pp, NULL, &len, mode)) {
24300Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, error);
24310Sstevel@tonic-gate 		goto breakout;
24320Sstevel@tonic-gate 	}
24330Sstevel@tonic-gate 
24340Sstevel@tonic-gate 	/* don't take into account the two length bytes */
24350Sstevel@tonic-gate 	len -= 2;
24360Sstevel@tonic-gate 	*rlen = len;
24370Sstevel@tonic-gate 
24380Sstevel@tonic-gate 	/* limit transfer to user buffer length */
24390Sstevel@tonic-gate 	if (id->len < len) {
24400Sstevel@tonic-gate 		len = id->len;
24410Sstevel@tonic-gate 	}
24420Sstevel@tonic-gate 
24430Sstevel@tonic-gate 	if (len == 0) {
24440Sstevel@tonic-gate 		/* just return rlen */
24450Sstevel@tonic-gate 		stp->state = ECPP_ADDROUT;
24460Sstevel@tonic-gate 		ecpp_wput_iocdata_devid(q, mp,
24477656SSherry.Moore@Sun.COM 		    (uintptr_t)rlen - (uintptr_t)&stp->un);
24480Sstevel@tonic-gate 		goto breakout;
24490Sstevel@tonic-gate 	}
24500Sstevel@tonic-gate 
24510Sstevel@tonic-gate 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
24520Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, ENOSR);
24530Sstevel@tonic-gate 		goto breakout;
24540Sstevel@tonic-gate 	}
24550Sstevel@tonic-gate 
24560Sstevel@tonic-gate 	/* read ID string */
24570Sstevel@tonic-gate 	error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode);
24580Sstevel@tonic-gate 	if (error) {
24590Sstevel@tonic-gate 		freemsg(datamp);
24600Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, error);
24610Sstevel@tonic-gate 		goto breakout;
24620Sstevel@tonic-gate 	} else {
24630Sstevel@tonic-gate 		datamp->b_wptr += len;
24640Sstevel@tonic-gate 
24650Sstevel@tonic-gate 		stp->state = ECPP_ADDROUT;
24660Sstevel@tonic-gate 		mcopyout(mp, csp->cp_private, len, id->addr, datamp);
24670Sstevel@tonic-gate 		qreply(q, mp);
24680Sstevel@tonic-gate 	}
24690Sstevel@tonic-gate 
24700Sstevel@tonic-gate 	return;
24710Sstevel@tonic-gate 
24720Sstevel@tonic-gate breakout:
24730Sstevel@tonic-gate 	(void) ecpp_1284_termination(pp);
24740Sstevel@tonic-gate }
24750Sstevel@tonic-gate 
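/*
 * Illustrative sketch (not part of the driver): how a user-level consumer
 * might exercise the two-pass ECPPIOC_GETDEVID protocol served above,
 * first asking only for the ID length, then for the ID string itself.
 * The device path "/dev/ecpp0" is hypothetical, and the assumption that the
 * user-visible struct ecpp_device_id carries mode/len/addr plus a returned
 * length member (rlen) follows from the copyout offsets used above.
 *
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <stropts.h>
 *	#include <sys/ecppio.h>
 *
 *	struct ecpp_device_id id;
 *	int fd = open("/dev/ecpp0", O_RDWR);
 *
 *	id.mode = ECPP_NIBBLE_MODE;
 *	id.len = 0;
 *	id.addr = NULL;
 *	if (fd >= 0 && ioctl(fd, ECPPIOC_GETDEVID, &id) == 0) {
 *		id.len = id.rlen;	// pass 1 returned the length only
 *		id.addr = malloc(id.len);
 *		(void) ioctl(fd, ECPPIOC_GETDEVID, &id);	// pass 2: the string
 *	}
 */
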
24760Sstevel@tonic-gate /*
24770Sstevel@tonic-gate  * PRNIOC_GET_IFINFO: return prnio interface info string
24780Sstevel@tonic-gate  */
24790Sstevel@tonic-gate static void
24800Sstevel@tonic-gate ecpp_srvioc_prnif(queue_t *q, mblk_t *mp)
24810Sstevel@tonic-gate {
24820Sstevel@tonic-gate 	struct copyresp			*csp;
24830Sstevel@tonic-gate 	struct ecpp_copystate		*stp;
24840Sstevel@tonic-gate 	uint_t				len;
24850Sstevel@tonic-gate 	struct prn_interface_info	*ip;
24860Sstevel@tonic-gate 	struct prn_interface_info	info;
24870Sstevel@tonic-gate 	mblk_t				*datamp;
24880Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
24890Sstevel@tonic-gate 	struct iocblk		*iocbp = (struct iocblk *)mp->b_rptr;
24900Sstevel@tonic-gate #endif
24910Sstevel@tonic-gate 
24920Sstevel@tonic-gate 	csp = (struct copyresp *)mp->b_rptr;
24930Sstevel@tonic-gate 	stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
24940Sstevel@tonic-gate 	ip = (struct prn_interface_info *)mp->b_cont->b_rptr;
24950Sstevel@tonic-gate 
24960Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
24970Sstevel@tonic-gate 	if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
24980Sstevel@tonic-gate 		struct prn_interface_info32 *ip32;
24990Sstevel@tonic-gate 
25000Sstevel@tonic-gate 		ip32 = (struct prn_interface_info32 *)ip;
25010Sstevel@tonic-gate 		info.if_len = ip32->if_len;
25020Sstevel@tonic-gate 		info.if_data = (char *)(uintptr_t)ip32->if_data;
25030Sstevel@tonic-gate 	} else {
25040Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
25050Sstevel@tonic-gate 		info = *ip;
25060Sstevel@tonic-gate #ifdef _MULTI_DATAMODEL
25070Sstevel@tonic-gate 	}
25080Sstevel@tonic-gate #endif /* _MULTI_DATAMODEL */
25090Sstevel@tonic-gate 
25100Sstevel@tonic-gate 	len = strlen(prn_ifinfo);
25110Sstevel@tonic-gate 	stp->un.prn_if.if_rlen = len;
25120Sstevel@tonic-gate 	stp->state = ECPP_ADDROUT;
25130Sstevel@tonic-gate 
25140Sstevel@tonic-gate 	/* check arguments */
25150Sstevel@tonic-gate 	if ((info.if_data == NULL) && (info.if_len != 0)) {
25160Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, EFAULT);
25170Sstevel@tonic-gate 		return;
25180Sstevel@tonic-gate 	}
25190Sstevel@tonic-gate 
25200Sstevel@tonic-gate 	if (info.if_len == 0) {
25210Sstevel@tonic-gate 		/* just copyout rlen */
25220Sstevel@tonic-gate 		ecpp_wput_iocdata_devid(q, mp,
25237656SSherry.Moore@Sun.COM 		    offsetof(struct prn_interface_info, if_rlen));
25240Sstevel@tonic-gate 		return;
25250Sstevel@tonic-gate 	}
25260Sstevel@tonic-gate 
25270Sstevel@tonic-gate 	/* if needed, trim to the buffer size */
25280Sstevel@tonic-gate 	if (len > info.if_len) {
25290Sstevel@tonic-gate 		len = info.if_len;
25300Sstevel@tonic-gate 	}
25310Sstevel@tonic-gate 
25320Sstevel@tonic-gate 	if ((datamp = allocb(len, BPRI_MED)) == NULL) {
25330Sstevel@tonic-gate 		ecpp_nack_ioctl(q, mp, ENOSR);
25340Sstevel@tonic-gate 		return;
25350Sstevel@tonic-gate 	}
25360Sstevel@tonic-gate 
25370Sstevel@tonic-gate 	bcopy(&prn_ifinfo[0], datamp->b_wptr, len);
25380Sstevel@tonic-gate 	datamp->b_wptr += len;
25390Sstevel@tonic-gate 
25400Sstevel@tonic-gate 	mcopyout(mp, csp->cp_private, len, info.if_data, datamp);
25410Sstevel@tonic-gate 	qreply(q, mp);
25420Sstevel@tonic-gate }
25430Sstevel@tonic-gate 
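/*
 * Context note: the ioctl service routines above never touch user memory
 * directly.  They turn the M_IOCTL into an M_COPYOUT request via mcopyout(9F)
 * and are later re-entered with an M_IOCDATA message whose copyresp reports
 * whether the copy worked.  A minimal, hypothetical skeleton of that second
 * leg (not a routine of this driver) looks roughly like this:
 *
 *	static void
 *	xx_wput_iocdata(queue_t *q, mblk_t *mp)
 *	{
 *		struct copyresp *csp = (struct copyresp *)mp->b_rptr;
 *
 *		if (csp->cp_rval != 0) {
 *			freemsg(mp);	// copyout to the user failed; give up
 *			return;
 *		}
 *		// cp_private still carries the ecpp_copystate for this ioctl
 *		miocack(q, mp, 0, 0);	// last stage done, ack the ioctl
 *	}
 */
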
25440Sstevel@tonic-gate static void
25450Sstevel@tonic-gate ecpp_flush(struct ecppunit *pp, int cmd)
25460Sstevel@tonic-gate {
25470Sstevel@tonic-gate 	queue_t		*q;
25480Sstevel@tonic-gate 	uint8_t		ecr, dcr;
25490Sstevel@tonic-gate 	timeout_id_t	timeout_id, fifo_timer_id, wsrv_timer_id;
25500Sstevel@tonic-gate 
25510Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
25520Sstevel@tonic-gate 
25530Sstevel@tonic-gate 	if (!(cmd & FWRITE)) {
25540Sstevel@tonic-gate 		return;
25550Sstevel@tonic-gate 	}
25560Sstevel@tonic-gate 
25570Sstevel@tonic-gate 	q = pp->writeq;
25580Sstevel@tonic-gate 	timeout_id = fifo_timer_id = wsrv_timer_id = 0;
25590Sstevel@tonic-gate 
25600Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy);
25610Sstevel@tonic-gate 
25620Sstevel@tonic-gate 	/* if there is an ongoing DMA, it needs to be turned off. */
25630Sstevel@tonic-gate 	switch (pp->e_busy) {
25640Sstevel@tonic-gate 	case ECPP_BUSY:
25650Sstevel@tonic-gate 		/*
25660Sstevel@tonic-gate 		 * Change the port status to ECPP_FLUSH to
25670Sstevel@tonic-gate 		 * indicate to ecpp_wsrv that the wq is being flushed.
25680Sstevel@tonic-gate 		 */
25690Sstevel@tonic-gate 		pp->e_busy = ECPP_FLUSH;
25700Sstevel@tonic-gate 
25710Sstevel@tonic-gate 		/*
25720Sstevel@tonic-gate 		 * dma_cancelled indicates to ecpp_isr() that we have
25730Sstevel@tonic-gate 		 * turned off the DMA.  Since the mutex is held, ecpp_isr()
25740Sstevel@tonic-gate 		 * may be blocked.  Once ecpp_flush() finishes and ecpp_isr()
25750Sstevel@tonic-gate 		 * gains the mutex, ecpp_isr() will have a _reset_ DMAC.  Most
25760Sstevel@tonic-gate 		 * significantly, the DMAC will be reset after ecpp_isr() was
25770Sstevel@tonic-gate 		 * invoked.  Therefore we need to have a flag "dma_cancelled"
25780Sstevel@tonic-gate 		 * to signify when the described condition has occurred.  If
25790Sstevel@tonic-gate 		 * ecpp_isr() notes a dma_cancelled, it will ignore the DMAC csr
25800Sstevel@tonic-gate 		 * and simply claim the interrupt.
25810Sstevel@tonic-gate 		 */
25820Sstevel@tonic-gate 
25830Sstevel@tonic-gate 		pp->dma_cancelled = TRUE;
25840Sstevel@tonic-gate 
25850Sstevel@tonic-gate 		/* either DMA or PIO transfer */
25860Sstevel@tonic-gate 		if (COMPAT_DMA(pp) ||
25870Sstevel@tonic-gate 		    (pp->current_mode == ECPP_ECP_MODE) ||
25880Sstevel@tonic-gate 		    (pp->current_mode == ECPP_DIAG_MODE)) {
25890Sstevel@tonic-gate 			/*
25900Sstevel@tonic-gate 			 * if the bcr is zero, then DMA is complete and
25910Sstevel@tonic-gate 			 * we are waiting for the fifo to drain.  Therefore,
25920Sstevel@tonic-gate 			 * turn off dma.
25930Sstevel@tonic-gate 			 */
25940Sstevel@tonic-gate 			if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
25950Sstevel@tonic-gate 				ecpp_error(pp->dip,
25967656SSherry.Moore@Sun.COM 				    "ecpp_flush: dma_stop failed.\n");
25970Sstevel@tonic-gate 			}
25980Sstevel@tonic-gate 
25990Sstevel@tonic-gate 			/*
26000Sstevel@tonic-gate 			 * If the status of the port is ECPP_BUSY,
26010Sstevel@tonic-gate 			 * the DMA was stopped either explicitly above or by
26020Sstevel@tonic-gate 			 * ecpp_isr(), but the FIFO hasn't drained yet.  In either
26030Sstevel@tonic-gate 			 * case, we need to unbind the dma mappings.
26040Sstevel@tonic-gate 			 */
26050Sstevel@tonic-gate 			if (ddi_dma_unbind_handle(
26067656SSherry.Moore@Sun.COM 			    pp->dma_handle) != DDI_SUCCESS)
26070Sstevel@tonic-gate 				ecpp_error(pp->dip,
26087656SSherry.Moore@Sun.COM 				    "ecpp_flush: unbind failed.\n");
26090Sstevel@tonic-gate 
26100Sstevel@tonic-gate 			if (pp->msg != NULL) {
26110Sstevel@tonic-gate 				freemsg(pp->msg);
26120Sstevel@tonic-gate 				pp->msg = NULL;
26130Sstevel@tonic-gate 			}
26140Sstevel@tonic-gate 		} else {
26150Sstevel@tonic-gate 			/*
26160Sstevel@tonic-gate 			 * PIO transfer: disable nAck interrupts
26170Sstevel@tonic-gate 			 */
26180Sstevel@tonic-gate 			dcr = DCR_READ(pp);
26190Sstevel@tonic-gate 			dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN);
26200Sstevel@tonic-gate 			DCR_WRITE(pp, dcr);
26210Sstevel@tonic-gate 			ECPP_MASK_INTR(pp);
26220Sstevel@tonic-gate 		}
26230Sstevel@tonic-gate 
26240Sstevel@tonic-gate 		/*
26250Sstevel@tonic-gate 		 * The transfer is cleaned up.  There may or may not be data
26260Sstevel@tonic-gate 		 * in the fifo.  We don't care at this point, i.e. the SuperIO
26270Sstevel@tonic-gate 		 * may or may not transfer the remaining bytes; it doesn't
26280Sstevel@tonic-gate 		 * matter.  All that is important at this stage is that no more
26290Sstevel@tonic-gate 		 * fifo timers are started.
26300Sstevel@tonic-gate 		 */
26310Sstevel@tonic-gate 
26320Sstevel@tonic-gate 		timeout_id = pp->timeout_id;
26330Sstevel@tonic-gate 		fifo_timer_id = pp->fifo_timer_id;
26340Sstevel@tonic-gate 		pp->timeout_id = pp->fifo_timer_id = 0;
26350Sstevel@tonic-gate 		pp->softintr_pending = 0;
26360Sstevel@tonic-gate 
26370Sstevel@tonic-gate 		break;
26380Sstevel@tonic-gate 
26390Sstevel@tonic-gate 	case ECPP_ERR:
26400Sstevel@tonic-gate 		/*
26410Sstevel@tonic-gate 		 * Change the port status to ECPP_FLUSH to
26420Sstevel@tonic-gate 		 * indicate to ecpp_wsrv that the wq is being flushed.
26430Sstevel@tonic-gate 		 */
26440Sstevel@tonic-gate 		pp->e_busy = ECPP_FLUSH;
26450Sstevel@tonic-gate 
26460Sstevel@tonic-gate 		/*
26470Sstevel@tonic-gate 		 *  Most likely there are mblks in the queue,
26480Sstevel@tonic-gate 		 *  but the driver cannot transmit because
26490Sstevel@tonic-gate 		 *  of the bad port status.  In this case,
26500Sstevel@tonic-gate 		 *  ecpp_flush() should make sure ecpp_wsrv_timer()
26510Sstevel@tonic-gate 		 *  is turned off.
26520Sstevel@tonic-gate 		 */
26530Sstevel@tonic-gate 		wsrv_timer_id = pp->wsrv_timer_id;
26540Sstevel@tonic-gate 		pp->wsrv_timer_id = 0;
26550Sstevel@tonic-gate 
26560Sstevel@tonic-gate 		break;
26570Sstevel@tonic-gate 
26580Sstevel@tonic-gate 	case ECPP_IDLE:
26590Sstevel@tonic-gate 		/* No work to do. Ready to flush */
26600Sstevel@tonic-gate 		break;
26610Sstevel@tonic-gate 
26620Sstevel@tonic-gate 	default:
26630Sstevel@tonic-gate 		ecpp_error(pp->dip,
26647656SSherry.Moore@Sun.COM 		    "ecpp_flush: illegal state %x\n", pp->e_busy);
26650Sstevel@tonic-gate 	}
26660Sstevel@tonic-gate 
26670Sstevel@tonic-gate 	/* in DIAG mode clear TFIFO if needed */
26680Sstevel@tonic-gate 	if (pp->current_mode == ECPP_DIAG_MODE) {
26690Sstevel@tonic-gate 		ecr = ECR_READ(pp);
26700Sstevel@tonic-gate 		if (!(ecr & ECPP_FIFO_EMPTY)) {
26710Sstevel@tonic-gate 			ECR_WRITE(pp,
26727656SSherry.Moore@Sun.COM 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
26730Sstevel@tonic-gate 			ECR_WRITE(pp, ecr);
26740Sstevel@tonic-gate 		}
26750Sstevel@tonic-gate 	}
26760Sstevel@tonic-gate 
26770Sstevel@tonic-gate 	/* Discard all messages on the output queue. */
26780Sstevel@tonic-gate 	flushq(q, FLUSHDATA);
26790Sstevel@tonic-gate 
26800Sstevel@tonic-gate 	/* The port is no longer flushing or dma'ing for that matter. */
26810Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
26820Sstevel@tonic-gate 
26830Sstevel@tonic-gate 	/* Set the right phase */
26840Sstevel@tonic-gate 	if (pp->current_mode == ECPP_ECP_MODE) {
26850Sstevel@tonic-gate 		if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
26860Sstevel@tonic-gate 			pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
26870Sstevel@tonic-gate 		} else {
26880Sstevel@tonic-gate 			pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
26890Sstevel@tonic-gate 		}
26900Sstevel@tonic-gate 	}
26910Sstevel@tonic-gate 
26920Sstevel@tonic-gate 	/* cancel timeouts if any */
26930Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
26940Sstevel@tonic-gate 
26950Sstevel@tonic-gate 	if (timeout_id) {
26960Sstevel@tonic-gate 		(void) untimeout(timeout_id);
26970Sstevel@tonic-gate 	}
26980Sstevel@tonic-gate 	if (fifo_timer_id) {
26990Sstevel@tonic-gate 		(void) untimeout(fifo_timer_id);
27000Sstevel@tonic-gate 	}
27010Sstevel@tonic-gate 	if (wsrv_timer_id) {
27020Sstevel@tonic-gate 		(void) untimeout(wsrv_timer_id);
27030Sstevel@tonic-gate 	}
27040Sstevel@tonic-gate 
27050Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
27060Sstevel@tonic-gate 
27070Sstevel@tonic-gate 	cv_signal(&pp->pport_cv);	/* wake up ecpp_close() */
27080Sstevel@tonic-gate }
27090Sstevel@tonic-gate 
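/*
 * The dma_cancelled handshake used above, reduced to its essentials (this is
 * a restatement of existing code, not an additional interface): the canceller
 * flips a flag while holding the mutex, and the interrupt handler checks it
 * before trusting anything the DMAC says.
 *
 *	// canceller (e.g. ecpp_flush()), pp->umutex held
 *	pp->dma_cancelled = TRUE;
 *	(void) ECPP_DMA_STOP(pp, NULL);
 *
 *	// ecpp_isr(), right after mutex_enter(&pp->umutex)
 *	if (pp->dma_cancelled == TRUE) {
 *		pp->dma_cancelled = FALSE;
 *		mutex_exit(&pp->umutex);
 *		return (DDI_INTR_CLAIMED);	// stale interrupt: claim and drop
 *	}
 */
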
27100Sstevel@tonic-gate static void
27110Sstevel@tonic-gate ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len)
27120Sstevel@tonic-gate {
27130Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
27140Sstevel@tonic-gate 	ASSERT(pp->e_busy == ECPP_BUSY);
27150Sstevel@tonic-gate 
27160Sstevel@tonic-gate 	ecpp_error(pp->dip,
27177656SSherry.Moore@Sun.COM 	    "ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n",
27187656SSherry.Moore@Sun.COM 	    pp->current_mode, pp->current_phase, ECR_READ(pp), len);
27190Sstevel@tonic-gate 
27200Sstevel@tonic-gate 	pp->dma_dir = DDI_DMA_WRITE;	/* this is a forward transfer */
27210Sstevel@tonic-gate 
27220Sstevel@tonic-gate 	switch (pp->current_mode) {
27230Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
27240Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
27250Sstevel@tonic-gate 
27260Sstevel@tonic-gate 		/* After termination we are either Compatible or Centronics */
27270Sstevel@tonic-gate 
27280Sstevel@tonic-gate 		/* FALLTHRU */
27290Sstevel@tonic-gate 
27300Sstevel@tonic-gate 	case ECPP_CENTRONICS:
27310Sstevel@tonic-gate 	case ECPP_COMPAT_MODE:
27320Sstevel@tonic-gate 		if (pp->io_mode == ECPP_DMA) {
27330Sstevel@tonic-gate 			if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
27340Sstevel@tonic-gate 				return;
27350Sstevel@tonic-gate 			}
27360Sstevel@tonic-gate 		} else {
27370Sstevel@tonic-gate 			/* PIO mode */
27380Sstevel@tonic-gate 			if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) {
27390Sstevel@tonic-gate 				return;
27400Sstevel@tonic-gate 			}
27410Sstevel@tonic-gate 			(void) ecpp_pio_writeb(pp);
27420Sstevel@tonic-gate 		}
27430Sstevel@tonic-gate 		break;
27440Sstevel@tonic-gate 
27450Sstevel@tonic-gate 	case ECPP_DIAG_MODE: {
27460Sstevel@tonic-gate 		int	oldlen;
27470Sstevel@tonic-gate 
27480Sstevel@tonic-gate 		/* put superio into TFIFO mode, if not already */
27490Sstevel@tonic-gate 		ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
27500Sstevel@tonic-gate 		/*
27510Sstevel@tonic-gate 		 * DMA would block if the TFIFO is not empty
27520Sstevel@tonic-gate 		 * if nobody has read these bytes by this moment, they're gone
27530Sstevel@tonic-gate 		 */
27540Sstevel@tonic-gate 		drv_usecwait(1);
27550Sstevel@tonic-gate 		if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
27560Sstevel@tonic-gate 			ecpp_error(pp->dip,
27577656SSherry.Moore@Sun.COM 			    "ecpp_start: TFIFO not empty, clearing\n");
27580Sstevel@tonic-gate 			ECR_WRITE(pp,
27597656SSherry.Moore@Sun.COM 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
27600Sstevel@tonic-gate 			ECR_WRITE(pp,
27617656SSherry.Moore@Sun.COM 			    ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
27620Sstevel@tonic-gate 		}
27630Sstevel@tonic-gate 
27640Sstevel@tonic-gate 		/* we can DMA at most 16 bytes into TFIFO */
27650Sstevel@tonic-gate 		oldlen = len;
27660Sstevel@tonic-gate 		if (len > ECPP_FIFO_SZ) {
27670Sstevel@tonic-gate 			len = ECPP_FIFO_SZ;
27680Sstevel@tonic-gate 		}
27690Sstevel@tonic-gate 
27700Sstevel@tonic-gate 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
27710Sstevel@tonic-gate 			return;
27720Sstevel@tonic-gate 		}
27730Sstevel@tonic-gate 
27740Sstevel@tonic-gate 		/* put the rest of data back on the queue */
27750Sstevel@tonic-gate 		if (oldlen > len) {
27760Sstevel@tonic-gate 			ecpp_putback_untransfered(pp, addr + len, oldlen - len);
27770Sstevel@tonic-gate 		}
27780Sstevel@tonic-gate 
27790Sstevel@tonic-gate 		break;
27800Sstevel@tonic-gate 	}
27810Sstevel@tonic-gate 
27820Sstevel@tonic-gate 	case ECPP_ECP_MODE:
27830Sstevel@tonic-gate 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
27847656SSherry.Moore@Sun.COM 		    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
27850Sstevel@tonic-gate 
27860Sstevel@tonic-gate 		/* if in Reverse Phase negotiate to Forward */
27870Sstevel@tonic-gate 		if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
27880Sstevel@tonic-gate 			if (ecp_reverse2forward(pp) == FAILURE) {
27890Sstevel@tonic-gate 				if (pp->msg) {
27900Sstevel@tonic-gate 					(void) putbq(pp->writeq, pp->msg);
27910Sstevel@tonic-gate 				} else {
27920Sstevel@tonic-gate 					ecpp_putback_untransfered(pp,
27937656SSherry.Moore@Sun.COM 					    addr, len);
27940Sstevel@tonic-gate 				}
27950Sstevel@tonic-gate 			}
27960Sstevel@tonic-gate 		}
27970Sstevel@tonic-gate 
27980Sstevel@tonic-gate 		if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
27990Sstevel@tonic-gate 			return;
28000Sstevel@tonic-gate 		}
28010Sstevel@tonic-gate 
28020Sstevel@tonic-gate 		break;
28030Sstevel@tonic-gate 	}
28040Sstevel@tonic-gate 
28050Sstevel@tonic-gate 	/* schedule transfer timeout */
28060Sstevel@tonic-gate 	pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
28077656SSherry.Moore@Sun.COM 	    pp->xfer_parms.write_timeout * drv_usectohz(1000000));
28080Sstevel@tonic-gate }
28090Sstevel@tonic-gate 
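/*
 * Side note on the watchdog scheduled at the end of ecpp_start(): timeout(9F)
 * wants ticks, so the per-transfer write_timeout (in seconds) is converted
 * with drv_usectohz(9F).  The same arithmetic in isolation:
 *
 *	clock_t ticks = pp->xfer_parms.write_timeout * drv_usectohz(1000000);
 *	pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp, ticks);
 *	// the id is later cancelled with untimeout(9F) when the transfer ends
 */
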
28100Sstevel@tonic-gate /*
28110Sstevel@tonic-gate  * Transfer a PIO "block" a byte at a time.
28120Sstevel@tonic-gate  * The block starts at addr and ends at pp->last_byte
28130Sstevel@tonic-gate  */
28140Sstevel@tonic-gate static uint8_t
28150Sstevel@tonic-gate ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
28160Sstevel@tonic-gate {
28170Sstevel@tonic-gate 	pp->next_byte = addr;
28180Sstevel@tonic-gate 	pp->last_byte = (caddr_t)((ulong_t)addr + len);
28190Sstevel@tonic-gate 
28200Sstevel@tonic-gate 	if (ecpp_check_status(pp) == FAILURE) {
28210Sstevel@tonic-gate 		/*
28220Sstevel@tonic-gate 		 * if status signals are bad, do not start PIO,
28230Sstevel@tonic-gate 		 * put everything back on the queue.
28240Sstevel@tonic-gate 		 */
28250Sstevel@tonic-gate 		ecpp_error(pp->dip,
28267656SSherry.Moore@Sun.COM 		    "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
28270Sstevel@tonic-gate 
28280Sstevel@tonic-gate 		if (pp->msg != NULL) {
28290Sstevel@tonic-gate 			/*
28300Sstevel@tonic-gate 			 * In this circumstance we want to copy the
28310Sstevel@tonic-gate 			 * untransferred section of msg to a new mblk,
28320Sstevel@tonic-gate 			 * then free the original one.
28330Sstevel@tonic-gate 			 */
28340Sstevel@tonic-gate 			ecpp_putback_untransfered(pp,
28357656SSherry.Moore@Sun.COM 			    (void *)pp->msg->b_rptr, len);
28360Sstevel@tonic-gate 			ecpp_error(pp->dip,
28377656SSherry.Moore@Sun.COM 			    "ecpp_prep_pio_xfer: len1=%d\n", len);
28380Sstevel@tonic-gate 
28390Sstevel@tonic-gate 			freemsg(pp->msg);
28400Sstevel@tonic-gate 			pp->msg = NULL;
28410Sstevel@tonic-gate 		} else {
28420Sstevel@tonic-gate 			ecpp_putback_untransfered(pp, pp->ioblock, len);
28430Sstevel@tonic-gate 			ecpp_error(pp->dip,
28447656SSherry.Moore@Sun.COM 			    "ecpp_prep_pio_xfer: len2=%d\n", len);
28450Sstevel@tonic-gate 		}
28460Sstevel@tonic-gate 		qenable(pp->writeq);
28470Sstevel@tonic-gate 
28480Sstevel@tonic-gate 		return (FAILURE);
28490Sstevel@tonic-gate 	}
28500Sstevel@tonic-gate 
28510Sstevel@tonic-gate 	pp->dma_cancelled = FALSE;
28520Sstevel@tonic-gate 
28530Sstevel@tonic-gate 	/* pport must be in PIO mode */
28540Sstevel@tonic-gate 	if (ecr_write(pp, ECR_mode_001 |
28557656SSherry.Moore@Sun.COM 	    ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) {
28560Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n");
28570Sstevel@tonic-gate 	}
28580Sstevel@tonic-gate 
28590Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n",
28607656SSherry.Moore@Sun.COM 	    DCR_READ(pp), ECR_READ(pp));
28610Sstevel@tonic-gate 
28620Sstevel@tonic-gate 	return (SUCCESS);
28630Sstevel@tonic-gate }
28640Sstevel@tonic-gate 
28650Sstevel@tonic-gate static uint8_t
28660Sstevel@tonic-gate ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
28670Sstevel@tonic-gate {
28680Sstevel@tonic-gate 	uint8_t ecr_mode[] = {
28690Sstevel@tonic-gate 		0,
28700Sstevel@tonic-gate 		ECR_mode_010,	/* Centronics */
28710Sstevel@tonic-gate 		ECR_mode_010,	/* Compat */
28720Sstevel@tonic-gate 		0,		/* Byte */
28730Sstevel@tonic-gate 		0,		/* Nibble */
28740Sstevel@tonic-gate 		ECR_mode_011,	/* ECP */
28750Sstevel@tonic-gate 		0,		/* Failure */
28760Sstevel@tonic-gate 		ECR_mode_110,	/* Diag */
28770Sstevel@tonic-gate 	};
28780Sstevel@tonic-gate 	uint8_t	ecr;
28790Sstevel@tonic-gate 
28800Sstevel@tonic-gate 	ASSERT((pp->current_mode <= ECPP_DIAG_MODE) &&
28817656SSherry.Moore@Sun.COM 	    (ecr_mode[pp->current_mode] != 0));
28820Sstevel@tonic-gate 
28830Sstevel@tonic-gate 	if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) {
28840Sstevel@tonic-gate 		qenable(pp->writeq);
28850Sstevel@tonic-gate 		return (FAILURE);
28860Sstevel@tonic-gate 	}
28870Sstevel@tonic-gate 
28880Sstevel@tonic-gate 	if (ecpp_check_status(pp) == FAILURE) {
28890Sstevel@tonic-gate 		/*
28900Sstevel@tonic-gate 		 * if status signals are bad, do not start DMA, but
28910Sstevel@tonic-gate 		 * rather put everything back on the queue.
28920Sstevel@tonic-gate 		 */
28930Sstevel@tonic-gate 		ecpp_error(pp->dip,
28947656SSherry.Moore@Sun.COM 		    "ecpp_init_dma_xfer: suspending DMA len=%d\n",
28957656SSherry.Moore@Sun.COM 		    pp->dma_cookie.dmac_size);
28960Sstevel@tonic-gate 
28970Sstevel@tonic-gate 		if (pp->msg != NULL) {
28980Sstevel@tonic-gate 			/*
28990Sstevel@tonic-gate 			 * In this circumstance we want to copy the
29000Sstevel@tonic-gate 			 * untransferred section of msg to a new mblk,
29010Sstevel@tonic-gate 			 * then free the original one.
29020Sstevel@tonic-gate 			 */
29030Sstevel@tonic-gate 			ecpp_putback_untransfered(pp,
29047656SSherry.Moore@Sun.COM 			    (void *)pp->msg->b_rptr, len);
29050Sstevel@tonic-gate 			ecpp_error(pp->dip,
29067656SSherry.Moore@Sun.COM 			    "ecpp_init_dma_xfer:a:len=%d\n", len);
29070Sstevel@tonic-gate 
29080Sstevel@tonic-gate 			freemsg(pp->msg);
29090Sstevel@tonic-gate 			pp->msg = NULL;
29100Sstevel@tonic-gate 		} else {
29110Sstevel@tonic-gate 			ecpp_putback_untransfered(pp, pp->ioblock, len);
29120Sstevel@tonic-gate 			ecpp_error(pp->dip,
29137656SSherry.Moore@Sun.COM 			    "ecpp_init_dma_xfer:b:len=%d\n", len);
29140Sstevel@tonic-gate 		}
29150Sstevel@tonic-gate 
29160Sstevel@tonic-gate 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
29170Sstevel@tonic-gate 			ecpp_error(pp->dip,
29187656SSherry.Moore@Sun.COM 			    "ecpp_init_dma_xfer: unbind FAILURE.\n");
29190Sstevel@tonic-gate 		}
29200Sstevel@tonic-gate 		qenable(pp->writeq);
29210Sstevel@tonic-gate 		return (FAILURE);
29220Sstevel@tonic-gate 	}
29230Sstevel@tonic-gate 
29240Sstevel@tonic-gate 	pp->xfercnt = pp->resid = len;
29250Sstevel@tonic-gate 	pp->dma_cancelled = FALSE;
29260Sstevel@tonic-gate 	pp->tfifo_intr = 0;
29270Sstevel@tonic-gate 
29280Sstevel@tonic-gate 	/* set the right ECR mode and disable DMA */
29290Sstevel@tonic-gate 	ecr = ecr_mode[pp->current_mode];
29300Sstevel@tonic-gate 	(void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK);
29310Sstevel@tonic-gate 
29320Sstevel@tonic-gate 	/* prepare DMAC for a transfer */
29330Sstevel@tonic-gate 	if (ECPP_DMA_START(pp) == FAILURE) {
29340Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n");
29350Sstevel@tonic-gate 		return (FAILURE);
29360Sstevel@tonic-gate 	}
29370Sstevel@tonic-gate 
29380Sstevel@tonic-gate 	/* GO! */
29390Sstevel@tonic-gate 	(void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK);
29400Sstevel@tonic-gate 
29410Sstevel@tonic-gate 	return (SUCCESS);
29420Sstevel@tonic-gate }
29430Sstevel@tonic-gate 
29440Sstevel@tonic-gate static uint8_t
29450Sstevel@tonic-gate ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len)
29460Sstevel@tonic-gate {
29470Sstevel@tonic-gate 	int	err;
29480Sstevel@tonic-gate 	off_t	woff;
29490Sstevel@tonic-gate 	size_t	wlen;
29500Sstevel@tonic-gate 
29510Sstevel@tonic-gate 	ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE);
29520Sstevel@tonic-gate 
29530Sstevel@tonic-gate 	err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL,
29547656SSherry.Moore@Sun.COM 	    addr, len, pp->dma_dir | DDI_DMA_PARTIAL,
29557656SSherry.Moore@Sun.COM 	    DDI_DMA_DONTWAIT, NULL,
29567656SSherry.Moore@Sun.COM 	    &pp->dma_cookie, &pp->dma_cookie_count);
29570Sstevel@tonic-gate 
29580Sstevel@tonic-gate 	switch (err) {
29590Sstevel@tonic-gate 	case DDI_DMA_MAPPED:
29600Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n");
29610Sstevel@tonic-gate 
29620Sstevel@tonic-gate 		pp->dma_nwin = 1;
29630Sstevel@tonic-gate 		pp->dma_curwin = 1;
29640Sstevel@tonic-gate 		break;
29650Sstevel@tonic-gate 
29660Sstevel@tonic-gate 	case DDI_DMA_PARTIAL_MAP: {
29670Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n");
29680Sstevel@tonic-gate 
29690Sstevel@tonic-gate 		if (ddi_dma_numwin(pp->dma_handle,
29707656SSherry.Moore@Sun.COM 		    &pp->dma_nwin) != DDI_SUCCESS) {
29710Sstevel@tonic-gate 			(void) ddi_dma_unbind_handle(pp->dma_handle);
29720Sstevel@tonic-gate 			return (FAILURE);
29730Sstevel@tonic-gate 		}
29740Sstevel@tonic-gate 		pp->dma_curwin = 1;
29750Sstevel@tonic-gate 
29760Sstevel@tonic-gate 		/*
29770Sstevel@tonic-gate 		 * The very first window is returned by bind_handle,
29780Sstevel@tonic-gate 		 * but we must do this explicitly here, otherwise
29790Sstevel@tonic-gate 		 * the next getwin would return the wrong cookie dmac_size
29800Sstevel@tonic-gate 		 */
29810Sstevel@tonic-gate 		if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen,
29820Sstevel@tonic-gate 		    &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) {
29830Sstevel@tonic-gate 			ecpp_error(pp->dip,
29847656SSherry.Moore@Sun.COM 			    "ecpp_setup_dma: ddi_dma_getwin failed!");
29850Sstevel@tonic-gate 			(void) ddi_dma_unbind_handle(pp->dma_handle);
29860Sstevel@tonic-gate 			return (FAILURE);
29870Sstevel@tonic-gate 		}
29880Sstevel@tonic-gate 
29890Sstevel@tonic-gate 		ecpp_error(pp->dip,
29907656SSherry.Moore@Sun.COM 		    "ecpp_setup_dma: cookies=%d, windows=%d"
29917656SSherry.Moore@Sun.COM 		    " addr=%lx len=%d\n",
29927656SSherry.Moore@Sun.COM 		    pp->dma_cookie_count, pp->dma_nwin,
29937656SSherry.Moore@Sun.COM 		    pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size);
29940Sstevel@tonic-gate 
29950Sstevel@tonic-gate 		break;
29960Sstevel@tonic-gate 	}
29970Sstevel@tonic-gate 
29980Sstevel@tonic-gate 	default:
29990Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err);
30000Sstevel@tonic-gate 		return (FAILURE);
30010Sstevel@tonic-gate 	}
30020Sstevel@tonic-gate 
30030Sstevel@tonic-gate 	return (SUCCESS);
30040Sstevel@tonic-gate }
30050Sstevel@tonic-gate 
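/*
 * A sketch of how the DDI_DMA_PARTIAL_MAP bookkeeping set up above is
 * typically consumed once the current window has been transferred: fetch the
 * next window's cookie with ddi_dma_getwin(9F) and advance dma_curwin.  The
 * fragment is illustrative only; the driver's actual window advancing lives
 * elsewhere in this file.
 *
 *	off_t	off;
 *	size_t	len;
 *
 *	if ((pp->dma_curwin < pp->dma_nwin) &&
 *	    (ddi_dma_getwin(pp->dma_handle, pp->dma_curwin, &off, &len,
 *	    &pp->dma_cookie, &pp->dma_cookie_count) == DDI_SUCCESS)) {
 *		pp->dma_curwin++;
 *		// reprogram the DMAC with the new cookie and restart
 *	}
 */
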
30060Sstevel@tonic-gate static void
30070Sstevel@tonic-gate ecpp_ack_ioctl(queue_t *q, mblk_t *mp)
30080Sstevel@tonic-gate {
30090Sstevel@tonic-gate 	struct iocblk  *iocbp;
30100Sstevel@tonic-gate 
30110Sstevel@tonic-gate 	mp->b_datap->db_type = M_IOCACK;
30120Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
30130Sstevel@tonic-gate 
30140Sstevel@tonic-gate 	if (mp->b_cont) {
30150Sstevel@tonic-gate 		freemsg(mp->b_cont);
30160Sstevel@tonic-gate 		mp->b_cont = NULL;
30170Sstevel@tonic-gate 	}
30180Sstevel@tonic-gate 
30190Sstevel@tonic-gate 	iocbp = (struct iocblk *)mp->b_rptr;
30200Sstevel@tonic-gate 	iocbp->ioc_error = 0;
30210Sstevel@tonic-gate 	iocbp->ioc_count = 0;
30220Sstevel@tonic-gate 	iocbp->ioc_rval = 0;
30230Sstevel@tonic-gate 
30240Sstevel@tonic-gate 	qreply(q, mp);
30250Sstevel@tonic-gate }
30260Sstevel@tonic-gate 
30270Sstevel@tonic-gate static void
30280Sstevel@tonic-gate ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err)
30290Sstevel@tonic-gate {
30300Sstevel@tonic-gate 	struct iocblk  *iocbp;
30310Sstevel@tonic-gate 
30320Sstevel@tonic-gate 	mp->b_datap->db_type = M_IOCNAK;
30330Sstevel@tonic-gate 	mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
30340Sstevel@tonic-gate 	iocbp = (struct iocblk *)mp->b_rptr;
30350Sstevel@tonic-gate 	iocbp->ioc_error = err;
30360Sstevel@tonic-gate 
30370Sstevel@tonic-gate 	if (mp->b_cont) {
30380Sstevel@tonic-gate 		freemsg(mp->b_cont);
30390Sstevel@tonic-gate 		mp->b_cont = NULL;
30400Sstevel@tonic-gate 	}
30410Sstevel@tonic-gate 
30420Sstevel@tonic-gate 	qreply(q, mp);
30430Sstevel@tonic-gate }
30440Sstevel@tonic-gate 
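/*
 * For reference: <sys/strsun.h> (already included) provides miocack(9F) and
 * miocnak(9F), which perform the same M_IOCACK/M_IOCNAK bookkeeping as the
 * two helpers above; an approximate equivalent, though not a drop-in
 * replacement for the b_cont freeing done here:
 *
 *	miocack(q, mp, 0, 0);		// roughly ecpp_ack_ioctl(q, mp)
 *	miocnak(q, mp, 0, err);		// roughly ecpp_nack_ioctl(q, mp, err)
 */
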
30450Sstevel@tonic-gate uint_t
30460Sstevel@tonic-gate ecpp_isr(caddr_t arg)
30470Sstevel@tonic-gate {
30480Sstevel@tonic-gate 	struct ecppunit *pp = (struct ecppunit *)(void *)arg;
30490Sstevel@tonic-gate 	uint32_t	dcsr;
30500Sstevel@tonic-gate 	uint8_t		dsr;
30510Sstevel@tonic-gate 	int		cheerio_pend_counter;
30520Sstevel@tonic-gate 	int		retval = DDI_INTR_UNCLAIMED;
30530Sstevel@tonic-gate 	hrtime_t	now;
30540Sstevel@tonic-gate 
30550Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
30560Sstevel@tonic-gate 	/*
30570Sstevel@tonic-gate 	 * an interrupt may occur while another thread holds the lock
30580Sstevel@tonic-gate 	 * and cancels the DMA transfer (e.g. ecpp_flush());
30590Sstevel@tonic-gate 	 * since it cannot cancel the interrupt thread,
30600Sstevel@tonic-gate 	 * it just sets dma_cancelled to TRUE,
30610Sstevel@tonic-gate 	 * telling the interrupt handler to exit immediately
30620Sstevel@tonic-gate 	 */
30630Sstevel@tonic-gate 	if (pp->dma_cancelled == TRUE) {
30640Sstevel@tonic-gate 		ecpp_error(pp->dip, "dma-cancel isr\n");
30650Sstevel@tonic-gate 
30660Sstevel@tonic-gate 		pp->intr_hard++;
30670Sstevel@tonic-gate 		pp->dma_cancelled = FALSE;
30680Sstevel@tonic-gate 
30690Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
30700Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
30710Sstevel@tonic-gate 	}
30720Sstevel@tonic-gate 
30730Sstevel@tonic-gate 	/* Southbridge interrupts are handled separately */
30740Sstevel@tonic-gate #if defined(__x86)
30750Sstevel@tonic-gate 	if (pp->hw == &x86)
30760Sstevel@tonic-gate #else
30770Sstevel@tonic-gate 	if (pp->hw == &m1553)
30780Sstevel@tonic-gate #endif
30790Sstevel@tonic-gate 	{
30800Sstevel@tonic-gate 		retval = ecpp_M1553_intr(pp);
30810Sstevel@tonic-gate 		if (retval == DDI_INTR_UNCLAIMED) {
30820Sstevel@tonic-gate 			goto unexpected;
30830Sstevel@tonic-gate 		}
30840Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
30850Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
30860Sstevel@tonic-gate 	}
30870Sstevel@tonic-gate 
30880Sstevel@tonic-gate 	/*
30890Sstevel@tonic-gate 	 * the intr comes through the motherboard; it is faster than the PCI route.
30900Sstevel@tonic-gate 	 * sometimes ecpp_isr() is invoked before the cheerio csr is updated.
30910Sstevel@tonic-gate 	 */
30920Sstevel@tonic-gate 	cheerio_pend_counter = ecpp_isr_max_delay;
30930Sstevel@tonic-gate 	dcsr = GET_DMAC_CSR(pp);
30940Sstevel@tonic-gate 
30950Sstevel@tonic-gate 	while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) {
30960Sstevel@tonic-gate 		drv_usecwait(1);
30970Sstevel@tonic-gate 		dcsr = GET_DMAC_CSR(pp);
30980Sstevel@tonic-gate 	}
30990Sstevel@tonic-gate 
31000Sstevel@tonic-gate 	/*
31010Sstevel@tonic-gate 	 * This is a workaround for what seems to be a timing problem
31020Sstevel@tonic-gate 	 * with the delivery of interrupts and CSR updating with the
31030Sstevel@tonic-gate 	 * ebus2 csr, superio and the n_ERR pin from the peripheral.
31040Sstevel@tonic-gate 	 *
31050Sstevel@tonic-gate 	 * delay is not needed for PIO mode
31060Sstevel@tonic-gate 	 */
31070Sstevel@tonic-gate 	if (!COMPAT_PIO(pp)) {
31080Sstevel@tonic-gate 		drv_usecwait(100);
31090Sstevel@tonic-gate 		dcsr = GET_DMAC_CSR(pp);
31100Sstevel@tonic-gate 	}
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate 	/* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */
31130Sstevel@tonic-gate 	dsr = DSR_READ(pp);
31140Sstevel@tonic-gate 
31150Sstevel@tonic-gate 	/*
31160Sstevel@tonic-gate 	 * check if interrupt is for this device:
31170Sstevel@tonic-gate 	 * it should be reflected either in cheerio DCSR register
31180Sstevel@tonic-gate 	 * or in IRQ_ST bit of DSR on 97317
31190Sstevel@tonic-gate 	 */
31200Sstevel@tonic-gate 	if ((dcsr & DCSR_INT_PEND) == 0) {
31210Sstevel@tonic-gate 		if (pp->hw != &pc97317) {
31220Sstevel@tonic-gate 			goto unclaimed;
31230Sstevel@tonic-gate 		}
31240Sstevel@tonic-gate 		/*
31250Sstevel@tonic-gate 		 * on Excalibur, reading DSR will deassert SuperIO IRQx line
31260Sstevel@tonic-gate 		 * RIO's DCSR_INT_PEND seems to follow IRQx transitions,
31270Sstevel@tonic-gate 		 * so if DSR is read after the interrupt occurred, but before
31280Sstevel@tonic-gate 		 * we get here, IRQx and hence INT_PEND will be deasserted;
31290Sstevel@tonic-gate 		 * as a result, we can miss a service interrupt in PIO mode
31300Sstevel@tonic-gate 		 *
31310Sstevel@tonic-gate 		 * the malicious DSR reader is BPPIOC_TESTIO, which is called
31320Sstevel@tonic-gate 		 * by LP in between data blocks to check printer status;
31330Sstevel@tonic-gate 		 * this workaround lets us avoid missing an interrupt
31340Sstevel@tonic-gate 		 *
31350Sstevel@tonic-gate 		 * also, the nErr interrupt (ECP mode) is not always reflected in DCSR
31360Sstevel@tonic-gate 		 */
31370Sstevel@tonic-gate 		if (((dsr & ECPP_IRQ_ST) == 0) ||
31380Sstevel@tonic-gate 		    ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) ||
31390Sstevel@tonic-gate 		    (((dsr & ECPP_nERR) == 0) &&
31400Sstevel@tonic-gate 		    (pp->current_mode == ECPP_ECP_MODE))) {
31410Sstevel@tonic-gate 			dcsr = 0;
31420Sstevel@tonic-gate 		} else {
31430Sstevel@tonic-gate 			goto unclaimed;
31440Sstevel@tonic-gate 		}
31450Sstevel@tonic-gate 	}
31460Sstevel@tonic-gate 
31470Sstevel@tonic-gate 	pp->intr_hard++;
31480Sstevel@tonic-gate 
31490Sstevel@tonic-gate 	/* the intr is for us - check all possible interrupt sources */
31500Sstevel@tonic-gate 	if (dcsr & DCSR_ERR_PEND) {
31510Sstevel@tonic-gate 		size_t	bcr;
31520Sstevel@tonic-gate 
31530Sstevel@tonic-gate 		/* we are expecting a data transfer interrupt */
31540Sstevel@tonic-gate 		ASSERT(pp->e_busy == ECPP_BUSY);
31550Sstevel@tonic-gate 
31560Sstevel@tonic-gate 		/*
31570Sstevel@tonic-gate 		 * some kind of DMA error
31580Sstevel@tonic-gate 		 */
31590Sstevel@tonic-gate 		if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) {
31600Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n");
31610Sstevel@tonic-gate 		}
31620Sstevel@tonic-gate 
31630Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr);
31640Sstevel@tonic-gate 
31650Sstevel@tonic-gate 		ecpp_xfer_cleanup(pp);
31660Sstevel@tonic-gate 
31670Sstevel@tonic-gate 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
31680Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n");
31690Sstevel@tonic-gate 		}
31700Sstevel@tonic-gate 
31710Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
31720Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
31730Sstevel@tonic-gate 	}
31740Sstevel@tonic-gate 
31750Sstevel@tonic-gate 	if (dcsr & DCSR_TC) {
31760Sstevel@tonic-gate 		retval = ecpp_dma_ihdlr(pp);
31770Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
31780Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
31790Sstevel@tonic-gate 	}
31800Sstevel@tonic-gate 
31810Sstevel@tonic-gate 	if (COMPAT_PIO(pp)) {
31820Sstevel@tonic-gate 		retval = ecpp_pio_ihdlr(pp);
31830Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
31840Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
31850Sstevel@tonic-gate 	}
31860Sstevel@tonic-gate 
31870Sstevel@tonic-gate 	/* does peripheral need attention? */
31880Sstevel@tonic-gate 	if ((dsr & ECPP_nERR) == 0) {
31890Sstevel@tonic-gate 		retval = ecpp_nErr_ihdlr(pp);
31900Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
31910Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
31920Sstevel@tonic-gate 	}
31930Sstevel@tonic-gate 
31940Sstevel@tonic-gate 	pp->intr_hard--;
31950Sstevel@tonic-gate 
31960Sstevel@tonic-gate unexpected:
31970Sstevel@tonic-gate 
31980Sstevel@tonic-gate 	pp->intr_spurious++;
31990Sstevel@tonic-gate 
32000Sstevel@tonic-gate 	/*
32010Sstevel@tonic-gate 	 * The following procedure tries to prevent soft hangs
32020Sstevel@tonic-gate 	 * in the event of peripheral/superio misbehaviour:
32030Sstevel@tonic-gate 	 * if the number of unexpected interrupts in the last SPUR_PERIOD ns
32040Sstevel@tonic-gate 	 * exceeds SPUR_CRITICAL, then mask the interrupts
32050Sstevel@tonic-gate 	 */
32060Sstevel@tonic-gate 	now = gethrtime();
32070Sstevel@tonic-gate 	if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) {
32080Sstevel@tonic-gate 		/* last unexpected interrupt was long ago */
32090Sstevel@tonic-gate 		pp->lastspur = now;
32100Sstevel@tonic-gate 		pp->nspur = 1;
32110Sstevel@tonic-gate 	} else {
32120Sstevel@tonic-gate 		/* last unexpected interrupt was recently */
32130Sstevel@tonic-gate 		pp->nspur++;
32140Sstevel@tonic-gate 	}
32150Sstevel@tonic-gate 
32160Sstevel@tonic-gate 	if (pp->nspur >= SPUR_CRITICAL) {
32170Sstevel@tonic-gate 		ECPP_MASK_INTR(pp);
32180Sstevel@tonic-gate 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV);
32190Sstevel@tonic-gate 		pp->nspur = 0;
32200Sstevel@tonic-gate 		cmn_err(CE_NOTE, "%s%d: too many interrupt requests",
32217656SSherry.Moore@Sun.COM 		    ddi_get_name(pp->dip), ddi_get_instance(pp->dip));
32220Sstevel@tonic-gate 	} else {
32230Sstevel@tonic-gate 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK);
32240Sstevel@tonic-gate 	}
32250Sstevel@tonic-gate 
32260Sstevel@tonic-gate 	ecpp_error(pp->dip,
32277656SSherry.Moore@Sun.COM 	    "isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
32287656SSherry.Moore@Sun.COM 	    dcsr, ECR_READ(pp), dsr, DCR_READ(pp),
32297656SSherry.Moore@Sun.COM 	    pp->current_mode, pp->current_phase);
32300Sstevel@tonic-gate 
32310Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
32320Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
32330Sstevel@tonic-gate 
32340Sstevel@tonic-gate unclaimed:
32350Sstevel@tonic-gate 
32360Sstevel@tonic-gate 	pp->intr_spurious++;
32370Sstevel@tonic-gate 
32380Sstevel@tonic-gate 	ecpp_error(pp->dip,
32397656SSherry.Moore@Sun.COM 	    "isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
32407656SSherry.Moore@Sun.COM 	    dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp),
32417656SSherry.Moore@Sun.COM 	    pp->current_mode, pp->current_phase);
32420Sstevel@tonic-gate 
32430Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
32440Sstevel@tonic-gate 	return (DDI_INTR_UNCLAIMED);
32450Sstevel@tonic-gate }
32460Sstevel@tonic-gate 
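/*
 * The spurious-interrupt guard in ecpp_isr() boils down to a simple rate
 * limiter: count unexpected interrupts, restart the count whenever the last
 * one was more than SPUR_PERIOD ns ago, and mask the interrupt source once
 * the count reaches SPUR_CRITICAL.  Reduced to its essentials:
 *
 *	hrtime_t now = gethrtime();
 *
 *	if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) {
 *		pp->lastspur = now;	// window expired, start a new one
 *		pp->nspur = 1;
 *	} else if (++pp->nspur >= SPUR_CRITICAL) {
 *		ECPP_MASK_INTR(pp);	// too noisy, silence the source
 *		pp->nspur = 0;
 *	}
 */
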
32470Sstevel@tonic-gate /*
32480Sstevel@tonic-gate  * M1553 intr handler
32490Sstevel@tonic-gate  */
32500Sstevel@tonic-gate static uint_t
32510Sstevel@tonic-gate ecpp_M1553_intr(struct ecppunit *pp)
32520Sstevel@tonic-gate {
32530Sstevel@tonic-gate 	int retval = DDI_INTR_UNCLAIMED;
32540Sstevel@tonic-gate 
32550Sstevel@tonic-gate 	pp->intr_hard++;
32560Sstevel@tonic-gate 
32570Sstevel@tonic-gate 	if (pp->e_busy == ECPP_BUSY) {
32580Sstevel@tonic-gate 		/* Centronics or Compat PIO transfer */
32590Sstevel@tonic-gate 		if (COMPAT_PIO(pp)) {
32600Sstevel@tonic-gate 			return (ecpp_pio_ihdlr(pp));
32610Sstevel@tonic-gate 		}
32620Sstevel@tonic-gate 
32630Sstevel@tonic-gate 		/* Centronics or Compat DMA transfer */
32640Sstevel@tonic-gate 		if (COMPAT_DMA(pp) ||
32650Sstevel@tonic-gate 		    (pp->current_mode == ECPP_ECP_MODE) ||
32660Sstevel@tonic-gate 		    (pp->current_mode == ECPP_DIAG_MODE)) {
32670Sstevel@tonic-gate 			return (ecpp_dma_ihdlr(pp));
32680Sstevel@tonic-gate 		}
32690Sstevel@tonic-gate 	}
32700Sstevel@tonic-gate 
32710Sstevel@tonic-gate 	/* Nibble or ECP backchannel request? */
32720Sstevel@tonic-gate 	if ((DSR_READ(pp) & ECPP_nERR) == 0) {
32730Sstevel@tonic-gate 		return (ecpp_nErr_ihdlr(pp));
32740Sstevel@tonic-gate 	}
32750Sstevel@tonic-gate 
32760Sstevel@tonic-gate 	return (retval);
32770Sstevel@tonic-gate }
32780Sstevel@tonic-gate 
32790Sstevel@tonic-gate /*
32800Sstevel@tonic-gate  * DMA completion interrupt handler
32810Sstevel@tonic-gate  */
32820Sstevel@tonic-gate static uint_t
32830Sstevel@tonic-gate ecpp_dma_ihdlr(struct ecppunit *pp)
32840Sstevel@tonic-gate {
32850Sstevel@tonic-gate 	clock_t	tm;
32860Sstevel@tonic-gate 
32870Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n",
32887656SSherry.Moore@Sun.COM 	    pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
32890Sstevel@tonic-gate 
32900Sstevel@tonic-gate 	/* we are expecting a data transfer interrupt */
32910Sstevel@tonic-gate 	ASSERT(pp->e_busy == ECPP_BUSY);
32920Sstevel@tonic-gate 
32930Sstevel@tonic-gate 	/* Intr generated while invoking TFIFO mode. Exit */
32940Sstevel@tonic-gate 	if (pp->tfifo_intr == 1) {
32950Sstevel@tonic-gate 		pp->tfifo_intr = 0;
32960Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n");
32970Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
32980Sstevel@tonic-gate 	}
32990Sstevel@tonic-gate 
33000Sstevel@tonic-gate 	if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
33010Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n");
33020Sstevel@tonic-gate 	}
33030Sstevel@tonic-gate 
33040Sstevel@tonic-gate 	if (pp->current_mode == ECPP_ECP_MODE &&
33050Sstevel@tonic-gate 	    pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
33060Sstevel@tonic-gate 		ecpp_ecp_read_completion(pp);
33070Sstevel@tonic-gate 	} else {
33080Sstevel@tonic-gate 		/*
33090Sstevel@tonic-gate 		 * fifo_timer() will do the cleanup when the FIFO drains
33100Sstevel@tonic-gate 		 */
33110Sstevel@tonic-gate 		if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) ||
33120Sstevel@tonic-gate 		    (pp->current_mode == ECPP_DIAG_MODE)) {
33130Sstevel@tonic-gate 			tm = 0;	/* no use in waiting if FIFO is already empty */
33140Sstevel@tonic-gate 		} else {
33150Sstevel@tonic-gate 			tm = drv_usectohz(FIFO_DRAIN_PERIOD);
33160Sstevel@tonic-gate 		}
33170Sstevel@tonic-gate 		pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm);
33180Sstevel@tonic-gate 	}
33190Sstevel@tonic-gate 
33200Sstevel@tonic-gate 	/*
33210Sstevel@tonic-gate 	 * Stop the DMA transfer timeout timer
33220Sstevel@tonic-gate 	 * this operation will temporarily give up the mutex,
33230Sstevel@tonic-gate 	 * so we do it in the end of the handler to avoid races
33240Sstevel@tonic-gate 	 */
33250Sstevel@tonic-gate 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
33260Sstevel@tonic-gate 
33270Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
33280Sstevel@tonic-gate }
33290Sstevel@tonic-gate 
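/*
 * Note on the drain timer above: when the FIFO is already empty (or the port
 * is in DIAG mode) ecpp_fifo_timer() is scheduled with no extra delay;
 * otherwise it is deferred by FIFO_DRAIN_PERIOD us, converted to ticks with
 * drv_usectohz(9F), so the remaining FIFO bytes can drain before cleanup.
 */
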
33300Sstevel@tonic-gate /*
33310Sstevel@tonic-gate  * ecpp_pio_ihdlr() is a PIO interrupt processing routine
33320Sstevel@tonic-gate  * It masks interrupts, updates statistics and initiates next byte transfer
33330Sstevel@tonic-gate  */
33340Sstevel@tonic-gate static uint_t
33350Sstevel@tonic-gate ecpp_pio_ihdlr(struct ecppunit *pp)
33360Sstevel@tonic-gate {
33370Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
33380Sstevel@tonic-gate 	ASSERT(pp->e_busy == ECPP_BUSY);
33390Sstevel@tonic-gate 
33400Sstevel@tonic-gate 	/* update statistics */
33410Sstevel@tonic-gate 	pp->joblen++;
33420Sstevel@tonic-gate 	pp->ctxpio_obytes++;
33430Sstevel@tonic-gate 
33440Sstevel@tonic-gate 	/* disable nAck interrupts */
33450Sstevel@tonic-gate 	ECPP_MASK_INTR(pp);
33460Sstevel@tonic-gate 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN));
33470Sstevel@tonic-gate 
33480Sstevel@tonic-gate 	/*
33490Sstevel@tonic-gate 	 * If it was the last byte of the data block cleanup,
33500Sstevel@tonic-gate 	 * otherwise trigger a soft interrupt to send the next byte
33510Sstevel@tonic-gate 	 */
33520Sstevel@tonic-gate 	if (pp->next_byte >= pp->last_byte) {
33530Sstevel@tonic-gate 		ecpp_xfer_cleanup(pp);
33540Sstevel@tonic-gate 		ecpp_error(pp->dip,
33557656SSherry.Moore@Sun.COM 		    "ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n",
33567656SSherry.Moore@Sun.COM 		    pp->joblen, pp->ctx_cf);
33570Sstevel@tonic-gate 	} else {
33580Sstevel@tonic-gate 		if (pp->softintr_pending) {
33590Sstevel@tonic-gate 			ecpp_error(pp->dip,
33607656SSherry.Moore@Sun.COM 			    "ecpp_pio_ihdlr:E: next byte in progress\n");
33610Sstevel@tonic-gate 		} else {
33620Sstevel@tonic-gate 			pp->softintr_flags = ECPP_SOFTINTR_PIONEXT;
33630Sstevel@tonic-gate 			pp->softintr_pending = 1;
33640Sstevel@tonic-gate 			ddi_trigger_softintr(pp->softintr_id);
33650Sstevel@tonic-gate 		}
33660Sstevel@tonic-gate 	}
33670Sstevel@tonic-gate 
33680Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
33690Sstevel@tonic-gate }
33700Sstevel@tonic-gate 
33710Sstevel@tonic-gate /*
33720Sstevel@tonic-gate  * ecpp_pio_writeb() sends a byte using Centronics handshake
33730Sstevel@tonic-gate  */
33740Sstevel@tonic-gate static void
33750Sstevel@tonic-gate ecpp_pio_writeb(struct ecppunit *pp)
33760Sstevel@tonic-gate {
33770Sstevel@tonic-gate 	uint8_t	dcr;
33780Sstevel@tonic-gate 
33790Sstevel@tonic-gate 	dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
33800Sstevel@tonic-gate 	dcr |= ECPP_INTR_EN;
33810Sstevel@tonic-gate 
33820Sstevel@tonic-gate 	/* send the next byte */
33830Sstevel@tonic-gate 	DATAR_WRITE(pp, *(pp->next_byte++));
33840Sstevel@tonic-gate 
33850Sstevel@tonic-gate 	drv_usecwait(pp->data_setup_time);
33860Sstevel@tonic-gate 
33870Sstevel@tonic-gate 	/* Now Assert (neg logic) nStrobe */
33880Sstevel@tonic-gate 	if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) {
33890Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n");
33900Sstevel@tonic-gate 	}
33910Sstevel@tonic-gate 
33920Sstevel@tonic-gate 	/* Enable nAck interrupts */
33930Sstevel@tonic-gate 	(void) DSR_READ(pp);	/* ensure IRQ_ST is armed */
33940Sstevel@tonic-gate 	ECPP_UNMASK_INTR(pp);
33950Sstevel@tonic-gate 
33960Sstevel@tonic-gate 	drv_usecwait(pp->strobe_pulse_width);
33970Sstevel@tonic-gate 
33980Sstevel@tonic-gate 	if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) {
33990Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n");
34000Sstevel@tonic-gate 	}
34010Sstevel@tonic-gate }
34020Sstevel@tonic-gate 
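/*
 * Handshake summary for ecpp_pio_writeb() above (forward Centronics byte):
 *
 *	host:	DATA <- next byte			(DATAR_WRITE)
 *	host:	wait data_setup_time us
 *	host:	assert nStrobe				(dcr | ECPP_STB)
 *	host:	re-arm and unmask nAck interrupt	(DSR_READ, ECPP_UNMASK_INTR)
 *	host:	wait strobe_pulse_width us
 *	host:	deassert nStrobe			(dcr & ~ECPP_STB)
 *	periph:	latches the byte and pulses nAck, which lands in ecpp_pio_ihdlr()
 */
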
34030Sstevel@tonic-gate /*
34040Sstevel@tonic-gate  * Backchannel request interrupt handler
34050Sstevel@tonic-gate  */
34060Sstevel@tonic-gate static uint_t
34070Sstevel@tonic-gate ecpp_nErr_ihdlr(struct ecppunit *pp)
34080Sstevel@tonic-gate {
34090Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n",
34107656SSherry.Moore@Sun.COM 	    pp->current_mode, pp->current_phase);
34110Sstevel@tonic-gate 
34120Sstevel@tonic-gate 	if (pp->oflag != TRUE) {
34130Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n");
34140Sstevel@tonic-gate 		return (DDI_INTR_UNCLAIMED);
34150Sstevel@tonic-gate 	}
34160Sstevel@tonic-gate 
34170Sstevel@tonic-gate 	if (pp->e_busy == ECPP_BUSY) {
34180Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n");
34190Sstevel@tonic-gate 		ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
34200Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
34210Sstevel@tonic-gate 	}
34220Sstevel@tonic-gate 
34230Sstevel@tonic-gate 	/* mask nErr & nAck interrupts */
34240Sstevel@tonic-gate 	ECPP_MASK_INTR(pp);
34250Sstevel@tonic-gate 	DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR));
34260Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
34270Sstevel@tonic-gate 
34280Sstevel@tonic-gate 	/* going reverse */
34290Sstevel@tonic-gate 	switch (pp->current_mode) {
34300Sstevel@tonic-gate 	case ECPP_ECP_MODE:
34310Sstevel@tonic-gate 		/*
34320Sstevel@tonic-gate 		 * Peripheral asserts nPeriphRequest (nFault)
34330Sstevel@tonic-gate 		 */
34340Sstevel@tonic-gate 		break;
34350Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
34360Sstevel@tonic-gate 		/*
34370Sstevel@tonic-gate 		 * Event 18: Periph asserts nErr to indicate data avail
34380Sstevel@tonic-gate 		 * Event 19: After waiting minimum pulse width,
34390Sstevel@tonic-gate 		 *   periph sets nAck high to generate an interrupt
34400Sstevel@tonic-gate 		 *
34410Sstevel@tonic-gate 		 * Interface is in Interrupt Phase
34420Sstevel@tonic-gate 		 */
34430Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_REVINTR;
34440Sstevel@tonic-gate 
34450Sstevel@tonic-gate 		break;
34460Sstevel@tonic-gate 	default:
34470Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n");
34480Sstevel@tonic-gate 		return (DDI_INTR_UNCLAIMED);
34490Sstevel@tonic-gate 	}
34500Sstevel@tonic-gate 
34510Sstevel@tonic-gate 	(void) ecpp_backchan_req(pp);	/* put backchannel request on the wq */
34520Sstevel@tonic-gate 
34530Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
34540Sstevel@tonic-gate }
34550Sstevel@tonic-gate 
34560Sstevel@tonic-gate /*
34570Sstevel@tonic-gate  * Softintr handler does work according to softintr_flags:
34580Sstevel@tonic-gate  * in case of ECPP_SOFTINTR_PIONEXT it sends the next byte of the PIO transfer
34590Sstevel@tonic-gate  */
34600Sstevel@tonic-gate static uint_t
34610Sstevel@tonic-gate ecpp_softintr(caddr_t arg)
34620Sstevel@tonic-gate {
34630Sstevel@tonic-gate 	struct ecppunit *pp = (struct ecppunit *)arg;
34640Sstevel@tonic-gate 	uint32_t unx_len, ecpp_reattempts = 0;
34650Sstevel@tonic-gate 
34660Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
34670Sstevel@tonic-gate 
34680Sstevel@tonic-gate 	pp->intr_soft++;
34690Sstevel@tonic-gate 
34700Sstevel@tonic-gate 	if (!pp->softintr_pending) {
34710Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
34720Sstevel@tonic-gate 		return (DDI_INTR_CLAIMED);
34730Sstevel@tonic-gate 	} else {
34740Sstevel@tonic-gate 		pp->softintr_pending = 0;
34750Sstevel@tonic-gate 	}
34760Sstevel@tonic-gate 
34770Sstevel@tonic-gate 	if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) {
34780Sstevel@tonic-gate 		pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT;
34790Sstevel@tonic-gate 		/*
34800Sstevel@tonic-gate 		 * Send the next byte in PIO mode
34810Sstevel@tonic-gate 		 */
34820Sstevel@tonic-gate 		ecpp_reattempts = 0;
34830Sstevel@tonic-gate 		do {
34840Sstevel@tonic-gate 			if (ecpp_check_status(pp) == SUCCESS) {
34850Sstevel@tonic-gate 				pp->e_busy = ECPP_BUSY;
34860Sstevel@tonic-gate 				break;
34870Sstevel@tonic-gate 			}
34880Sstevel@tonic-gate 			drv_usecwait(1);
34890Sstevel@tonic-gate 			if (pp->isr_reattempt_high < ecpp_reattempts) {
34900Sstevel@tonic-gate 				pp->isr_reattempt_high = ecpp_reattempts;
34910Sstevel@tonic-gate 			}
34920Sstevel@tonic-gate 		} while (++ecpp_reattempts < pp->wait_for_busy);
34930Sstevel@tonic-gate 
34940Sstevel@tonic-gate 		/* peripheral still has not recovered - suspend the transfer */
34950Sstevel@tonic-gate 		if (pp->e_busy == ECPP_ERR) {
34960Sstevel@tonic-gate 			++pp->ctx_cf; /* check status fail */
34970Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_softintr:check_status:F: "
34987656SSherry.Moore@Sun.COM 			    "dsr=%x jl=%d cf_isr=%d\n",
34997656SSherry.Moore@Sun.COM 			    DSR_READ(pp), pp->joblen, pp->ctx_cf);
35000Sstevel@tonic-gate 
35010Sstevel@tonic-gate 			/*
35020Sstevel@tonic-gate 			 * if status signals are bad,
35030Sstevel@tonic-gate 			 * put everything back on the wq.
35040Sstevel@tonic-gate 			 */
35050Sstevel@tonic-gate 			unx_len = pp->last_byte - pp->next_byte;
35060Sstevel@tonic-gate 			if (pp->msg != NULL) {
35070Sstevel@tonic-gate 				ecpp_putback_untransfered(pp,
35087656SSherry.Moore@Sun.COM 				    (void *)pp->msg->b_rptr, unx_len);
35090Sstevel@tonic-gate 				ecpp_error(pp->dip,
35100Sstevel@tonic-gate 				    "ecpp_softintr:e1:unx_len=%d\n", unx_len);
35110Sstevel@tonic-gate 
35120Sstevel@tonic-gate 				freemsg(pp->msg);
35130Sstevel@tonic-gate 				pp->msg = NULL;
35140Sstevel@tonic-gate 			} else {
35150Sstevel@tonic-gate 				ecpp_putback_untransfered(pp,
35167656SSherry.Moore@Sun.COM 				    pp->next_byte, unx_len);
35170Sstevel@tonic-gate 				ecpp_error(pp->dip,
35180Sstevel@tonic-gate 				    "ecpp_softintr:e2:unx_len=%d\n", unx_len);
35190Sstevel@tonic-gate 			}
35200Sstevel@tonic-gate 
35210Sstevel@tonic-gate 			ecpp_xfer_cleanup(pp);
35220Sstevel@tonic-gate 			pp->e_busy = ECPP_ERR;
35230Sstevel@tonic-gate 			qenable(pp->writeq);
35240Sstevel@tonic-gate 		} else {
35250Sstevel@tonic-gate 			/* send the next one */
35260Sstevel@tonic-gate 			pp->e_busy = ECPP_BUSY;
35270Sstevel@tonic-gate 			(void) ecpp_pio_writeb(pp);
35280Sstevel@tonic-gate 		}
35290Sstevel@tonic-gate 	}
35300Sstevel@tonic-gate 
35310Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
35320Sstevel@tonic-gate 	return (DDI_INTR_CLAIMED);
35330Sstevel@tonic-gate }
35340Sstevel@tonic-gate 
35350Sstevel@tonic-gate 
35360Sstevel@tonic-gate /*
35370Sstevel@tonic-gate  * Transfer clean-up:
35380Sstevel@tonic-gate  * 	shut down the DMAC
35390Sstevel@tonic-gate  *	stop the transfer timer
35400Sstevel@tonic-gate  *	enable write queue
35410Sstevel@tonic-gate  */
35420Sstevel@tonic-gate static void
35430Sstevel@tonic-gate ecpp_xfer_cleanup(struct ecppunit *pp)
35440Sstevel@tonic-gate {
35450Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
35460Sstevel@tonic-gate 
35470Sstevel@tonic-gate 	/*
35480Sstevel@tonic-gate 	 * if we did not use the ioblock, the mblk that
35490Sstevel@tonic-gate 	 * was used should be freed.
35500Sstevel@tonic-gate 	 */
35510Sstevel@tonic-gate 	if (pp->msg != NULL) {
35520Sstevel@tonic-gate 		freemsg(pp->msg);
35530Sstevel@tonic-gate 		pp->msg = NULL;
35540Sstevel@tonic-gate 	}
35550Sstevel@tonic-gate 
35560Sstevel@tonic-gate 	/* The port is no longer active */
35570Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
35580Sstevel@tonic-gate 
35590Sstevel@tonic-gate 	/* Stop the transfer timeout timer */
35600Sstevel@tonic-gate 	ecpp_untimeout_unblock(pp, &pp->timeout_id);
35610Sstevel@tonic-gate 
35620Sstevel@tonic-gate 	qenable(pp->writeq);
35630Sstevel@tonic-gate }
35640Sstevel@tonic-gate 
35650Sstevel@tonic-gate /*VARARGS*/
35660Sstevel@tonic-gate static void
35670Sstevel@tonic-gate ecpp_error(dev_info_t *dip, char *fmt, ...)
35680Sstevel@tonic-gate {
35690Sstevel@tonic-gate 	static	long	last;
35700Sstevel@tonic-gate 	static	char	*lastfmt;
35710Sstevel@tonic-gate 	char		msg_buffer[255];
35720Sstevel@tonic-gate 	va_list	ap;
35730Sstevel@tonic-gate 	time_t	now;
35740Sstevel@tonic-gate 
35750Sstevel@tonic-gate 	if (!ecpp_debug) {
35760Sstevel@tonic-gate 		return;
35770Sstevel@tonic-gate 	}
35780Sstevel@tonic-gate 
35790Sstevel@tonic-gate 	/*
35800Sstevel@tonic-gate 	 * This function is supposed to be a quick non-blockable
35810Sstevel@tonic-gate 	 * wrapper for cmn_err(9F), which provides a sensible degree
35820Sstevel@tonic-gate 	 * of debug message throttling.  Not using any type of lock
35830Sstevel@tonic-gate 	 * is a requirement, but this also leaves two static variables
35840Sstevel@tonic-gate 	 * - last and lastfmt - unprotected. However, this will not do
35850Sstevel@tonic-gate 	 * any harm to driver functionality; it can only weaken the throttling.
35860Sstevel@tonic-gate 	 * The following directive asks warlock to not worry about these
35870Sstevel@tonic-gate 	 * variables.
35880Sstevel@tonic-gate 	 */
35890Sstevel@tonic-gate 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(last, lastfmt))
35900Sstevel@tonic-gate 
35910Sstevel@tonic-gate 	/*
35920Sstevel@tonic-gate 	 * Don't print same error message too often.
35930Sstevel@tonic-gate 	 */
35940Sstevel@tonic-gate 	now = gethrestime_sec();
35950Sstevel@tonic-gate 	if ((last == (now & ~1)) && (lastfmt == fmt))
35960Sstevel@tonic-gate 		return;
35970Sstevel@tonic-gate 
35980Sstevel@tonic-gate 	last = now & ~1;
35990Sstevel@tonic-gate 	lastfmt = fmt;
36000Sstevel@tonic-gate 
36010Sstevel@tonic-gate 	va_start(ap, fmt);
36020Sstevel@tonic-gate 	(void) vsprintf(msg_buffer, fmt, ap);
36030Sstevel@tonic-gate 	cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip),
36047656SSherry.Moore@Sun.COM 	    ddi_get_instance(dip), msg_buffer);
36050Sstevel@tonic-gate 	va_end(ap);
36060Sstevel@tonic-gate }
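
/*
 * Editor's note (illustrative sketch, not part of the driver): the throttle
 * above keys on 2-second buckets of gethrestime_sec() plus the format-string
 * pointer, so a given message can print at most once per bucket.  The same
 * idea in isolation, with a hypothetical helper name, fenced with #if 0 so
 * it is never compiled:
 */
#if 0
static boolean_t
throttle_should_print(char *fmt)
{
	static long	last;		/* last 2-second bucket that printed */
	static char	*lastfmt;	/* format string printed in that bucket */
	long		bucket = gethrestime_sec() & ~1; /* round to even sec */

	if ((bucket == last) && (fmt == lastfmt))
		return (B_FALSE);	/* same message in the same bucket */

	last = bucket;
	lastfmt = fmt;
	return (B_TRUE);		/* new bucket or new format: print */
}
#endif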
36070Sstevel@tonic-gate 
36080Sstevel@tonic-gate /*
36090Sstevel@tonic-gate  * Forward transfer timeout
36100Sstevel@tonic-gate  */
36110Sstevel@tonic-gate static void
36120Sstevel@tonic-gate ecpp_xfer_timeout(void *arg)
36130Sstevel@tonic-gate {
36140Sstevel@tonic-gate 	struct ecppunit	*pp = arg;
36150Sstevel@tonic-gate 	void		*unx_addr;
36160Sstevel@tonic-gate 	size_t		unx_len, xferd;
36170Sstevel@tonic-gate 	uint8_t		dcr;
36180Sstevel@tonic-gate 	timeout_id_t	fifo_timer_id;
36190Sstevel@tonic-gate 
36200Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
36210Sstevel@tonic-gate 
36220Sstevel@tonic-gate 	if (pp->timeout_id == 0) {
36230Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
36240Sstevel@tonic-gate 		return;
36250Sstevel@tonic-gate 	} else {
36260Sstevel@tonic-gate 		pp->timeout_id = 0;
36270Sstevel@tonic-gate 	}
36280Sstevel@tonic-gate 
36290Sstevel@tonic-gate 	pp->xfer_tout++;
36300Sstevel@tonic-gate 
36310Sstevel@tonic-gate 	pp->dma_cancelled = TRUE;	/* prevent race with isr() */
36320Sstevel@tonic-gate 
36330Sstevel@tonic-gate 	if (COMPAT_PIO(pp)) {
36340Sstevel@tonic-gate 		/*
36350Sstevel@tonic-gate 		 * PIO mode timeout
36360Sstevel@tonic-gate 		 */
36370Sstevel@tonic-gate 
36380Sstevel@tonic-gate 		/* turn off nAck interrupts */
36390Sstevel@tonic-gate 		dcr = DCR_READ(pp);
36400Sstevel@tonic-gate 		(void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN));
36410Sstevel@tonic-gate 		ECPP_MASK_INTR(pp);
36420Sstevel@tonic-gate 
36430Sstevel@tonic-gate 		pp->softintr_pending = 0;
36440Sstevel@tonic-gate 		unx_len = pp->last_byte - pp->next_byte;
36450Sstevel@tonic-gate 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
36460Sstevel@tonic-gate 
36470Sstevel@tonic-gate 		if (unx_len > 0) {
36480Sstevel@tonic-gate 			unx_addr = pp->next_byte;
36490Sstevel@tonic-gate 		} else {
36500Sstevel@tonic-gate 			ecpp_xfer_cleanup(pp);
36510Sstevel@tonic-gate 			qenable(pp->writeq);
36520Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
36530Sstevel@tonic-gate 			return;
36540Sstevel@tonic-gate 		}
36550Sstevel@tonic-gate 	} else {
36560Sstevel@tonic-gate 		/*
36570Sstevel@tonic-gate 		 * DMA mode timeout
36580Sstevel@tonic-gate 		 *
36590Sstevel@tonic-gate 		 * If the DMAC fails to shut off, continue anyway and attempt
36600Sstevel@tonic-gate 		 * to put the untransferred data back on the queue.
36610Sstevel@tonic-gate 		 */
36620Sstevel@tonic-gate 		if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
36630Sstevel@tonic-gate 			ecpp_error(pp->dip,
36647656SSherry.Moore@Sun.COM 			    "ecpp_xfer_timeout: failed dma_stop\n");
36650Sstevel@tonic-gate 		}
36660Sstevel@tonic-gate 
36670Sstevel@tonic-gate 		ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
36680Sstevel@tonic-gate 
36690Sstevel@tonic-gate 		if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) {
36700Sstevel@tonic-gate 			ecpp_error(pp->dip,
36717656SSherry.Moore@Sun.COM 			    "ecpp_xfer_timeout: failed unbind\n");
36720Sstevel@tonic-gate 		}
36730Sstevel@tonic-gate 
36740Sstevel@tonic-gate 		/*
36750Sstevel@tonic-gate 		 * if the bcr is zero, then DMA is complete and
36760Sstevel@tonic-gate 		 * we are waiting for the fifo to drain.  So let
36770Sstevel@tonic-gate 		 * ecpp_fifo_timer() look after the clean up.
36780Sstevel@tonic-gate 		 */
36790Sstevel@tonic-gate 		if (unx_len == 0) {
36800Sstevel@tonic-gate 			qenable(pp->writeq);
36810Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
36820Sstevel@tonic-gate 			return;
36830Sstevel@tonic-gate 		} else {
36840Sstevel@tonic-gate 			xferd = pp->dma_cookie.dmac_size - unx_len;
36850Sstevel@tonic-gate 			pp->resid -= xferd;
36860Sstevel@tonic-gate 			unx_len = pp->resid;
36870Sstevel@tonic-gate 
36880Sstevel@tonic-gate 			/* update statistics */
36890Sstevel@tonic-gate 			pp->obytes[pp->current_mode] += xferd;
36900Sstevel@tonic-gate 			pp->joblen += xferd;
36910Sstevel@tonic-gate 
36920Sstevel@tonic-gate 			if (pp->msg != NULL) {
36930Sstevel@tonic-gate 				unx_addr = (caddr_t)pp->msg->b_wptr - unx_len;
36940Sstevel@tonic-gate 			} else {
36950Sstevel@tonic-gate 				unx_addr = pp->ioblock +
36967656SSherry.Moore@Sun.COM 				    (pp->xfercnt - unx_len);
36970Sstevel@tonic-gate 			}
36980Sstevel@tonic-gate 		}
36990Sstevel@tonic-gate 	}
37000Sstevel@tonic-gate 
37010Sstevel@tonic-gate 	/* Following code is common for PIO and DMA modes */
37020Sstevel@tonic-gate 
37030Sstevel@tonic-gate 	ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len);
37040Sstevel@tonic-gate 
37050Sstevel@tonic-gate 	if (pp->msg != NULL) {
37060Sstevel@tonic-gate 		freemsg(pp->msg);
37070Sstevel@tonic-gate 		pp->msg = NULL;
37080Sstevel@tonic-gate 	}
37090Sstevel@tonic-gate 
37100Sstevel@tonic-gate 	/* mark the error status structure */
37110Sstevel@tonic-gate 	pp->timeout_error = 1;
37120Sstevel@tonic-gate 	pp->e_busy = ECPP_ERR;
37130Sstevel@tonic-gate 	fifo_timer_id = pp->fifo_timer_id;
37140Sstevel@tonic-gate 	pp->fifo_timer_id = 0;
37150Sstevel@tonic-gate 
37160Sstevel@tonic-gate 	qenable(pp->writeq);
37170Sstevel@tonic-gate 
37180Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
37190Sstevel@tonic-gate 
37200Sstevel@tonic-gate 	if (fifo_timer_id) {
37210Sstevel@tonic-gate 		(void) untimeout(fifo_timer_id);
37220Sstevel@tonic-gate 	}
37230Sstevel@tonic-gate }
37240Sstevel@tonic-gate 
37250Sstevel@tonic-gate static void
37260Sstevel@tonic-gate ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len)
37270Sstevel@tonic-gate {
37280Sstevel@tonic-gate 	mblk_t *new_mp;
37290Sstevel@tonic-gate 
37300Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len);
37310Sstevel@tonic-gate 
37320Sstevel@tonic-gate 	if (len == 0) {
37330Sstevel@tonic-gate 		return;
37340Sstevel@tonic-gate 	}
37350Sstevel@tonic-gate 
37360Sstevel@tonic-gate 	new_mp = allocb(len, BPRI_MED);
37370Sstevel@tonic-gate 	if (new_mp == NULL) {
37380Sstevel@tonic-gate 		ecpp_error(pp->dip,
37397656SSherry.Moore@Sun.COM 		    "ecpp_putback_untransfered: allocb FAILURE.\n");
37400Sstevel@tonic-gate 		return;
37410Sstevel@tonic-gate 	}
37420Sstevel@tonic-gate 
37430Sstevel@tonic-gate 	bcopy(startp, new_mp->b_rptr, len);
37440Sstevel@tonic-gate 	new_mp->b_wptr = new_mp->b_rptr + len;
37450Sstevel@tonic-gate 
37460Sstevel@tonic-gate 	if (!putbq(pp->writeq, new_mp)) {
37470Sstevel@tonic-gate 		freemsg(new_mp);
37480Sstevel@tonic-gate 	}
37490Sstevel@tonic-gate }
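
/*
 * Editor's note: putbq(9F) above places the copy at the head of the write
 * queue (unlike putq(9F), which would append it), so when the write service
 * routine runs again the untransferred bytes are retried before any newer
 * messages and byte ordering on the wire is preserved.  If allocb() or
 * putbq() fails, the untransferred data is dropped with only a debug message.
 */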
37500Sstevel@tonic-gate 
37510Sstevel@tonic-gate static uchar_t
37520Sstevel@tonic-gate ecr_write(struct ecppunit *pp, uint8_t ecr_byte)
37530Sstevel@tonic-gate {
37540Sstevel@tonic-gate 	int i, current_ecr;
37550Sstevel@tonic-gate 
37560Sstevel@tonic-gate 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
37570Sstevel@tonic-gate 		ECR_WRITE(pp, ecr_byte);
37580Sstevel@tonic-gate 
37590Sstevel@tonic-gate 		current_ecr = ECR_READ(pp);
37600Sstevel@tonic-gate 
37610Sstevel@tonic-gate 		/* mask off the lower two read-only bits */
37620Sstevel@tonic-gate 		if ((ecr_byte & 0xFC) == (current_ecr & 0xFC))
37630Sstevel@tonic-gate 			return (SUCCESS);
37640Sstevel@tonic-gate 	}
37650Sstevel@tonic-gate 	return (FAILURE);
37660Sstevel@tonic-gate }
37670Sstevel@tonic-gate 
37680Sstevel@tonic-gate static uchar_t
37690Sstevel@tonic-gate dcr_write(struct ecppunit *pp, uint8_t dcr_byte)
37700Sstevel@tonic-gate {
37710Sstevel@tonic-gate 	uint8_t current_dcr;
37720Sstevel@tonic-gate 	int i;
37730Sstevel@tonic-gate 
37740Sstevel@tonic-gate 	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
37750Sstevel@tonic-gate 		DCR_WRITE(pp, dcr_byte);
37760Sstevel@tonic-gate 
37770Sstevel@tonic-gate 		current_dcr = DCR_READ(pp);
37780Sstevel@tonic-gate 
37790Sstevel@tonic-gate 		/* compare only bits 0-4 (the direction bit reads back as 1) */
37800Sstevel@tonic-gate 		if ((dcr_byte & 0x1F) == (current_dcr & 0x1F))
37810Sstevel@tonic-gate 			return (SUCCESS);
37820Sstevel@tonic-gate 	}
37830Sstevel@tonic-gate 	ecpp_error(pp->dip,
37847656SSherry.Moore@Sun.COM 	    "(%d)dcr_write: dcr written =%x, dcr readback =%x\n",
37857656SSherry.Moore@Sun.COM 	    i, dcr_byte, current_dcr);
37860Sstevel@tonic-gate 
37870Sstevel@tonic-gate 	return (FAILURE);
37880Sstevel@tonic-gate }
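
/*
 * Editor's note (illustrative sketch, not part of the driver): ecr_write()
 * and dcr_write() above share the same write/read-back/retry pattern and
 * differ only in which bits are compared (ECR masks off its two read-only
 * low bits, DCR compares bits 0-4).  A generic form, with hypothetical
 * helper and callback names, might look like this:
 */
#if 0
static uchar_t
reg_write_verified(struct ecppunit *pp, uint8_t value, uint8_t cmp_mask,
    void (*reg_wr)(struct ecppunit *, uint8_t),
    uint8_t (*reg_rd)(struct ecppunit *))
{
	int	i;

	for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
		reg_wr(pp, value);
		/* accept the write once the masked read-back matches */
		if ((value & cmp_mask) == (reg_rd(pp) & cmp_mask))
			return (SUCCESS);
	}
	return (FAILURE);	/* the register never took the value */
}
#endif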
37890Sstevel@tonic-gate 
37900Sstevel@tonic-gate static uchar_t
37910Sstevel@tonic-gate ecpp_reset_port_regs(struct ecppunit *pp)
37920Sstevel@tonic-gate {
37930Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
37940Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
37950Sstevel@tonic-gate 	return (SUCCESS);
37960Sstevel@tonic-gate }
37970Sstevel@tonic-gate 
37980Sstevel@tonic-gate /*
37990Sstevel@tonic-gate  * The data transferred by the DMA engine goes through the FIFO,
38000Sstevel@tonic-gate  * so that when the DMA counter reaches zero (and an interrupt occurs)
38010Sstevel@tonic-gate  * the FIFO can still contain data. If this is the case, the ISR will
38020Sstevel@tonic-gate  * schedule this callback to wait until the FIFO drains or a timeout occurs.
38030Sstevel@tonic-gate  */
38040Sstevel@tonic-gate static void
38050Sstevel@tonic-gate ecpp_fifo_timer(void *arg)
38060Sstevel@tonic-gate {
38070Sstevel@tonic-gate 	struct ecppunit *pp = arg;
38080Sstevel@tonic-gate 	uint8_t	ecr;
38090Sstevel@tonic-gate 	timeout_id_t	timeout_id;
38100Sstevel@tonic-gate 
38110Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
38120Sstevel@tonic-gate 
38130Sstevel@tonic-gate 	/*
38140Sstevel@tonic-gate 	 * If the FIFO timer has been turned off, exit.
38150Sstevel@tonic-gate 	 */
38160Sstevel@tonic-gate 	if (pp->fifo_timer_id == 0) {
38170Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n");
38180Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
38190Sstevel@tonic-gate 		return;
38200Sstevel@tonic-gate 	} else {
38210Sstevel@tonic-gate 		pp->fifo_timer_id = 0;
38220Sstevel@tonic-gate 	}
38230Sstevel@tonic-gate 
38240Sstevel@tonic-gate 	/*
38250Sstevel@tonic-gate 	 * If the FIFO is not empty, restart the timer: wait FIFO_DRAIN_PERIOD
38260Sstevel@tonic-gate 	 * (250 ms) and check the FIFO_EMPTY bit again.  Repeat until the FIFO
38270Sstevel@tonic-gate 	 * is empty or until 10 * FIFO_DRAIN_PERIOD (2.5 s total) expires.
38280Sstevel@tonic-gate 	 */
38290Sstevel@tonic-gate 	ecr = ECR_READ(pp);
38300Sstevel@tonic-gate 
38310Sstevel@tonic-gate 	if ((pp->current_mode != ECPP_DIAG_MODE) &&
38320Sstevel@tonic-gate 	    (((ecr & ECPP_FIFO_EMPTY) == 0) &&
38330Sstevel@tonic-gate 	    (pp->ecpp_drain_counter < 10))) {
38340Sstevel@tonic-gate 
38350Sstevel@tonic-gate 		ecpp_error(pp->dip,
38367656SSherry.Moore@Sun.COM 		    "ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n",
38377656SSherry.Moore@Sun.COM 		    pp->ecpp_drain_counter, ecr);
38380Sstevel@tonic-gate 
38390Sstevel@tonic-gate 		pp->fifo_timer_id = timeout(ecpp_fifo_timer,
38407656SSherry.Moore@Sun.COM 		    (caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD));
38410Sstevel@tonic-gate 		++pp->ecpp_drain_counter;
38420Sstevel@tonic-gate 
38430Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
38440Sstevel@tonic-gate 		return;
38450Sstevel@tonic-gate 	}
38460Sstevel@tonic-gate 
38470Sstevel@tonic-gate 	if (pp->current_mode != ECPP_DIAG_MODE) {
38480Sstevel@tonic-gate 		/*
38490Sstevel@tonic-gate 		 * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs
38500Sstevel@tonic-gate 		 * then don't wait any longer.  Simply clean up the transfer.
38510Sstevel@tonic-gate 		 */
38520Sstevel@tonic-gate 		if (pp->ecpp_drain_counter >= 10) {
38530Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_fifo_timer(%d):"
38547656SSherry.Moore@Sun.COM 			    " clearing FIFO,can't wait:ecr=%x\n",
38557656SSherry.Moore@Sun.COM 			    pp->ecpp_drain_counter, ecr);
38560Sstevel@tonic-gate 		} else {
38570Sstevel@tonic-gate 			ecpp_error(pp->dip,
38587656SSherry.Moore@Sun.COM 			    "ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n",
38597656SSherry.Moore@Sun.COM 			    pp->ecpp_drain_counter, ecr);
38600Sstevel@tonic-gate 		}
38610Sstevel@tonic-gate 
38620Sstevel@tonic-gate 		pp->ecpp_drain_counter = 0;
38630Sstevel@tonic-gate 	}
38640Sstevel@tonic-gate 
38650Sstevel@tonic-gate 	/*
38660Sstevel@tonic-gate 	 * Main section of routine:
38670Sstevel@tonic-gate 	 *  - stop the DMA transfer timer
38680Sstevel@tonic-gate 	 *  - program DMA with next cookie/window or unbind the DMA mapping
38690Sstevel@tonic-gate 	 *  - update stats
38700Sstevel@tonic-gate 	 *  - if last mblk in queue, signal to close() & return to idle state
38710Sstevel@tonic-gate 	 */
38720Sstevel@tonic-gate 
38730Sstevel@tonic-gate 	/* Stop the DMA transfer timeout timer */
38740Sstevel@tonic-gate 	timeout_id = pp->timeout_id;
38750Sstevel@tonic-gate 	pp->timeout_id = 0;
38760Sstevel@tonic-gate 
38770Sstevel@tonic-gate 	/* data has drained from fifo, it is ok to free dma resource */
38780Sstevel@tonic-gate 	if (pp->current_mode == ECPP_ECP_MODE ||
38790Sstevel@tonic-gate 	    pp->current_mode == ECPP_DIAG_MODE ||
38800Sstevel@tonic-gate 	    COMPAT_DMA(pp)) {
38810Sstevel@tonic-gate 		off_t	off;
38820Sstevel@tonic-gate 		size_t	len;
38830Sstevel@tonic-gate 
38840Sstevel@tonic-gate 		/* update residual */
38850Sstevel@tonic-gate 		pp->resid -= pp->dma_cookie.dmac_size;
38860Sstevel@tonic-gate 
38870Sstevel@tonic-gate 		/* update statistics */
38880Sstevel@tonic-gate 		pp->joblen += pp->dma_cookie.dmac_size;
38890Sstevel@tonic-gate 		if (pp->dma_dir == DDI_DMA_WRITE) {
38900Sstevel@tonic-gate 			pp->obytes[pp->current_mode] +=
38917656SSherry.Moore@Sun.COM 			    pp->dma_cookie.dmac_size;
38920Sstevel@tonic-gate 		} else {
38930Sstevel@tonic-gate 			pp->ibytes[pp->current_mode] +=
38947656SSherry.Moore@Sun.COM 			    pp->dma_cookie.dmac_size;
38950Sstevel@tonic-gate 		}
38960Sstevel@tonic-gate 
38970Sstevel@tonic-gate 		/*
38980Sstevel@tonic-gate 		 * See whether any cookies/windows are left
38990Sstevel@tonic-gate 		 */
39000Sstevel@tonic-gate 		if (--pp->dma_cookie_count > 0) {
39010Sstevel@tonic-gate 			/* process the next cookie */
39020Sstevel@tonic-gate 			ddi_dma_nextcookie(pp->dma_handle,
39037656SSherry.Moore@Sun.COM 			    &pp->dma_cookie);
39040Sstevel@tonic-gate 		} else if (pp->dma_curwin < pp->dma_nwin) {
39050Sstevel@tonic-gate 			/* process the next window */
39060Sstevel@tonic-gate 			if (ddi_dma_getwin(pp->dma_handle,
39070Sstevel@tonic-gate 			    pp->dma_curwin, &off, &len,
39080Sstevel@tonic-gate 			    &pp->dma_cookie,
39090Sstevel@tonic-gate 			    &pp->dma_cookie_count) != DDI_SUCCESS) {
39100Sstevel@tonic-gate 				ecpp_error(pp->dip,
39110Sstevel@tonic-gate 				    "ecpp_fifo_timer: ddi_dma_getwin failed\n");
39120Sstevel@tonic-gate 				goto dma_done;
39130Sstevel@tonic-gate 			}
39140Sstevel@tonic-gate 
39150Sstevel@tonic-gate 			pp->dma_curwin++;
39160Sstevel@tonic-gate 		} else {
39170Sstevel@tonic-gate 			goto dma_done;
39180Sstevel@tonic-gate 		}
39190Sstevel@tonic-gate 
39200Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n",
39217656SSherry.Moore@Sun.COM 		    pp->dma_cookie.dmac_address,
39227656SSherry.Moore@Sun.COM 		    pp->dma_cookie.dmac_size);
39230Sstevel@tonic-gate 
39240Sstevel@tonic-gate 		/* kick off new transfer */
39250Sstevel@tonic-gate 		if (ECPP_DMA_START(pp) != SUCCESS) {
39260Sstevel@tonic-gate 			ecpp_error(pp->dip,
39277656SSherry.Moore@Sun.COM 			    "ecpp_fifo_timer: dma_start failed\n");
39280Sstevel@tonic-gate 			goto dma_done;
39290Sstevel@tonic-gate 		}
39300Sstevel@tonic-gate 
39310Sstevel@tonic-gate 		(void) ecr_write(pp, (ecr & 0xe0) |
39327656SSherry.Moore@Sun.COM 		    ECPP_DMA_ENABLE | ECPP_INTR_MASK);
39330Sstevel@tonic-gate 
39340Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
39350Sstevel@tonic-gate 
39360Sstevel@tonic-gate 		if (timeout_id) {
39370Sstevel@tonic-gate 			(void) untimeout(timeout_id);
39380Sstevel@tonic-gate 		}
39390Sstevel@tonic-gate 		return;
39400Sstevel@tonic-gate 
39410Sstevel@tonic-gate 	dma_done:
39420Sstevel@tonic-gate 		if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
39430Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n");
39440Sstevel@tonic-gate 		} else {
39450Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n");
39460Sstevel@tonic-gate 		}
39470Sstevel@tonic-gate 	}
39480Sstevel@tonic-gate 
39490Sstevel@tonic-gate 	/*
39500Sstevel@tonic-gate 	 * if we did not use the dmablock, the mblk that
39510Sstevel@tonic-gate 	 * was used should be freed.
39520Sstevel@tonic-gate 	 */
39530Sstevel@tonic-gate 	if (pp->msg != NULL) {
39540Sstevel@tonic-gate 		freemsg(pp->msg);
39550Sstevel@tonic-gate 		pp->msg = NULL;
39560Sstevel@tonic-gate 	}
39570Sstevel@tonic-gate 
39580Sstevel@tonic-gate 	/* The port is no longer active */
39590Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
39600Sstevel@tonic-gate 
39610Sstevel@tonic-gate 	qenable(pp->writeq);
39620Sstevel@tonic-gate 
39630Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
39640Sstevel@tonic-gate 
39650Sstevel@tonic-gate 	if (timeout_id) {
39660Sstevel@tonic-gate 		(void) untimeout(timeout_id);
39670Sstevel@tonic-gate 	}
39680Sstevel@tonic-gate }
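
/*
 * Editor's note (illustrative outline, not part of the driver): the
 * cookie/window walk above follows the usual DDI DMA pattern - consume every
 * cookie of the current window with ddi_dma_nextcookie(9F), then move to the
 * next window with ddi_dma_getwin(9F), and unbind the handle only when both
 * are exhausted.  With off and len as in the function above:
 */
#if 0
	if (--pp->dma_cookie_count > 0) {
		/* more cookies remain in the current DMA window */
		ddi_dma_nextcookie(pp->dma_handle, &pp->dma_cookie);
	} else if (pp->dma_curwin < pp->dma_nwin) {
		/* advance to the next window, refilling the cookie set */
		(void) ddi_dma_getwin(pp->dma_handle, pp->dma_curwin++,
		    &off, &len, &pp->dma_cookie, &pp->dma_cookie_count);
	} else {
		/* no cookies or windows left: unbind and finish the job */
		(void) ddi_dma_unbind_handle(pp->dma_handle);
	}
#endif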
39690Sstevel@tonic-gate 
39700Sstevel@tonic-gate /*
39710Sstevel@tonic-gate  * In Compatibility mode, check if the peripheral is ready to accept data
39720Sstevel@tonic-gate  */
39730Sstevel@tonic-gate static uint8_t
39740Sstevel@tonic-gate ecpp_check_status(struct ecppunit *pp)
39750Sstevel@tonic-gate {
39760Sstevel@tonic-gate 	uint8_t	dsr;
39770Sstevel@tonic-gate 	uint8_t statmask;
39780Sstevel@tonic-gate 
39790Sstevel@tonic-gate 	if (pp->current_mode == ECPP_ECP_MODE ||
39800Sstevel@tonic-gate 	    pp->current_mode == ECPP_DIAG_MODE)
39810Sstevel@tonic-gate 		return (SUCCESS);
39820Sstevel@tonic-gate 
39830Sstevel@tonic-gate 	statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK;
39840Sstevel@tonic-gate 
39850Sstevel@tonic-gate 	dsr = DSR_READ(pp);
39860Sstevel@tonic-gate 	if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) {
39870Sstevel@tonic-gate 		pp->e_busy = ECPP_ERR;
39880Sstevel@tonic-gate 		return (FAILURE);
39890Sstevel@tonic-gate 	} else {
39900Sstevel@tonic-gate 		return (SUCCESS);
39910Sstevel@tonic-gate 	}
39920Sstevel@tonic-gate }
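
/*
 * Editor's note: with the mask above the peripheral is considered ready only
 * when nErr, Select, nBusy and nAck all read back set and PE is clear
 * (roughly: no fault, on-line, not busy, no ack pulse in flight, no paper
 * error); any other combination marks the port ECPP_ERR so the caller can
 * back off and retry later.
 */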
39930Sstevel@tonic-gate 
39940Sstevel@tonic-gate /*
39950Sstevel@tonic-gate  * if the peripheral is not ready to accept data, write service routine
39960Sstevel@tonic-gate  * periodically reschedules itself to recheck peripheral status
39970Sstevel@tonic-gate  * and start data transfer as soon as possible
39980Sstevel@tonic-gate  */
39990Sstevel@tonic-gate static void
40000Sstevel@tonic-gate ecpp_wsrv_timer(void *arg)
40010Sstevel@tonic-gate {
40020Sstevel@tonic-gate 	struct ecppunit *pp = arg;
40030Sstevel@tonic-gate 
40040Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n");
40050Sstevel@tonic-gate 
40060Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
40070Sstevel@tonic-gate 
40080Sstevel@tonic-gate 	if (pp->wsrv_timer_id == 0) {
40090Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
40100Sstevel@tonic-gate 		return;
40110Sstevel@tonic-gate 	} else {
40120Sstevel@tonic-gate 		pp->wsrv_timer_id = 0;
40130Sstevel@tonic-gate 	}
40140Sstevel@tonic-gate 
40150Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n");
40160Sstevel@tonic-gate 
40170Sstevel@tonic-gate 	qenable(pp->writeq);
40180Sstevel@tonic-gate 
40190Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
40200Sstevel@tonic-gate }
40210Sstevel@tonic-gate 
40220Sstevel@tonic-gate /*
40230Sstevel@tonic-gate  * Allocate a message indicating a backchannel request
40240Sstevel@tonic-gate  * and put it on the write queue
40250Sstevel@tonic-gate  */
40260Sstevel@tonic-gate static int
40270Sstevel@tonic-gate ecpp_backchan_req(struct ecppunit *pp)
40280Sstevel@tonic-gate {
40290Sstevel@tonic-gate 	mblk_t	*mp;
40300Sstevel@tonic-gate 
40310Sstevel@tonic-gate 	if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) {
40320Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n");
40330Sstevel@tonic-gate 		return (FAILURE);
40340Sstevel@tonic-gate 	} else {
40350Sstevel@tonic-gate 		mp->b_datap->db_type = M_CTL;
40360Sstevel@tonic-gate 		*(int *)mp->b_rptr = ECPP_BACKCHANNEL;
40370Sstevel@tonic-gate 		mp->b_wptr = mp->b_rptr + sizeof (int);
40380Sstevel@tonic-gate 		if (!putbq(pp->writeq, mp)) {
40390Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n");
40400Sstevel@tonic-gate 			freemsg(mp);
40410Sstevel@tonic-gate 			return (FAILURE);
40420Sstevel@tonic-gate 		}
40430Sstevel@tonic-gate 		return (SUCCESS);
40440Sstevel@tonic-gate 	}
40450Sstevel@tonic-gate }
40460Sstevel@tonic-gate 
40470Sstevel@tonic-gate /*
40480Sstevel@tonic-gate  * Cancel the function scheduled with timeout(9F)
40490Sstevel@tonic-gate  * This function is to be called with the mutex held
40500Sstevel@tonic-gate  */
40510Sstevel@tonic-gate static void
40520Sstevel@tonic-gate ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id)
40530Sstevel@tonic-gate {
40540Sstevel@tonic-gate 	timeout_id_t	saved_id;
40550Sstevel@tonic-gate 
40560Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
40570Sstevel@tonic-gate 
40580Sstevel@tonic-gate 	if (*id) {
40590Sstevel@tonic-gate 		saved_id = *id;
40600Sstevel@tonic-gate 		*id = 0;
40610Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
40620Sstevel@tonic-gate 		(void) untimeout(saved_id);
40630Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
40640Sstevel@tonic-gate 	}
40650Sstevel@tonic-gate }
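
/*
 * Editor's note: the mutex is dropped around untimeout(9F) above because
 * untimeout() waits for a running callback to complete, and the timeout
 * callbacks in this driver acquire pp->umutex themselves - calling
 * untimeout() with the mutex held could therefore deadlock.  Clearing *id
 * before dropping the lock lets a concurrently running callback notice that
 * it has been cancelled and bail out.  A hypothetical call site:
 */
#if 0
	mutex_enter(&pp->umutex);
	/* ... transfer state already torn down ... */
	ecpp_untimeout_unblock(pp, &pp->timeout_id);	/* cancel safely */
	mutex_exit(&pp->umutex);
#endif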
40660Sstevel@tonic-gate 
40670Sstevel@tonic-gate /*
40680Sstevel@tonic-gate  * get prnio interface capabilities
40690Sstevel@tonic-gate  */
40700Sstevel@tonic-gate static uint_t
40710Sstevel@tonic-gate ecpp_get_prn_ifcap(struct ecppunit *pp)
40720Sstevel@tonic-gate {
40730Sstevel@tonic-gate 	uint_t	ifcap;
40740Sstevel@tonic-gate 
40750Sstevel@tonic-gate 	ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS;
40760Sstevel@tonic-gate 
40770Sstevel@tonic-gate 	/* status (DSR) only makes sense in Centronics & Compat modes */
40780Sstevel@tonic-gate 	if (pp->current_mode == ECPP_CENTRONICS ||
40790Sstevel@tonic-gate 	    pp->current_mode == ECPP_COMPAT_MODE) {
40800Sstevel@tonic-gate 		ifcap |= PRN_1284_STATUS;
40810Sstevel@tonic-gate 	} else if (pp->current_mode == ECPP_NIBBLE_MODE ||
40827656SSherry.Moore@Sun.COM 	    pp->current_mode == ECPP_ECP_MODE) {
40830Sstevel@tonic-gate 		ifcap |= PRN_BIDI;
40840Sstevel@tonic-gate 	}
40850Sstevel@tonic-gate 
40860Sstevel@tonic-gate 	return (ifcap);
40870Sstevel@tonic-gate }
40880Sstevel@tonic-gate 
40890Sstevel@tonic-gate /*
40900Sstevel@tonic-gate  * Determine SuperI/O type
40910Sstevel@tonic-gate  */
40920Sstevel@tonic-gate static struct ecpp_hw_bind *
40930Sstevel@tonic-gate ecpp_determine_sio_type(struct ecppunit *pp)
40940Sstevel@tonic-gate {
40950Sstevel@tonic-gate 	struct ecpp_hw_bind	*hw_bind;
40960Sstevel@tonic-gate 	char			*name;
40970Sstevel@tonic-gate 	int			i;
40980Sstevel@tonic-gate 
40990Sstevel@tonic-gate 	name = ddi_binding_name(pp->dip);
41000Sstevel@tonic-gate 
41010Sstevel@tonic-gate 	for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) {
41020Sstevel@tonic-gate 		if (strcmp(name, ecpp_hw_bind[i].name) == 0) {
41030Sstevel@tonic-gate 			hw_bind = &ecpp_hw_bind[i];
41040Sstevel@tonic-gate 			break;
41050Sstevel@tonic-gate 		}
41060Sstevel@tonic-gate 	}
41070Sstevel@tonic-gate 
41080Sstevel@tonic-gate 	return (hw_bind);
41090Sstevel@tonic-gate }
41100Sstevel@tonic-gate 
41110Sstevel@tonic-gate 
41120Sstevel@tonic-gate /*
41130Sstevel@tonic-gate  *
41140Sstevel@tonic-gate  * IEEE 1284 support routines:
41150Sstevel@tonic-gate  * 	negotiation and termination;
41160Sstevel@tonic-gate  *	phase transitions;
41170Sstevel@tonic-gate  *	device ID;
41180Sstevel@tonic-gate  *
41190Sstevel@tonic-gate  */
41200Sstevel@tonic-gate 
41210Sstevel@tonic-gate /*
41220Sstevel@tonic-gate  * Interface initialization, abnormal termination into Compatibility mode
41230Sstevel@tonic-gate  *
41240Sstevel@tonic-gate  * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS
41250Sstevel@tonic-gate  */
41260Sstevel@tonic-gate static void
41270Sstevel@tonic-gate ecpp_1284_init_interface(struct ecppunit *pp)
41280Sstevel@tonic-gate {
41290Sstevel@tonic-gate 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
41300Sstevel@tonic-gate 
41310Sstevel@tonic-gate 	/*
41320Sstevel@tonic-gate 	 * Toggle the nInit signal if configured in ecpp.conf;
41330Sstevel@tonic-gate 	 * for most peripherals it is not needed.
41340Sstevel@tonic-gate 	 */
41350Sstevel@tonic-gate 	if (pp->init_seq == TRUE) {
41360Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_SLCTIN);
41370Sstevel@tonic-gate 		drv_usecwait(50);	/* T(ER) = 50us */
41380Sstevel@tonic-gate 	}
41390Sstevel@tonic-gate 
41400Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
41410Sstevel@tonic-gate 
41420Sstevel@tonic-gate 	pp->current_mode = pp->backchannel = ECPP_CENTRONICS;
41430Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_C_IDLE;
41440Sstevel@tonic-gate 	ECPP_CONFIG_MODE(pp);
41450Sstevel@tonic-gate 	pp->to_mode[pp->current_mode]++;
41460Sstevel@tonic-gate 
41470Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n");
41480Sstevel@tonic-gate }
41490Sstevel@tonic-gate 
41500Sstevel@tonic-gate /*
41510Sstevel@tonic-gate  * ECP mode negotiation
41520Sstevel@tonic-gate  */
41530Sstevel@tonic-gate static int
41540Sstevel@tonic-gate ecp_negotiation(struct ecppunit *pp)
41550Sstevel@tonic-gate {
41560Sstevel@tonic-gate 	uint8_t dsr;
41570Sstevel@tonic-gate 
41580Sstevel@tonic-gate 	/* ECP mode negotiation */
41590Sstevel@tonic-gate 
41600Sstevel@tonic-gate 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE)
41610Sstevel@tonic-gate 		return (FAILURE);
41620Sstevel@tonic-gate 
41630Sstevel@tonic-gate 	/* Event 5: peripheral deasserts PError and Busy, asserts Select */
41640Sstevel@tonic-gate 	if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) !=
41657656SSherry.Moore@Sun.COM 	    (ECPP_nBUSY | ECPP_SLCT)) {
41660Sstevel@tonic-gate 		ecpp_error(pp->dip,
41677656SSherry.Moore@Sun.COM 		    "ecp_negotiation: failed event 5 %x\n", DSR_READ(pp));
41680Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
41690Sstevel@tonic-gate 		return (FAILURE);
41700Sstevel@tonic-gate 	}
41710Sstevel@tonic-gate 
41720Sstevel@tonic-gate 	/* entered Setup Phase */
41730Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_SETUP;
41740Sstevel@tonic-gate 
41750Sstevel@tonic-gate 	/* Event 30: host asserts nAutoFd */
41760Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
41770Sstevel@tonic-gate 
41780Sstevel@tonic-gate 	/* Event 31: peripheral asserts PError */
41790Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
41800Sstevel@tonic-gate 		ecpp_error(pp->dip,
41817656SSherry.Moore@Sun.COM 		    "ecp_negotiation: failed event 31 %x\n", DSR_READ(pp));
41820Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
41830Sstevel@tonic-gate 		return (FAILURE);
41840Sstevel@tonic-gate 	}
41850Sstevel@tonic-gate 
41860Sstevel@tonic-gate 	/* entered Forward Idle Phase */
41870Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
41880Sstevel@tonic-gate 
41890Sstevel@tonic-gate 	/* successful negotiation into ECP mode */
41900Sstevel@tonic-gate 	pp->current_mode = ECPP_ECP_MODE;
41910Sstevel@tonic-gate 	pp->backchannel = ECPP_ECP_MODE;
41920Sstevel@tonic-gate 
41930Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecp_negotiation: ok\n");
41940Sstevel@tonic-gate 
41950Sstevel@tonic-gate 	return (SUCCESS);
41960Sstevel@tonic-gate }
41970Sstevel@tonic-gate 
41980Sstevel@tonic-gate /*
41990Sstevel@tonic-gate  * Nibble mode negotiation
42000Sstevel@tonic-gate  */
42010Sstevel@tonic-gate static int
42020Sstevel@tonic-gate nibble_negotiation(struct ecppunit *pp)
42030Sstevel@tonic-gate {
42040Sstevel@tonic-gate 	uint8_t	dsr;
42050Sstevel@tonic-gate 
42060Sstevel@tonic-gate 	if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) {
42070Sstevel@tonic-gate 		return (FAILURE);
42080Sstevel@tonic-gate 	}
42090Sstevel@tonic-gate 
42100Sstevel@tonic-gate 	/*
42110Sstevel@tonic-gate 	 * If peripheral has data available, PE and nErr will
42120Sstevel@tonic-gate 	 * be set low at Event 5 & 6.
42130Sstevel@tonic-gate 	 */
42140Sstevel@tonic-gate 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
42150Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
42160Sstevel@tonic-gate 	} else {
42170Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
42180Sstevel@tonic-gate 	}
42190Sstevel@tonic-gate 
42200Sstevel@tonic-gate 	/* successful negotiation into Nibble mode */
42210Sstevel@tonic-gate 	pp->current_mode = ECPP_NIBBLE_MODE;
42220Sstevel@tonic-gate 	pp->backchannel = ECPP_NIBBLE_MODE;
42230Sstevel@tonic-gate 
42240Sstevel@tonic-gate 	ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n",
42257656SSherry.Moore@Sun.COM 	    pp->current_phase);
42260Sstevel@tonic-gate 
42270Sstevel@tonic-gate 	return (SUCCESS);
42280Sstevel@tonic-gate 
42290Sstevel@tonic-gate }
42300Sstevel@tonic-gate 
42310Sstevel@tonic-gate /*
42320Sstevel@tonic-gate  * Wait ptimeout usec for periph to set 'mask' bits to 'val' state
42330Sstevel@tonic-gate  *
42340Sstevel@tonic-gate  * return value < 0 indicates timeout
42350Sstevel@tonic-gate  */
42360Sstevel@tonic-gate static int
42370Sstevel@tonic-gate wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout)
42380Sstevel@tonic-gate {
42390Sstevel@tonic-gate 	while (((DSR_READ(pp) & mask) != val) && ptimeout--) {
42400Sstevel@tonic-gate 		drv_usecwait(1);
42410Sstevel@tonic-gate 	}
42420Sstevel@tonic-gate 
42430Sstevel@tonic-gate 	return (ptimeout);
42440Sstevel@tonic-gate }
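
/*
 * Editor's note (illustrative sketch): wait_dsr() is a 1-microsecond
 * granularity busy wait; a negative return means the peripheral never
 * brought the masked DSR bits to the requested value within ptimeout
 * microseconds.  A typical caller, mirroring Event 6 of the negotiation
 * below, looks like this:
 */
#if 0
	/* wait up to 35ms for the peripheral to set nAck high */
	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
		/* timed out: fall back to Compatibility mode */
		(void) ecpp_1284_termination(pp);
		return (FAILURE);
	}
#endif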
42450Sstevel@tonic-gate 
42460Sstevel@tonic-gate /*
42470Sstevel@tonic-gate  * 1284 negotiation Events 0..6
42480Sstevel@tonic-gate  * required mode is indicated by extensibility request value
42490Sstevel@tonic-gate  *
42500Sstevel@tonic-gate  * After successful negotiation SUCCESS is returned and
42510Sstevel@tonic-gate  * current mode is set according to xreq,
42520Sstevel@tonic-gate  * otherwise FAILURE is returned and current mode is set to
42530Sstevel@tonic-gate  * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph)
42540Sstevel@tonic-gate  *
42550Sstevel@tonic-gate  * Current phase must be set by the caller (mode-specific negotiation)
42560Sstevel@tonic-gate  *
42570Sstevel@tonic-gate  * If rdsr is not NULL, DSR value after Event 6 is stored here
42580Sstevel@tonic-gate  */
42590Sstevel@tonic-gate static int
42600Sstevel@tonic-gate ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr)
42610Sstevel@tonic-gate {
42620Sstevel@tonic-gate 	int xflag;
42630Sstevel@tonic-gate 
42640Sstevel@tonic-gate 	ecpp_error(pp->dip, "nego(%x): entering...\n", xreq);
42650Sstevel@tonic-gate 
42660Sstevel@tonic-gate 	/* negotiation should start in Compatibility mode */
42670Sstevel@tonic-gate 	(void) ecpp_1284_termination(pp);
42680Sstevel@tonic-gate 
42690Sstevel@tonic-gate 	/* Set host into Compat mode */
42700Sstevel@tonic-gate 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
42710Sstevel@tonic-gate 
42720Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_NEGO;
42730Sstevel@tonic-gate 
42740Sstevel@tonic-gate 	/* Event 0: host sets extensibility request on data lines */
42750Sstevel@tonic-gate 	DATAR_WRITE(pp, xreq);
42760Sstevel@tonic-gate 
42770Sstevel@tonic-gate 	/* Event 1: host deasserts nSelectIn and asserts nAutoFd */
42780Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
42790Sstevel@tonic-gate 
42800Sstevel@tonic-gate 	drv_usecwait(1);	/* Tp(ecp) == 0.5us */
42810Sstevel@tonic-gate 
42820Sstevel@tonic-gate 	/*
42830Sstevel@tonic-gate 	 * Event 2: peripheral asserts nAck, deasserts nFault,
42840Sstevel@tonic-gate 	 * 			asserts Select, asserts PError
42850Sstevel@tonic-gate 	 */
42860Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK,
42877656SSherry.Moore@Sun.COM 	    ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) {
42880Sstevel@tonic-gate 		/* peripheral is not 1284-compliant */
42890Sstevel@tonic-gate 		ecpp_error(pp->dip,
42907656SSherry.Moore@Sun.COM 		    "nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp));
42910Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
42920Sstevel@tonic-gate 		return (FAILURE);
42930Sstevel@tonic-gate 	}
42940Sstevel@tonic-gate 
42950Sstevel@tonic-gate 	/*
42960Sstevel@tonic-gate 	 * Event 3: host asserts nStrobe, latching the extensibility value into
42970Sstevel@tonic-gate 	 * the peripheral's input latch.
42980Sstevel@tonic-gate 	 */
42990Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB);
43000Sstevel@tonic-gate 
43010Sstevel@tonic-gate 	drv_usecwait(2);	/* Tp(ecp) = 0.5us */
43020Sstevel@tonic-gate 
43030Sstevel@tonic-gate 	/*
43040Sstevel@tonic-gate 	 * Event 4: host deasserts nStrobe and nAutoFd to acknowledge that
43050Sstevel@tonic-gate 	 * it has recognized a 1284-compatible peripheral
43060Sstevel@tonic-gate 	 */
43070Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT);
43080Sstevel@tonic-gate 
43090Sstevel@tonic-gate 	/*
43100Sstevel@tonic-gate 	 * Event 5: Peripheral confirms it supports requested extension
43110Sstevel@tonic-gate 	 * For Nibble mode Xflag must be low, otherwise it must be high
43120Sstevel@tonic-gate 	 */
43130Sstevel@tonic-gate 	xflag = (xreq == ECPP_XREQ_NIBBLE) ? 0 : ECPP_SLCT;
43140Sstevel@tonic-gate 
43150Sstevel@tonic-gate 	/*
43160Sstevel@tonic-gate 	 * Event 6: Peripheral sets nAck high
43170Sstevel@tonic-gate 	 * indicating that status lines are valid
43180Sstevel@tonic-gate 	 */
43190Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
43200Sstevel@tonic-gate 		/* Something wrong with peripheral */
43210Sstevel@tonic-gate 		ecpp_error(pp->dip,
43227656SSherry.Moore@Sun.COM 		    "nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp));
43230Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
43240Sstevel@tonic-gate 		return (FAILURE);
43250Sstevel@tonic-gate 	}
43260Sstevel@tonic-gate 
43270Sstevel@tonic-gate 	if ((DSR_READ(pp) & ECPP_SLCT) != xflag) {
43280Sstevel@tonic-gate 		/* Extensibility value is not supported */
43290Sstevel@tonic-gate 		ecpp_error(pp->dip,
43307656SSherry.Moore@Sun.COM 		    "nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp));
43310Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
43320Sstevel@tonic-gate 		return (FAILURE);
43330Sstevel@tonic-gate 	}
43340Sstevel@tonic-gate 
43350Sstevel@tonic-gate 	if (rdsr) {
43360Sstevel@tonic-gate 		*rdsr = DSR_READ(pp);
43370Sstevel@tonic-gate 	}
43380Sstevel@tonic-gate 
43390Sstevel@tonic-gate 	return (SUCCESS);
43400Sstevel@tonic-gate }
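
/*
 * Editor's note: the mode-specific negotiations earlier in this file
 * (nibble_negotiation() and ecp_negotiation()) wrap this routine in the same
 * way - call ecpp_1284_negotiation() with the extensibility request byte,
 * complete any remaining mode-specific events, and only then record the new
 * current_mode, backchannel and current_phase.
 */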
43410Sstevel@tonic-gate 
43420Sstevel@tonic-gate /*
43430Sstevel@tonic-gate  * 1284 Termination: Events 22..28 - set link to Compatibility mode
43440Sstevel@tonic-gate  *
43450Sstevel@tonic-gate  * This routine is not designed for Immediate termination;
43460Sstevel@tonic-gate  * the caller must take care of waiting for a valid state
43470Sstevel@tonic-gate  * (in particular, in ECP mode the current phase must be Forward Idle),
43480Sstevel@tonic-gate  * otherwise the interface will be reinitialized
43490Sstevel@tonic-gate  *
43500Sstevel@tonic-gate  * In case of Valid state termination SUCCESS is returned and
43510Sstevel@tonic-gate  * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE
43520Sstevel@tonic-gate  * Otherwise interface is reinitialized, FAILURE is returned and
43530Sstevel@tonic-gate  * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE
43540Sstevel@tonic-gate  */
43550Sstevel@tonic-gate static int
43560Sstevel@tonic-gate ecpp_1284_termination(struct ecppunit *pp)
43570Sstevel@tonic-gate {
43580Sstevel@tonic-gate 	int	previous_mode = pp->current_mode;
43590Sstevel@tonic-gate 
43600Sstevel@tonic-gate 	if (((pp->current_mode == ECPP_COMPAT_MODE ||
43610Sstevel@tonic-gate 	    pp->current_mode == ECPP_CENTRONICS) &&
43620Sstevel@tonic-gate 	    pp->current_phase == ECPP_PHASE_C_IDLE) ||
43630Sstevel@tonic-gate 	    pp->current_mode == ECPP_DIAG_MODE) {
43640Sstevel@tonic-gate 		ecpp_error(pp->dip, "termination: not needed\n");
43650Sstevel@tonic-gate 		return (SUCCESS);
43660Sstevel@tonic-gate 	}
43670Sstevel@tonic-gate 
43680Sstevel@tonic-gate 	/* Set host into Compat mode, interrupts disabled */
43690Sstevel@tonic-gate 	ECPP_MASK_INTR(pp);
43700Sstevel@tonic-gate 	ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
43710Sstevel@tonic-gate 
43720Sstevel@tonic-gate 	pp->current_mode = ECPP_COMPAT_MODE;	/* needed by next function */
43730Sstevel@tonic-gate 
43740Sstevel@tonic-gate 	ECPP_CONFIG_MODE(pp);
43750Sstevel@tonic-gate 
43760Sstevel@tonic-gate 	/*
43770Sstevel@tonic-gate 	 * EPP mode uses simple nInit pulse for termination
43780Sstevel@tonic-gate 	 */
43790Sstevel@tonic-gate 	if (previous_mode == ECPP_EPP_MODE) {
43800Sstevel@tonic-gate 		/* Event 68: host sets nInit low */
43810Sstevel@tonic-gate 		DCR_WRITE(pp, 0);
43820Sstevel@tonic-gate 
43830Sstevel@tonic-gate 		drv_usecwait(55);	/* T(ER) = 50us */
43840Sstevel@tonic-gate 
43850Sstevel@tonic-gate 		/* Event 69: host sets nInit high */
43860Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
43870Sstevel@tonic-gate 
43880Sstevel@tonic-gate 		goto endterm;
43890Sstevel@tonic-gate 	}
43900Sstevel@tonic-gate 
43910Sstevel@tonic-gate 	/* terminate peripheral to Compat mode */
43920Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_TERM;
43930Sstevel@tonic-gate 
43940Sstevel@tonic-gate 	/* Event 22: host sets nSelectIn low and nAutoFd high */
43950Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
43960Sstevel@tonic-gate 
43970Sstevel@tonic-gate 	/* Event 23: peripheral deasserts nFault and nBusy */
43980Sstevel@tonic-gate 	/* Event 24: peripheral asserts nAck */
43990Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK,
44007656SSherry.Moore@Sun.COM 	    ECPP_nERR, 35000) < 0) {
44010Sstevel@tonic-gate 		ecpp_error(pp->dip,
44027656SSherry.Moore@Sun.COM 		    "termination: failed events 23,24 %x\n", DSR_READ(pp));
44030Sstevel@tonic-gate 		ecpp_1284_init_interface(pp);
44040Sstevel@tonic-gate 		return (FAILURE);
44050Sstevel@tonic-gate 	}
44060Sstevel@tonic-gate 
44070Sstevel@tonic-gate 	drv_usecwait(1);	/* Tp = 0.5us */
44080Sstevel@tonic-gate 
44090Sstevel@tonic-gate 	/* Event 25: host sets nAutoFd low */
44100Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX);
44110Sstevel@tonic-gate 
44120Sstevel@tonic-gate 	/* Event 26: the peripheral puts itself in Compatible mode */
44130Sstevel@tonic-gate 
44140Sstevel@tonic-gate 	/* Event 27: peripheral deasserts nAck */
44150Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
44160Sstevel@tonic-gate 		ecpp_error(pp->dip,
44177656SSherry.Moore@Sun.COM 		    "termination: failed event 27 %x\n", DSR_READ(pp));
44180Sstevel@tonic-gate 		ecpp_1284_init_interface(pp);
44190Sstevel@tonic-gate 		return (FAILURE);
44200Sstevel@tonic-gate 	}
44210Sstevel@tonic-gate 
44220Sstevel@tonic-gate 	drv_usecwait(1);	/* Tp = 0.5us */
44230Sstevel@tonic-gate 
44240Sstevel@tonic-gate 	/* Event 28: host deasserts nAutoFd */
44250Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
44260Sstevel@tonic-gate 
44270Sstevel@tonic-gate 	drv_usecwait(1);	/* Tp = 0.5us */
44280Sstevel@tonic-gate 
44290Sstevel@tonic-gate endterm:
44300Sstevel@tonic-gate 	/* Compatible mode Idle Phase */
44310Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_C_IDLE;
44320Sstevel@tonic-gate 
44330Sstevel@tonic-gate 	ecpp_error(pp->dip, "termination: completed %x %x\n",
44347656SSherry.Moore@Sun.COM 	    DSR_READ(pp), DCR_READ(pp));
44350Sstevel@tonic-gate 
44360Sstevel@tonic-gate 	return (SUCCESS);
44370Sstevel@tonic-gate }
44380Sstevel@tonic-gate 
44390Sstevel@tonic-gate /*
44400Sstevel@tonic-gate  * Initiate ECP backchannel DMA transfer
44410Sstevel@tonic-gate  */
44420Sstevel@tonic-gate static uchar_t
44430Sstevel@tonic-gate ecp_peripheral2host(struct ecppunit *pp)
44440Sstevel@tonic-gate {
44450Sstevel@tonic-gate 	mblk_t		*mp = NULL;
44460Sstevel@tonic-gate 	size_t		len;
44470Sstevel@tonic-gate 	uint32_t	xfer_time;
44480Sstevel@tonic-gate 
44490Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
44507656SSherry.Moore@Sun.COM 	    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
44510Sstevel@tonic-gate 
44520Sstevel@tonic-gate 	/*
44530Sstevel@tonic-gate 	 * hardware generates cycles to receive data from the peripheral
44540Sstevel@tonic-gate 	 * we only need to read from FIFO
44550Sstevel@tonic-gate 	 */
44560Sstevel@tonic-gate 
44570Sstevel@tonic-gate 	/*
44580Sstevel@tonic-gate 	 * If user issued read(2) of rev_resid bytes, xfer exactly this amount
44590Sstevel@tonic-gate 	 * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read
44600Sstevel@tonic-gate 	 * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes
44610Sstevel@tonic-gate 	 */
44620Sstevel@tonic-gate 	if (pp->nread > 0) {
44630Sstevel@tonic-gate 		len = min(pp->nread, ECP_REV_BLKSZ_MAX);
44640Sstevel@tonic-gate 	} else {
44650Sstevel@tonic-gate 		len = ECP_REV_BLKSZ_MAX;
44660Sstevel@tonic-gate 	}
44670Sstevel@tonic-gate 
44680Sstevel@tonic-gate 	pp->nread = 0;	/* clear after use */
44690Sstevel@tonic-gate 
44700Sstevel@tonic-gate 	/*
44710Sstevel@tonic-gate 	 * Allocate an mblk for the data, making at most 2 attempts:
44720Sstevel@tonic-gate 	 * if a len-byte block fails, try our block size
44730Sstevel@tonic-gate 	 */
44740Sstevel@tonic-gate 	while ((mp = allocb(len, BPRI_MED)) == NULL) {
44750Sstevel@tonic-gate 		ecpp_error(pp->dip,
44767656SSherry.Moore@Sun.COM 		    "ecp_periph2host: failed allocb(%d)\n", len);
44770Sstevel@tonic-gate 		if (len > ECP_REV_BLKSZ) {
44780Sstevel@tonic-gate 			len = ECP_REV_BLKSZ;
44790Sstevel@tonic-gate 		} else {
44800Sstevel@tonic-gate 			break;
44810Sstevel@tonic-gate 		}
44820Sstevel@tonic-gate 	}
44830Sstevel@tonic-gate 
44840Sstevel@tonic-gate 	if (mp == NULL) {
44850Sstevel@tonic-gate 		goto fail;
44860Sstevel@tonic-gate 	}
44870Sstevel@tonic-gate 
44880Sstevel@tonic-gate 	pp->msg = mp;
44890Sstevel@tonic-gate 	pp->e_busy = ECPP_BUSY;
44900Sstevel@tonic-gate 	pp->dma_dir = DDI_DMA_READ;
44910Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_REV_XFER;
44920Sstevel@tonic-gate 
44930Sstevel@tonic-gate 	if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) {
44940Sstevel@tonic-gate 		goto fail;
44950Sstevel@tonic-gate 	}
44960Sstevel@tonic-gate 
44970Sstevel@tonic-gate 	/*
44980Sstevel@tonic-gate 	 * there are two problems with defining ECP backchannel xfer timeout
44990Sstevel@tonic-gate 	 *
45000Sstevel@tonic-gate 	 * a) IEEE 1284 allows infinite time between backchannel bytes,
45010Sstevel@tonic-gate 	 *    but we must stop at some point to send the data upstream,
45020Sstevel@tonic-gate 	 *    check whether any forward transfer requests are pending, etc.;
45030Sstevel@tonic-gate 	 *    all that done, we can continue with backchannel data;
45040Sstevel@tonic-gate 	 *
45050Sstevel@tonic-gate 	 * b) we don`t know how much data the peripheral has;
45060Sstevel@tonic-gate 	 *    DMA counter is set to our buffer size, which can be bigger
45070Sstevel@tonic-gate 	 *    than needed - in this case a timeout must detect this;
45080Sstevel@tonic-gate 	 *
45090Sstevel@tonic-gate 	 * The timeout we schedule here serves as both the transfer timeout
45100Sstevel@tonic-gate 	 * and a means of detecting backchannel stalls; in fact, there are
45110Sstevel@tonic-gate 	 * two timeouts in one:
45120Sstevel@tonic-gate 	 *
45130Sstevel@tonic-gate 	 * - transfer timeout is based on the ECP bandwidth of ~1MB/sec and
45140Sstevel@tonic-gate 	 *   equals the time needed to transfer the whole buffer
45150Sstevel@tonic-gate 	 *   (but not less than ECP_REV_MINTOUT ms); if it occurs,
45160Sstevel@tonic-gate 	 *   DMA is stopped and the data is sent upstream;
45170Sstevel@tonic-gate 	 *
45180Sstevel@tonic-gate 	 * - backchannel watchdog, which would look at DMA counter
45190Sstevel@tonic-gate 	 *   every rev_watchdog ms and stop the transfer only
45200Sstevel@tonic-gate 	 *   if the counter hasn`t changed since the last time;
45210Sstevel@tonic-gate 	 *   otherwise it would save DMA counter value and restart itself;
45220Sstevel@tonic-gate 	 *
45230Sstevel@tonic-gate 	 * transfer timeout is a multiple of rev_watchdog
45240Sstevel@tonic-gate 	 * and implemented as a downward counter
45250Sstevel@tonic-gate 	 *
45260Sstevel@tonic-gate 	 * on Grover, we can`t access DMAC registers while DMA is in flight,
45270Sstevel@tonic-gate 	 * so we can`t have watchdog on Grover, only timeout
45280Sstevel@tonic-gate 	 */
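
	/*
	 * Editor's note (worked example, assuming ecp_rev_speed is expressed
	 * in bytes per second per the ~1MB/s figure above): xfer_time below
	 * is max((1000 * len) / ecp_rev_speed, ECP_REV_MINTOUT) milliseconds,
	 * roughly the time needed to move len bytes, and rev_timeout_cnt is
	 * that interval in rev_watchdog-millisecond ticks (at least 1).  The
	 * transfer is then abandoned either when rev_timeout_cnt ticks have
	 * elapsed (overall timeout) or as soon as a single tick passes with
	 * no DMA progress (stall detection); see ecpp_ecp_read_timeout().
	 */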
45290Sstevel@tonic-gate 
45300Sstevel@tonic-gate 	/* calculate number of watchdog invocations equal to the xfer timeout */
45310Sstevel@tonic-gate 	xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT);
45320Sstevel@tonic-gate #if defined(__x86)
45330Sstevel@tonic-gate 	pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 :
45347656SSherry.Moore@Sun.COM 	    max(xfer_time / pp->rev_watchdog, 1);
45350Sstevel@tonic-gate #else
45360Sstevel@tonic-gate 	pp->rev_timeout_cnt = (pp->hw == &m1553) ? 1 :
45377656SSherry.Moore@Sun.COM 	    max(xfer_time / pp->rev_watchdog, 1);
45380Sstevel@tonic-gate #endif
45390Sstevel@tonic-gate 
45400Sstevel@tonic-gate 	pp->last_dmacnt = len;	/* nothing xferred yet */
45410Sstevel@tonic-gate 
45420Sstevel@tonic-gate 	pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp,
45437656SSherry.Moore@Sun.COM 	    drv_usectohz(pp->rev_watchdog * 1000));
45440Sstevel@tonic-gate 
45450Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n"
45467656SSherry.Moore@Sun.COM 	    "xfer_time=%d wdog=%d cnt=%d\n",
45477656SSherry.Moore@Sun.COM 	    len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt);
45480Sstevel@tonic-gate 
45490Sstevel@tonic-gate 	return (SUCCESS);
45500Sstevel@tonic-gate 
45510Sstevel@tonic-gate fail:
45520Sstevel@tonic-gate 	if (mp) {
45530Sstevel@tonic-gate 		freemsg(mp);
45540Sstevel@tonic-gate 	}
45550Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
45560Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
45570Sstevel@tonic-gate 
45580Sstevel@tonic-gate 	return (FAILURE);
45590Sstevel@tonic-gate }
45600Sstevel@tonic-gate 
45610Sstevel@tonic-gate /*
45620Sstevel@tonic-gate  * ECP backchannel read timeout
45630Sstevel@tonic-gate  * implements both backchannel watchdog and transfer timeout in ECP mode
45640Sstevel@tonic-gate  * if the transfer is still in progress, reschedule itself,
45650Sstevel@tonic-gate  * otherwise call completion routine
45660Sstevel@tonic-gate  */
45670Sstevel@tonic-gate static void
45680Sstevel@tonic-gate ecpp_ecp_read_timeout(void *arg)
45690Sstevel@tonic-gate {
45700Sstevel@tonic-gate 	struct ecppunit	*pp = arg;
45710Sstevel@tonic-gate 	size_t		dmacnt;
45720Sstevel@tonic-gate 
45730Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
45740Sstevel@tonic-gate 
45750Sstevel@tonic-gate 	if (pp->timeout_id == 0) {
45760Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
45770Sstevel@tonic-gate 		return;
45780Sstevel@tonic-gate 	} else {
45790Sstevel@tonic-gate 		pp->timeout_id = 0;
45800Sstevel@tonic-gate 	}
45810Sstevel@tonic-gate 
45820Sstevel@tonic-gate 	if (--pp->rev_timeout_cnt == 0) {
45830Sstevel@tonic-gate 		/*
45840Sstevel@tonic-gate 		 * Transfer timed out
45850Sstevel@tonic-gate 		 */
45860Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecp_read_timeout: timeout\n");
45870Sstevel@tonic-gate 		pp->xfer_tout++;
45880Sstevel@tonic-gate 		ecpp_ecp_read_completion(pp);
45890Sstevel@tonic-gate 	} else {
45900Sstevel@tonic-gate 		/*
45910Sstevel@tonic-gate 		 * Backchannel watchdog:
45920Sstevel@tonic-gate 		 * look if DMA made any progress from the last time
45930Sstevel@tonic-gate 		 */
45940Sstevel@tonic-gate 		dmacnt = ECPP_DMA_GETCNT(pp);
45950Sstevel@tonic-gate 		if (dmacnt - pp->last_dmacnt == 0) {
45960Sstevel@tonic-gate 			/*
45970Sstevel@tonic-gate 			 * No progress - stop the transfer and send
45980Sstevel@tonic-gate 			 * whatever has been read so far up the stream
45990Sstevel@tonic-gate 			 */
46000Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecp_read_timeout: no progress\n");
46010Sstevel@tonic-gate 			pp->xfer_tout++;
46020Sstevel@tonic-gate 			ecpp_ecp_read_completion(pp);
46030Sstevel@tonic-gate 		} else {
46040Sstevel@tonic-gate 			/*
46050Sstevel@tonic-gate 			 * Something was transferred - restart ourselves
46060Sstevel@tonic-gate 			 */
46070Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecp_read_timeout: restarting\n");
46080Sstevel@tonic-gate 			pp->last_dmacnt = dmacnt;
46090Sstevel@tonic-gate 			pp->timeout_id = timeout(ecpp_ecp_read_timeout,
46107656SSherry.Moore@Sun.COM 			    (caddr_t)pp,
46117656SSherry.Moore@Sun.COM 			    drv_usectohz(pp->rev_watchdog * 1000));
46120Sstevel@tonic-gate 		}
46130Sstevel@tonic-gate 	}
46140Sstevel@tonic-gate 
46150Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
46160Sstevel@tonic-gate }
46170Sstevel@tonic-gate 
46180Sstevel@tonic-gate /*
46190Sstevel@tonic-gate  * ECP backchannel read completion:
46200Sstevel@tonic-gate  * stop the DMA, free DMA resources and send read data upstream
46210Sstevel@tonic-gate  */
46220Sstevel@tonic-gate static void
46230Sstevel@tonic-gate ecpp_ecp_read_completion(struct ecppunit *pp)
46240Sstevel@tonic-gate {
46250Sstevel@tonic-gate 	size_t	xfer_len, unx_len;
46260Sstevel@tonic-gate 	mblk_t	*mp;
46270Sstevel@tonic-gate 
46280Sstevel@tonic-gate 	ASSERT(mutex_owned(&pp->umutex));
46290Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
46307656SSherry.Moore@Sun.COM 	    pp->current_phase == ECPP_PHASE_ECP_REV_XFER);
46310Sstevel@tonic-gate 	ASSERT(pp->msg != NULL);
46320Sstevel@tonic-gate 
46330Sstevel@tonic-gate 	/*
46340Sstevel@tonic-gate 	 * Stop the transfer and unbind DMA handle
46350Sstevel@tonic-gate 	 */
46360Sstevel@tonic-gate 	if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
46370Sstevel@tonic-gate 		unx_len = pp->resid;
46380Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n");
46390Sstevel@tonic-gate 	}
46400Sstevel@tonic-gate 
46410Sstevel@tonic-gate 	mp = pp->msg;
46420Sstevel@tonic-gate 	xfer_len = pp->resid - unx_len;	/* how much data was transferred */
46430Sstevel@tonic-gate 
46440Sstevel@tonic-gate 	if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
46450Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n");
46460Sstevel@tonic-gate 	}
46470Sstevel@tonic-gate 
46480Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n",
46497656SSherry.Moore@Sun.COM 	    xfer_len, pp->resid);
46500Sstevel@tonic-gate 
46510Sstevel@tonic-gate 	/* clean up and update statistics */
46520Sstevel@tonic-gate 	pp->msg = NULL;
46530Sstevel@tonic-gate 	pp->resid -= xfer_len;
46540Sstevel@tonic-gate 	pp->ibytes[pp->current_mode] += xfer_len;
46550Sstevel@tonic-gate 	pp->e_busy = ECPP_IDLE;
46560Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
46570Sstevel@tonic-gate 
46580Sstevel@tonic-gate 	/*
46590Sstevel@tonic-gate 	 * Send the read data up the stream
46600Sstevel@tonic-gate 	 */
46610Sstevel@tonic-gate 	mp->b_wptr += xfer_len;
46620Sstevel@tonic-gate 	if (canputnext(pp->readq)) {
46630Sstevel@tonic-gate 		mutex_exit(&pp->umutex);
46640Sstevel@tonic-gate 		putnext(pp->readq, mp);
46650Sstevel@tonic-gate 		mutex_enter(&pp->umutex);
46660Sstevel@tonic-gate 	} else {
46670Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecp_read_completion: fail canputnext\n");
46680Sstevel@tonic-gate 		if (!putq(pp->readq, mp)) {
46690Sstevel@tonic-gate 			freemsg(mp);
46700Sstevel@tonic-gate 		}
46710Sstevel@tonic-gate 	}
46720Sstevel@tonic-gate 
46730Sstevel@tonic-gate 	/* if bytes are left in the FIFO, another transfer is needed */
46740Sstevel@tonic-gate 	if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
46750Sstevel@tonic-gate 		(void) ecpp_backchan_req(pp);
46760Sstevel@tonic-gate 	}
46770Sstevel@tonic-gate 
46780Sstevel@tonic-gate 	qenable(pp->writeq);
46790Sstevel@tonic-gate }
46800Sstevel@tonic-gate 
46810Sstevel@tonic-gate /*
46820Sstevel@tonic-gate  * Read one byte in the Nibble mode
46830Sstevel@tonic-gate  */
46840Sstevel@tonic-gate static uchar_t
46850Sstevel@tonic-gate nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte)
46860Sstevel@tonic-gate {
46870Sstevel@tonic-gate 	uint8_t	n[2];	/* two nibbles */
46880Sstevel@tonic-gate 	int	i;
46890Sstevel@tonic-gate 
46900Sstevel@tonic-gate 	/*
46910Sstevel@tonic-gate 	 * One byte is made of two nibbles
46920Sstevel@tonic-gate 	 */
46930Sstevel@tonic-gate 	for (i = 0; i < 2; i++) {
46940Sstevel@tonic-gate 		/* Event 7, 12: host asserts nAutoFd to signal it is ready for a nibble */
46950Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
46960Sstevel@tonic-gate 
46970Sstevel@tonic-gate 		/* Event 8: peripheral puts data on the status lines */
46980Sstevel@tonic-gate 
46990Sstevel@tonic-gate 		/* Event 9: peripheral asserts nAck, data available */
47000Sstevel@tonic-gate 		if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) {
47010Sstevel@tonic-gate 			ecpp_error(pp->dip,
47027656SSherry.Moore@Sun.COM 			    "nibble_periph2host(%d): failed event 9 %x\n",
47037656SSherry.Moore@Sun.COM 			    i + 1, DSR_READ(pp));
47040Sstevel@tonic-gate 			(void) ecpp_1284_termination(pp);
47050Sstevel@tonic-gate 			return (FAILURE);
47060Sstevel@tonic-gate 		}
47070Sstevel@tonic-gate 
47080Sstevel@tonic-gate 		n[i] = DSR_READ(pp);	/* get a nibble */
47090Sstevel@tonic-gate 
47100Sstevel@tonic-gate 		/* Event 10: host deasserts nAutoFd to say it grabbed data */
47110Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_nINIT);
47120Sstevel@tonic-gate 
47130Sstevel@tonic-gate 		/* (2) Event 13: peripheral asserts PE - end of data phase */
47140Sstevel@tonic-gate 
47150Sstevel@tonic-gate 		/* Event 11: peripheral deasserts nAck to finish handshake */
47160Sstevel@tonic-gate 		if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
47170Sstevel@tonic-gate 			ecpp_error(pp->dip,
47187656SSherry.Moore@Sun.COM 			    "nibble_periph2host(%d): failed event 11 %x\n",
47197656SSherry.Moore@Sun.COM 			    i + 1, DSR_READ(pp));
47200Sstevel@tonic-gate 			(void) ecpp_1284_termination(pp);
47210Sstevel@tonic-gate 			return (FAILURE);
47220Sstevel@tonic-gate 		}
47230Sstevel@tonic-gate 	}
47240Sstevel@tonic-gate 
47250Sstevel@tonic-gate 	/* extract data byte from two nibbles - optimized formula */
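	/*
	 * Per the IEEE 1284 Nibble mode signal assignments, data bits 0-2
	 * of each nibble arrive on nFault/Select/PError (DSR bits 3-5 on
	 * standard hardware) and data bit 3 arrives on Busy, which reads
	 * back inverted as nBUSY (DSR bit 7).  nACK is masked off first
	 * because, after the shifts below, it would land on the same bit
	 * as the inverted-nBUSY contribution; the final 0x0f/0xf0 masks
	 * discard everything else.
	 */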
47260Sstevel@tonic-gate 	*byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) |
47270Sstevel@tonic-gate 	    ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f);
47280Sstevel@tonic-gate 
47290Sstevel@tonic-gate 	pp->ibytes[ECPP_NIBBLE_MODE]++;
47300Sstevel@tonic-gate 	return (SUCCESS);
47310Sstevel@tonic-gate }
47320Sstevel@tonic-gate 
47330Sstevel@tonic-gate /*
47340Sstevel@tonic-gate  * process data transfers requested by the peripheral
47350Sstevel@tonic-gate  */
47360Sstevel@tonic-gate static uint_t
47370Sstevel@tonic-gate ecpp_peripheral2host(struct ecppunit *pp)
47380Sstevel@tonic-gate {
47390Sstevel@tonic-gate 	if (!canputnext(pp->readq)) {
47400Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n");
47410Sstevel@tonic-gate 		return (SUCCESS);
47420Sstevel@tonic-gate 	}
47430Sstevel@tonic-gate 
47440Sstevel@tonic-gate 	switch (pp->backchannel) {
47450Sstevel@tonic-gate 	case ECPP_CENTRONICS:
47460Sstevel@tonic-gate 		/* no backchannel */
47470Sstevel@tonic-gate 		return (SUCCESS);
47480Sstevel@tonic-gate 
47490Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
47500Sstevel@tonic-gate 		ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
47510Sstevel@tonic-gate 
47520Sstevel@tonic-gate 		/*
47530Sstevel@tonic-gate 		 * Event 20: Host sets nAutoFd high to ack request
47540Sstevel@tonic-gate 		 */
47550Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_nINIT);
47560Sstevel@tonic-gate 
47570Sstevel@tonic-gate 		/* Event 21: Periph sets PError low to ack host */
47580Sstevel@tonic-gate 		if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) {
47590Sstevel@tonic-gate 			ecpp_error(pp->dip,
47607656SSherry.Moore@Sun.COM 			    "ecpp_periph2host: failed event 21 %x\n",
47617656SSherry.Moore@Sun.COM 			    DSR_READ(pp));
47620Sstevel@tonic-gate 			(void) ecpp_1284_termination(pp);
47630Sstevel@tonic-gate 			return (FAILURE);
47640Sstevel@tonic-gate 		}
47650Sstevel@tonic-gate 
47660Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
47670Sstevel@tonic-gate 
47680Sstevel@tonic-gate 		/* this routine will read the data in Nibble mode */
47690Sstevel@tonic-gate 		return (ecpp_idle_phase(pp));
47700Sstevel@tonic-gate 
47710Sstevel@tonic-gate 	case ECPP_ECP_MODE:
47720Sstevel@tonic-gate 		if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) &&
47730Sstevel@tonic-gate 		    (ecp_forward2reverse(pp) == FAILURE)) {
47740Sstevel@tonic-gate 			return (FAILURE);
47750Sstevel@tonic-gate 		}
47760Sstevel@tonic-gate 
47770Sstevel@tonic-gate 		return (ecp_peripheral2host(pp));	/* start the transfer */
47780Sstevel@tonic-gate 
47790Sstevel@tonic-gate 	case ECPP_DIAG_MODE: {
47800Sstevel@tonic-gate 		mblk_t		*mp;
47810Sstevel@tonic-gate 		int		i;
47820Sstevel@tonic-gate 
47830Sstevel@tonic-gate 		if (ECR_READ(pp) & ECPP_FIFO_EMPTY) {
47840Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n");
47850Sstevel@tonic-gate 			return (SUCCESS);
47860Sstevel@tonic-gate 		}
47870Sstevel@tonic-gate 
47880Sstevel@tonic-gate 		/* allocate the FIFO size */
47890Sstevel@tonic-gate 		if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) {
47900Sstevel@tonic-gate 			ecpp_error(pp->dip,
47917656SSherry.Moore@Sun.COM 			    "ecpp_periph2host: allocb FAILURE.\n");
47920Sstevel@tonic-gate 			return (FAILURE);
47930Sstevel@tonic-gate 		}
47940Sstevel@tonic-gate 
47950Sstevel@tonic-gate 		/*
47960Sstevel@tonic-gate 		 * For the time being just read it byte by byte
47970Sstevel@tonic-gate 		 */
47980Sstevel@tonic-gate 		i = ECPP_FIFO_SZ;
47990Sstevel@tonic-gate 		while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) {
48000Sstevel@tonic-gate 			*mp->b_wptr++ = TFIFO_READ(pp);
48010Sstevel@tonic-gate 			drv_usecwait(1); /* ECR is sometimes slow to update */
48020Sstevel@tonic-gate 		}
48030Sstevel@tonic-gate 
48040Sstevel@tonic-gate 		if (canputnext(pp->readq)) {
48050Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
48060Sstevel@tonic-gate 			mp->b_datap->db_type = M_DATA;
48070Sstevel@tonic-gate 			ecpp_error(pp->dip,
48087656SSherry.Moore@Sun.COM 			    "ecpp_periph2host: sending %d bytes\n",
48097656SSherry.Moore@Sun.COM 			    mp->b_wptr - mp->b_rptr);
48100Sstevel@tonic-gate 			putnext(pp->readq, mp);
48110Sstevel@tonic-gate 			mutex_enter(&pp->umutex);
48120Sstevel@tonic-gate 			return (SUCCESS);
48130Sstevel@tonic-gate 		} else {
48140Sstevel@tonic-gate 			ecpp_error(pp->dip,
48157656SSherry.Moore@Sun.COM 			    "ecpp_periph2host: !canputnext data lost\n");
48160Sstevel@tonic-gate 			freemsg(mp);
48170Sstevel@tonic-gate 			return (FAILURE);
48180Sstevel@tonic-gate 		}
48190Sstevel@tonic-gate 	}
48200Sstevel@tonic-gate 
48210Sstevel@tonic-gate 	default:
48220Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_peripheraltohost: illegal back");
48230Sstevel@tonic-gate 		return (FAILURE);
48240Sstevel@tonic-gate 	}
48250Sstevel@tonic-gate }
48260Sstevel@tonic-gate 
48270Sstevel@tonic-gate /*
48280Sstevel@tonic-gate  * Negotiate from ECP Forward Idle to Reverse Idle Phase
48290Sstevel@tonic-gate  *
48300Sstevel@tonic-gate  * (manipulations with dcr/ecr are according to ECP Specification)
48310Sstevel@tonic-gate  */
48320Sstevel@tonic-gate static int
48330Sstevel@tonic-gate ecp_forward2reverse(struct ecppunit *pp)
48340Sstevel@tonic-gate {
48350Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
48367656SSherry.Moore@Sun.COM 	    pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE);
48370Sstevel@tonic-gate 
48380Sstevel@tonic-gate 	/* place port into PS2 mode */
48390Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
48400Sstevel@tonic-gate 
48410Sstevel@tonic-gate 	/* set direction bit (DCR3-0 must be 0100 - National) */
48420Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
48430Sstevel@tonic-gate 
48440Sstevel@tonic-gate 	/* enable hardware assist */
48450Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
48460Sstevel@tonic-gate 
48470Sstevel@tonic-gate 	drv_usecwait(1);	/* Tp(ecp) = 0.5us */
48480Sstevel@tonic-gate 
48490Sstevel@tonic-gate 	/* Event 39: host sets nInit low */
48500Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_REV_DIR);
48510Sstevel@tonic-gate 
48520Sstevel@tonic-gate 	/* Event 40: peripheral sets PError low */
48530Sstevel@tonic-gate 
48540Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
48550Sstevel@tonic-gate 
48560Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecp_forward2reverse ok\n");
48570Sstevel@tonic-gate 
48580Sstevel@tonic-gate 	return (SUCCESS);
48590Sstevel@tonic-gate }
48600Sstevel@tonic-gate 
48610Sstevel@tonic-gate /*
48620Sstevel@tonic-gate  * Negotiate from ECP Reverse Idle to Forward Idle Phase
48630Sstevel@tonic-gate  *
48640Sstevel@tonic-gate  * (manipulations with dcr/ecr are according to ECP Specification)
48650Sstevel@tonic-gate  */
48660Sstevel@tonic-gate static int
48670Sstevel@tonic-gate ecp_reverse2forward(struct ecppunit *pp)
48680Sstevel@tonic-gate {
48690Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
48707656SSherry.Moore@Sun.COM 	    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
48710Sstevel@tonic-gate 
48720Sstevel@tonic-gate 	/* Event 47: host deasserts nInit */
48730Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
48740Sstevel@tonic-gate 
48750Sstevel@tonic-gate 	/*
48760Sstevel@tonic-gate 	 * Event 48: peripheral deasserts nAck
48770Sstevel@tonic-gate 	 * Event 49: peripheral asserts PError
48780Sstevel@tonic-gate 	 */
48790Sstevel@tonic-gate 	if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
48800Sstevel@tonic-gate 		ecpp_error(pp->dip,
48810Sstevel@tonic-gate 		    "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp));
48820Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
48830Sstevel@tonic-gate 		return (FAILURE);
48840Sstevel@tonic-gate 	}
48850Sstevel@tonic-gate 
48860Sstevel@tonic-gate 	/* place port into PS2 mode */
48870Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
48880Sstevel@tonic-gate 
48890Sstevel@tonic-gate 	/* clear direction bit */
48900Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_nINIT);
48910Sstevel@tonic-gate 
48920Sstevel@tonic-gate 	/* reenable hardware assist */
48930Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
48940Sstevel@tonic-gate 
48950Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
48960Sstevel@tonic-gate 
48970Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecp_reverse2forward ok\n");
48980Sstevel@tonic-gate 
48990Sstevel@tonic-gate 	return (SUCCESS);
49000Sstevel@tonic-gate }
49010Sstevel@tonic-gate 
49020Sstevel@tonic-gate /*
49030Sstevel@tonic-gate  * Default negotiation chooses the best mode supported by the peripheral
49040Sstevel@tonic-gate  * Note that backchannel mode may be different from forward mode
49050Sstevel@tonic-gate  */
49060Sstevel@tonic-gate static void
49070Sstevel@tonic-gate ecpp_default_negotiation(struct ecppunit *pp)
49080Sstevel@tonic-gate {
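	/*
	 * noecp is presumably a driver-wide tunable defined earlier in this
	 * file that lets ECP negotiation be disabled outright; when it is
	 * set, we fall straight through to the Nibble/Centronics cases.
	 */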
49090Sstevel@tonic-gate 	if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) {
49100Sstevel@tonic-gate 		/* 1284 compatible device */
49110Sstevel@tonic-gate 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
49120Sstevel@tonic-gate 		return;
49130Sstevel@tonic-gate 	} else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) {
49140Sstevel@tonic-gate 		/* 1284 compatible device */
49150Sstevel@tonic-gate 		pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
49160Sstevel@tonic-gate 	} else {
49170Sstevel@tonic-gate 		/* Centronics device */
49180Sstevel@tonic-gate 		pp->io_mode =
49197656SSherry.Moore@Sun.COM 		    (pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO;
49200Sstevel@tonic-gate 	}
49210Sstevel@tonic-gate 	ECPP_CONFIG_MODE(pp);
49220Sstevel@tonic-gate }
49230Sstevel@tonic-gate 
49240Sstevel@tonic-gate /*
49250Sstevel@tonic-gate  * Negotiate to the mode indicated by newmode
49260Sstevel@tonic-gate  */
49270Sstevel@tonic-gate static int
49280Sstevel@tonic-gate ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode)
49290Sstevel@tonic-gate {
49300Sstevel@tonic-gate 	/* any other mode is impossible */
49310Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_CENTRONICS ||
49327656SSherry.Moore@Sun.COM 	    pp->current_mode == ECPP_COMPAT_MODE ||
49337656SSherry.Moore@Sun.COM 	    pp->current_mode == ECPP_NIBBLE_MODE ||
49347656SSherry.Moore@Sun.COM 	    pp->current_mode == ECPP_ECP_MODE ||
49357656SSherry.Moore@Sun.COM 	    pp->current_mode == ECPP_DIAG_MODE);
49360Sstevel@tonic-gate 
49370Sstevel@tonic-gate 	if (pp->current_mode == newmode) {
49380Sstevel@tonic-gate 		return (SUCCESS);
49390Sstevel@tonic-gate 	}
49400Sstevel@tonic-gate 
49410Sstevel@tonic-gate 	/* termination from ECP is only allowed from the Forward Idle Phase */
49420Sstevel@tonic-gate 	if ((pp->current_mode == ECPP_ECP_MODE) &&
49430Sstevel@tonic-gate 	    (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
49440Sstevel@tonic-gate 		/* this may break into Centronics */
49450Sstevel@tonic-gate 		(void) ecp_reverse2forward(pp);
49460Sstevel@tonic-gate 	}
49470Sstevel@tonic-gate 
49480Sstevel@tonic-gate 	switch (newmode) {
49490Sstevel@tonic-gate 	case ECPP_CENTRONICS:
49500Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
49510Sstevel@tonic-gate 
49520Sstevel@tonic-gate 		/* put superio into PIO mode */
49530Sstevel@tonic-gate 		ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
49540Sstevel@tonic-gate 
49550Sstevel@tonic-gate 		pp->current_mode = ECPP_CENTRONICS;
49560Sstevel@tonic-gate 		pp->backchannel = ECPP_CENTRONICS;
49570Sstevel@tonic-gate 		ECPP_CONFIG_MODE(pp);
49580Sstevel@tonic-gate 
49590Sstevel@tonic-gate 		pp->to_mode[pp->current_mode]++;
49600Sstevel@tonic-gate 		return (SUCCESS);
49610Sstevel@tonic-gate 
49620Sstevel@tonic-gate 	case ECPP_COMPAT_MODE:
49630Sstevel@tonic-gate 		/* ECPP_COMPAT_MODE should support Nibble as a backchannel */
49640Sstevel@tonic-gate 		if (pp->current_mode == ECPP_NIBBLE_MODE) {
49650Sstevel@tonic-gate 			if (ecpp_1284_termination(pp) == SUCCESS) {
49660Sstevel@tonic-gate 				pp->current_mode = ECPP_COMPAT_MODE;
49670Sstevel@tonic-gate 				pp->backchannel = ECPP_NIBBLE_MODE;
49680Sstevel@tonic-gate 				ECPP_CONFIG_MODE(pp);
49690Sstevel@tonic-gate 				pp->to_mode[pp->current_mode]++;
49700Sstevel@tonic-gate 				return (SUCCESS);
49710Sstevel@tonic-gate 			} else {
49720Sstevel@tonic-gate 				return (FAILURE);
49730Sstevel@tonic-gate 			}
49740Sstevel@tonic-gate 		}
49750Sstevel@tonic-gate 
49760Sstevel@tonic-gate 		if ((nibble_negotiation(pp) == SUCCESS) &&
49770Sstevel@tonic-gate 		    (ecpp_1284_termination(pp) == SUCCESS)) {
49780Sstevel@tonic-gate 			pp->backchannel = ECPP_NIBBLE_MODE;
49790Sstevel@tonic-gate 			pp->current_mode = ECPP_COMPAT_MODE;
49800Sstevel@tonic-gate 			ECPP_CONFIG_MODE(pp);
49810Sstevel@tonic-gate 			pp->to_mode[pp->current_mode]++;
49820Sstevel@tonic-gate 			return (SUCCESS);
49830Sstevel@tonic-gate 		} else {
49840Sstevel@tonic-gate 			return (FAILURE);
49850Sstevel@tonic-gate 		}
49860Sstevel@tonic-gate 
49870Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
49880Sstevel@tonic-gate 		if (nibble_negotiation(pp) == FAILURE) {
49890Sstevel@tonic-gate 			return (FAILURE);
49900Sstevel@tonic-gate 		}
49910Sstevel@tonic-gate 
49920Sstevel@tonic-gate 		pp->backchannel = ECPP_NIBBLE_MODE;
49930Sstevel@tonic-gate 		ECPP_CONFIG_MODE(pp);
49940Sstevel@tonic-gate 		pp->to_mode[pp->current_mode]++;
49950Sstevel@tonic-gate 
49960Sstevel@tonic-gate 		return (SUCCESS);
49970Sstevel@tonic-gate 
49980Sstevel@tonic-gate 	case ECPP_ECP_MODE:
49990Sstevel@tonic-gate 		if (pp->noecpregs)
50000Sstevel@tonic-gate 			return (FAILURE);
50010Sstevel@tonic-gate 		if (ecp_negotiation(pp) == FAILURE) {
50020Sstevel@tonic-gate 			return (FAILURE);
50030Sstevel@tonic-gate 		}
50040Sstevel@tonic-gate 
50050Sstevel@tonic-gate 		/*
50060Sstevel@tonic-gate 		 * National says CTR[3:0] should be 0100b before moving to 011
50070Sstevel@tonic-gate 		 */
50080Sstevel@tonic-gate 		DCR_WRITE(pp, ECPP_nINIT);
50090Sstevel@tonic-gate 
50100Sstevel@tonic-gate 		if (ecr_write(pp, ECR_mode_011 |
50117656SSherry.Moore@Sun.COM 		    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
50120Sstevel@tonic-gate 			ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n");
50130Sstevel@tonic-gate 			return (FAILURE);
50140Sstevel@tonic-gate 		}
50150Sstevel@tonic-gate 
50160Sstevel@tonic-gate 		ECPP_CONFIG_MODE(pp);
50170Sstevel@tonic-gate 		pp->to_mode[pp->current_mode]++;
50180Sstevel@tonic-gate 
50190Sstevel@tonic-gate 		return (SUCCESS);
50200Sstevel@tonic-gate 
50210Sstevel@tonic-gate 	case ECPP_DIAG_MODE:
50220Sstevel@tonic-gate 		/*
50230Sstevel@tonic-gate 		 * In DIAG mode the application can do nasty things (e.g. drive pins);
50240Sstevel@tonic-gate 		 * to keep the peripheral sane, terminate to Compatibility mode
50250Sstevel@tonic-gate 		 */
50260Sstevel@tonic-gate 		(void) ecpp_1284_termination(pp);
50270Sstevel@tonic-gate 
50280Sstevel@tonic-gate 		/* put superio into TFIFO mode */
50290Sstevel@tonic-gate 		if (ecr_write(pp, ECR_mode_001 |
50300Sstevel@tonic-gate 		    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
50310Sstevel@tonic-gate 			ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n");
50320Sstevel@tonic-gate 			return (FAILURE);
50330Sstevel@tonic-gate 		}
50340Sstevel@tonic-gate 
50350Sstevel@tonic-gate 		pp->current_mode = ECPP_DIAG_MODE;
50360Sstevel@tonic-gate 		pp->backchannel = ECPP_DIAG_MODE;
50370Sstevel@tonic-gate 		ECPP_CONFIG_MODE(pp);
50380Sstevel@tonic-gate 		pp->to_mode[pp->current_mode]++;
50390Sstevel@tonic-gate 
50400Sstevel@tonic-gate 		return (SUCCESS);
50410Sstevel@tonic-gate 
50420Sstevel@tonic-gate 	default:
50430Sstevel@tonic-gate 		ecpp_error(pp->dip,
50440Sstevel@tonic-gate 		    "ecpp_mode_negotiation: mode %d not supported\n", newmode);
50450Sstevel@tonic-gate 		return (FAILURE);
50460Sstevel@tonic-gate 	}
50470Sstevel@tonic-gate }
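
/*
 * Mode changes are normally requested from user space through the
 * ECPPIOC_SETPARMS ioctl declared in ecppio.h (see also ecppio(7I)).
 * A minimal sketch of such a caller, under the assumption that the
 * device node is /dev/ecpp0 - the struct layout comes from ecppio.h,
 * nothing below is specific to this file:
 *
 *	struct ecpp_transfer_parms xp;
 *	int fd = open("/dev/ecpp0", O_RDWR);
 *
 *	(void) ioctl(fd, ECPPIOC_GETPARMS, &xp);
 *	xp.mode = ECPP_ECP_MODE;
 *	if (ioctl(fd, ECPPIOC_SETPARMS, &xp) == -1)
 *		perror("ECPPIOC_SETPARMS could not negotiate the mode");
 */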
50480Sstevel@tonic-gate 
50490Sstevel@tonic-gate /*
50500Sstevel@tonic-gate  * Standard (9.1): Peripheral data is available only when the host places
50510Sstevel@tonic-gate  * the interface in a mode capable of peripheral-to-host data transfer.
50520Sstevel@tonic-gate  * This requires the host periodically to place the interface in such a mode.
50530Sstevel@tonic-gate  * Polling can be eliminated by leaving the interface in a 1284 idle phase.
50540Sstevel@tonic-gate  */
50550Sstevel@tonic-gate static uchar_t
50560Sstevel@tonic-gate ecpp_idle_phase(struct ecppunit *pp)
50570Sstevel@tonic-gate {
50580Sstevel@tonic-gate 	uchar_t		rval = FAILURE;
50590Sstevel@tonic-gate 
50600Sstevel@tonic-gate 	/*
50610Sstevel@tonic-gate 	 * If there is no space on the read queue, do not reverse channel
50620Sstevel@tonic-gate 	 */
50630Sstevel@tonic-gate 	if (!canputnext(pp->readq)) {
50640Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n");
50650Sstevel@tonic-gate 		return (SUCCESS);
50660Sstevel@tonic-gate 	}
50670Sstevel@tonic-gate 
50680Sstevel@tonic-gate 	switch (pp->backchannel) {
50690Sstevel@tonic-gate 	case ECPP_CENTRONICS:
50700Sstevel@tonic-gate 	case ECPP_COMPAT_MODE:
50710Sstevel@tonic-gate 	case ECPP_DIAG_MODE:
50720Sstevel@tonic-gate 		/* nothing */
50730Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n");
50740Sstevel@tonic-gate 		return (SUCCESS);
50750Sstevel@tonic-gate 
50760Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
50770Sstevel@tonic-gate 		/*
50780Sstevel@tonic-gate 		 * read as much data as possible, ending up in either
50790Sstevel@tonic-gate 		 * Reverse Idle or Host Busy Data Available phase
50800Sstevel@tonic-gate 		 */
50810Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n");
50820Sstevel@tonic-gate 		if ((pp->current_mode != ECPP_NIBBLE_MODE) &&
50830Sstevel@tonic-gate 		    (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) {
50840Sstevel@tonic-gate 			break;
50850Sstevel@tonic-gate 		}
50860Sstevel@tonic-gate 
50870Sstevel@tonic-gate 		rval = read_nibble_backchan(pp);
50880Sstevel@tonic-gate 
50890Sstevel@tonic-gate 		/* put interface into Reverse Idle phase */
50900Sstevel@tonic-gate 		if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL &&
50910Sstevel@tonic-gate 		    canputnext(pp->readq)) {
50920Sstevel@tonic-gate 			ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n");
50930Sstevel@tonic-gate 
50940Sstevel@tonic-gate 			/*
50950Sstevel@tonic-gate 			 * Event 7: host asserts nAutoFd
50960Sstevel@tonic-gate 			 * enable nAck interrupt to get a backchannel request
50970Sstevel@tonic-gate 			 */
50980Sstevel@tonic-gate 			DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN);
50990Sstevel@tonic-gate 
51000Sstevel@tonic-gate 			ECPP_UNMASK_INTR(pp);
51010Sstevel@tonic-gate 		}
51020Sstevel@tonic-gate 
51030Sstevel@tonic-gate 		break;
51040Sstevel@tonic-gate 
51050Sstevel@tonic-gate 	case ECPP_ECP_MODE:
51060Sstevel@tonic-gate 		/*
51070Sstevel@tonic-gate 		 * if data is already available, request the backchannel xfer
51080Sstevel@tonic-gate 		 * otherwise stay in Forward Idle and enable nErr interrupts
51090Sstevel@tonic-gate 		 */
51100Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n");
51110Sstevel@tonic-gate 
51120Sstevel@tonic-gate 		ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
51137656SSherry.Moore@Sun.COM 		    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
51140Sstevel@tonic-gate 
51150Sstevel@tonic-gate 		/* put interface into Forward Idle phase */
51160Sstevel@tonic-gate 		if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) &&
51170Sstevel@tonic-gate 		    (ecp_reverse2forward(pp) == FAILURE)) {
51180Sstevel@tonic-gate 			return (FAILURE);
51190Sstevel@tonic-gate 		}
51200Sstevel@tonic-gate 
51210Sstevel@tonic-gate 		/*
51220Sstevel@tonic-gate 		 * if data already available, put backchannel request on the wq
51230Sstevel@tonic-gate 		 * otherwise enable nErr interrupts
51240Sstevel@tonic-gate 		 */
51250Sstevel@tonic-gate 		if ((DSR_READ(pp) & ECPP_nERR) == 0) {
51260Sstevel@tonic-gate 			(void) ecpp_backchan_req(pp);
51270Sstevel@tonic-gate 		} else {
51280Sstevel@tonic-gate 			ECR_WRITE(pp,
51297656SSherry.Moore@Sun.COM 			    ECR_READ(pp) & ~ECPP_INTR_MASK | ECPP_INTR_SRV);
51300Sstevel@tonic-gate 
51310Sstevel@tonic-gate 			ECPP_UNMASK_INTR(pp);
51320Sstevel@tonic-gate 		}
51330Sstevel@tonic-gate 
51340Sstevel@tonic-gate 		return (SUCCESS);
51350Sstevel@tonic-gate 
51360Sstevel@tonic-gate 	default:
51370Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel");
51380Sstevel@tonic-gate 	}
51390Sstevel@tonic-gate 
51400Sstevel@tonic-gate 	return (rval);
51410Sstevel@tonic-gate }
51420Sstevel@tonic-gate 
51430Sstevel@tonic-gate /*
51440Sstevel@tonic-gate  * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE
51450Sstevel@tonic-gate  * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL,
51460Sstevel@tonic-gate  * and continue later as the user consumes data from the read queue
51470Sstevel@tonic-gate  *
51480Sstevel@tonic-gate  * The current phase should be NIBT_AVAIL or NIBT_NAVAIL
51490Sstevel@tonic-gate  * If some events fail during the transfer, termination puts the link
51500Sstevel@tonic-gate  * to Compatibility mode and FAILURE is returned
51510Sstevel@tonic-gate  */
51520Sstevel@tonic-gate static int
51530Sstevel@tonic-gate read_nibble_backchan(struct ecppunit *pp)
51540Sstevel@tonic-gate {
51550Sstevel@tonic-gate 	mblk_t		*mp;
51560Sstevel@tonic-gate 	int		i;
51570Sstevel@tonic-gate 	int		rval = SUCCESS;
51580Sstevel@tonic-gate 
51590Sstevel@tonic-gate 	ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
51600Sstevel@tonic-gate 
51610Sstevel@tonic-gate 	pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
51627656SSherry.Moore@Sun.COM 	    ? ECPP_PHASE_NIBT_NAVAIL : ECPP_PHASE_NIBT_AVAIL;
51630Sstevel@tonic-gate 
51640Sstevel@tonic-gate 	ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp));
51650Sstevel@tonic-gate 
51660Sstevel@tonic-gate 	/*
51670Sstevel@tonic-gate 	 * While data is available, read it in NIBBLE_REV_BLKSZ byte chunks
51680Sstevel@tonic-gate 	 * and send up the stream
51690Sstevel@tonic-gate 	 */
51700Sstevel@tonic-gate 	while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) {
51710Sstevel@tonic-gate 		/* see if there's space on the queue */
51720Sstevel@tonic-gate 		if (!canputnext(pp->readq)) {
51730Sstevel@tonic-gate 			ecpp_error(pp->dip,
51747656SSherry.Moore@Sun.COM 			    "read_nibble_backchan: canputnext failed\n");
51750Sstevel@tonic-gate 			return (SUCCESS);
51760Sstevel@tonic-gate 		}
51770Sstevel@tonic-gate 
51780Sstevel@tonic-gate 		if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) {
51790Sstevel@tonic-gate 			ecpp_error(pp->dip,
51807656SSherry.Moore@Sun.COM 			    "read_nibble_backchan: allocb failed\n");
51810Sstevel@tonic-gate 			return (SUCCESS);
51820Sstevel@tonic-gate 		}
51830Sstevel@tonic-gate 
51840Sstevel@tonic-gate 		/* read a chunk of data from the peripheral byte by byte */
51850Sstevel@tonic-gate 		i = NIBBLE_REV_BLKSZ;
51860Sstevel@tonic-gate 		while (i-- && !(DSR_READ(pp) & ECPP_nERR)) {
51870Sstevel@tonic-gate 			if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) {
51880Sstevel@tonic-gate 				rval = FAILURE;
51890Sstevel@tonic-gate 				break;
51900Sstevel@tonic-gate 			}
51910Sstevel@tonic-gate 			mp->b_wptr++;
51920Sstevel@tonic-gate 		}
51930Sstevel@tonic-gate 
51940Sstevel@tonic-gate 		pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
51957656SSherry.Moore@Sun.COM 		    ? ECPP_PHASE_NIBT_NAVAIL
51967656SSherry.Moore@Sun.COM 		    : ECPP_PHASE_NIBT_AVAIL;
51970Sstevel@tonic-gate 
51980Sstevel@tonic-gate 		if (mp->b_wptr - mp->b_rptr > 0) {
51990Sstevel@tonic-gate 			ecpp_error(pp->dip,
52007656SSherry.Moore@Sun.COM 			    "read_nibble_backchan: sending %d bytes\n",
52017656SSherry.Moore@Sun.COM 			    mp->b_wptr - mp->b_rptr);
52020Sstevel@tonic-gate 			pp->nread = 0;
52030Sstevel@tonic-gate 			mutex_exit(&pp->umutex);
52040Sstevel@tonic-gate 			putnext(pp->readq, mp);
52050Sstevel@tonic-gate 			mutex_enter(&pp->umutex);
52060Sstevel@tonic-gate 		} else {
52070Sstevel@tonic-gate 			freemsg(mp);
52080Sstevel@tonic-gate 		}
52090Sstevel@tonic-gate 	}
52100Sstevel@tonic-gate 
52110Sstevel@tonic-gate 	return (rval);
52120Sstevel@tonic-gate }
52130Sstevel@tonic-gate 
52140Sstevel@tonic-gate /*
52150Sstevel@tonic-gate  * 'Request Device ID using nibble mode' negotiation
52160Sstevel@tonic-gate  */
52170Sstevel@tonic-gate static int
52180Sstevel@tonic-gate devidnib_negotiation(struct ecppunit *pp)
52190Sstevel@tonic-gate {
52200Sstevel@tonic-gate 	uint8_t dsr;
52210Sstevel@tonic-gate 
52220Sstevel@tonic-gate 	if (ecpp_1284_negotiation(pp,
52237656SSherry.Moore@Sun.COM 	    ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) {
52240Sstevel@tonic-gate 		return (FAILURE);
52250Sstevel@tonic-gate 	}
52260Sstevel@tonic-gate 
52270Sstevel@tonic-gate 	/*
52280Sstevel@tonic-gate 	 * If peripheral has data available, PE and nErr will
52290Sstevel@tonic-gate 	 * be set low at Event 5 & 6.
52300Sstevel@tonic-gate 	 */
52310Sstevel@tonic-gate 	if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
52320Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
52330Sstevel@tonic-gate 	} else {
52340Sstevel@tonic-gate 		pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
52350Sstevel@tonic-gate 	}
52360Sstevel@tonic-gate 
52370Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n",
52387656SSherry.Moore@Sun.COM 	    pp->current_phase);
52390Sstevel@tonic-gate 
52400Sstevel@tonic-gate 	/* successful negotiation into Nibble mode */
52410Sstevel@tonic-gate 	pp->current_mode = ECPP_NIBBLE_MODE;
52420Sstevel@tonic-gate 	pp->backchannel = ECPP_NIBBLE_MODE;
52430Sstevel@tonic-gate 
52440Sstevel@tonic-gate 	ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n");
52450Sstevel@tonic-gate 
52460Sstevel@tonic-gate 	return (SUCCESS);
52470Sstevel@tonic-gate }
52480Sstevel@tonic-gate 
52490Sstevel@tonic-gate /*
52500Sstevel@tonic-gate  * Read 1284 device ID sequence
52510Sstevel@tonic-gate  *
52520Sstevel@tonic-gate  * This function should be called two times:
52530Sstevel@tonic-gate  * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length;
52540Sstevel@tonic-gate  * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer
52550Sstevel@tonic-gate  *
52560Sstevel@tonic-gate  * After 2) the port is in Compatibility mode
52570Sstevel@tonic-gate  * If the caller fails to make the second call, it must reset the port to Centronics
52580Sstevel@tonic-gate  *
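 * A sketch of the intended calling sequence (buffer handling here is
 * illustrative only; the real caller is presumably the device-id ioctl
 * path elsewhere in this driver):
 *
 *	int len;
 *	uint8_t *buf;
 *
 *	if (ecpp_getdevid(pp, NULL, &len, ECPP_NIBBLE_MODE) == 0) {
 *		buf = kmem_zalloc(len, KM_SLEEP);
 *		(void) ecpp_getdevid(pp, buf, &len, ECPP_NIBBLE_MODE);
 *	}
 *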
52590Sstevel@tonic-gate  */
52600Sstevel@tonic-gate static int
52610Sstevel@tonic-gate ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode)
52620Sstevel@tonic-gate {
52630Sstevel@tonic-gate 	uint8_t lenhi, lenlo;
52640Sstevel@tonic-gate 	uint8_t dsr;
52650Sstevel@tonic-gate 	int i;
52660Sstevel@tonic-gate 
52670Sstevel@tonic-gate 	switch (mode) {
52680Sstevel@tonic-gate 	case ECPP_NIBBLE_MODE:
52690Sstevel@tonic-gate 		/* negotiate only if necessary */
52700Sstevel@tonic-gate 		if ((pp->current_mode != mode) || (id == NULL)) {
52710Sstevel@tonic-gate 			if (devidnib_negotiation(pp) == FAILURE) {
52720Sstevel@tonic-gate 				return (EIO);
52730Sstevel@tonic-gate 			}
52740Sstevel@tonic-gate 		}
52750Sstevel@tonic-gate 
52760Sstevel@tonic-gate 		if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) {
52770Sstevel@tonic-gate 			return (EIO);
52780Sstevel@tonic-gate 		}
52790Sstevel@tonic-gate 
52800Sstevel@tonic-gate 		/*
52810Sstevel@tonic-gate 		 * Event 14: Host tristates the data bus; the peripheral
52820Sstevel@tonic-gate 		 * asserts nERR if data is available.  Each byte is carried
52830Sstevel@tonic-gate 		 * on the status bits (7-0) and requires two reads since
52840Sstevel@tonic-gate 		 * only nibbles are transferred.
52850Sstevel@tonic-gate 		 */
52860Sstevel@tonic-gate 		dsr = DSR_READ(pp);
52870Sstevel@tonic-gate 
52880Sstevel@tonic-gate 		if (id == NULL) {
52890Sstevel@tonic-gate 			/*
52900Sstevel@tonic-gate 			 * first two bytes are the length of the sequence
52910Sstevel@tonic-gate 			 * (incl. these bytes)
52920Sstevel@tonic-gate 			 * first byte is MSB
52930Sstevel@tonic-gate 			 */
52940Sstevel@tonic-gate 			if ((dsr & ECPP_nERR) ||
52950Sstevel@tonic-gate 			    (nibble_peripheral2host(pp, &lenhi) == FAILURE) ||
52960Sstevel@tonic-gate 			    (dsr & ECPP_nERR) ||
52970Sstevel@tonic-gate 			    (nibble_peripheral2host(pp, &lenlo) == FAILURE)) {
52980Sstevel@tonic-gate 				ecpp_error(pp->dip,
52990Sstevel@tonic-gate 				    "ecpp_getdevid: id length read error\n");
53000Sstevel@tonic-gate 				return (EIO);
53010Sstevel@tonic-gate 			}
53020Sstevel@tonic-gate 
53030Sstevel@tonic-gate 			*lenp = (lenhi << 8) | (lenlo);
53040Sstevel@tonic-gate 
53050Sstevel@tonic-gate 			ecpp_error(pp->dip,
53067656SSherry.Moore@Sun.COM 			    "ecpp_getdevid: id length = %d\n", *lenp);
53070Sstevel@tonic-gate 
53080Sstevel@tonic-gate 			if (*lenp < 2) {
53090Sstevel@tonic-gate 				return (EIO);
53100Sstevel@tonic-gate 			}
53110Sstevel@tonic-gate 		} else {
53120Sstevel@tonic-gate 			/*
53130Sstevel@tonic-gate 			 * read the rest of the data
53140Sstevel@tonic-gate 			 */
53150Sstevel@tonic-gate 			i = *lenp;
53160Sstevel@tonic-gate 			while (i && ((dsr & ECPP_nERR) == 0)) {
53170Sstevel@tonic-gate 				if (nibble_peripheral2host(pp, id++) == FAILURE)
53180Sstevel@tonic-gate 					break;
53190Sstevel@tonic-gate 
53200Sstevel@tonic-gate 				i--;
53210Sstevel@tonic-gate 				dsr = DSR_READ(pp);
53220Sstevel@tonic-gate 			}
53230Sstevel@tonic-gate 			ecpp_error(pp->dip,
53247656SSherry.Moore@Sun.COM 			    "ecpp_getdevid: read %d bytes\n", *lenp - i);
53250Sstevel@tonic-gate 
53260Sstevel@tonic-gate 			/*
53270Sstevel@tonic-gate 			 * 1284: After receiving the sequence, the host is
53280Sstevel@tonic-gate 			 * required to return the link to the Compatibility mode
53290Sstevel@tonic-gate 			 */
53300Sstevel@tonic-gate 			(void) ecpp_1284_termination(pp);
53310Sstevel@tonic-gate 		}
53320Sstevel@tonic-gate 
53330Sstevel@tonic-gate 		break;
53340Sstevel@tonic-gate 
53350Sstevel@tonic-gate 	/* Other modes are not yet supported */
53360Sstevel@tonic-gate 	default:
53370Sstevel@tonic-gate 		return (EINVAL);
53380Sstevel@tonic-gate 	}
53390Sstevel@tonic-gate 
53400Sstevel@tonic-gate 	return (0);
53410Sstevel@tonic-gate }
53420Sstevel@tonic-gate 
53430Sstevel@tonic-gate /*
53440Sstevel@tonic-gate  * Various hardware support
53450Sstevel@tonic-gate  *
53460Sstevel@tonic-gate  * First define some stubs for functions that do nothing
53470Sstevel@tonic-gate  */
53480Sstevel@tonic-gate 
53490Sstevel@tonic-gate /*ARGSUSED*/
53500Sstevel@tonic-gate static void
53510Sstevel@tonic-gate empty_config_mode(struct ecppunit *pp)
53520Sstevel@tonic-gate {
53530Sstevel@tonic-gate }
53540Sstevel@tonic-gate 
53550Sstevel@tonic-gate /*ARGSUSED*/
53560Sstevel@tonic-gate static void
53570Sstevel@tonic-gate empty_mask_intr(struct ecppunit *pp)
53580Sstevel@tonic-gate {
53590Sstevel@tonic-gate }
53600Sstevel@tonic-gate 
53610Sstevel@tonic-gate #if defined(__x86)
53620Sstevel@tonic-gate static size_t
53630Sstevel@tonic-gate x86_getcnt(struct ecppunit *pp)
53640Sstevel@tonic-gate {
53650Sstevel@tonic-gate 	int count;
53660Sstevel@tonic-gate 
53670Sstevel@tonic-gate 	(void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count);
53680Sstevel@tonic-gate 	return (count);
53690Sstevel@tonic-gate }
53700Sstevel@tonic-gate #endif
53710Sstevel@tonic-gate 
53720Sstevel@tonic-gate /*
53730Sstevel@tonic-gate  *
53740Sstevel@tonic-gate  * National PC87332 and PC97317 SuperIOs support routines
53750Sstevel@tonic-gate  * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur
53760Sstevel@tonic-gate  * and use EBus DMA facilities (Cheerio or RIO)
53770Sstevel@tonic-gate  *
53780Sstevel@tonic-gate  */
53790Sstevel@tonic-gate 
53800Sstevel@tonic-gate static int
53810Sstevel@tonic-gate pc87332_map_regs(struct ecppunit *pp)
53820Sstevel@tonic-gate {
53830Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0,
53840Sstevel@tonic-gate 	    sizeof (struct config_reg), &acc_attr,
53850Sstevel@tonic-gate 	    &pp->uh.ebus.c_handle) != DDI_SUCCESS) {
53860Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n");
53870Sstevel@tonic-gate 		goto fail;
53880Sstevel@tonic-gate 	}
53890Sstevel@tonic-gate 
53900Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
53910Sstevel@tonic-gate 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
53920Sstevel@tonic-gate 	    != DDI_SUCCESS) {
53930Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n");
53940Sstevel@tonic-gate 		goto fail;
53950Sstevel@tonic-gate 	}
53960Sstevel@tonic-gate 
53970Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
53980Sstevel@tonic-gate 	    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
53990Sstevel@tonic-gate 	    != DDI_SUCCESS) {
54000Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n");
54010Sstevel@tonic-gate 		goto fail;
54020Sstevel@tonic-gate 	}
54030Sstevel@tonic-gate 
54040Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0,
54050Sstevel@tonic-gate 	    sizeof (struct cheerio_dma_reg), &acc_attr,
54060Sstevel@tonic-gate 	    &pp->uh.ebus.d_handle) != DDI_SUCCESS) {
54070Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n");
54080Sstevel@tonic-gate 		goto fail;
54090Sstevel@tonic-gate 	}
54100Sstevel@tonic-gate 
54110Sstevel@tonic-gate 	return (SUCCESS);
54120Sstevel@tonic-gate 
54130Sstevel@tonic-gate fail:
54140Sstevel@tonic-gate 	pc87332_unmap_regs(pp);
54150Sstevel@tonic-gate 	return (FAILURE);
54160Sstevel@tonic-gate }
54170Sstevel@tonic-gate 
54180Sstevel@tonic-gate static void
54190Sstevel@tonic-gate pc87332_unmap_regs(struct ecppunit *pp)
54200Sstevel@tonic-gate {
54210Sstevel@tonic-gate 	if (pp->uh.ebus.c_handle) {
54220Sstevel@tonic-gate 		ddi_regs_map_free(&pp->uh.ebus.c_handle);
54230Sstevel@tonic-gate 	}
54240Sstevel@tonic-gate 	if (pp->uh.ebus.d_handle) {
54250Sstevel@tonic-gate 		ddi_regs_map_free(&pp->uh.ebus.d_handle);
54260Sstevel@tonic-gate 	}
54270Sstevel@tonic-gate 	if (pp->i_handle) {
54280Sstevel@tonic-gate 		ddi_regs_map_free(&pp->i_handle);
54290Sstevel@tonic-gate 	}
54300Sstevel@tonic-gate 	if (pp->f_handle) {
54310Sstevel@tonic-gate 		ddi_regs_map_free(&pp->f_handle);
54320Sstevel@tonic-gate 	}
54330Sstevel@tonic-gate }
54340Sstevel@tonic-gate 
54350Sstevel@tonic-gate static uint8_t
54360Sstevel@tonic-gate pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
54370Sstevel@tonic-gate {
54380Sstevel@tonic-gate 	uint8_t retval;
54390Sstevel@tonic-gate 
54400Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
54410Sstevel@tonic-gate 	retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data);
54420Sstevel@tonic-gate 
54430Sstevel@tonic-gate 	return (retval);
54440Sstevel@tonic-gate }
54450Sstevel@tonic-gate 
54460Sstevel@tonic-gate static void
54470Sstevel@tonic-gate pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
54480Sstevel@tonic-gate {
54490Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
54500Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
54510Sstevel@tonic-gate 
54520Sstevel@tonic-gate 	/*
54530Sstevel@tonic-gate 	 * A second write to this register is needed: the register behaves
54540Sstevel@tonic-gate 	 * as a FIFO.  The first value written goes to the data register;
54550Sstevel@tonic-gate 	 * the second write pushes the initial value to the indexed register.
54560Sstevel@tonic-gate 	 */
54570Sstevel@tonic-gate 
54580Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
54590Sstevel@tonic-gate }
54600Sstevel@tonic-gate 
54610Sstevel@tonic-gate static int
54620Sstevel@tonic-gate pc87332_config_chip(struct ecppunit *pp)
54630Sstevel@tonic-gate {
54640Sstevel@tonic-gate 	uint8_t pmc, fcr;
54650Sstevel@tonic-gate 
54660Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_INIT;
54670Sstevel@tonic-gate 
54680Sstevel@tonic-gate 	/* ECP DMA configuration bit (PMC4) must be set */
54690Sstevel@tonic-gate 	pmc = pc87332_read_config_reg(pp, PMC);
54700Sstevel@tonic-gate 	if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) {
54710Sstevel@tonic-gate 		pc87332_write_config_reg(pp, PMC,
54727656SSherry.Moore@Sun.COM 		    pmc | PC87332_PMC_ECP_DMA_CONFIG);
54730Sstevel@tonic-gate 	}
54740Sstevel@tonic-gate 
54750Sstevel@tonic-gate 	/*
54760Sstevel@tonic-gate 	 * The Parallel Port Multiplexor pins must be driven.
54770Sstevel@tonic-gate 	 * Check to see if FCR3 is zero, if not clear FCR3.
54780Sstevel@tonic-gate 	 */
54790Sstevel@tonic-gate 	fcr = pc87332_read_config_reg(pp, FCR);
54800Sstevel@tonic-gate 	if (fcr & PC87332_FCR_PPM_FLOAT_CTL) {
54810Sstevel@tonic-gate 		pc87332_write_config_reg(pp, FCR,
54827656SSherry.Moore@Sun.COM 		    fcr & ~PC87332_FCR_PPM_FLOAT_CTL);
54830Sstevel@tonic-gate 	}
54840Sstevel@tonic-gate 
54850Sstevel@tonic-gate 	/*
54860Sstevel@tonic-gate 	 * clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode
54870Sstevel@tonic-gate 	 * CTR5 cannot be cleared in SPP mode and will read back as 1,
54880Sstevel@tonic-gate 	 * so "FAILURE" in this case is ok.  Use dcr_write()
54890Sstevel@tonic-gate 	 * to ensure reliable writes to DCR.
54900Sstevel@tonic-gate 	 */
54910Sstevel@tonic-gate 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) {
54920Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n");
54930Sstevel@tonic-gate 	}
54940Sstevel@tonic-gate 
54950Sstevel@tonic-gate 	/* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */
54960Sstevel@tonic-gate 	pc87332_write_config_reg(pp, PCR,
54977656SSherry.Moore@Sun.COM 	    PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN);
54980Sstevel@tonic-gate 
54990Sstevel@tonic-gate 	/* put SuperIO in initial state */
55000Sstevel@tonic-gate 	if (ecr_write(pp, ECR_mode_001 |
55017656SSherry.Moore@Sun.COM 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
55020Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_config_87332: ECR\n");
55030Sstevel@tonic-gate 	}
55040Sstevel@tonic-gate 
55050Sstevel@tonic-gate 	if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
55060Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n");
55070Sstevel@tonic-gate 		return (FAILURE);
55080Sstevel@tonic-gate 
55090Sstevel@tonic-gate 	}
55100Sstevel@tonic-gate 	/* we are in Centronics mode */
55110Sstevel@tonic-gate 	pp->current_mode = ECPP_CENTRONICS;
55120Sstevel@tonic-gate 
55130Sstevel@tonic-gate 	/* in compatible mode with no data transfer in progress */
55140Sstevel@tonic-gate 	pp->current_phase = ECPP_PHASE_C_IDLE;
55150Sstevel@tonic-gate 
55160Sstevel@tonic-gate 	return (SUCCESS);
55170Sstevel@tonic-gate }
55180Sstevel@tonic-gate 
55190Sstevel@tonic-gate /*
55200Sstevel@tonic-gate  * A new mode was set, do some mode specific reconfiguration
55210Sstevel@tonic-gate  * in this case - set interrupt characteristic
55220Sstevel@tonic-gate  */
55230Sstevel@tonic-gate static void
55240Sstevel@tonic-gate pc87332_config_mode(struct ecppunit *pp)
55250Sstevel@tonic-gate {
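	/*
	 * The two literal PCR values below differ only in bit 4.  Judging
	 * from pc87332_config_chip() above, 0x04 presumably corresponds to
	 * PC87332_PCR_ECP_EN alone and 0x14 adds PC87332_PCR_INTR_LEVL,
	 * i.e. only the interrupt characteristic is being toggled here.
	 */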
55260Sstevel@tonic-gate 	if (COMPAT_PIO(pp)) {
55270Sstevel@tonic-gate 		pc87332_write_config_reg(pp, PCR, 0x04);
55280Sstevel@tonic-gate 	} else {
55290Sstevel@tonic-gate 		pc87332_write_config_reg(pp, PCR, 0x14);
55300Sstevel@tonic-gate 	}
55310Sstevel@tonic-gate }
55320Sstevel@tonic-gate 
55330Sstevel@tonic-gate static int
55340Sstevel@tonic-gate pc97317_map_regs(struct ecppunit *pp)
55350Sstevel@tonic-gate {
55360Sstevel@tonic-gate 	if (pc87332_map_regs(pp) != SUCCESS) {
55370Sstevel@tonic-gate 		return (FAILURE);
55380Sstevel@tonic-gate 	}
55390Sstevel@tonic-gate 
55400Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg,
55417656SSherry.Moore@Sun.COM 	    0x403, sizeof (struct config2_reg), &acc_attr,
55427656SSherry.Moore@Sun.COM 	    &pp->uh.ebus.c2_handle) != DDI_SUCCESS) {
55430Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n");
55440Sstevel@tonic-gate 		pc87332_unmap_regs(pp);
55450Sstevel@tonic-gate 		return (FAILURE);
55460Sstevel@tonic-gate 	} else {
55470Sstevel@tonic-gate 		return (SUCCESS);
55480Sstevel@tonic-gate 	}
55490Sstevel@tonic-gate }
55500Sstevel@tonic-gate 
55510Sstevel@tonic-gate static void
55520Sstevel@tonic-gate pc97317_unmap_regs(struct ecppunit *pp)
55530Sstevel@tonic-gate {
55540Sstevel@tonic-gate 	if (pp->uh.ebus.c2_handle) {
55550Sstevel@tonic-gate 		ddi_regs_map_free(&pp->uh.ebus.c2_handle);
55560Sstevel@tonic-gate 	}
55570Sstevel@tonic-gate 
55580Sstevel@tonic-gate 	pc87332_unmap_regs(pp);
55590Sstevel@tonic-gate }
55600Sstevel@tonic-gate 
55610Sstevel@tonic-gate /*
55620Sstevel@tonic-gate  * OBP should configure the PC97317 such that it does not need further
55630Sstevel@tonic-gate  * configuration.  During sustaining work it may be necessary to examine
55640Sstevel@tonic-gate  * or change the configuration registers; this routine is left in
55650Sstevel@tonic-gate  * the file for that purpose.
55660Sstevel@tonic-gate  */
55670Sstevel@tonic-gate static int
55680Sstevel@tonic-gate pc97317_config_chip(struct ecppunit *pp)
55690Sstevel@tonic-gate {
55700Sstevel@tonic-gate 	uint8_t conreg;
55710Sstevel@tonic-gate 
55720Sstevel@tonic-gate 	/* set the logical device name */
55730Sstevel@tonic-gate 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
55740Sstevel@tonic-gate 
55750Sstevel@tonic-gate 	/* SPP Compatibility */
55760Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c2_handle,
55777656SSherry.Moore@Sun.COM 	    &pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2);
55780Sstevel@tonic-gate 	PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80);
55790Sstevel@tonic-gate 
55800Sstevel@tonic-gate 	/* low interrupt polarity */
55810Sstevel@tonic-gate 	pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
55820Sstevel@tonic-gate 
55830Sstevel@tonic-gate 	/* ECP mode */
55840Sstevel@tonic-gate 	pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2);
55850Sstevel@tonic-gate 
55860Sstevel@tonic-gate 	if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
55870Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n");
55880Sstevel@tonic-gate 	}
55890Sstevel@tonic-gate 
55900Sstevel@tonic-gate 	if (ecr_write(pp, ECR_mode_001 |
55917656SSherry.Moore@Sun.COM 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
55920Sstevel@tonic-gate 		ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n");
55930Sstevel@tonic-gate 	}
55940Sstevel@tonic-gate 
55950Sstevel@tonic-gate #ifdef DEBUG
55960Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO);
55970Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg);
55980Sstevel@tonic-gate 
55990Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB);
56000Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg);
56010Sstevel@tonic-gate 
56020Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB);
56030Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg);
56040Sstevel@tonic-gate 
56050Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL);
56060Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg);
56070Sstevel@tonic-gate 
56080Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE);
56090Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg);
56100Sstevel@tonic-gate 
56110Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE);
56120Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg);
56130Sstevel@tonic-gate 
56140Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE);
56150Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg);
56160Sstevel@tonic-gate 
56170Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN);
56180Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg);
56190Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN);
56200Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg);
56210Sstevel@tonic-gate 
56220Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
56230Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
56240Sstevel@tonic-gate 
56250Sstevel@tonic-gate 	conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
56260Sstevel@tonic-gate 	ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
56270Sstevel@tonic-gate #endif /* DEBUG */
56280Sstevel@tonic-gate 
56290Sstevel@tonic-gate 	return (SUCCESS);
56300Sstevel@tonic-gate }
56310Sstevel@tonic-gate 
56320Sstevel@tonic-gate /*
56330Sstevel@tonic-gate  * A new mode was set, do some mode specific reconfiguration
56340Sstevel@tonic-gate  * in this case - set interrupt polarity
56350Sstevel@tonic-gate  */
56360Sstevel@tonic-gate static void
56370Sstevel@tonic-gate pc97317_config_mode(struct ecppunit *pp)
56380Sstevel@tonic-gate {
56390Sstevel@tonic-gate 	/* set the logical device name */
56400Sstevel@tonic-gate 	pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
56410Sstevel@tonic-gate 
56420Sstevel@tonic-gate 	if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) {
56430Sstevel@tonic-gate 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02);
56440Sstevel@tonic-gate 	} else {
56450Sstevel@tonic-gate 		pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
56460Sstevel@tonic-gate 	}
56470Sstevel@tonic-gate }
56480Sstevel@tonic-gate 
56490Sstevel@tonic-gate static void
56500Sstevel@tonic-gate cheerio_mask_intr(struct ecppunit *pp)
56510Sstevel@tonic-gate {
56520Sstevel@tonic-gate 	/* mask Cheerio interrupts */
56530Sstevel@tonic-gate 	AND_SET_LONG_R(pp->uh.ebus.d_handle,
56547656SSherry.Moore@Sun.COM 	    &pp->uh.ebus.dmac->csr, ~DCSR_INT_EN);
56550Sstevel@tonic-gate }
56560Sstevel@tonic-gate 
56570Sstevel@tonic-gate static void
56580Sstevel@tonic-gate cheerio_unmask_intr(struct ecppunit *pp)
56590Sstevel@tonic-gate {
56600Sstevel@tonic-gate 	/* unmask Cheerio interrupts */
56610Sstevel@tonic-gate 	OR_SET_LONG_R(pp->uh.ebus.d_handle,
56627656SSherry.Moore@Sun.COM 	    &pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS);
56630Sstevel@tonic-gate }
56640Sstevel@tonic-gate 
56650Sstevel@tonic-gate static int
56660Sstevel@tonic-gate cheerio_dma_start(struct ecppunit *pp)
56670Sstevel@tonic-gate {
56680Sstevel@tonic-gate 	cheerio_reset_dcsr(pp);
56690Sstevel@tonic-gate 	SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size);
56700Sstevel@tonic-gate 	SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address);
56710Sstevel@tonic-gate 
56720Sstevel@tonic-gate 	if (pp->dma_dir == DDI_DMA_READ) {
56730Sstevel@tonic-gate 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
56740Sstevel@tonic-gate 		    DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE);
56750Sstevel@tonic-gate 	} else {
56760Sstevel@tonic-gate 		SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
56777656SSherry.Moore@Sun.COM 		    DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0);
56780Sstevel@tonic-gate 	}
56790Sstevel@tonic-gate 
56800Sstevel@tonic-gate 	return (SUCCESS);
56810Sstevel@tonic-gate }
56820Sstevel@tonic-gate 
56830Sstevel@tonic-gate /*
56840Sstevel@tonic-gate  * Note: BCR is reset to 0, so counter should always be read before dma_stop
56850Sstevel@tonic-gate  */
56860Sstevel@tonic-gate static int
56870Sstevel@tonic-gate cheerio_dma_stop(struct ecppunit *pp, size_t *countp)
56880Sstevel@tonic-gate {
56890Sstevel@tonic-gate 	uint8_t ecr;
56900Sstevel@tonic-gate 
56910Sstevel@tonic-gate 	/* disable DMA and byte counter */
56920Sstevel@tonic-gate 	AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
56937656SSherry.Moore@Sun.COM 	    ~(DCSR_EN_DMA | DCSR_EN_CNT| DCSR_INT_EN));
56940Sstevel@tonic-gate 
56950Sstevel@tonic-gate 	/* ACK and disable the TC interrupt */
56960Sstevel@tonic-gate 	OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
56977656SSherry.Moore@Sun.COM 	    DCSR_TC | DCSR_TCI_DIS);
56980Sstevel@tonic-gate 
56990Sstevel@tonic-gate 	/* read DMA count if requested */
57000Sstevel@tonic-gate 	if (countp) {
57010Sstevel@tonic-gate 		*countp = cheerio_getcnt(pp);
57020Sstevel@tonic-gate 	}
57030Sstevel@tonic-gate 
57040Sstevel@tonic-gate 	cheerio_reset_dcsr(pp);
57050Sstevel@tonic-gate 	SET_DMAC_BCR(pp, 0);
57060Sstevel@tonic-gate 
57070Sstevel@tonic-gate 	/* turn off SuperIO's DMA */
57080Sstevel@tonic-gate 	ecr = ECR_READ(pp);
57090Sstevel@tonic-gate 	if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) {
57100Sstevel@tonic-gate 		return (FAILURE);
57110Sstevel@tonic-gate 	}
57120Sstevel@tonic-gate 
57130Sstevel@tonic-gate 	/* Disable SuperIO interrupts and DMA */
57140Sstevel@tonic-gate 	ecr = ECR_READ(pp);
57150Sstevel@tonic-gate 
57160Sstevel@tonic-gate 	return (ecr_write(pp, ecr | ECPP_INTR_SRV));
57170Sstevel@tonic-gate }
57180Sstevel@tonic-gate 
57190Sstevel@tonic-gate static size_t
57200Sstevel@tonic-gate cheerio_getcnt(struct ecppunit *pp)
57210Sstevel@tonic-gate {
57220Sstevel@tonic-gate 	return (GET_DMAC_BCR(pp));
57230Sstevel@tonic-gate }
57240Sstevel@tonic-gate 
57250Sstevel@tonic-gate /*
57260Sstevel@tonic-gate  * Reset the DCSR by first setting the RESET bit to 1.  Poll the
57270Sstevel@tonic-gate  * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles.
57280Sstevel@tonic-gate  * If there are no more pending cycles, clear the RESET bit.
57290Sstevel@tonic-gate  */
57300Sstevel@tonic-gate static void
57310Sstevel@tonic-gate cheerio_reset_dcsr(struct ecppunit *pp)
57320Sstevel@tonic-gate {
57330Sstevel@tonic-gate 	int	timeout = DMAC_RESET_TIMEOUT;
57340Sstevel@tonic-gate 
57350Sstevel@tonic-gate 	SET_DMAC_CSR(pp, DCSR_RESET);
57360Sstevel@tonic-gate 
57370Sstevel@tonic-gate 	while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) {
57380Sstevel@tonic-gate 		if (timeout == 0) {
57390Sstevel@tonic-gate 			ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n");
57400Sstevel@tonic-gate 			break;
57410Sstevel@tonic-gate 		} else {
57420Sstevel@tonic-gate 			drv_usecwait(1);
57430Sstevel@tonic-gate 			timeout--;
57440Sstevel@tonic-gate 		}
57450Sstevel@tonic-gate 	}
57460Sstevel@tonic-gate 
57470Sstevel@tonic-gate 	SET_DMAC_CSR(pp, 0);
57480Sstevel@tonic-gate }
57490Sstevel@tonic-gate 
57500Sstevel@tonic-gate /*
57510Sstevel@tonic-gate  *
57520Sstevel@tonic-gate  * Grover Southbridge (M1553) support routines
57530Sstevel@tonic-gate  * Southbridge contains an Intel 8237 DMAC onboard which is used
57540Sstevel@tonic-gate  * to transport data to/from PCI space to superio parallel port
57550Sstevel@tonic-gate  *
57560Sstevel@tonic-gate  */
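/*
 * Rough per-transfer call sequence for this path (a sketch assembled from
 * the routines below, not a definitive recipe):
 *
 *	m1553_map_regs(pp), m1553_config_chip(pp)	- at attach time
 *	pp->dma_cookie, pp->dma_dir set up by the DMA binding code
 *	dma8237_dma_start(pp)		- program channel, enter isadma lock
 *	... transfer runs until terminal count ...
 *	dma8237_dma_stop(pp, &count)	- release lock, read residual count
 */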
57570Sstevel@tonic-gate 
57580Sstevel@tonic-gate 
57590Sstevel@tonic-gate static int
57600Sstevel@tonic-gate m1553_map_regs(struct ecppunit *pp)
57610Sstevel@tonic-gate {
57620Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space,
57637656SSherry.Moore@Sun.COM 	    0, sizeof (struct isaspace), &acc_attr,
57647656SSherry.Moore@Sun.COM 	    &pp->uh.m1553.d_handle) != DDI_SUCCESS) {
57650Sstevel@tonic-gate 		ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n");
57660Sstevel@tonic-gate 		goto fail;
57670Sstevel@tonic-gate 	}
57680Sstevel@tonic-gate 
57690Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
57707656SSherry.Moore@Sun.COM 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
57717656SSherry.Moore@Sun.COM 	    != DDI_SUCCESS) {
57720Sstevel@tonic-gate 		ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n");
57730Sstevel@tonic-gate 		goto fail;
57740Sstevel@tonic-gate 	}
57750Sstevel@tonic-gate 
57760Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
57777656SSherry.Moore@Sun.COM 	    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
57787656SSherry.Moore@Sun.COM 	    != DDI_SUCCESS) {
57790Sstevel@tonic-gate 		ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n");
57800Sstevel@tonic-gate 		goto fail;
57810Sstevel@tonic-gate 	}
57820Sstevel@tonic-gate 
57830Sstevel@tonic-gate 	return (SUCCESS);
57840Sstevel@tonic-gate 
57850Sstevel@tonic-gate fail:
57860Sstevel@tonic-gate 	m1553_unmap_regs(pp);
57870Sstevel@tonic-gate 	return (FAILURE);
57880Sstevel@tonic-gate }
57890Sstevel@tonic-gate 
57900Sstevel@tonic-gate static void
57910Sstevel@tonic-gate m1553_unmap_regs(struct ecppunit *pp)
57920Sstevel@tonic-gate {
57930Sstevel@tonic-gate 	if (pp->uh.m1553.d_handle) {
57940Sstevel@tonic-gate 		ddi_regs_map_free(&pp->uh.m1553.d_handle);
57950Sstevel@tonic-gate 	}
57960Sstevel@tonic-gate 	if (pp->i_handle) {
57970Sstevel@tonic-gate 		ddi_regs_map_free(&pp->i_handle);
57980Sstevel@tonic-gate 	}
57990Sstevel@tonic-gate 	if (pp->f_handle) {
58000Sstevel@tonic-gate 		ddi_regs_map_free(&pp->f_handle);
58010Sstevel@tonic-gate 	}
58020Sstevel@tonic-gate }
58030Sstevel@tonic-gate 
58040Sstevel@tonic-gate #if defined(__x86)
58050Sstevel@tonic-gate static int
58060Sstevel@tonic-gate x86_map_regs(struct ecppunit *pp)
58070Sstevel@tonic-gate {
58080Sstevel@tonic-gate 	int nregs = 0;
58090Sstevel@tonic-gate 
58100Sstevel@tonic-gate 	if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
58110Sstevel@tonic-gate 	    sizeof (struct info_reg), &acc_attr, &pp->i_handle)
58120Sstevel@tonic-gate 	    != DDI_SUCCESS) {
58130Sstevel@tonic-gate 		ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n");
58140Sstevel@tonic-gate 		goto fail;
58150Sstevel@tonic-gate 	}
58160Sstevel@tonic-gate 	if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) {
58170Sstevel@tonic-gate 		if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0,
58180Sstevel@tonic-gate 		    sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
58190Sstevel@tonic-gate 		    != DDI_SUCCESS) {
58200Sstevel@tonic-gate 			ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n");
58210Sstevel@tonic-gate 			goto fail;
58220Sstevel@tonic-gate 		} else
58230Sstevel@tonic-gate 			pp->noecpregs = FALSE;
58240Sstevel@tonic-gate 	} else {
58250Sstevel@tonic-gate 		pp->noecpregs = TRUE;
58260Sstevel@tonic-gate 	}
58270Sstevel@tonic-gate 	return (SUCCESS);
58280Sstevel@tonic-gate fail:
58290Sstevel@tonic-gate 	x86_unmap_regs(pp);
58300Sstevel@tonic-gate 	return (FAILURE);
58310Sstevel@tonic-gate }
58320Sstevel@tonic-gate 
58330Sstevel@tonic-gate static void
58340Sstevel@tonic-gate x86_unmap_regs(struct ecppunit *pp)
58350Sstevel@tonic-gate {
58360Sstevel@tonic-gate 	if (pp->i_handle) {
58370Sstevel@tonic-gate 		ddi_regs_map_free(&pp->i_handle);
58380Sstevel@tonic-gate 	}
58390Sstevel@tonic-gate 	if (pp->f_handle) {
58400Sstevel@tonic-gate 		ddi_regs_map_free(&pp->f_handle);
58410Sstevel@tonic-gate 	}
58420Sstevel@tonic-gate }
58430Sstevel@tonic-gate #endif
58440Sstevel@tonic-gate 
58450Sstevel@tonic-gate static uint8_t
58460Sstevel@tonic-gate m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
58470Sstevel@tonic-gate {
58480Sstevel@tonic-gate 	uint8_t retval;
58490Sstevel@tonic-gate 
58500Sstevel@tonic-gate 	dma8237_write(pp, 0x3F0, reg_num);
58510Sstevel@tonic-gate 	retval = dma8237_read(pp, 0x3F1);
58520Sstevel@tonic-gate 
58530Sstevel@tonic-gate 	return (retval);
58540Sstevel@tonic-gate }
58550Sstevel@tonic-gate 
58560Sstevel@tonic-gate static void
58570Sstevel@tonic-gate m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
58580Sstevel@tonic-gate {
58590Sstevel@tonic-gate 	dma8237_write(pp, 0x3F0, reg_num);
58600Sstevel@tonic-gate 	dma8237_write(pp, 0x3F1, val);
58610Sstevel@tonic-gate }
58620Sstevel@tonic-gate 
58630Sstevel@tonic-gate static int
58640Sstevel@tonic-gate m1553_config_chip(struct ecppunit *pp)
58650Sstevel@tonic-gate {
58660Sstevel@tonic-gate 	uint8_t conreg;
58670Sstevel@tonic-gate 
58680Sstevel@tonic-gate 	/* Unlock configuration regs with "key sequence" */
58690Sstevel@tonic-gate 	dma8237_write(pp, 0x3F0, 0x51);
58700Sstevel@tonic-gate 	dma8237_write(pp, 0x3F0, 0x23);
58710Sstevel@tonic-gate 
58720Sstevel@tonic-gate 	m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3);
58730Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO);
58740Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg);
58750Sstevel@tonic-gate 
58760Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE);
58770Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg);
58780Sstevel@tonic-gate 
58790Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB);
58800Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg);
58810Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB);
58820Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg);
58830Sstevel@tonic-gate 
58840Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL);
58850Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg);
58860Sstevel@tonic-gate 
58870Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN);
58880Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg);
58890Sstevel@tonic-gate 
58900Sstevel@tonic-gate 	/* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */
58910Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
58920Sstevel@tonic-gate 	conreg = (conreg & ~0x7F) | 0x0A;
58930Sstevel@tonic-gate 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg);
58940Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
58950Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conregF0(pport conf)=%x\n", conreg);
58960Sstevel@tonic-gate 
58970Sstevel@tonic-gate 	m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04);
58980Sstevel@tonic-gate 	conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1);
58990Sstevel@tonic-gate 	ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg);
59000Sstevel@tonic-gate 
59010Sstevel@tonic-gate 	/* lock configuration regs with key */
59020Sstevel@tonic-gate 	dma8237_write(pp, 0x3F0, 0xBB);
59030Sstevel@tonic-gate 
59040Sstevel@tonic-gate 	/* Set ECR, DCR in known state */
59050Sstevel@tonic-gate 	ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
59060Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
59070Sstevel@tonic-gate 
59080Sstevel@tonic-gate 	ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
59097656SSherry.Moore@Sun.COM 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
59100Sstevel@tonic-gate 
59110Sstevel@tonic-gate 	return (SUCCESS);
59120Sstevel@tonic-gate }
59130Sstevel@tonic-gate 
59140Sstevel@tonic-gate #if defined(__x86)
59150Sstevel@tonic-gate static int
59160Sstevel@tonic-gate x86_config_chip(struct ecppunit *pp)
59170Sstevel@tonic-gate {
59180Sstevel@tonic-gate 	if (ecr_write(pp, ECR_mode_001 |
59190Sstevel@tonic-gate 	    ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
59200Sstevel@tonic-gate 		ecpp_error(pp->dip, "config chip: failed w/ecr\n");
59210Sstevel@tonic-gate 		pp->noecpregs = TRUE;
59220Sstevel@tonic-gate 	}
59230Sstevel@tonic-gate 	if (pp->noecpregs)
59240Sstevel@tonic-gate 		pp->fast_compat = FALSE;
59250Sstevel@tonic-gate 	DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
59260Sstevel@tonic-gate 	ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
59270Sstevel@tonic-gate 	    ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
59280Sstevel@tonic-gate 	return (SUCCESS);
59290Sstevel@tonic-gate }
59300Sstevel@tonic-gate #endif
59310Sstevel@tonic-gate 
59320Sstevel@tonic-gate /*
59330Sstevel@tonic-gate  * dma8237_dma_start() programs the selected 8 bit channel
59340Sstevel@tonic-gate  * of DMAC1 with the dma cookie.  pp->dma_cookie must
59350Sstevel@tonic-gate  * be set before this routine is called.
59360Sstevel@tonic-gate  */
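/*
 * For example (a sketch, assuming the common DDI DMA binding path is used
 * elsewhere in this driver): ddi_dma_addr_bind_handle() fills in
 * pp->dma_cookie, and pp->dma_dir is set to DDI_DMA_READ or DDI_DMA_WRITE
 * before dma8237_dma_start(pp) is invoked.
 */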
59370Sstevel@tonic-gate static int
59380Sstevel@tonic-gate dma8237_dma_start(struct ecppunit *pp)
59390Sstevel@tonic-gate {
59400Sstevel@tonic-gate 	uint8_t chn;
59410Sstevel@tonic-gate 
59420Sstevel@tonic-gate 	chn = pp->uh.m1553.chn;
59430Sstevel@tonic-gate 
59440Sstevel@tonic-gate 	ASSERT(chn <= DMAE_CH3 &&
59457656SSherry.Moore@Sun.COM 	    pp->dma_cookie.dmac_size != 0 &&
59467656SSherry.Moore@Sun.COM 	    pp->dma_cookie.dmac_address != NULL);
59470Sstevel@tonic-gate 
59480Sstevel@tonic-gate 	/* At this point Southbridge has not yet asserted DREQ */
59490Sstevel@tonic-gate 
59500Sstevel@tonic-gate 	/* put DMAC2 in cascade mode; program DMAC1 mode from transfer direction */
59510Sstevel@tonic-gate 	dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC);
59520Sstevel@tonic-gate 	if (pp->dma_dir == DDI_DMA_READ) {
59530Sstevel@tonic-gate 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
59547656SSherry.Moore@Sun.COM 		    DMAMODE_READ | chn);
59550Sstevel@tonic-gate 	} else {
59560Sstevel@tonic-gate 		dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
59577656SSherry.Moore@Sun.COM 		    DMAMODE_WRITE | chn);
59580Sstevel@tonic-gate 	}
59590Sstevel@tonic-gate 
59600Sstevel@tonic-gate 	dma8237_write_addr(pp, pp->dma_cookie.dmac_address);
59610Sstevel@tonic-gate 	dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1);
59620Sstevel@tonic-gate 
59630Sstevel@tonic-gate 	/*
59640Sstevel@tonic-gate 	 * The M1553 chip does not permit access to its DMA register banks
59650Sstevel@tonic-gate 	 * while DMA is in flight. As a result, ecpp and floppy drivers
59660Sstevel@tonic-gate 	 * can potentially corrupt each other's DMA. The interlocking mechanism
59670Sstevel@tonic-gate 	 * is provided by a parent nexus driver (isadma), which is enabled
59680Sstevel@tonic-gate 	 * indirectly through a DMAC1_ALLMASK register access:
59690Sstevel@tonic-gate 	 *
59700Sstevel@tonic-gate 	 * writing a non-zero value to this register enters a lock,
59710Sstevel@tonic-gate 	 * writing zero releases the lock.
59720Sstevel@tonic-gate 	 *
59730Sstevel@tonic-gate 	 * DMA transfer must only occur after entering a lock.
59740Sstevel@tonic-gate 	 * If the lock is already owned by other driver, we will block.
59750Sstevel@tonic-gate 	 *
59760Sstevel@tonic-gate 	 * The following operation unmasks our channel and masks all others
59770Sstevel@tonic-gate 	 */
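	/*
	 * The matching release (writing 0 to DMAC1_ALLMASK) is performed
	 * in dma8237_dma_stop() once the transfer has been shut down.
	 */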
59780Sstevel@tonic-gate 	dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn));
59790Sstevel@tonic-gate 	pp->uh.m1553.isadma_entered = 1;
59800Sstevel@tonic-gate 
59810Sstevel@tonic-gate 	return (SUCCESS);
59820Sstevel@tonic-gate }
59830Sstevel@tonic-gate 
59840Sstevel@tonic-gate static int
59850Sstevel@tonic-gate dma8237_dma_stop(struct ecppunit *pp, size_t *countp)
59860Sstevel@tonic-gate {
59870Sstevel@tonic-gate 	uint8_t ecr;
59880Sstevel@tonic-gate 
59890Sstevel@tonic-gate 	/* stop DMA */
59900Sstevel@tonic-gate 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
59910Sstevel@tonic-gate 	(void) ecr_write(pp, ecr);
59920Sstevel@tonic-gate 
59930Sstevel@tonic-gate 	if (pp->uh.m1553.isadma_entered) {
59940Sstevel@tonic-gate 		/* reset the channel mask so we can issue PIO's to our device */
59950Sstevel@tonic-gate 		dma8237_write(pp, DMAC1_ALLMASK, 0);
59960Sstevel@tonic-gate 		pp->uh.m1553.isadma_entered = 0;
59970Sstevel@tonic-gate 
59980Sstevel@tonic-gate 	}
59990Sstevel@tonic-gate 
60000Sstevel@tonic-gate 	/* read DMA count if requested */
60010Sstevel@tonic-gate 	if (countp) {
60020Sstevel@tonic-gate 		*countp = dma8237_getcnt(pp);
60030Sstevel@tonic-gate 		if (pp->dma_dir == DDI_DMA_READ && *countp > 0) {
60040Sstevel@tonic-gate 			(*countp)++;	/* need correction for reverse xfers */
60050Sstevel@tonic-gate 		}
60060Sstevel@tonic-gate 	}
60070Sstevel@tonic-gate 	return (SUCCESS);
60080Sstevel@tonic-gate }
60090Sstevel@tonic-gate #if defined(__x86)
60100Sstevel@tonic-gate static int
60110Sstevel@tonic-gate x86_dma_start(struct ecppunit *pp)
60120Sstevel@tonic-gate {
60130Sstevel@tonic-gate 	uint8_t chn;
60140Sstevel@tonic-gate 	struct ddi_dmae_req dmaereq;
60150Sstevel@tonic-gate 
60160Sstevel@tonic-gate 	chn = pp->uh.x86.chn;
60170Sstevel@tonic-gate 	ASSERT(chn <= DMAE_CH3 &&
60180Sstevel@tonic-gate 	    pp->dma_cookie.dmac_size != 0 &&
60190Sstevel@tonic-gate 	    pp->dma_cookie.dmac_address != NULL);
60200Sstevel@tonic-gate 	bzero(&dmaereq, sizeof (struct ddi_dmae_req));
60210Sstevel@tonic-gate 	dmaereq.der_command =
60220Sstevel@tonic-gate 	    (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE;
60230Sstevel@tonic-gate 	if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn)
60240Sstevel@tonic-gate 	    != DDI_SUCCESS)
60250Sstevel@tonic-gate 		ecpp_error(pp->dip, "x86_dma_start: ddi_dmae_prog failed\n");
60260Sstevel@tonic-gate 	ecpp_error(pp->dip, "dma_started..\n");
60270Sstevel@tonic-gate 	return (SUCCESS);
60280Sstevel@tonic-gate }
60290Sstevel@tonic-gate 
60300Sstevel@tonic-gate static int
60310Sstevel@tonic-gate x86_dma_stop(struct ecppunit *pp, size_t *countp)
60320Sstevel@tonic-gate {
60330Sstevel@tonic-gate 	uint8_t ecr;
60340Sstevel@tonic-gate 
60350Sstevel@tonic-gate 	/* stop DMA */
60360Sstevel@tonic-gate 	if (pp->uh.x86.chn == 0xff)
60370Sstevel@tonic-gate 		return (FAILURE);
60380Sstevel@tonic-gate 	ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
60390Sstevel@tonic-gate 	(void) ecr_write(pp, ecr);
60400Sstevel@tonic-gate 	ecpp_error(pp->dip, "dma_stop\n");
60410Sstevel@tonic-gate 
60420Sstevel@tonic-gate 	/* read DMA count if requested */
60430Sstevel@tonic-gate 	if (countp) {
60440Sstevel@tonic-gate 		*countp = x86_getcnt(pp);
60450Sstevel@tonic-gate 	}
60460Sstevel@tonic-gate 	ecpp_error(pp->dip, "dma_stopped..\n");
60470Sstevel@tonic-gate 	return (SUCCESS);
60480Sstevel@tonic-gate }
60490Sstevel@tonic-gate #endif
60500Sstevel@tonic-gate 
60510Sstevel@tonic-gate /* channel must be masked */
60520Sstevel@tonic-gate static void
60530Sstevel@tonic-gate dma8237_write_addr(struct ecppunit *pp, uint32_t addr)
60540Sstevel@tonic-gate {
60550Sstevel@tonic-gate 	uint8_t c_addr, c_lpage;
60560Sstevel@tonic-gate 	uint16_t c_hpage, *p;
60570Sstevel@tonic-gate 
60580Sstevel@tonic-gate 	switch (pp->uh.m1553.chn) {
60590Sstevel@tonic-gate 	case DMAE_CH0:
60600Sstevel@tonic-gate 		c_addr = DMA_0ADR;
60610Sstevel@tonic-gate 		c_lpage = DMA_0PAGE;
60620Sstevel@tonic-gate 		c_hpage = DMA_0HPG;
60630Sstevel@tonic-gate 		break;
60640Sstevel@tonic-gate 
60650Sstevel@tonic-gate 	case DMAE_CH1:
60660Sstevel@tonic-gate 		c_addr = DMA_1ADR;
60670Sstevel@tonic-gate 		c_lpage = DMA_1PAGE;
60680Sstevel@tonic-gate 		c_hpage = DMA_1HPG;
60690Sstevel@tonic-gate 		break;
60700Sstevel@tonic-gate 
60710Sstevel@tonic-gate 	case DMAE_CH2:
60720Sstevel@tonic-gate 		c_addr = DMA_2ADR;
60730Sstevel@tonic-gate 		c_lpage = DMA_2PAGE;
60740Sstevel@tonic-gate 		c_hpage = DMA_2HPG;
60750Sstevel@tonic-gate 		break;
60760Sstevel@tonic-gate 
60770Sstevel@tonic-gate 	case DMAE_CH3:
60780Sstevel@tonic-gate 		c_addr = DMA_3ADR;
60790Sstevel@tonic-gate 		c_lpage = DMA_3PAGE;
60800Sstevel@tonic-gate 		c_hpage = DMA_3HPG;
60810Sstevel@tonic-gate 		break;
60820Sstevel@tonic-gate 
60830Sstevel@tonic-gate 	default:
60840Sstevel@tonic-gate 		return;
60850Sstevel@tonic-gate 	}
60860Sstevel@tonic-gate 
60870Sstevel@tonic-gate 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
60880Sstevel@tonic-gate 	ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF);
60890Sstevel@tonic-gate 
60900Sstevel@tonic-gate 	dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16);
60910Sstevel@tonic-gate 	dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24);
60920Sstevel@tonic-gate 
60930Sstevel@tonic-gate }
60940Sstevel@tonic-gate 
60950Sstevel@tonic-gate /*
60960Sstevel@tonic-gate  * This function may be useful during debugging,
60970Sstevel@tonic-gate  * so we leave it in, but do not include in the binary
60980Sstevel@tonic-gate  */
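/* (defining INCLUDE_DMA8237_READ_ADDR at build time pulls it back in) */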
60990Sstevel@tonic-gate #ifdef INCLUDE_DMA8237_READ_ADDR
61000Sstevel@tonic-gate static uint32_t
61010Sstevel@tonic-gate dma8237_read_addr(struct ecppunit *pp)
61020Sstevel@tonic-gate {
61030Sstevel@tonic-gate 	uint8_t rval3, rval4;
61040Sstevel@tonic-gate 	uint16_t rval16;
61050Sstevel@tonic-gate 	uint32_t rval;
61060Sstevel@tonic-gate 	uint8_t c_addr, c_lpage;
61070Sstevel@tonic-gate 	uint16_t c_hpage, *p;
61080Sstevel@tonic-gate 
61090Sstevel@tonic-gate 	switch (pp->uh.m1553.chn) {
61100Sstevel@tonic-gate 	case DMAE_CH0:
61110Sstevel@tonic-gate 		c_addr = DMA_0ADR;
61120Sstevel@tonic-gate 		c_lpage = DMA_0PAGE;
61130Sstevel@tonic-gate 		c_hpage = DMA_0HPG;
61140Sstevel@tonic-gate 		break;
61150Sstevel@tonic-gate 
61160Sstevel@tonic-gate 	case DMAE_CH1:
61170Sstevel@tonic-gate 		c_addr = DMA_1ADR;
61180Sstevel@tonic-gate 		c_lpage = DMA_1PAGE;
61190Sstevel@tonic-gate 		c_hpage = DMA_1HPG;
61200Sstevel@tonic-gate 		break;
61210Sstevel@tonic-gate 
61220Sstevel@tonic-gate 	case DMAE_CH2:
61230Sstevel@tonic-gate 		c_addr = DMA_2ADR;
61240Sstevel@tonic-gate 		c_lpage = DMA_2PAGE;
61250Sstevel@tonic-gate 		c_hpage = DMA_2HPG;
61260Sstevel@tonic-gate 		break;
61270Sstevel@tonic-gate 
61280Sstevel@tonic-gate 	case DMAE_CH3:
61290Sstevel@tonic-gate 		c_addr = DMA_3ADR;
61300Sstevel@tonic-gate 		c_lpage = DMA_3PAGE;
61310Sstevel@tonic-gate 		c_hpage = DMA_3HPG;
61320Sstevel@tonic-gate 		break;
61330Sstevel@tonic-gate 
61340Sstevel@tonic-gate 	default:
61350Sstevel@tonic-gate 		return (0);
61360Sstevel@tonic-gate 	}
61370Sstevel@tonic-gate 
61380Sstevel@tonic-gate 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
61390Sstevel@tonic-gate 	rval16 = ddi_get16(pp->uh.m1553.d_handle, p);
61400Sstevel@tonic-gate 
61410Sstevel@tonic-gate 	rval3 = dma8237_read(pp, c_lpage);
61420Sstevel@tonic-gate 	rval4 = dma8237_read(pp, c_hpage);
61430Sstevel@tonic-gate 
61440Sstevel@tonic-gate 	rval = rval16 | (rval3 << 16) | (rval4 << 24);
61450Sstevel@tonic-gate 
61460Sstevel@tonic-gate 	return (rval);
61470Sstevel@tonic-gate }
61480Sstevel@tonic-gate #endif
61490Sstevel@tonic-gate 
61500Sstevel@tonic-gate static void
61510Sstevel@tonic-gate dma8237_write_count(struct ecppunit *pp, uint32_t count)
61520Sstevel@tonic-gate {
61530Sstevel@tonic-gate 	uint8_t c_wcnt;
61540Sstevel@tonic-gate 	uint16_t *p;
61550Sstevel@tonic-gate 
61560Sstevel@tonic-gate 	switch (pp->uh.m1553.chn) {
61570Sstevel@tonic-gate 	case DMAE_CH0:
61580Sstevel@tonic-gate 		c_wcnt = DMA_0WCNT;
61590Sstevel@tonic-gate 		break;
61600Sstevel@tonic-gate 
61610Sstevel@tonic-gate 	case DMAE_CH1:
61620Sstevel@tonic-gate 		c_wcnt = DMA_1WCNT;
61630Sstevel@tonic-gate 		break;
61640Sstevel@tonic-gate 
61650Sstevel@tonic-gate 	case DMAE_CH2:
61660Sstevel@tonic-gate 		c_wcnt = DMA_2WCNT;
61670Sstevel@tonic-gate 		break;
61680Sstevel@tonic-gate 
61690Sstevel@tonic-gate 	case DMAE_CH3:
61700Sstevel@tonic-gate 		c_wcnt = DMA_3WCNT;
61710Sstevel@tonic-gate 		break;
61720Sstevel@tonic-gate 
61730Sstevel@tonic-gate 	default:
61740Sstevel@tonic-gate 		return;
61750Sstevel@tonic-gate 	}
61760Sstevel@tonic-gate 
61770Sstevel@tonic-gate 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
61780Sstevel@tonic-gate 	ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF);
61790Sstevel@tonic-gate 
61800Sstevel@tonic-gate }
61810Sstevel@tonic-gate 
61820Sstevel@tonic-gate static uint32_t
61830Sstevel@tonic-gate dma8237_read_count(struct ecppunit *pp)
61840Sstevel@tonic-gate {
61850Sstevel@tonic-gate 	uint8_t c_wcnt;
61860Sstevel@tonic-gate 	uint16_t *p;
61870Sstevel@tonic-gate 
61880Sstevel@tonic-gate 	switch (pp->uh.m1553.chn) {
61890Sstevel@tonic-gate 	case DMAE_CH0:
61900Sstevel@tonic-gate 		c_wcnt = DMA_0WCNT;
61910Sstevel@tonic-gate 		break;
61920Sstevel@tonic-gate 
61930Sstevel@tonic-gate 	case DMAE_CH1:
61940Sstevel@tonic-gate 		c_wcnt = DMA_1WCNT;
61950Sstevel@tonic-gate 		break;
61960Sstevel@tonic-gate 
61970Sstevel@tonic-gate 	case DMAE_CH2:
61980Sstevel@tonic-gate 		c_wcnt = DMA_2WCNT;
61990Sstevel@tonic-gate 		break;
62000Sstevel@tonic-gate 
62010Sstevel@tonic-gate 	case DMAE_CH3:
62020Sstevel@tonic-gate 		c_wcnt = DMA_3WCNT;
62030Sstevel@tonic-gate 		break;
62040Sstevel@tonic-gate 
62050Sstevel@tonic-gate 	default:
62060Sstevel@tonic-gate 		return (0);
62070Sstevel@tonic-gate 	}
62080Sstevel@tonic-gate 
62090Sstevel@tonic-gate 	p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
62100Sstevel@tonic-gate 	return (ddi_get16(pp->uh.m1553.d_handle, p));
62110Sstevel@tonic-gate 
62120Sstevel@tonic-gate }
62130Sstevel@tonic-gate 
62140Sstevel@tonic-gate static void
62150Sstevel@tonic-gate dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val)
62160Sstevel@tonic-gate {
62170Sstevel@tonic-gate 	ddi_put8(pp->uh.m1553.d_handle,
62187656SSherry.Moore@Sun.COM 	    &pp->uh.m1553.isa_space->isa_reg[reg_num], val);
62190Sstevel@tonic-gate }
62200Sstevel@tonic-gate 
62210Sstevel@tonic-gate static uint8_t
62220Sstevel@tonic-gate dma8237_read(struct ecppunit *pp, int reg_num)
62230Sstevel@tonic-gate {
62240Sstevel@tonic-gate 	return (ddi_get8(pp->uh.m1553.d_handle,
62250Sstevel@tonic-gate 	    &pp->uh.m1553.isa_space->isa_reg[reg_num]));
62260Sstevel@tonic-gate }
62270Sstevel@tonic-gate 
62280Sstevel@tonic-gate static size_t
62290Sstevel@tonic-gate dma8237_getcnt(struct ecppunit *pp)
62300Sstevel@tonic-gate {
62310Sstevel@tonic-gate 	uint32_t cnt;
62320Sstevel@tonic-gate 
62330Sstevel@tonic-gate 	if ((cnt = dma8237_read_count(pp)) == 0xffff)
62340Sstevel@tonic-gate 		cnt = 0;
62350Sstevel@tonic-gate 	else
62360Sstevel@tonic-gate 		cnt++;
62370Sstevel@tonic-gate 	return (cnt);
62380Sstevel@tonic-gate }
62390Sstevel@tonic-gate 
62400Sstevel@tonic-gate 
62410Sstevel@tonic-gate /*
62420Sstevel@tonic-gate  *
62430Sstevel@tonic-gate  * Kstat support routines
62440Sstevel@tonic-gate  *
62450Sstevel@tonic-gate  */
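/*
 * These statistics can be read from userland with kstat(1M), e.g.
 * "kstat -m ecpp" for all instances, or "kstat ecpp:0:ecppc0" for the
 * interrupt counters of instance 0 (instance number is an example).
 */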
62460Sstevel@tonic-gate static void
62470Sstevel@tonic-gate ecpp_kstat_init(struct ecppunit *pp)
62480Sstevel@tonic-gate {
62490Sstevel@tonic-gate 	struct ecppkstat *ekp;
62500Sstevel@tonic-gate 	char buf[16];
62510Sstevel@tonic-gate 
62520Sstevel@tonic-gate 	/*
62530Sstevel@tonic-gate 	 * Allocate, initialize and install interrupt counter kstat
62540Sstevel@tonic-gate 	 */
62550Sstevel@tonic-gate 	(void) sprintf(buf, "ecppc%d", pp->instance);
62560Sstevel@tonic-gate 	pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
62577656SSherry.Moore@Sun.COM 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
62580Sstevel@tonic-gate 	if (pp->intrstats == NULL) {
62590Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
62600Sstevel@tonic-gate 	} else {
62610Sstevel@tonic-gate 		pp->intrstats->ks_update = ecpp_kstatintr_update;
62620Sstevel@tonic-gate 		pp->intrstats->ks_private = (void *) pp;
62630Sstevel@tonic-gate 		kstat_install(pp->intrstats);
62640Sstevel@tonic-gate 	}
62650Sstevel@tonic-gate 
62660Sstevel@tonic-gate 	/*
62670Sstevel@tonic-gate 	 * Allocate, initialize and install misc stats kstat
62680Sstevel@tonic-gate 	 */
62690Sstevel@tonic-gate 	pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
62707656SSherry.Moore@Sun.COM 	    KSTAT_TYPE_NAMED,
62717656SSherry.Moore@Sun.COM 	    sizeof (struct ecppkstat) / sizeof (kstat_named_t),
62727656SSherry.Moore@Sun.COM 	    KSTAT_FLAG_PERSISTENT);
62730Sstevel@tonic-gate 	if (pp->ksp == NULL) {
62740Sstevel@tonic-gate 		ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
62750Sstevel@tonic-gate 		return;
62760Sstevel@tonic-gate 	}
62770Sstevel@tonic-gate 
62780Sstevel@tonic-gate 	ekp = (struct ecppkstat *)pp->ksp->ks_data;
62790Sstevel@tonic-gate 
62800Sstevel@tonic-gate #define	EK_NAMED_INIT(name) \
62810Sstevel@tonic-gate 	kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)
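/*
 * For example, EK_NAMED_INIT(joblen) expands to
 * kstat_named_init(&ekp->ek_joblen, "joblen", KSTAT_DATA_UINT32).
 */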
62820Sstevel@tonic-gate 
62830Sstevel@tonic-gate 	EK_NAMED_INIT(ctx_obytes);
62840Sstevel@tonic-gate 	EK_NAMED_INIT(ctxpio_obytes);
62850Sstevel@tonic-gate 	EK_NAMED_INIT(nib_ibytes);
62860Sstevel@tonic-gate 	EK_NAMED_INIT(ecp_obytes);
62870Sstevel@tonic-gate 	EK_NAMED_INIT(ecp_ibytes);
62880Sstevel@tonic-gate 	EK_NAMED_INIT(epp_obytes);
62890Sstevel@tonic-gate 	EK_NAMED_INIT(epp_ibytes);
62900Sstevel@tonic-gate 	EK_NAMED_INIT(diag_obytes);
62910Sstevel@tonic-gate 	EK_NAMED_INIT(to_ctx);
62920Sstevel@tonic-gate 	EK_NAMED_INIT(to_nib);
62930Sstevel@tonic-gate 	EK_NAMED_INIT(to_ecp);
62940Sstevel@tonic-gate 	EK_NAMED_INIT(to_epp);
62950Sstevel@tonic-gate 	EK_NAMED_INIT(to_diag);
62960Sstevel@tonic-gate 	EK_NAMED_INIT(xfer_tout);
62970Sstevel@tonic-gate 	EK_NAMED_INIT(ctx_cf);
62980Sstevel@tonic-gate 	EK_NAMED_INIT(joblen);
62990Sstevel@tonic-gate 	EK_NAMED_INIT(isr_reattempt_high);
63000Sstevel@tonic-gate 	EK_NAMED_INIT(mode);
63010Sstevel@tonic-gate 	EK_NAMED_INIT(phase);
63020Sstevel@tonic-gate 	EK_NAMED_INIT(backchan);
63030Sstevel@tonic-gate 	EK_NAMED_INIT(iomode);
63040Sstevel@tonic-gate 	EK_NAMED_INIT(state);
63050Sstevel@tonic-gate 
63060Sstevel@tonic-gate 	pp->ksp->ks_update = ecpp_kstat_update;
63070Sstevel@tonic-gate 	pp->ksp->ks_private = (void *) pp;
63080Sstevel@tonic-gate 	kstat_install(pp->ksp);
63090Sstevel@tonic-gate }
63100Sstevel@tonic-gate 
63110Sstevel@tonic-gate static int
63120Sstevel@tonic-gate ecpp_kstat_update(kstat_t *ksp, int rw)
63130Sstevel@tonic-gate {
63140Sstevel@tonic-gate 	struct ecppunit *pp;
63150Sstevel@tonic-gate 	struct ecppkstat *ekp;
63160Sstevel@tonic-gate 
63170Sstevel@tonic-gate 	/*
63180Sstevel@tonic-gate 	 * For the time being there is no point
63190Sstevel@tonic-gate 	 * in supporting writable kstats
63200Sstevel@tonic-gate 	 */
63210Sstevel@tonic-gate 	if (rw == KSTAT_WRITE) {
63220Sstevel@tonic-gate 		return (EACCES);
63230Sstevel@tonic-gate 	}
63240Sstevel@tonic-gate 
63250Sstevel@tonic-gate 	pp = (struct ecppunit *)ksp->ks_private;
63260Sstevel@tonic-gate 	ekp = (struct ecppkstat *)ksp->ks_data;
63270Sstevel@tonic-gate 
63280Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
63290Sstevel@tonic-gate 
63300Sstevel@tonic-gate 	ekp->ek_ctx_obytes.value.ui32	= pp->obytes[ECPP_CENTRONICS] +
63317656SSherry.Moore@Sun.COM 	    pp->obytes[ECPP_COMPAT_MODE];
63320Sstevel@tonic-gate 	ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
63330Sstevel@tonic-gate 	ekp->ek_nib_ibytes.value.ui32	= pp->ibytes[ECPP_NIBBLE_MODE];
63340Sstevel@tonic-gate 	ekp->ek_ecp_obytes.value.ui32	= pp->obytes[ECPP_ECP_MODE];
63350Sstevel@tonic-gate 	ekp->ek_ecp_ibytes.value.ui32	= pp->ibytes[ECPP_ECP_MODE];
63360Sstevel@tonic-gate 	ekp->ek_epp_obytes.value.ui32	= pp->obytes[ECPP_EPP_MODE];
63370Sstevel@tonic-gate 	ekp->ek_epp_ibytes.value.ui32	= pp->ibytes[ECPP_EPP_MODE];
63380Sstevel@tonic-gate 	ekp->ek_diag_obytes.value.ui32	= pp->obytes[ECPP_DIAG_MODE];
63390Sstevel@tonic-gate 	ekp->ek_to_ctx.value.ui32	= pp->to_mode[ECPP_CENTRONICS] +
63407656SSherry.Moore@Sun.COM 	    pp->to_mode[ECPP_COMPAT_MODE];
63410Sstevel@tonic-gate 	ekp->ek_to_nib.value.ui32	= pp->to_mode[ECPP_NIBBLE_MODE];
63420Sstevel@tonic-gate 	ekp->ek_to_ecp.value.ui32	= pp->to_mode[ECPP_ECP_MODE];
63430Sstevel@tonic-gate 	ekp->ek_to_epp.value.ui32	= pp->to_mode[ECPP_EPP_MODE];
63440Sstevel@tonic-gate 	ekp->ek_to_diag.value.ui32	= pp->to_mode[ECPP_DIAG_MODE];
63450Sstevel@tonic-gate 	ekp->ek_xfer_tout.value.ui32	= pp->xfer_tout;
63460Sstevel@tonic-gate 	ekp->ek_ctx_cf.value.ui32	= pp->ctx_cf;
63470Sstevel@tonic-gate 	ekp->ek_joblen.value.ui32	= pp->joblen;
63480Sstevel@tonic-gate 	ekp->ek_isr_reattempt_high.value.ui32	= pp->isr_reattempt_high;
63490Sstevel@tonic-gate 	ekp->ek_mode.value.ui32		= pp->current_mode;
63500Sstevel@tonic-gate 	ekp->ek_phase.value.ui32	= pp->current_phase;
63510Sstevel@tonic-gate 	ekp->ek_backchan.value.ui32	= pp->backchannel;
63520Sstevel@tonic-gate 	ekp->ek_iomode.value.ui32	= pp->io_mode;
63530Sstevel@tonic-gate 	ekp->ek_state.value.ui32	= pp->e_busy;
63540Sstevel@tonic-gate 
63550Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
63560Sstevel@tonic-gate 
63570Sstevel@tonic-gate 	return (0);
63580Sstevel@tonic-gate }
63590Sstevel@tonic-gate 
63600Sstevel@tonic-gate static int
63610Sstevel@tonic-gate ecpp_kstatintr_update(kstat_t *ksp, int rw)
63620Sstevel@tonic-gate {
63630Sstevel@tonic-gate 	struct ecppunit *pp;
63640Sstevel@tonic-gate 
63650Sstevel@tonic-gate 	/*
63660Sstevel@tonic-gate 	 * For the time being there is no point
63670Sstevel@tonic-gate 	 * in supporting writable kstats
63680Sstevel@tonic-gate 	 */
63690Sstevel@tonic-gate 	if (rw == KSTAT_WRITE) {
63700Sstevel@tonic-gate 		return (EACCES);
63710Sstevel@tonic-gate 	}
63720Sstevel@tonic-gate 
63730Sstevel@tonic-gate 	pp = (struct ecppunit *)ksp->ks_private;
63740Sstevel@tonic-gate 
63750Sstevel@tonic-gate 	mutex_enter(&pp->umutex);
63760Sstevel@tonic-gate 
63770Sstevel@tonic-gate 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
63780Sstevel@tonic-gate 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
63790Sstevel@tonic-gate 	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;
63800Sstevel@tonic-gate 
63810Sstevel@tonic-gate 	mutex_exit(&pp->umutex);
63820Sstevel@tonic-gate 
63830Sstevel@tonic-gate 	return (0);
63840Sstevel@tonic-gate }