xref: /netbsd-src/sys/dev/ic/siop.c (revision 274254cdae52594c1aa480a736aef78313d15c9c)
1 /*	$NetBSD: siop.c,v 1.90 2009/03/15 17:24:43 cegger Exp $	*/
2 
3 /*
4  * Copyright (c) 2000 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Manuel Bouyer.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop.c,v 1.90 2009/03/15 17:24:43 cegger Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44 
45 #include <uvm/uvm_extern.h>
46 
47 #include <machine/endian.h>
48 #include <sys/bus.h>
49 
50 #include <dev/microcode/siop/siop.out>
51 
52 #include <dev/scsipi/scsi_all.h>
53 #include <dev/scsipi/scsi_message.h>
54 #include <dev/scsipi/scsipi_all.h>
55 
56 #include <dev/scsipi/scsiconf.h>
57 
58 #include <dev/ic/siopreg.h>
59 #include <dev/ic/siopvar_common.h>
60 #include <dev/ic/siopvar.h>
61 
62 #include "opt_siop.h"
63 
64 #ifndef DEBUG
65 #undef DEBUG
66 #endif
67 /*
68 #define SIOP_DEBUG
69 #define SIOP_DEBUG_DR
70 #define SIOP_DEBUG_INTR
71 #define SIOP_DEBUG_SCHED
72 #define DUMP_SCRIPT
73 */
74 
75 #define SIOP_STATS
76 
77 #ifndef SIOP_DEFAULT_TARGET
78 #define SIOP_DEFAULT_TARGET 7
79 #endif
80 
81 /* number of cmd descriptors per block */
82 #define SIOP_NCMDPB (PAGE_SIZE / sizeof(struct siop_xfer))
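/*
 * For illustration (sizes are illustrative only; both PAGE_SIZE and
 * sizeof(struct siop_xfer) depend on the platform): with 4 KB pages and a
 * siop_xfer of roughly 256 bytes this yields 16 command descriptors per
 * block, which is what siop_morecbd() carves out of each page it allocates.
 */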
83 
84 /* Number of scheduler slots (must match the script) */
85 #define SIOP_NSLOTS 40
86 
87 void	siop_reset(struct siop_softc *);
88 void	siop_handle_reset(struct siop_softc *);
89 int	siop_handle_qtag_reject(struct siop_cmd *);
90 void	siop_scsicmd_end(struct siop_cmd *);
91 void	siop_unqueue(struct siop_softc *, int, int);
92 static void	siop_start(struct siop_softc *, struct siop_cmd *);
93 void 	siop_timeout(void *);
94 int	siop_scsicmd(struct scsipi_xfer *);
95 void	siop_scsipi_request(struct scsipi_channel *,
96 			scsipi_adapter_req_t, void *);
97 void	siop_dump_script(struct siop_softc *);
98 void	siop_morecbd(struct siop_softc *);
99 struct siop_lunsw *siop_get_lunsw(struct siop_softc *);
100 void	siop_add_reselsw(struct siop_softc *, int);
101 void	siop_update_scntl3(struct siop_softc *,
102 			struct siop_common_target *);
103 
104 #ifdef SIOP_STATS
105 static int siop_stat_intr = 0;
106 static int siop_stat_intr_shortxfer = 0;
107 static int siop_stat_intr_sdp = 0;
108 static int siop_stat_intr_saveoffset = 0;
109 static int siop_stat_intr_done = 0;
110 static int siop_stat_intr_xferdisc = 0;
111 static int siop_stat_intr_lunresel = 0;
112 static int siop_stat_intr_qfull = 0;
113 void siop_printstats(void);
114 #define INCSTAT(x) x++
115 #else
116 #define INCSTAT(x)
117 #endif
118 
119 static inline void siop_script_sync(struct siop_softc *, int);
120 static inline void
121 siop_script_sync(struct siop_softc *sc, int ops)
122 {
123 	if ((sc->sc_c.features & SF_CHIP_RAM) == 0)
124 		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
125 		    PAGE_SIZE, ops);
126 }
127 
128 static inline u_int32_t siop_script_read(struct siop_softc *, u_int);
129 static inline u_int32_t
130 siop_script_read(struct siop_softc *sc, u_int offset)
131 {
132 	if (sc->sc_c.features & SF_CHIP_RAM) {
133 		return bus_space_read_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
134 		    offset * 4);
135 	} else {
136 		return siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[offset]);
137 	}
138 }
139 
140 static inline void siop_script_write(struct siop_softc *, u_int,
141 	u_int32_t);
142 static inline void
143 siop_script_write(struct siop_softc *sc, u_int offset, u_int32_t val)
144 {
145 	if (sc->sc_c.features & SF_CHIP_RAM) {
146 		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
147 		    offset * 4, val);
148 	} else {
149 		sc->sc_c.sc_script[offset] = siop_htoc32(&sc->sc_c, val);
150 	}
151 }
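/*
 * These three helpers hide whether the SCRIPT lives in on-chip RAM
 * (SF_CHIP_RAM) or in host memory behind the sc_scriptdma DMA map.
 * Offsets are in 32-bit script words, hence the "offset * 4" byte
 * addressing here and the "/ 4" divisions applied to the Ent_* byte
 * offsets at the call sites, e.g.
 * siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2, 0x80000000).
 */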
152 
153 void
154 siop_attach(struct siop_softc *sc)
155 {
156 	if (siop_common_attach(&sc->sc_c) != 0)
157 		return;
158 
159 	TAILQ_INIT(&sc->free_list);
160 	TAILQ_INIT(&sc->cmds);
161 	TAILQ_INIT(&sc->lunsw_list);
162 	sc->sc_currschedslot = 0;
163 #ifdef SIOP_DEBUG
164 	printf("%s: script size = %d, PHY addr=0x%x, VIRT=%p\n",
165 	    device_xname(&sc->sc_c.sc_dev), (int)sizeof(siop_script),
166 	    (u_int32_t)sc->sc_c.sc_scriptaddr, sc->sc_c.sc_script);
167 #endif
168 
169 	sc->sc_c.sc_adapt.adapt_max_periph = SIOP_NTAG - 1;
170 	sc->sc_c.sc_adapt.adapt_request = siop_scsipi_request;
171 
172 	/* Do a bus reset, so that devices fall back to narrow/async */
173 	siop_resetbus(&sc->sc_c);
174 	/*
175 	 * siop_reset() will reset the chip, thus clearing pending interrupts
176 	 */
177 	siop_reset(sc);
178 #ifdef DUMP_SCRIPT
179 	siop_dump_script(sc);
180 #endif
181 
182 	config_found((struct device*)sc, &sc->sc_c.sc_chan, scsiprint);
183 }
184 
185 void
186 siop_reset(struct siop_softc *sc)
187 {
188 	int i, j;
189 	struct siop_lunsw *lunsw;
190 
191 	siop_common_reset(&sc->sc_c);
192 
193 	/* copy and patch the script */
194 	if (sc->sc_c.features & SF_CHIP_RAM) {
195 		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh, 0,
196 		    siop_script, sizeof(siop_script) / sizeof(siop_script[0]));
197 		for (j = 0; j <
198 		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
199 		    j++) {
200 			bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
201 			    E_abs_msgin_Used[j] * 4,
202 			    sc->sc_c.sc_scriptaddr + Ent_msgin_space);
203 		}
204 		if (sc->sc_c.features & SF_CHIP_LED0) {
205 			bus_space_write_region_4(sc->sc_c.sc_ramt,
206 			    sc->sc_c.sc_ramh,
207 			    Ent_led_on1, siop_led_on,
208 			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
209 			bus_space_write_region_4(sc->sc_c.sc_ramt,
210 			    sc->sc_c.sc_ramh,
211 			    Ent_led_on2, siop_led_on,
212 			    sizeof(siop_led_on) / sizeof(siop_led_on[0]));
213 			bus_space_write_region_4(sc->sc_c.sc_ramt,
214 			    sc->sc_c.sc_ramh,
215 			    Ent_led_off, siop_led_off,
216 			    sizeof(siop_led_off) / sizeof(siop_led_off[0]));
217 		}
218 	} else {
219 		for (j = 0;
220 		    j < (sizeof(siop_script) / sizeof(siop_script[0])); j++) {
221 			sc->sc_c.sc_script[j] =
222 			    siop_htoc32(&sc->sc_c, siop_script[j]);
223 		}
224 		for (j = 0; j <
225 		    (sizeof(E_abs_msgin_Used) / sizeof(E_abs_msgin_Used[0]));
226 		    j++) {
227 			sc->sc_c.sc_script[E_abs_msgin_Used[j]] =
228 			    siop_htoc32(&sc->sc_c,
229 				sc->sc_c.sc_scriptaddr + Ent_msgin_space);
230 		}
231 		if (sc->sc_c.features & SF_CHIP_LED0) {
232 			for (j = 0; j < (sizeof(siop_led_on) /
233 			    sizeof(siop_led_on[0])); j++)
234 				sc->sc_c.sc_script[
235 				    Ent_led_on1 / sizeof(siop_led_on[0]) + j
236 				    ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
237 			for (j = 0; j < (sizeof(siop_led_on) /
238 			    sizeof(siop_led_on[0])); j++)
239 				sc->sc_c.sc_script[
240 				    Ent_led_on2 / sizeof(siop_led_on[0]) + j
241 				    ] = siop_htoc32(&sc->sc_c, siop_led_on[j]);
242 			for (j = 0; j < (sizeof(siop_led_off) /
243 			    sizeof(siop_led_off[0])); j++)
244 				sc->sc_c.sc_script[
245 				   Ent_led_off / sizeof(siop_led_off[0]) + j
246 				   ] = siop_htoc32(&sc->sc_c, siop_led_off[j]);
247 		}
248 	}
249 	sc->script_free_lo = sizeof(siop_script) / sizeof(siop_script[0]);
250 	sc->script_free_hi = sc->sc_c.ram_size / 4;
251 	sc->sc_ntargets = 0;
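	/*
	 * script_free_lo and script_free_hi delimit (in 32-bit script words)
	 * the area left free above the main script; the lun switch and
	 * reselect entries set up below are presumably allocated from this
	 * pool by siop_get_lunsw() and siop_add_reselsw().
	 */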
252 
253 	/* free used and unused lun switches */
254 	while((lunsw = TAILQ_FIRST(&sc->lunsw_list)) != NULL) {
255 #ifdef SIOP_DEBUG
256 		printf("%s: free lunsw at offset %d\n",
257 			device_xname(&sc->sc_c.sc_dev), lunsw->lunsw_off);
258 #endif
259 		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
260 		free(lunsw, M_DEVBUF);
261 	}
262 	TAILQ_INIT(&sc->lunsw_list);
263 	/* restore reselect switch */
264 	for (i = 0; i < sc->sc_c.sc_chan.chan_ntargets; i++) {
265 		struct siop_target *target;
266 		if (sc->sc_c.targets[i] == NULL)
267 			continue;
268 #ifdef SIOP_DEBUG
269 		printf("%s: restore sw for target %d\n",
270 			device_xname(&sc->sc_c.sc_dev), i);
271 #endif
272 		target = (struct siop_target *)sc->sc_c.targets[i];
273 		free(target->lunsw, M_DEVBUF);
274 		target->lunsw = siop_get_lunsw(sc);
275 		if (target->lunsw == NULL) {
276 			aprint_error_dev(&sc->sc_c.sc_dev, "can't alloc lunsw for target %d\n", i);
277 			break;
278 		}
279 		siop_add_reselsw(sc, i);
280 	}
281 
282 	/* start script */
283 	if ((sc->sc_c.features & SF_CHIP_RAM) == 0) {
284 		bus_dmamap_sync(sc->sc_c.sc_dmat, sc->sc_c.sc_scriptdma, 0,
285 		    PAGE_SIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
286 	}
287 	bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP,
288 	    sc->sc_c.sc_scriptaddr + Ent_reselect);
289 }
290 
291 #if 0
292 #define CALL_SCRIPT(ent) do {\
293 	printf ("start script DSA 0x%lx DSP 0x%lx\n", \
294 	    siop_cmd->cmd_c.dsa, \
295 	    sc->sc_c.sc_scriptaddr + ent); \
296 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
297 } while (0)
298 #else
299 #define CALL_SCRIPT(ent) do {\
300 bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSP, sc->sc_c.sc_scriptaddr + ent); \
301 } while (0)
302 #endif
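/*
 * CALL_SCRIPT(ent) resumes the SCRIPTS processor by writing the physical
 * address of the "ent" entry point (sc_scriptaddr + ent, e.g. Ent_status,
 * Ent_msgin_ack or Ent_script_sched) to the DSP register; the #if 0 variant
 * above is the same thing with an extra debug printf of the DSA/DSP used.
 */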
303 
304 int
305 siop_intr(void *v)
306 {
307 	struct siop_softc *sc = v;
308 	struct siop_target *siop_target;
309 	struct siop_cmd *siop_cmd;
310 	struct siop_lun *siop_lun;
311 	struct scsipi_xfer *xs;
312 	int istat, sist, sstat1, dstat = 0; /* XXX: gcc */
313 	u_int32_t irqcode;
314 	int need_reset = 0;
315 	int offset, target, lun, tag;
316 	bus_addr_t dsa;
317 	struct siop_cbd *cbdp;
318 	int freetarget = 0;
319 	int restart = 0;
320 
321 	istat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_ISTAT);
322 	if ((istat & (ISTAT_INTF | ISTAT_DIP | ISTAT_SIP)) == 0)
323 		return 0;
324 	INCSTAT(siop_stat_intr);
325 	if (istat & ISTAT_INTF) {
326 		printf("INTRF\n");
327 		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
328 		    SIOP_ISTAT, ISTAT_INTF);
329 	}
330 	if ((istat &(ISTAT_DIP | ISTAT_SIP | ISTAT_ABRT)) ==
331 	    (ISTAT_DIP | ISTAT_ABRT)) {
332 		/* clear abort */
333 		bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
334 		    SIOP_ISTAT, 0);
335 	}
336 	/* use DSA to find the current siop_cmd */
337 	siop_cmd = NULL;
338 	dsa = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA);
339 	TAILQ_FOREACH(cbdp, &sc->cmds, next) {
340 		if (dsa >= cbdp->xferdma->dm_segs[0].ds_addr &&
341 	    	    dsa < cbdp->xferdma->dm_segs[0].ds_addr + PAGE_SIZE) {
342 			dsa -= cbdp->xferdma->dm_segs[0].ds_addr;
343 			siop_cmd = &cbdp->cmds[dsa / sizeof(struct siop_xfer)];
344 			siop_table_sync(siop_cmd,
345 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
346 			break;
347 		}
348 	}
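	/*
	 * The lookup above relies on each siop_cbd owning one page of
	 * siop_xfer structures: once the page whose DMA segment contains the
	 * DSA value is found, the command index inside that page is simply
	 * (dsa - page_base) / sizeof(struct siop_xfer).
	 */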
349 	if (siop_cmd) {
350 		xs = siop_cmd->cmd_c.xs;
351 		siop_target = (struct siop_target *)siop_cmd->cmd_c.siop_target;
352 		target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
353 		lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
354 		tag = siop_cmd->cmd_c.tag;
355 		siop_lun = siop_target->siop_lun[lun];
356 #ifdef DIAGNOSTIC
357 		if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
358  			printf("siop_cmd (lun %d) for DSA 0x%x "
359 			    "not active (%d)\n", lun, (u_int)dsa,
360 			    siop_cmd->cmd_c.status);
361 			xs = NULL;
362 			siop_target = NULL;
363 			target = -1;
364 			lun = -1;
365 			tag = -1;
366 			siop_lun = NULL;
367 			siop_cmd = NULL;
368 		} else if (siop_lun->siop_tag[tag].active != siop_cmd) {
369 			printf("siop_cmd (lun %d tag %d) not in siop_lun "
370 			    "active (%p != %p)\n", lun, tag, siop_cmd,
371 			    siop_lun->siop_tag[tag].active);
372 		}
373 #endif
374 	} else {
375 		xs = NULL;
376 		siop_target = NULL;
377 		target = -1;
378 		lun = -1;
379 		tag = -1;
380 		siop_lun = NULL;
381 	}
382 	if (istat & ISTAT_DIP) {
383 		dstat = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
384 		    SIOP_DSTAT);
385 		if (dstat & DSTAT_ABRT) {
386 			/* was probably generated by a bus reset IOCTL */
387 			if ((dstat & DSTAT_DFE) == 0)
388 				siop_clearfifo(&sc->sc_c);
389 			goto reset;
390 		}
391 		if (dstat & DSTAT_SSI) {
392 			printf("single step dsp 0x%08x dsa 0x%08x\n",
393 			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
394 			    sc->sc_c.sc_rh, SIOP_DSP) -
395 			    sc->sc_c.sc_scriptaddr),
396 			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
397 				SIOP_DSA));
398 			if ((dstat & ~(DSTAT_DFE | DSTAT_SSI)) == 0 &&
399 			    (istat & ISTAT_SIP) == 0) {
400 				bus_space_write_1(sc->sc_c.sc_rt,
401 				    sc->sc_c.sc_rh, SIOP_DCNTL,
402 				    bus_space_read_1(sc->sc_c.sc_rt,
403 				    sc->sc_c.sc_rh, SIOP_DCNTL) | DCNTL_STD);
404 			}
405 			return 1;
406 		}
407 
408 		if (dstat & ~(DSTAT_SIR | DSTAT_DFE | DSTAT_SSI)) {
409 			printf("DMA IRQ:");
410 			if (dstat & DSTAT_IID)
411 				printf(" Illegal instruction");
412 			if (dstat & DSTAT_BF)
413 				printf(" bus fault");
414 			if (dstat & DSTAT_MDPE)
415 				printf(" parity");
416 			if (dstat & DSTAT_DFE)
417 				printf(" DMA fifo empty");
418 			else
419 				siop_clearfifo(&sc->sc_c);
420 			printf(", DSP=0x%x DSA=0x%x: ",
421 			    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
422 				SIOP_DSP) - sc->sc_c.sc_scriptaddr),
423 			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA));
424 			if (siop_cmd)
425 				printf("last msg_in=0x%x status=0x%x\n",
426 				    siop_cmd->cmd_tables->msg_in[0],
427 				    siop_ctoh32(&sc->sc_c,
428 					siop_cmd->cmd_tables->status));
429 			else
430 				aprint_error_dev(&sc->sc_c.sc_dev, "current DSA invalid\n");
431 			need_reset = 1;
432 		}
433 	}
434 	if (istat & ISTAT_SIP) {
435 		if (istat & ISTAT_DIP)
436 			delay(10);
437 		/*
438 		 * Read sist0 & sist1 together; we can't read them independently
439 		 * without inserting a delay between the two reads.
440 		 */
441 		sist = bus_space_read_2(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
442 		    SIOP_SIST0);
443 		sstat1 = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
444 		    SIOP_SSTAT1);
445 #ifdef SIOP_DEBUG_INTR
446 		printf("scsi interrupt, sist=0x%x sstat1=0x%x "
447 		    "DSA=0x%x DSP=0x%lx\n", sist,
448 		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
449 			SIOP_SSTAT1),
450 		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
451 		    (u_long)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
452 			SIOP_DSP) -
453 		    sc->sc_c.sc_scriptaddr));
454 #endif
455 		if (sist & SIST0_RST) {
456 			siop_handle_reset(sc);
457 			/* no table to flush here */
458 			return 1;
459 		}
460 		if (sist & SIST0_SGE) {
461 			if (siop_cmd)
462 				scsipi_printaddr(xs->xs_periph);
463 			else
464 				printf("%s:", device_xname(&sc->sc_c.sc_dev));
465 			printf("scsi gross error\n");
466 			goto reset;
467 		}
468 		if ((sist & SIST0_MA) && need_reset == 0) {
469 			if (siop_cmd) {
470 				int scratcha0;
471 				dstat = bus_space_read_1(sc->sc_c.sc_rt,
472 				    sc->sc_c.sc_rh, SIOP_DSTAT);
473 				/*
474 				 * first restore DSA, in case we were in a S/G
475 				 * operation.
476 				 */
477 				bus_space_write_4(sc->sc_c.sc_rt,
478 				    sc->sc_c.sc_rh,
479 				    SIOP_DSA, siop_cmd->cmd_c.dsa);
480 				scratcha0 = bus_space_read_1(sc->sc_c.sc_rt,
481 				    sc->sc_c.sc_rh, SIOP_SCRATCHA);
482 				switch (sstat1 & SSTAT1_PHASE_MASK) {
483 				case SSTAT1_PHASE_STATUS:
484 				/*
485 				 * previous phase may be aborted for any reason
486 				 * ( for example, the target has less data to
487 				 * transfer than requested). Compute resid and
488 				 * just go to status, the command should
489 				 * terminate.
490 				 */
491 					INCSTAT(siop_stat_intr_shortxfer);
492 					if (scratcha0 & A_flag_data)
493 						siop_ma(&siop_cmd->cmd_c);
494 					else if ((dstat & DSTAT_DFE) == 0)
495 						siop_clearfifo(&sc->sc_c);
496 					CALL_SCRIPT(Ent_status);
497 					return 1;
498 				case SSTAT1_PHASE_MSGIN:
499 				/*
500 				 * target may be ready to disconnect
501 				 * Compute resid which would be used later
502 				 * if a save data pointer is needed.
503 				 */
504 					INCSTAT(siop_stat_intr_xferdisc);
505 					if (scratcha0 & A_flag_data)
506 						siop_ma(&siop_cmd->cmd_c);
507 					else if ((dstat & DSTAT_DFE) == 0)
508 						siop_clearfifo(&sc->sc_c);
509 					bus_space_write_1(sc->sc_c.sc_rt,
510 					    sc->sc_c.sc_rh, SIOP_SCRATCHA,
511 					    scratcha0 & ~A_flag_data);
512 					CALL_SCRIPT(Ent_msgin);
513 					return 1;
514 				}
515 				aprint_error_dev(&sc->sc_c.sc_dev, "unexpected phase mismatch %d\n",
516 				    sstat1 & SSTAT1_PHASE_MASK);
517 			} else {
518 				aprint_error_dev(&sc->sc_c.sc_dev, "phase mismatch without command\n");
519 			}
520 			need_reset = 1;
521 		}
522 		if (sist & SIST0_PAR) {
523 			/* parity error, reset */
524 			if (siop_cmd)
525 				scsipi_printaddr(xs->xs_periph);
526 			else
527 				printf("%s:", device_xname(&sc->sc_c.sc_dev));
528 			printf("parity error\n");
529 			goto reset;
530 		}
531 		if ((sist & (SIST1_STO << 8)) && need_reset == 0) {
532 			/* selection time out, assume there's no device here */
533 			if (siop_cmd) {
534 				siop_cmd->cmd_c.status = CMDST_DONE;
535 				xs->error = XS_SELTIMEOUT;
536 				freetarget = 1;
537 				goto end;
538 			} else {
539 				aprint_error_dev(&sc->sc_c.sc_dev, "selection timeout without "
540 				    "command\n");
541 				need_reset = 1;
542 			}
543 		}
544 		if (sist & SIST0_UDC) {
545 			/*
546 			 * unexpected disconnect. Usually the target signals
547 			 * a fatal condition this way. Attempt to get sense.
548 			 */
549 			 if (siop_cmd) {
550 				siop_cmd->cmd_tables->status =
551 				    siop_htoc32(&sc->sc_c, SCSI_CHECK);
552 				goto end;
553 			}
554 			aprint_error_dev(&sc->sc_c.sc_dev, "unexpected disconnect without "
555 			    "command\n");
556 			goto reset;
557 		}
558 		if (sist & (SIST1_SBMC << 8)) {
559 			/* SCSI bus mode change */
560 			if (siop_modechange(&sc->sc_c) == 0 || need_reset == 1)
561 				goto reset;
562 			if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) {
563 				/*
564 				 * we have a script interrupt, it will
565 				 * restart the script.
566 				 */
567 				goto scintr;
568 			}
569 			/*
570 			 * otherwise we have to restart it ourselves, at the
571 			 * interrupted instruction.
572 			 */
573 			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
574 			    SIOP_DSP,
575 			    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
576 			    SIOP_DSP) - 8);
577 			return 1;
578 		}
579 		/* Else it's an unhandled exception (for now). */
580 		aprint_error_dev(&sc->sc_c.sc_dev, "unhandled scsi interrupt, sist=0x%x sstat1=0x%x "
581 		    "DSA=0x%x DSP=0x%x\n", sist,
582 		    bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
583 			SIOP_SSTAT1),
584 		    bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh, SIOP_DSA),
585 		    (int)(bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
586 			SIOP_DSP) - sc->sc_c.sc_scriptaddr));
587 		if (siop_cmd) {
588 			siop_cmd->cmd_c.status = CMDST_DONE;
589 			xs->error = XS_SELTIMEOUT;
590 			goto end;
591 		}
592 		need_reset = 1;
593 	}
594 	if (need_reset) {
595 reset:
596 		/* fatal error, reset the bus */
597 		siop_resetbus(&sc->sc_c);
598 		/* no table to flush here */
599 		return 1;
600 	}
601 
602 scintr:
603 	if ((istat & ISTAT_DIP) && (dstat & DSTAT_SIR)) { /* script interrupt */
604 		irqcode = bus_space_read_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
605 		    SIOP_DSPS);
606 #ifdef SIOP_DEBUG_INTR
607 		printf("script interrupt 0x%x\n", irqcode);
608 #endif
609 		/*
610 		 * having no command, or an inactive command, is only valid
611 		 * for a reselect interrupt
612 		 */
613 		if ((irqcode & 0x80) == 0) {
614 			if (siop_cmd == NULL) {
615 				aprint_error_dev(&sc->sc_c.sc_dev,
616 			"script interrupt (0x%x) with invalid DSA !!!\n",
617 				    irqcode);
618 				goto reset;
619 			}
620 			if (siop_cmd->cmd_c.status != CMDST_ACTIVE) {
621 				aprint_error_dev(&sc->sc_c.sc_dev, "command with invalid status "
622 				    "(IRQ code 0x%x current status %d) !\n",
623 				    irqcode, siop_cmd->cmd_c.status);
624 				xs = NULL;
625 			}
626 		}
627 		switch(irqcode) {
628 		case A_int_err:
629 			printf("error, DSP=0x%x\n",
630 			    (int)(bus_space_read_4(sc->sc_c.sc_rt,
631 			    sc->sc_c.sc_rh, SIOP_DSP) - sc->sc_c.sc_scriptaddr));
632 			if (xs) {
633 				xs->error = XS_SELTIMEOUT;
634 				goto end;
635 			} else {
636 				goto reset;
637 			}
638 		case A_int_reseltarg:
639 			aprint_error_dev(&sc->sc_c.sc_dev, "reselect with invalid target\n");
640 			goto reset;
641 		case A_int_resellun:
642 			INCSTAT(siop_stat_intr_lunresel);
643 			target = bus_space_read_1(sc->sc_c.sc_rt,
644 			    sc->sc_c.sc_rh, SIOP_SCRATCHA) & 0xf;
645 			lun = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
646 			    SIOP_SCRATCHA + 1);
647 			tag = bus_space_read_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
648 			    SIOP_SCRATCHA + 2);
649 			siop_target =
650 			    (struct siop_target *)sc->sc_c.targets[target];
651 			if (siop_target == NULL) {
652 				printf("%s: reselect with invalid target %d\n",
653 				    device_xname(&sc->sc_c.sc_dev), target);
654 				goto reset;
655 			}
656 			siop_lun = siop_target->siop_lun[lun];
657 			if (siop_lun == NULL) {
658 				printf("%s: target %d reselect with invalid "
659 				    "lun %d\n", device_xname(&sc->sc_c.sc_dev),
660 				    target, lun);
661 				goto reset;
662 			}
663 			if (siop_lun->siop_tag[tag].active == NULL) {
664 				printf("%s: target %d lun %d tag %d reselect "
665 				    "without command\n",
666 				    device_xname(&sc->sc_c.sc_dev),
667 				    target, lun, tag);
668 				goto reset;
669 			}
670 			siop_cmd = siop_lun->siop_tag[tag].active;
671 			bus_space_write_4(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
672 			    SIOP_DSP, siop_cmd->cmd_c.dsa +
673 			    sizeof(struct siop_common_xfer) +
674 			    Ent_ldsa_reload_dsa);
675 			siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
676 			return 1;
677 		case A_int_reseltag:
678 			printf("%s: reselect with invalid tag\n",
679 				device_xname(&sc->sc_c.sc_dev));
680 			goto reset;
681 		case A_int_msgin:
682 		{
683 			int msgin = bus_space_read_1(sc->sc_c.sc_rt,
684 			    sc->sc_c.sc_rh, SIOP_SFBR);
685 			if (msgin == MSG_MESSAGE_REJECT) {
686 				int msg, extmsg;
687 				if (siop_cmd->cmd_tables->msg_out[0] & 0x80) {
688 					/*
689 					 * message was part of an identify +
690 					 * something else. Identify shouldn't
691 					 * have been rejected.
692 					 */
693 					msg =
694 					    siop_cmd->cmd_tables->msg_out[1];
695 					extmsg =
696 					    siop_cmd->cmd_tables->msg_out[3];
697 				} else {
698 					msg = siop_cmd->cmd_tables->msg_out[0];
699 					extmsg =
700 					    siop_cmd->cmd_tables->msg_out[2];
701 				}
702 				if (msg == MSG_MESSAGE_REJECT) {
703 					/* MSG_REJECT for a MSG_REJECT! */
704 					if (xs)
705 						scsipi_printaddr(xs->xs_periph);
706 					else
707 						printf("%s: ",
708 						   device_xname(&sc->sc_c.sc_dev));
709 					printf("our reject message was "
710 					    "rejected\n");
711 					goto reset;
712 				}
713 				if (msg == MSG_EXTENDED &&
714 				    extmsg == MSG_EXT_WDTR) {
715 					/* WDTR rejected, initiate sync */
716 					if ((siop_target->target_c.flags &
717 					   TARF_SYNC) == 0) {
718 						siop_target->target_c.status =
719 						    TARST_OK;
720 						siop_update_xfer_mode(&sc->sc_c,
721 						    target);
722 						/* no table to flush here */
723 						CALL_SCRIPT(Ent_msgin_ack);
724 						return 1;
725 					}
726 					siop_target->target_c.status =
727 					    TARST_SYNC_NEG;
728 					siop_sdtr_msg(&siop_cmd->cmd_c, 0,
729 					    sc->sc_c.st_minsync,
730 					    sc->sc_c.maxoff);
731 					siop_table_sync(siop_cmd,
732 					    BUS_DMASYNC_PREREAD |
733 					    BUS_DMASYNC_PREWRITE);
734 					CALL_SCRIPT(Ent_send_msgout);
735 					return 1;
736 				} else if (msg == MSG_EXTENDED &&
737 				    extmsg == MSG_EXT_SDTR) {
738 					/* sync rejected */
739 					siop_target->target_c.offset = 0;
740 					siop_target->target_c.period = 0;
741 					siop_target->target_c.status = TARST_OK;
742 					siop_update_xfer_mode(&sc->sc_c,
743 					    target);
744 					/* no table to flush here */
745 					CALL_SCRIPT(Ent_msgin_ack);
746 					return 1;
747 				} else if (msg == MSG_SIMPLE_Q_TAG ||
748 				    msg == MSG_HEAD_OF_Q_TAG ||
749 				    msg == MSG_ORDERED_Q_TAG) {
750 					if (siop_handle_qtag_reject(
751 					    siop_cmd) == -1)
752 						goto reset;
753 					CALL_SCRIPT(Ent_msgin_ack);
754 					return 1;
755 				}
756 				if (xs)
757 					scsipi_printaddr(xs->xs_periph);
758 				else
759 					printf("%s: ",
760 					    device_xname(&sc->sc_c.sc_dev));
761 				if (msg == MSG_EXTENDED) {
762 					printf("scsi message reject, extended "
763 					    "message sent was 0x%x\n", extmsg);
764 				} else {
765 					printf("scsi message reject, message "
766 					    "sent was 0x%x\n", msg);
767 				}
768 				/* no table to flush here */
769 				CALL_SCRIPT(Ent_msgin_ack);
770 				return 1;
771 			}
772 			if (msgin == MSG_IGN_WIDE_RESIDUE) {
773 			/* use the extmsgdata table to get the second byte */
774 				siop_cmd->cmd_tables->t_extmsgdata.count =
775 				    siop_htoc32(&sc->sc_c, 1);
776 				siop_table_sync(siop_cmd,
777 				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
778 				CALL_SCRIPT(Ent_get_extmsgdata);
779 				return 1;
780 			}
781 			if (xs)
782 				scsipi_printaddr(xs->xs_periph);
783 			else
784 				printf("%s: ", device_xname(&sc->sc_c.sc_dev));
785 			printf("unhandled message 0x%x\n",
786 			    siop_cmd->cmd_tables->msg_in[0]);
787 			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
788 			siop_cmd->cmd_tables->t_msgout.count =
789 				siop_htoc32(&sc->sc_c, 1);
790 			siop_table_sync(siop_cmd,
791 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
792 			CALL_SCRIPT(Ent_send_msgout);
793 			return 1;
794 		}
795 		case A_int_extmsgin:
796 #ifdef SIOP_DEBUG_INTR
797 			printf("extended message: msg 0x%x len %d\n",
798 			    siop_cmd->cmd_tables->msg_in[2],
799 			    siop_cmd->cmd_tables->msg_in[1]);
800 #endif
801 			if (siop_cmd->cmd_tables->msg_in[1] >
802 			    sizeof(siop_cmd->cmd_tables->msg_in) - 2)
803 				aprint_error_dev(&sc->sc_c.sc_dev, "extended message too big (%d)\n",
804 				    siop_cmd->cmd_tables->msg_in[1]);
805 			siop_cmd->cmd_tables->t_extmsgdata.count =
806 			    siop_htoc32(&sc->sc_c,
807 				siop_cmd->cmd_tables->msg_in[1] - 1);
808 			siop_table_sync(siop_cmd,
809 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
810 			CALL_SCRIPT(Ent_get_extmsgdata);
811 			return 1;
812 		case A_int_extmsgdata:
813 #ifdef SIOP_DEBUG_INTR
814 			{
815 			int i;
816 			printf("extended message: 0x%x, data:",
817 			    siop_cmd->cmd_tables->msg_in[2]);
818 			for (i = 3; i < 2 + siop_cmd->cmd_tables->msg_in[1];
819 			    i++)
820 				printf(" 0x%x",
821 				    siop_cmd->cmd_tables->msg_in[i]);
822 			printf("\n");
823 			}
824 #endif
825 			if (siop_cmd->cmd_tables->msg_in[0] ==
826 			    MSG_IGN_WIDE_RESIDUE) {
827 			/* we got the second byte of MSG_IGN_WIDE_RESIDUE */
828 				if (siop_cmd->cmd_tables->msg_in[3] != 1)
829 					printf("MSG_IGN_WIDE_RESIDUE: "
830 					    "bad len %d\n",
831 					    siop_cmd->cmd_tables->msg_in[3]);
832 				switch (siop_iwr(&siop_cmd->cmd_c)) {
833 				case SIOP_NEG_MSGOUT:
834 					siop_table_sync(siop_cmd,
835 					    BUS_DMASYNC_PREREAD |
836 					    BUS_DMASYNC_PREWRITE);
837 					CALL_SCRIPT(Ent_send_msgout);
838 					return(1);
839 				case SIOP_NEG_ACK:
840 					CALL_SCRIPT(Ent_msgin_ack);
841 					return(1);
842 				default:
843 					panic("invalid retval from "
844 					    "siop_iwr()");
845 				}
846 				return(1);
847 			}
848 			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_WDTR) {
849 				switch (siop_wdtr_neg(&siop_cmd->cmd_c)) {
850 				case SIOP_NEG_MSGOUT:
851 					siop_update_scntl3(sc,
852 					    siop_cmd->cmd_c.siop_target);
853 					siop_table_sync(siop_cmd,
854 					    BUS_DMASYNC_PREREAD |
855 					    BUS_DMASYNC_PREWRITE);
856 					CALL_SCRIPT(Ent_send_msgout);
857 					return(1);
858 				case SIOP_NEG_ACK:
859 					siop_update_scntl3(sc,
860 					    siop_cmd->cmd_c.siop_target);
861 					CALL_SCRIPT(Ent_msgin_ack);
862 					return(1);
863 				default:
864 					panic("invalid retval from "
865 					    "siop_wdtr_neg()");
866 				}
867 				return(1);
868 			}
869 			if (siop_cmd->cmd_tables->msg_in[2] == MSG_EXT_SDTR) {
870 				switch (siop_sdtr_neg(&siop_cmd->cmd_c)) {
871 				case SIOP_NEG_MSGOUT:
872 					siop_update_scntl3(sc,
873 					    siop_cmd->cmd_c.siop_target);
874 					siop_table_sync(siop_cmd,
875 					    BUS_DMASYNC_PREREAD |
876 					    BUS_DMASYNC_PREWRITE);
877 					CALL_SCRIPT(Ent_send_msgout);
878 					return(1);
879 				case SIOP_NEG_ACK:
880 					siop_update_scntl3(sc,
881 					    siop_cmd->cmd_c.siop_target);
882 					CALL_SCRIPT(Ent_msgin_ack);
883 					return(1);
884 				default:
885 					panic("invalid retval from "
886 					    "siop_sdtr_neg()");
887 				}
888 				return(1);
889 			}
890 			/* send a message reject */
891 			siop_cmd->cmd_tables->msg_out[0] = MSG_MESSAGE_REJECT;
892 			siop_cmd->cmd_tables->t_msgout.count =
893 			    siop_htoc32(&sc->sc_c, 1);
894 			siop_table_sync(siop_cmd,
895 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
896 			CALL_SCRIPT(Ent_send_msgout);
897 			return 1;
898 		case A_int_disc:
899 			INCSTAT(siop_stat_intr_sdp);
900 			offset = bus_space_read_1(sc->sc_c.sc_rt,
901 			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
902 #ifdef SIOP_DEBUG_DR
903 			printf("disconnect offset %d\n", offset);
904 #endif
905 			siop_sdp(&siop_cmd->cmd_c, offset);
906 			/* we start again with no offset */
907 			siop_cmd->saved_offset = SIOP_NOOFFSET;
908 			siop_table_sync(siop_cmd,
909 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
910 			CALL_SCRIPT(Ent_script_sched);
911 			return 1;
912 		case A_int_saveoffset:
913 			INCSTAT(siop_stat_intr_saveoffset);
914 			offset = bus_space_read_1(sc->sc_c.sc_rt,
915 			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
916 #ifdef SIOP_DEBUG_DR
917 			printf("saveoffset offset %d\n", offset);
918 #endif
919 			siop_cmd->saved_offset = offset;
920 			CALL_SCRIPT(Ent_script_sched);
921 			return 1;
922 		case A_int_resfail:
923 			printf("reselect failed\n");
924 			CALL_SCRIPT(Ent_script_sched);
925 			return  1;
926 		case A_int_done:
927 			if (xs == NULL) {
928 				printf("%s: done without command, DSA=0x%lx\n",
929 				    device_xname(&sc->sc_c.sc_dev),
930 				    (u_long)siop_cmd->cmd_c.dsa);
931 				siop_cmd->cmd_c.status = CMDST_FREE;
932 				CALL_SCRIPT(Ent_script_sched);
933 				return 1;
934 			}
935 #ifdef SIOP_DEBUG_INTR
936 			printf("done, DSA=0x%lx target id 0x%x last msg "
937 			    "in=0x%x status=0x%x\n", (u_long)siop_cmd->cmd_c.dsa,
938 			    siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->id),
939 			    siop_cmd->cmd_tables->msg_in[0],
940 			    siop_ctoh32(&sc->sc_c,
941 				siop_cmd->cmd_tables->status));
942 #endif
943 			INCSTAT(siop_stat_intr_done);
944 			/* update resid.  */
945 			offset = bus_space_read_1(sc->sc_c.sc_rt,
946 			    sc->sc_c.sc_rh, SIOP_SCRATCHA + 1);
947 			/*
948 			 * if we got a disconnect between the last data phase
949 			 * and the status phase, offset will be 0. In this
950 			 * case, siop_cmd->saved_offset will have the proper
951 			 * value if it got updated by the controller
952 			 */
953 			if (offset == 0 &&
954 			    siop_cmd->saved_offset != SIOP_NOOFFSET)
955 				offset = siop_cmd->saved_offset;
956 			siop_update_resid(&siop_cmd->cmd_c, offset);
957 			siop_cmd->cmd_c.status = CMDST_DONE;
958 			goto end;
959 		default:
960 			printf("unknown irqcode %x\n", irqcode);
961 			if (xs) {
962 				xs->error = XS_SELTIMEOUT;
963 				goto end;
964 			}
965 			goto reset;
966 		}
967 		return 1;
968 	}
969 	/* We just shouldn't get here */
970 	panic("siop_intr: I shouldn't be there !");
971 
972 end:
973 	/*
974 	 * restart the script now if the command completed properly.
975 	 * Otherwise wait for siop_scsicmd_end(); we may need to clean up
976 	 * the queue.
977 	 */
978 	xs->status = siop_ctoh32(&sc->sc_c, siop_cmd->cmd_tables->status);
979 	if (xs->status == SCSI_OK)
980 		CALL_SCRIPT(Ent_script_sched);
981 	else
982 		restart = 1;
983 	siop_lun->siop_tag[tag].active = NULL;
984 	siop_scsicmd_end(siop_cmd);
985 	if (freetarget && siop_target->target_c.status == TARST_PROBING)
986 		siop_del_dev(sc, target, lun);
987 	if (restart)
988 		CALL_SCRIPT(Ent_script_sched);
989 	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
990 		/* a command terminated, so we have free slots now */
991 		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
992 		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
993 	}
994 
995 	return 1;
996 }
997 
998 void
999 siop_scsicmd_end(struct siop_cmd *siop_cmd)
1000 {
1001 	struct scsipi_xfer *xs = siop_cmd->cmd_c.xs;
1002 	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1003 
1004 	switch(xs->status) {
1005 	case SCSI_OK:
1006 		xs->error = XS_NOERROR;
1007 		break;
1008 	case SCSI_BUSY:
1009 		xs->error = XS_BUSY;
1010 		break;
1011 	case SCSI_CHECK:
1012 		xs->error = XS_BUSY;
1013 		/* remove commands in the queue and scheduler */
1014 		siop_unqueue(sc, xs->xs_periph->periph_target,
1015 		    xs->xs_periph->periph_lun);
1016 		break;
1017 	case SCSI_QUEUE_FULL:
1018 		INCSTAT(siop_stat_intr_qfull);
1019 #ifdef SIOP_DEBUG
1020 		printf("%s:%d:%d: queue full (tag %d)\n",
1021 		    device_xname(&sc->sc_c.sc_dev),
1022 		    xs->xs_periph->periph_target,
1023 		    xs->xs_periph->periph_lun, siop_cmd->cmd_c.tag);
1024 #endif
1025 		xs->error = XS_BUSY;
1026 		break;
1027 	case SCSI_SIOP_NOCHECK:
1028 		/*
1029 		 * don't check status, xs->error is already valid
1030 		 */
1031 		break;
1032 	case SCSI_SIOP_NOSTATUS:
1033 		/*
1034 		 * the status byte was not updated, cmd was
1035 		 * aborted
1036 		 */
1037 		xs->error = XS_SELTIMEOUT;
1038 		break;
1039 	default:
1040 		scsipi_printaddr(xs->xs_periph);
1041 		printf("invalid status code %d\n", xs->status);
1042 		xs->error = XS_DRIVER_STUFFUP;
1043 	}
1044 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1045 		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data, 0,
1046 		    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1047 		    (xs->xs_control & XS_CTL_DATA_IN) ?
1048 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1049 		bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_data);
1050 	}
1051 	bus_dmamap_unload(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd);
1052 	if ((xs->xs_control & XS_CTL_POLL) == 0)
1053 		callout_stop(&xs->xs_callout);
1054 	siop_cmd->cmd_c.status = CMDST_FREE;
1055 	TAILQ_INSERT_TAIL(&sc->free_list, siop_cmd, next);
1056 #if 0
1057 	if (xs->resid != 0)
1058 		printf("resid %d datalen %d\n", xs->resid, xs->datalen);
1059 #endif
1060 	scsipi_done (xs);
1061 }
1062 
1063 void
1064 siop_unqueue(struct siop_softc *sc, int target, int lun)
1065 {
1066  	int slot, tag;
1067 	struct siop_cmd *siop_cmd;
1068 	struct siop_lun *siop_lun =
1069 	    ((struct siop_target *)sc->sc_c.targets[target])->siop_lun[lun];
1070 
1071 	/* first make sure to read valid data */
1072 	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1073 
1074 	for (tag = 1; tag < SIOP_NTAG; tag++) {
1075 		/* look for commands in the scheduler, not yet started */
1076 		if (siop_lun->siop_tag[tag].active == NULL)
1077 			continue;
1078 		siop_cmd = siop_lun->siop_tag[tag].active;
1079 		for (slot = 0; slot <= sc->sc_currschedslot; slot++) {
1080 			if (siop_script_read(sc,
1081 			    (Ent_script_sched_slot0 / 4) + slot * 2 + 1) ==
1082 			    siop_cmd->cmd_c.dsa +
1083 			    sizeof(struct siop_common_xfer) +
1084 			    Ent_ldsa_select)
1085 				break;
1086 		}
1087 		if (slot >  sc->sc_currschedslot)
1088 			continue; /* didn't find it */
1089 		if (siop_script_read(sc,
1090 		    (Ent_script_sched_slot0 / 4) + slot * 2) == 0x80000000)
1091 			continue; /* already started */
1092 		/* clear the slot */
1093 		siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1094 		    0x80000000);
1095 		/* ask to requeue */
1096 		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1097 		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1098 		siop_lun->siop_tag[tag].active = NULL;
1099 		siop_scsicmd_end(siop_cmd);
1100 	}
1101 	/* update sc_currschedslot */
1102 	sc->sc_currschedslot = 0;
1103 	for (slot = SIOP_NSLOTS - 1; slot >= 0; slot--) {
1104 		if (siop_script_read(sc,
1105 		    (Ent_script_sched_slot0 / 4) + slot * 2) != 0x80000000)
1106 			sc->sc_currschedslot = slot;
1107 	}
1108 }
1109 
1110 /*
1111  * handle a rejected queue tag message: the command will run untagged,
1112  * so we have to adjust the reselect script.
1113  */
1114 int
1115 siop_handle_qtag_reject(struct siop_cmd *siop_cmd)
1116 {
1117 	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1118 	int target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1119 	int lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1120 	int tag = siop_cmd->cmd_tables->msg_out[2];
1121 	struct siop_lun *siop_lun =
1122 	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1123 
1124 #ifdef SIOP_DEBUG
1125 	printf("%s:%d:%d: tag message %d (%d) rejected (status %d)\n",
1126 	    device_xname(&sc->sc_c.sc_dev), target, lun, tag, siop_cmd->cmd_c.tag,
1127 	    siop_cmd->cmd_c.status);
1128 #endif
1129 
1130 	if (siop_lun->siop_tag[0].active != NULL) {
1131 		printf("%s: untagged command already running for target %d "
1132 		    "lun %d (status %d)\n", device_xname(&sc->sc_c.sc_dev),
1133 		    target, lun, siop_lun->siop_tag[0].active->cmd_c.status);
1134 		return -1;
1135 	}
1136 	/* clear tag slot */
1137 	siop_lun->siop_tag[tag].active = NULL;
1138 	/* add command to non-tagged slot */
1139 	siop_lun->siop_tag[0].active = siop_cmd;
1140 	siop_cmd->cmd_c.tag = 0;
1141 	/* adjust reselect script if there is one */
1142 	if (siop_lun->siop_tag[0].reseloff > 0) {
1143 		siop_script_write(sc,
1144 		    siop_lun->siop_tag[0].reseloff + 1,
1145 		    siop_cmd->cmd_c.dsa + sizeof(struct siop_common_xfer) +
1146 		    Ent_ldsa_reload_dsa);
1147 		siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1148 	}
1149 	return 0;
1150 }
1151 
1152 /*
1153  * handle a bus reset: reset the chip, unqueue all active commands, free
1154  * all target structs and report lossage to the upper layer.
1155  * As the upper layer may requeue immediately, we have to first store
1156  * all active commands in a temporary queue.
1157  */
1158 void
1159 siop_handle_reset(struct siop_softc *sc)
1160 {
1161 	struct siop_cmd *siop_cmd;
1162 	struct siop_lun *siop_lun;
1163 	int target, lun, tag;
1164 	/*
1165 	 * scsi bus reset. reset the chip and restart
1166 	 * the queue. Need to clean up all active commands
1167 	 */
1168 	printf("%s: scsi bus reset\n", device_xname(&sc->sc_c.sc_dev));
1169 	/* stop, reset and restart the chip */
1170 	siop_reset(sc);
1171 	if (sc->sc_flags & SCF_CHAN_NOSLOT) {
1172 		/* chip has been reset, all slots are free now */
1173 		sc->sc_flags &= ~SCF_CHAN_NOSLOT;
1174 		scsipi_channel_thaw(&sc->sc_c.sc_chan, 1);
1175 	}
1176 	/*
1177 	 * Process all commands: first commands being executed
1178 	 */
1179 	for (target = 0; target < sc->sc_c.sc_chan.chan_ntargets;
1180 	    target++) {
1181 		if (sc->sc_c.targets[target] == NULL)
1182 			continue;
1183 		for (lun = 0; lun < 8; lun++) {
1184 			struct siop_target *siop_target =
1185 			    (struct siop_target *)sc->sc_c.targets[target];
1186 			siop_lun = siop_target->siop_lun[lun];
1187 			if (siop_lun == NULL)
1188 				continue;
1189 			for (tag = 0; tag <
1190 			    ((sc->sc_c.targets[target]->flags & TARF_TAG) ?
1191 			    SIOP_NTAG : 1);
1192 			    tag++) {
1193 				siop_cmd = siop_lun->siop_tag[tag].active;
1194 				if (siop_cmd == NULL)
1195 					continue;
1196 				scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1197 				printf("command with tag id %d reset\n", tag);
1198 				siop_cmd->cmd_c.xs->error =
1199 				    (siop_cmd->cmd_c.flags & CMDFL_TIMEOUT) ?
1200 		    		    XS_TIMEOUT : XS_RESET;
1201 				siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1202 				siop_lun->siop_tag[tag].active = NULL;
1203 				siop_cmd->cmd_c.status = CMDST_DONE;
1204 				siop_scsicmd_end(siop_cmd);
1205 			}
1206 		}
1207 		sc->sc_c.targets[target]->status = TARST_ASYNC;
1208 		sc->sc_c.targets[target]->flags &= ~TARF_ISWIDE;
1209 		sc->sc_c.targets[target]->period =
1210 		    sc->sc_c.targets[target]->offset = 0;
1211 		siop_update_xfer_mode(&sc->sc_c, target);
1212 	}
1213 
1214 	scsipi_async_event(&sc->sc_c.sc_chan, ASYNC_EVENT_RESET, NULL);
1215 }
1216 
1217 void
1218 siop_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
1219 {
1220 	struct scsipi_xfer *xs;
1221 	struct scsipi_periph *periph;
1222 	struct siop_softc *sc = (void *)chan->chan_adapter->adapt_dev;
1223 	struct siop_cmd *siop_cmd;
1224 	struct siop_target *siop_target;
1225 	int s, error, i;
1226 	int target;
1227 	int lun;
1228 
1229 	switch (req) {
1230 	case ADAPTER_REQ_RUN_XFER:
1231 		xs = arg;
1232 		periph = xs->xs_periph;
1233 		target = periph->periph_target;
1234 		lun = periph->periph_lun;
1235 
1236 		s = splbio();
1237 #ifdef SIOP_DEBUG_SCHED
1238 		printf("starting cmd for %d:%d\n", target, lun);
1239 #endif
1240 		siop_cmd = TAILQ_FIRST(&sc->free_list);
1241 		if (siop_cmd == NULL) {
1242 			xs->error = XS_RESOURCE_SHORTAGE;
1243 			scsipi_done(xs);
1244 			splx(s);
1245 			return;
1246 		}
1247 		TAILQ_REMOVE(&sc->free_list, siop_cmd, next);
1248 #ifdef DIAGNOSTIC
1249 		if (siop_cmd->cmd_c.status != CMDST_FREE)
1250 			panic("siop_scsicmd: new cmd not free");
1251 #endif
1252 		siop_target = (struct siop_target*)sc->sc_c.targets[target];
1253 		if (siop_target == NULL) {
1254 #ifdef SIOP_DEBUG
1255 			printf("%s: alloc siop_target for target %d\n",
1256 				device_xname(&sc->sc_c.sc_dev), target);
1257 #endif
1258 			sc->sc_c.targets[target] =
1259 			    malloc(sizeof(struct siop_target),
1260 				M_DEVBUF, M_NOWAIT|M_ZERO);
1261 			if (sc->sc_c.targets[target] == NULL) {
1262 				aprint_error_dev(&sc->sc_c.sc_dev, "can't malloc memory for "
1263 				    "target %d\n", target);
1264 				xs->error = XS_RESOURCE_SHORTAGE;
1265 				scsipi_done(xs);
1266 				splx(s);
1267 				return;
1268 			}
1269 			siop_target =
1270 			    (struct siop_target*)sc->sc_c.targets[target];
1271 			siop_target->target_c.status = TARST_PROBING;
1272 			siop_target->target_c.flags = 0;
1273 			siop_target->target_c.id =
1274 			    sc->sc_c.clock_div << 24; /* scntl3 */
1275 			siop_target->target_c.id |=  target << 16; /* id */
1276 			/* siop_target->target_c.id |= 0x0 << 8; scxfer is 0 */
1277 
1278 			/* get a lun switch script */
1279 			siop_target->lunsw = siop_get_lunsw(sc);
1280 			if (siop_target->lunsw == NULL) {
1281 				aprint_error_dev(&sc->sc_c.sc_dev, "can't alloc lunsw for target %d\n",
1282 				    target);
1283 				xs->error = XS_RESOURCE_SHORTAGE;
1284 				scsipi_done(xs);
1285 				splx(s);
1286 				return;
1287 			}
1288 			for (i=0; i < 8; i++)
1289 				siop_target->siop_lun[i] = NULL;
1290 			siop_add_reselsw(sc, target);
1291 		}
1292 		if (siop_target->siop_lun[lun] == NULL) {
1293 			siop_target->siop_lun[lun] =
1294 			    malloc(sizeof(struct siop_lun), M_DEVBUF,
1295 			    M_NOWAIT|M_ZERO);
1296 			if (siop_target->siop_lun[lun] == NULL) {
1297 				aprint_error_dev(&sc->sc_c.sc_dev, "can't alloc siop_lun for "
1298 				    "target %d lun %d\n",
1299 				    target, lun);
1300 				xs->error = XS_RESOURCE_SHORTAGE;
1301 				scsipi_done(xs);
1302 				splx(s);
1303 				return;
1304 			}
1305 		}
1306 		siop_cmd->cmd_c.siop_target = sc->sc_c.targets[target];
1307 		siop_cmd->cmd_c.xs = xs;
1308 		siop_cmd->cmd_c.flags = 0;
1309 		siop_cmd->cmd_c.status = CMDST_READY;
1310 
1311 		/* load the DMA maps */
1312 		error = bus_dmamap_load(sc->sc_c.sc_dmat,
1313 		    siop_cmd->cmd_c.dmamap_cmd,
1314 		    xs->cmd, xs->cmdlen, NULL, BUS_DMA_NOWAIT);
1315 		if (error) {
1316 			aprint_error_dev(&sc->sc_c.sc_dev, "unable to load cmd DMA map: %d\n",
1317 			    error);
1318 			xs->error = XS_DRIVER_STUFFUP;
1319 			scsipi_done(xs);
1320 			splx(s);
1321 			return;
1322 		}
1323 		if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
1324 			error = bus_dmamap_load(sc->sc_c.sc_dmat,
1325 			    siop_cmd->cmd_c.dmamap_data, xs->data, xs->datalen,
1326 			    NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1327 			    ((xs->xs_control & XS_CTL_DATA_IN) ?
1328 			     BUS_DMA_READ : BUS_DMA_WRITE));
1329 			if (error) {
1330 				aprint_error_dev(&sc->sc_c.sc_dev, "unable to load data DMA map: %d\n",
1331 				    error);
1332 				xs->error = XS_DRIVER_STUFFUP;
1333 				scsipi_done(xs);
1334 				bus_dmamap_unload(sc->sc_c.sc_dmat,
1335 				    siop_cmd->cmd_c.dmamap_cmd);
1336 				splx(s);
1337 				return;
1338 			}
1339 			bus_dmamap_sync(sc->sc_c.sc_dmat,
1340 			    siop_cmd->cmd_c.dmamap_data, 0,
1341 			    siop_cmd->cmd_c.dmamap_data->dm_mapsize,
1342 			    (xs->xs_control & XS_CTL_DATA_IN) ?
1343 			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1344 		}
1345 		bus_dmamap_sync(sc->sc_c.sc_dmat, siop_cmd->cmd_c.dmamap_cmd, 0,
1346 		    siop_cmd->cmd_c.dmamap_cmd->dm_mapsize,
1347 		    BUS_DMASYNC_PREWRITE);
1348 
1349 		if (xs->xs_tag_type) {
1350 			/* use tag_id + 1, tag 0 is reserved for untagged cmds */
1351 			siop_cmd->cmd_c.tag = xs->xs_tag_id + 1;
1352 		} else {
1353 			siop_cmd->cmd_c.tag = 0;
1354 		}
1355 		siop_setuptables(&siop_cmd->cmd_c);
1356 		siop_cmd->saved_offset = SIOP_NOOFFSET;
1357 		siop_table_sync(siop_cmd,
1358 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1359 		siop_start(sc, siop_cmd);
1360 		if (xs->xs_control & XS_CTL_POLL) {
1361 			/* poll for command completion */
1362 			while ((xs->xs_status & XS_STS_DONE) == 0) {
1363 				delay(1000);
1364 				siop_intr(sc);
1365 			}
1366 		}
1367 		splx(s);
1368 		return;
1369 
1370 	case ADAPTER_REQ_GROW_RESOURCES:
1371 #ifdef SIOP_DEBUG
1372 		printf("%s grow resources (%d)\n", device_xname(&sc->sc_c.sc_dev),
1373 		    sc->sc_c.sc_adapt.adapt_openings);
1374 #endif
1375 		siop_morecbd(sc);
1376 		return;
1377 
1378 	case ADAPTER_REQ_SET_XFER_MODE:
1379 	{
1380 		struct scsipi_xfer_mode *xm = arg;
1381 		if (sc->sc_c.targets[xm->xm_target] == NULL)
1382 			return;
1383 		s = splbio();
1384 		if (xm->xm_mode & PERIPH_CAP_TQING)
1385 			sc->sc_c.targets[xm->xm_target]->flags |= TARF_TAG;
1386 		if ((xm->xm_mode & PERIPH_CAP_WIDE16) &&
1387 		    (sc->sc_c.features & SF_BUS_WIDE))
1388 			sc->sc_c.targets[xm->xm_target]->flags |= TARF_WIDE;
1389 		if (xm->xm_mode & PERIPH_CAP_SYNC)
1390 			sc->sc_c.targets[xm->xm_target]->flags |= TARF_SYNC;
1391 		if ((xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16)) ||
1392 		    sc->sc_c.targets[xm->xm_target]->status == TARST_PROBING)
1393 			sc->sc_c.targets[xm->xm_target]->status =
1394 			    TARST_ASYNC;
1395 
1396 		for (lun = 0; lun < sc->sc_c.sc_chan.chan_nluns; lun++) {
1397 			if (scsipi_lookup_periph(chan,
1398 			    xm->xm_target, lun) != NULL) {
1399 				/* allocate a lun sw entry for this device */
1400 				siop_add_dev(sc, xm->xm_target, lun);
1401 			}
1402 		}
1403 
1404 		splx(s);
1405 	}
1406 	}
1407 }
1408 
1409 static void
1410 siop_start(struct siop_softc *sc, struct siop_cmd *siop_cmd)
1411 {
1412 	struct siop_lun *siop_lun;
1413 	struct siop_xfer *siop_xfer;
1414 	u_int32_t dsa;
1415 	int timeout;
1416 	int target, lun, slot;
1417 
1418 	/*
1419 	 * first make sure to read valid data
1420 	 */
1421 	siop_script_sync(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1422 
1423 	/*
1424 	 * The queue management here is a bit tricky: the script always looks
1425 	 * at the slots from first to last, so if we always use the first
1426 	 * free slot, commands can stay at the tail of the queue ~forever.
1427 	 * The algorithm used here is to restart from the head when we know
1428 	 * that the queue is empty, and only add commands after the last one.
1429 	 * When we're at the end of the queue wait for the script to clear it.
1430 	 * The best thing to do here would be to implement a circular queue,
1431 	 * but using only 53c720 features this can be "interesting".
1432 	 * A mid-way solution could be to implement 2 queues and swap orders.
1433 	 */
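	/*
	 * Scheduler slot layout as used below (a sketch derived from this
	 * file, not from the script source): each of the SIOP_NSLOTS slots is
	 * two script words starting at Ent_script_sched_slot0 + slot * 8.
	 * Word 0 holds the JUMP opcode, 0x80000000 ("JUMP foo, IF FALSE")
	 * while the slot is free and 0x80080000 once siop_start() arms it;
	 * word 1 is patched with dsa + sizeof(struct siop_common_xfer) +
	 * Ent_ldsa_select so the jump lands in the per-command select script.
	 */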
1434 	slot = sc->sc_currschedslot;
1435 	/*
1436 	 * If the instruction is 0x80000000 (JUMP foo, IF FALSE) the slot is
1437 	 * free. As this is the last used slot, all previous slots are free,
1438 	 * so we can restart from 0.
1439 	 */
1440 	if (siop_script_read(sc, (Ent_script_sched_slot0 / 4) + slot * 2) ==
1441 	    0x80000000) {
1442 		slot = sc->sc_currschedslot = 0;
1443 	} else {
1444 		slot++;
1445 	}
1446 	target = siop_cmd->cmd_c.xs->xs_periph->periph_target;
1447 	lun = siop_cmd->cmd_c.xs->xs_periph->periph_lun;
1448 	siop_lun =
1449 	    ((struct siop_target*)sc->sc_c.targets[target])->siop_lun[lun];
1450 	/* if non-tagged command active, panic: this shouldn't happen */
1451 	if (siop_lun->siop_tag[0].active != NULL) {
1452 		panic("siop_start: tagged cmd while untagged running");
1453 	}
1454 #ifdef DIAGNOSTIC
1455 	/* sanity check the tag if needed */
1456 	if (siop_cmd->cmd_c.flags & CMDFL_TAG) {
1457 		if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].active != NULL)
1458 			panic("siop_start: tag not free");
1459 		if (siop_cmd->cmd_c.tag >= SIOP_NTAG) {
1460 			scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1461 			printf(": tag id %d\n", siop_cmd->cmd_c.tag);
1462 			panic("siop_start: invalid tag id");
1463 		}
1464 	}
1465 #endif
1466 	/*
1467 	 * find a free scheduler slot and load it.
1468 	 */
1469 	for (; slot < SIOP_NSLOTS; slot++) {
1470 		/*
1471 		 * If the slot's command word is 0x80000000 the slot is free
1472 		 */
1473 		if (siop_script_read(sc,
1474 		    (Ent_script_sched_slot0 / 4) + slot * 2) ==
1475 		    0x80000000)
1476 			break;
1477 	}
1478 	if (slot == SIOP_NSLOTS) {
1479 		/*
1480 		 * no more free slots, no need to continue. Freeze the queue
1481 		 * and requeue this command.
1482 		 */
1483 		scsipi_channel_freeze(&sc->sc_c.sc_chan, 1);
1484 		sc->sc_flags |= SCF_CHAN_NOSLOT;
1485 		siop_cmd->cmd_c.xs->error = XS_REQUEUE;
1486 		siop_cmd->cmd_c.xs->status = SCSI_SIOP_NOCHECK;
1487 		siop_scsicmd_end(siop_cmd);
1488 		return;
1489 	}
1490 #ifdef SIOP_DEBUG_SCHED
1491 	printf("using slot %d for DSA 0x%lx\n", slot,
1492 	    (u_long)siop_cmd->cmd_c.dsa);
1493 #endif
1494 	/* mark command as active */
1495 	if (siop_cmd->cmd_c.status == CMDST_READY)
1496 		siop_cmd->cmd_c.status = CMDST_ACTIVE;
1497 	else
1498 		panic("siop_start: bad status");
1499 	siop_lun->siop_tag[siop_cmd->cmd_c.tag].active = siop_cmd;
1500 	/* patch scripts with DSA addr */
1501 	dsa = siop_cmd->cmd_c.dsa;
1502 	/* first reselect switch, if we have an entry */
1503 	if (siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff > 0)
1504 		siop_script_write(sc,
1505 		    siop_lun->siop_tag[siop_cmd->cmd_c.tag].reseloff + 1,
1506 		    dsa + sizeof(struct siop_common_xfer) +
1507 		    Ent_ldsa_reload_dsa);
1508 	/* CMD script: MOVE MEMORY addr */
1509 	siop_xfer = (struct siop_xfer*)siop_cmd->cmd_tables;
1510 	siop_xfer->resel[E_ldsa_abs_slot_Used[0]] =
1511 	   siop_htoc32(&sc->sc_c, sc->sc_c.sc_scriptaddr +
1512 		Ent_script_sched_slot0 + slot * 8);
1513 	siop_table_sync(siop_cmd, BUS_DMASYNC_PREWRITE);
1514 	/* scheduler slot: JUMP ldsa_select */
1515 	siop_script_write(sc,
1516 	    (Ent_script_sched_slot0 / 4) + slot * 2 + 1,
1517 	    dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_select);
1518 	/* handle timeout */
1519 	if ((siop_cmd->cmd_c.xs->xs_control & XS_CTL_POLL) == 0) {
1520 		/* start expire timer */
1521 		timeout = mstohz(siop_cmd->cmd_c.xs->timeout);
1522 		if (timeout == 0)
1523 			timeout = 1;
1524 		callout_reset( &siop_cmd->cmd_c.xs->xs_callout,
1525 		    timeout, siop_timeout, siop_cmd);
1526 	}
1527 	/*
1528 	 * Change JUMP cmd so that this slot will be handled
1529 	 */
1530 	siop_script_write(sc, (Ent_script_sched_slot0 / 4) + slot * 2,
1531 	    0x80080000);
1532 	sc->sc_currschedslot = slot;
1533 
1534 	/* make sure SCRIPT processor will read valid data */
1535 	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1536 	/* Signal script it has some work to do */
1537 	bus_space_write_1(sc->sc_c.sc_rt, sc->sc_c.sc_rh,
1538 	    SIOP_ISTAT, ISTAT_SIGP);
1539 	/* and wait for IRQ */
1540 	return;
1541 }
1542 
1543 void
1544 siop_timeout(void *v)
1545 {
1546 	struct siop_cmd *siop_cmd = v;
1547 	struct siop_softc *sc = (struct siop_softc *)siop_cmd->cmd_c.siop_sc;
1548 	int s;
1549 
1550 	scsipi_printaddr(siop_cmd->cmd_c.xs->xs_periph);
1551 	printf("command timeout, CDB: ");
1552 	scsipi_print_cdb(siop_cmd->cmd_c.xs->cmd);
1553 	printf("\n");
1554 
1555 	s = splbio();
1556 	/* reset the scsi bus */
1557 	siop_resetbus(&sc->sc_c);
1558 
1559 	/* deactivate callout */
1560 	callout_stop(&siop_cmd->cmd_c.xs->xs_callout);
1562 	/*
1563 	 * mark the command as having timed out and just return;
1564 	 * the bus reset will generate an interrupt,
1565 	 * it will be handled in siop_intr()
1566 	 */
1567 	siop_cmd->cmd_c.flags |= CMDFL_TIMEOUT;
1568 	splx(s);
1569 	return;
1570 
1571 }
1572 
1573 void
1574 siop_dump_script(struct siop_softc *sc)
1575 {
1576 	int i;
1577 	for (i = 0; i < PAGE_SIZE / 4; i += 2) {
1578 		printf("0x%04x: 0x%08x 0x%08x", i * 4,
1579 		    siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i]),
1580 		    siop_ctoh32(&sc->sc_c, sc->sc_c.sc_script[i+1]));
1581 		if ((siop_ctoh32(&sc->sc_c,
1582 		    sc->sc_c.sc_script[i]) & 0xe0000000) == 0xc0000000) {
1583 			i++;
1584 			printf(" 0x%08x", siop_ctoh32(&sc->sc_c,
1585 			     sc->sc_c.sc_script[i+1]));
1586 		}
1587 		printf("\n");
1588 	}
1589 }
1590 
1591 void
1592 siop_morecbd(struct siop_softc *sc)
1593 {
1594 	int error, off, i, j, s;
1595 	bus_dma_segment_t seg;
1596 	int rseg;
1597 	struct siop_cbd *newcbd;
1598 	struct siop_xfer *xfer;
1599 	bus_addr_t dsa;
1600 	u_int32_t *scr;
1601 
1602 	/* allocate a new list head */
1603 	newcbd = malloc(sizeof(struct siop_cbd), M_DEVBUF, M_NOWAIT|M_ZERO);
1604 	if (newcbd == NULL) {
1605 		aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for command descriptors head\n");
1606 		return;
1607 	}
1608 
1609 	/* allocate cmd list */
1610 	newcbd->cmds = malloc(sizeof(struct siop_cmd) * SIOP_NCMDPB,
1611 	    M_DEVBUF, M_NOWAIT|M_ZERO);
1612 	if (newcbd->cmds == NULL) {
1613 		aprint_error_dev(&sc->sc_c.sc_dev, "can't allocate memory for command descriptors\n");
1614 		goto bad3;
1615 	}
1616 	error = bus_dmamem_alloc(sc->sc_c.sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, &seg,
1617 	    1, &rseg, BUS_DMA_NOWAIT);
1618 	if (error) {
1619 		aprint_error_dev(&sc->sc_c.sc_dev, "unable to allocate cbd DMA memory, error = %d\n",
1620 		    error);
1621 		goto bad2;
1622 	}
1623 	error = bus_dmamem_map(sc->sc_c.sc_dmat, &seg, rseg, PAGE_SIZE,
1624 	    (void **)&newcbd->xfers, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
1625 	if (error) {
1626 		aprint_error_dev(&sc->sc_c.sc_dev, "unable to map cbd DMA memory, error = %d\n",
1627 		    error);
1628 		goto bad2;
1629 	}
1630 	error = bus_dmamap_create(sc->sc_c.sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
1631 	    BUS_DMA_NOWAIT, &newcbd->xferdma);
1632 	if (error) {
1633 		aprint_error_dev(&sc->sc_c.sc_dev, "unable to create cbd DMA map, error = %d\n",
1634 		    error);
1635 		goto bad1;
1636 	}
1637 	error = bus_dmamap_load(sc->sc_c.sc_dmat, newcbd->xferdma, newcbd->xfers,
1638 	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
1639 	if (error) {
1640 		aprint_error_dev(&sc->sc_c.sc_dev, "unable to load cbd DMA map, error = %d\n",
1641 		    error);
1642 		goto bad0;
1643 	}
1644 #ifdef DEBUG
1645 	printf("%s: alloc newcbd at PHY addr 0x%lx\n", device_xname(&sc->sc_c.sc_dev),
1646 	    (unsigned long)newcbd->xferdma->dm_segs[0].ds_addr);
1647 #endif
1648 	off = (sc->sc_c.features & SF_CHIP_BE) ? 3 : 0;
1649 	for (i = 0; i < SIOP_NCMDPB; i++) {
1650 		error = bus_dmamap_create(sc->sc_c.sc_dmat, MAXPHYS, SIOP_NSG,
1651 		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1652 		    &newcbd->cmds[i].cmd_c.dmamap_data);
1653 		if (error) {
1654 			aprint_error_dev(&sc->sc_c.sc_dev, "unable to create data DMA map for cbd: "
1655 			    "error %d\n", error);
1656 			goto bad0;
1657 		}
1658 		error = bus_dmamap_create(sc->sc_c.sc_dmat,
1659 		    sizeof(struct scsipi_generic), 1,
1660 		    sizeof(struct scsipi_generic), 0,
1661 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1662 		    &newcbd->cmds[i].cmd_c.dmamap_cmd);
1663 		if (error) {
1664 			aprint_error_dev(&sc->sc_c.sc_dev, "unable to create cmd DMA map for cbd, error = %d\n", error);
1665 			goto bad0;
1666 		}
1667 		newcbd->cmds[i].cmd_c.siop_sc = &sc->sc_c;
1668 		newcbd->cmds[i].siop_cbdp = newcbd;
1669 		xfer = &newcbd->xfers[i];
1670 		newcbd->cmds[i].cmd_tables = (struct siop_common_xfer *)xfer;
1671 		memset(newcbd->cmds[i].cmd_tables, 0, sizeof(struct siop_xfer));
1672 		dsa = newcbd->xferdma->dm_segs[0].ds_addr +
1673 		    i * sizeof(struct siop_xfer);
1674 		newcbd->cmds[i].cmd_c.dsa = dsa;
1675 		newcbd->cmds[i].cmd_c.status = CMDST_FREE;
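		/*
		 * the msgin/msgout/status table entries point back into this
		 * command's own siop_common_xfer (addressed via its DSA), so
		 * the script can transfer them with table-indirect moves.
		 */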
1676 		xfer->siop_tables.t_msgout.count = siop_htoc32(&sc->sc_c, 1);
1677 		xfer->siop_tables.t_msgout.addr = siop_htoc32(&sc->sc_c, dsa);
1678 		xfer->siop_tables.t_msgin.count = siop_htoc32(&sc->sc_c, 1);
1679 		xfer->siop_tables.t_msgin.addr = siop_htoc32(&sc->sc_c,
1680 		    dsa + offsetof(struct siop_common_xfer, msg_in));
1681 		xfer->siop_tables.t_extmsgin.count = siop_htoc32(&sc->sc_c, 2);
1682 		xfer->siop_tables.t_extmsgin.addr = siop_htoc32(&sc->sc_c,
1683 		    dsa + offsetof(struct siop_common_xfer, msg_in) + 1);
1684 		xfer->siop_tables.t_extmsgdata.addr = siop_htoc32(&sc->sc_c,
1685 		    dsa + offsetof(struct siop_common_xfer, msg_in) + 3);
1686 		xfer->siop_tables.t_status.count = siop_htoc32(&sc->sc_c, 1);
1687 		xfer->siop_tables.t_status.addr = siop_htoc32(&sc->sc_c,
1688 		    dsa + offsetof(struct siop_common_xfer, status) + off);
1689 		/* The select/reselect script */
1690 		scr = &xfer->resel[0];
1691 		for (j = 0; j < sizeof(load_dsa) / sizeof(load_dsa[0]); j++)
1692 			scr[j] = siop_htoc32(&sc->sc_c, load_dsa[j]);
1693 		/*
1694 		 * 0x78000000 is a 'move data8 to reg'. data8 is the second
1695 		 * octet, reg offset is the third.
1696 		 */
1697 		scr[Ent_rdsa0 / 4] = siop_htoc32(&sc->sc_c,
1698 		    0x78100000 | ((dsa & 0x000000ff) <<  8));
1699 		scr[Ent_rdsa1 / 4] = siop_htoc32(&sc->sc_c,
1700 		    0x78110000 | ( dsa & 0x0000ff00       ));
1701 		scr[Ent_rdsa2 / 4] = siop_htoc32(&sc->sc_c,
1702 		    0x78120000 | ((dsa & 0x00ff0000) >>  8));
1703 		scr[Ent_rdsa3 / 4] = siop_htoc32(&sc->sc_c,
1704 		    0x78130000 | ((dsa & 0xff000000) >> 16));
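		/*
		 * e.g. with dsa == 0x12345678 the four patched moves load
		 * 0x78, 0x56, 0x34 and 0x12 into registers 0x10-0x13
		 * (DSA0-DSA3), so a reselection restores this command's DSA.
		 */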
1705 		scr[E_ldsa_abs_reselected_Used[0]] = siop_htoc32(&sc->sc_c,
1706 		    sc->sc_c.sc_scriptaddr + Ent_reselected);
1707 		scr[E_ldsa_abs_reselect_Used[0]] = siop_htoc32(&sc->sc_c,
1708 		    sc->sc_c.sc_scriptaddr + Ent_reselect);
1709 		scr[E_ldsa_abs_selected_Used[0]] = siop_htoc32(&sc->sc_c,
1710 		    sc->sc_c.sc_scriptaddr + Ent_selected);
1711 		scr[E_ldsa_abs_data_Used[0]] = siop_htoc32(&sc->sc_c,
1712 		    dsa + sizeof(struct siop_common_xfer) + Ent_ldsa_data);
1713 		/* JUMP foo, IF FALSE - used by MOVE MEMORY to clear the slot */
1714 		scr[Ent_ldsa_data / 4] = siop_htoc32(&sc->sc_c, 0x80000000);
1715 		s = splbio();
1716 		TAILQ_INSERT_TAIL(&sc->free_list, &newcbd->cmds[i], next);
1717 		splx(s);
1718 #ifdef SIOP_DEBUG
1719 		printf("tables[%d]: in=0x%x out=0x%x status=0x%x\n", i,
1720 		    siop_ctoh32(&sc->sc_c,
1721 			newcbd->cmds[i].cmd_tables->t_msgin.addr),
1722 		    siop_ctoh32(&sc->sc_c,
1723 			newcbd->cmds[i].cmd_tables->t_msgout.addr),
1724 		    siop_ctoh32(&sc->sc_c,
1725 			newcbd->cmds[i].cmd_tables->t_status.addr));
1726 #endif
1727 	}
1728 	s = splbio();
1729 	TAILQ_INSERT_TAIL(&sc->cmds, newcbd, next);
1730 	sc->sc_c.sc_adapt.adapt_openings += SIOP_NCMDPB;
1731 	splx(s);
1732 	return;
1733 bad0:
1734 	bus_dmamap_unload(sc->sc_c.sc_dmat, newcbd->xferdma);
1735 	bus_dmamap_destroy(sc->sc_c.sc_dmat, newcbd->xferdma);
1736 bad1:
1737 	bus_dmamem_free(sc->sc_c.sc_dmat, &seg, rseg);
1738 bad2:
1739 	free(newcbd->cmds, M_DEVBUF);
1740 bad3:
1741 	free(newcbd, M_DEVBUF);
1742 	return;
1743 }
1744 
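/*
 * Get a lun switch script fragment: reuse one from lunsw_list if available,
 * otherwise copy lun_switch into the low end of the free script space
 * (on-chip RAM or the host-memory script). Returns NULL if no space is left.
 */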
1745 struct siop_lunsw *
1746 siop_get_lunsw(struct siop_softc *sc)
1747 {
1748 	struct siop_lunsw *lunsw;
1749 	int i;
1750 
1751 	if (sc->script_free_lo + (sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1752 	    sc->script_free_hi)
1753 		return NULL;
1754 	lunsw = TAILQ_FIRST(&sc->lunsw_list);
1755 	if (lunsw != NULL) {
1756 #ifdef SIOP_DEBUG
1757 		printf("siop_get_lunsw got lunsw at offset %d\n",
1758 		    lunsw->lunsw_off);
1759 #endif
1760 		TAILQ_REMOVE(&sc->lunsw_list, lunsw, next);
1761 		return lunsw;
1762 	}
1763 	lunsw = malloc(sizeof(struct siop_lunsw), M_DEVBUF, M_NOWAIT|M_ZERO);
1764 	if (lunsw == NULL)
1765 		return NULL;
1766 #ifdef SIOP_DEBUG
1767 	printf("allocating lunsw at offset %d\n", sc->script_free_lo);
1768 #endif
1769 	if (sc->sc_c.features & SF_CHIP_RAM) {
1770 		bus_space_write_region_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1771 		    sc->script_free_lo * 4, lun_switch,
1772 		    sizeof(lun_switch) / sizeof(lun_switch[0]));
1773 		bus_space_write_4(sc->sc_c.sc_ramt, sc->sc_c.sc_ramh,
1774 		    (sc->script_free_lo + E_abs_lunsw_return_Used[0]) * 4,
1775 		    sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1776 	} else {
1777 		for (i = 0; i < sizeof(lun_switch) / sizeof(lun_switch[0]);
1778 		    i++)
1779 			sc->sc_c.sc_script[sc->script_free_lo + i] =
1780 			    siop_htoc32(&sc->sc_c, lun_switch[i]);
1781 		sc->sc_c.sc_script[
1782 		    sc->script_free_lo + E_abs_lunsw_return_Used[0]] =
1783 		    siop_htoc32(&sc->sc_c,
1784 			sc->sc_c.sc_scriptaddr + Ent_lunsw_return);
1785 	}
1786 	lunsw->lunsw_off = sc->script_free_lo;
1787 	lunsw->lunsw_size = sizeof(lun_switch) / sizeof(lun_switch[0]);
1788 	sc->script_free_lo += lunsw->lunsw_size;
1789 	siop_script_sync(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1790 	return lunsw;
1791 }
1792 
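/*
 * Hook a target into the reselect switch: find a free entry (marked with
 * target id 0xff), patch in a 'JUMP lun_switch_entry, IF target | 0x80',
 * then re-add the reselect entries for each of the target's luns and
 * refresh its scntl3/sxfer restore code.
 */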
1793 void
1794 siop_add_reselsw(struct siop_softc *sc, int target)
1795 {
1796 	int i, j;
1797 	struct siop_target *siop_target;
1798 	struct siop_lun *siop_lun;
1799 
1800 	siop_target = (struct siop_target *)sc->sc_c.targets[target];
1801 	/*
1802 	 * add an entry to resel switch
1803 	 */
1804 	siop_script_sync(sc, BUS_DMASYNC_POSTWRITE);
1805 	for (i = 0; i < 15; i++) {
1806 		siop_target->reseloff = Ent_resel_targ0 / 4 + i * 2;
1807 		if ((siop_script_read(sc, siop_target->reseloff) & 0xff)
1808 		    == 0xff) { /* it's free */
1809 #ifdef SIOP_DEBUG
1810 			printf("siop: target %d slot %d offset %d\n",
1811 			    target, i, siop_target->reseloff);
1812 #endif
1813 			/* JUMP abs_foo, IF target | 0x80; */
1814 			siop_script_write(sc, siop_target->reseloff,
1815 			    0x800c0080 | target);
1816 			siop_script_write(sc, siop_target->reseloff + 1,
1817 			    sc->sc_c.sc_scriptaddr +
1818 			    siop_target->lunsw->lunsw_off * 4 +
1819 			    Ent_lun_switch_entry);
1820 			break;
1821 		}
1822 	}
1823 	if (i == 15) /* no free slot, shouldn't happen */
1824 		panic("siop: resel switch full");
1825 
1826 	sc->sc_ntargets++;
1827 	for (i = 0; i < 8; i++) {
1828 		siop_lun = siop_target->siop_lun[i];
1829 		if (siop_lun == NULL)
1830 			continue;
1831 		if (siop_lun->reseloff > 0) {
1832 			siop_lun->reseloff = 0;
1833 			for (j = 0; j < SIOP_NTAG; j++)
1834 				siop_lun->siop_tag[j].reseloff = 0;
1835 			siop_add_dev(sc, target, i);
1836 		}
1837 	}
1838 	siop_update_scntl3(sc, sc->sc_c.targets[target]);
1839 	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1840 }
1841 
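/*
 * Patch the target's lun switch so that the scntl3 value (bits 24-31 of
 * target_c.id) and the sxfer value (bits 8-15) are reloaded into SCNTL3
 * and SXFER when the target reselects us.
 */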
1842 void
1843 siop_update_scntl3(struct siop_softc *sc, struct siop_common_target *_siop_target)
1844 {
1845 	struct siop_target *siop_target = (struct siop_target *)_siop_target;
1846 	/* MOVE target->id >> 24 TO SCNTL3 */
1847 	siop_script_write(sc,
1848 	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4),
1849 	    0x78030000 | ((siop_target->target_c.id >> 16) & 0x0000ff00));
1850 	/* MOVE target->id >> 8 TO SXFER */
1851 	siop_script_write(sc,
1852 	    siop_target->lunsw->lunsw_off + (Ent_restore_scntl3 / 4) + 2,
1853 	    0x78050000 | (siop_target->target_c.id & 0x0000ff00));
1854 	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1855 }
1856 
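/*
 * Add a reselect entry for this lun to the target's lun switch (an
 * 'INT int_resellun' terminator plus a 'JUMP ..., IF lun' slot), and set up
 * a tag switch if the target does tagged queuing. Gives up silently if the
 * remaining script space has to be kept for not-yet-probed targets.
 */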
1857 void
1858 siop_add_dev(struct siop_softc *sc, int target, int lun)
1859 {
1860 	struct siop_lunsw *lunsw;
1861 	struct siop_target *siop_target =
1862 	    (struct siop_target *)sc->sc_c.targets[target];
1863 	struct siop_lun *siop_lun = siop_target->siop_lun[lun];
1864 	int i, ntargets;
1865 
1866 	if (siop_lun->reseloff > 0)
1867 		return;
1868 	lunsw = siop_target->lunsw;
1869 	if ((lunsw->lunsw_off + lunsw->lunsw_size) < sc->script_free_lo) {
1870 		/*
1871 		 * can't extend this slot. Probably not worth trying to deal
1872 		 * with this case
1873 		 */
1874 #ifdef DEBUG
1875 		aprint_error_dev(&sc->sc_c.sc_dev, "%d:%d: can't allocate a lun sw slot\n", target, lun);
1876 #endif
1877 		return;
1878 	}
1879 	/* count how many free targets we still have to probe */
1880 	ntargets =  sc->sc_c.sc_chan.chan_ntargets - 1 - sc->sc_ntargets;
1881 
1882 	/*
1883 	 * we need 8 bytes for the additional lun sw entry, and
1884 	 * possibly sizeof(tag_switch) for the tag switch entry.
1885 	 * Keep enough free space for the free targets that could be
1886 	 * probed later.
1887 	 */
1888 	if (sc->script_free_lo + 2 +
1889 	    (ntargets * sizeof(lun_switch) / sizeof(lun_switch[0])) >=
1890 	    ((siop_target->target_c.flags & TARF_TAG) ?
1891 	    sc->script_free_hi - (sizeof(tag_switch) / sizeof(tag_switch[0])) :
1892 	    sc->script_free_hi)) {
1893 		/*
1894 		 * not enough space, probably not worth dealing with it.
1895 		 * We can hold 13 tagged-queuing capable devices in the 4k RAM.
1896 		 */
1897 #ifdef DEBUG
1898 		aprint_error_dev(&sc->sc_c.sc_dev, "%d:%d: not enough memory for a lun sw slot\n", target, lun);
1899 #endif
1900 		return;
1901 	}
1902 #ifdef SIOP_DEBUG
1903 	printf("%s:%d:%d: allocate lun sw entry\n",
1904 	    device_xname(&sc->sc_c.sc_dev), target, lun);
1905 #endif
1906 	/* INT int_resellun */
1907 	siop_script_write(sc, sc->script_free_lo, 0x98080000);
1908 	siop_script_write(sc, sc->script_free_lo + 1, A_int_resellun);
1909 	/* Now the slot entry: JUMP abs_foo, IF lun */
1910 	siop_script_write(sc, sc->script_free_lo - 2,
1911 	    0x800c0000 | lun);
1912 	siop_script_write(sc, sc->script_free_lo - 1, 0);
1913 	siop_lun->reseloff = sc->script_free_lo - 2;
1914 	lunsw->lunsw_size += 2;
1915 	sc->script_free_lo += 2;
1916 	if (siop_target->target_c.flags & TARF_TAG) {
1917 		/* we need a tag switch */
1918 		sc->script_free_hi -=
1919 		    sizeof(tag_switch) / sizeof(tag_switch[0]);
1920 		if (sc->sc_c.features & SF_CHIP_RAM) {
1921 			bus_space_write_region_4(sc->sc_c.sc_ramt,
1922 			    sc->sc_c.sc_ramh,
1923 			    sc->script_free_hi * 4, tag_switch,
1924 			    sizeof(tag_switch) / sizeof(tag_switch[0]));
1925 		} else {
1926 			for (i = 0;
1927 			    i < sizeof(tag_switch) / sizeof(tag_switch[0]);
1928 			    i++) {
1929 				sc->sc_c.sc_script[sc->script_free_hi + i] =
1930 				    siop_htoc32(&sc->sc_c, tag_switch[i]);
1931 			}
1932 		}
1933 		siop_script_write(sc,
1934 		    siop_lun->reseloff + 1,
1935 		    sc->sc_c.sc_scriptaddr + sc->script_free_hi * 4 +
1936 		    Ent_tag_switch_entry);
1937 
1938 		for (i = 0; i < SIOP_NTAG; i++) {
1939 			siop_lun->siop_tag[i].reseloff =
1940 			    sc->script_free_hi + (Ent_resel_tag0 / 4) + i * 2;
1941 		}
1942 	} else {
1943 		/* non-tag case; just work with the lun switch */
1944 		siop_lun->siop_tag[0].reseloff =
1945 		    siop_target->siop_lun[lun]->reseloff;
1946 	}
1947 	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1948 }
1949 
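/*
 * Free the lun structure; if the target has no lun left, mark its reselect
 * switch entry free again (target id 0xff), put its lun switch back on the
 * free list and free the target structure.
 */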
1950 void
1951 siop_del_dev(struct siop_softc *sc, int target, int lun)
1952 {
1953 	int i;
1954 	struct siop_target *siop_target;
1955 
1956 #ifdef SIOP_DEBUG
1957 	printf("%s:%d:%d: free lun sw entry\n",
1958 	    device_xname(&sc->sc_c.sc_dev), target, lun);
1959 #endif
1960 	if (sc->sc_c.targets[target] == NULL)
1961 		return;
1962 	siop_target = (struct siop_target *)sc->sc_c.targets[target];
1963 	free(siop_target->siop_lun[lun], M_DEVBUF);
1964 	siop_target->siop_lun[lun] = NULL;
1965 	/* XXX compact sw entry too ? */
1966 	/* check if we can free the whole target */
1967 	for (i = 0; i < 8; i++) {
1968 		if (siop_target->siop_lun[i] != NULL)
1969 			return;
1970 	}
1971 #ifdef SIOP_DEBUG
1972 	printf("%s: free siop_target for target %d lun %d lunsw offset %d\n",
1973 	    device_xname(&sc->sc_c.sc_dev), target, lun,
1974 	    siop_target->lunsw->lunsw_off);
1975 #endif
1976 	/*
1977 	 * nothing here, free the target struct and resel
1978 	 * switch entry
1979 	 */
1980 	siop_script_write(sc, siop_target->reseloff, 0x800c00ff);
1981 	siop_script_sync(sc, BUS_DMASYNC_PREWRITE);
1982 	TAILQ_INSERT_TAIL(&sc->lunsw_list, siop_target->lunsw, next);
1983 	free(sc->sc_c.targets[target], M_DEVBUF);
1984 	sc->sc_c.targets[target] = NULL;
1985 	sc->sc_ntargets--;
1986 }
1987 
1988 #ifdef SIOP_STATS
1989 void
1990 siop_printstats(void)
1991 {
1992 	printf("siop_stat_intr %d\n", siop_stat_intr);
1993 	printf("siop_stat_intr_shortxfer %d\n", siop_stat_intr_shortxfer);
1994 	printf("siop_stat_intr_xferdisc %d\n", siop_stat_intr_xferdisc);
1995 	printf("siop_stat_intr_sdp %d\n", siop_stat_intr_sdp);
1996 	printf("siop_stat_intr_saveoffset %d\n", siop_stat_intr_saveoffset);
1997 	printf("siop_stat_intr_done %d\n", siop_stat_intr_done);
1998 	printf("siop_stat_intr_lunresel %d\n", siop_stat_intr_lunresel);
1999 	printf("siop_stat_intr_qfull %d\n", siop_stat_intr_qfull);
2000 }
2001 #endif
2002