1 /*	$NetBSD: siop_common.c,v 1.48 2009/03/14 21:04:20 dsl Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2002 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Manuel Bouyer.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.48 2009/03/14 21:04:20 dsl Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44 #include <sys/scsiio.h>
45 
46 #include <uvm/uvm_extern.h>
47 
48 #include <machine/endian.h>
49 #include <sys/bus.h>
50 
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_message.h>
53 #include <dev/scsipi/scsipi_all.h>
54 
55 #include <dev/scsipi/scsiconf.h>
56 
57 #include <dev/ic/siopreg.h>
58 #include <dev/ic/siopvar_common.h>
59 
60 #include "opt_siop.h"
61 
62 #undef DEBUG
63 #undef DEBUG_DR
64 #undef DEBUG_NEG
65 
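/*
 * attach the bus-independent part of the driver: allocate and map DMA-safe
 * memory for the SCRIPTS program when the chip has no on-board RAM, set up
 * the scsipi adapter and channel, and compute the ST and DT sync limits
 * for this chip's clock. Returns 0 on success or an error from bus_dma.
 */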
66 int
67 siop_common_attach(struct siop_common_softc *sc)
68 {
69 	int error, i;
70 	bus_dma_segment_t seg;
71 	int rseg;
72 
73 	/*
74 	 * Allocate DMA-safe memory for the script and map it.
75 	 */
76 	if ((sc->features & SF_CHIP_RAM) == 0) {
77 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
78 		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
79 		if (error) {
80 			aprint_error_dev(&sc->sc_dev,
81 			    "unable to allocate script DMA memory, "
82 			    "error = %d\n", error);
83 			return error;
84 		}
85 		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
86 		    (void **)&sc->sc_script,
87 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
88 		if (error) {
89 			aprint_error_dev(&sc->sc_dev, "unable to map script DMA memory, "
90 			    "error = %d\n", error);
91 			return error;
92 		}
93 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
94 		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
95 		if (error) {
96 			aprint_error_dev(&sc->sc_dev, "unable to create script DMA map, "
97 			    "error = %d\n", error);
98 			return error;
99 		}
100 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
101 		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
102 		if (error) {
103 			aprint_error_dev(&sc->sc_dev, "unable to load script DMA map, "
104 			    "error = %d\n", error);
105 			return error;
106 		}
107 		sc->sc_scriptaddr =
108 		    sc->sc_scriptdma->dm_segs[0].ds_addr;
109 		sc->ram_size = PAGE_SIZE;
110 	}
111 
112 	sc->sc_adapt.adapt_dev = &sc->sc_dev;
113 	sc->sc_adapt.adapt_nchannels = 1;
114 	sc->sc_adapt.adapt_openings = 0;
115 	sc->sc_adapt.adapt_ioctl = siop_ioctl;
116 	sc->sc_adapt.adapt_minphys = minphys;
117 
118 	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
119 	sc->sc_chan.chan_adapter = &sc->sc_adapt;
120 	sc->sc_chan.chan_bustype = &scsi_bustype;
121 	sc->sc_chan.chan_channel = 0;
122 	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
123 	sc->sc_chan.chan_ntargets =
124 	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
125 	sc->sc_chan.chan_nluns = 8;
126 	sc->sc_chan.chan_id =
127 	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
128 	if (sc->sc_chan.chan_id == 0 ||
129 	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
130 		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
131 
132 	for (i = 0; i < 16; i++)
133 		sc->targets[i] = NULL;
134 
135 	/* find min/max sync period for this chip */
136 	sc->st_maxsync = 0;
137 	sc->dt_maxsync = 0;
138 	sc->st_minsync = 255;
139 	sc->dt_minsync = 255;
140 	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
141 		if (sc->clock_period != scf_period[i].clock)
142 			continue;
143 		if (sc->st_maxsync < scf_period[i].period)
144 			sc->st_maxsync = scf_period[i].period;
145 		if (sc->st_minsync > scf_period[i].period)
146 			sc->st_minsync = scf_period[i].period;
147 	}
148 	if (sc->st_maxsync == 0 || sc->st_minsync == 255)
149 		panic("siop: can't find my sync parameters");
150 	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
151 		if (sc->clock_period != dt_scf_period[i].clock)
152 			continue;
153 		if (sc->dt_maxsync < dt_scf_period[i].period)
154 			sc->dt_maxsync = dt_scf_period[i].period;
155 		if (sc->dt_minsync > dt_scf_period[i].period)
156 			sc->dt_minsync = dt_scf_period[i].period;
157 	}
158 	if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
159 		panic("siop: can't find my sync parameters");
160 	return 0;
161 }
162 
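/*
 * reset the chip and bring it back to a known state: reprogram the
 * operating registers, start the clock doubler/quadrupler if present,
 * enable the large FIFO and LED GPIO when available, record the current
 * bus mode, clear the on-board RAM and call the chip-specific reset hook
 */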
163 void
164 siop_common_reset(struct siop_common_softc *sc)
165 {
166 	u_int32_t stest1, stest3;
167 
168 	/* reset the chip */
169 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
170 	delay(1000);
171 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
172 
173 	/* init registers */
174 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
175 	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
176 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
177 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
178 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
179 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
180 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
181 	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
182 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
183 	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
184 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
185 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
186 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
187 	    (0xb << STIME0_SEL_SHIFT));
188 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
189 	    sc->sc_chan.chan_id | SCID_RRE);
190 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
191 	    1 << sc->sc_chan.chan_id);
192 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
193 	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
194 	if (sc->features & SF_CHIP_AAIP)
195 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
196 		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
197 
198 	/* enable clock doubler or quadrupler if appropriate */
199 	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
200 		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
201 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
202 		    STEST1_DBLEN);
203 		if (sc->features & SF_CHIP_QUAD) {
204 			/* wait for PLL to lock */
205 			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
206 			    SIOP_STEST4) & STEST4_LOCK) == 0)
207 				delay(10);
208 		} else {
209 			/* data sheet says 20us - more won't hurt */
210 			delay(100);
211 		}
212 		/* halt scsi clock, select doubler/quad, restart clock */
213 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
214 		    stest3 | STEST3_HSC);
215 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
216 		    STEST1_DBLEN | STEST1_DBLSEL);
217 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
218 	} else {
219 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
220 	}
221 
222 	if (sc->features & SF_CHIP_USEPCIC) {
223 		stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
224 		stest1 |= STEST1_SCLK;
225 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
226 	}
227 
228 	if (sc->features & SF_CHIP_FIFO)
229 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
230 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
231 		    CTEST5_DFS);
232 	if (sc->features & SF_CHIP_LED0) {
233 		/* Set GPIO0 as output if software LED control is required */
234 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
235 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
236 	}
237 	if (sc->features & SF_BUS_ULTRA3) {
238 		/* reset SCNTL4 */
239 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
240 	}
241 	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
242 	    STEST4_MODE_MASK;
243 
244 	/*
245 	 * initialise the RAM. Without this we may get scsi gross errors on
246 	 * the 1010
247 	 */
248 	if (sc->features & SF_CHIP_RAM)
249 		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
250 			0, 0, sc->ram_size / 4);
251 	sc->sc_reset(sc);
252 }
253 
254 /* prepare tables before sending a cmd */
255 void
256 siop_setuptables(struct siop_common_cmd *siop_cmd)
257 {
258 	int i;
259 	struct siop_common_softc *sc = siop_cmd->siop_sc;
260 	struct scsipi_xfer *xs = siop_cmd->xs;
261 	int target = xs->xs_periph->periph_target;
262 	int lun = xs->xs_periph->periph_lun;
263 	int msgoffset = 1;
264 
265 	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
266 	memset(siop_cmd->siop_tables->msg_out, 0,
267 	    sizeof(siop_cmd->siop_tables->msg_out));
268 	/* request sense doesn't disconnect */
269 	if (xs->xs_control & XS_CTL_REQSENSE)
270 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
271 	else if ((sc->features & SF_CHIP_GEBUG) &&
272 	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
273 		/*
274 		 * 1010 bug: it seems that the 1010 has problems with reselect
275 		 * when not in wide mode (generates false SCSI gross errors).
276 		 * The FreeBSD sym driver has comments about it but their
277 		 * workaround (disable SCSI gross error reporting) doesn't
278 		 * work with my adapter. So disable disconnect when not
279 		 * wide.
280 		 */
281 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
282 	else
283 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
284 	if (xs->xs_tag_type != 0) {
285 		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
286 			scsipi_printaddr(xs->xs_periph);
287 			printf(": tagged command type %d id %d\n",
288 			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
289 			panic("tagged command for non-tagging device");
290 		}
291 		siop_cmd->flags |= CMDFL_TAG;
292 		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
293 		/*
294 		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
295 		 * different one
296 		 */
297 		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
298 		msgoffset = 3;
299 	}
300 	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
301 	if (sc->targets[target]->status == TARST_ASYNC) {
302 		if ((sc->targets[target]->flags & TARF_DT) &&
303 			(sc->mode == STEST4_MODE_LVD)) {
304 			sc->targets[target]->status = TARST_PPR_NEG;
305 			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
306 			    sc->maxoff);
307 		} else if (sc->targets[target]->flags & TARF_WIDE) {
308 			sc->targets[target]->status = TARST_WIDE_NEG;
309 			siop_wdtr_msg(siop_cmd, msgoffset,
310 			    MSG_EXT_WDTR_BUS_16_BIT);
311 		} else if (sc->targets[target]->flags & TARF_SYNC) {
312 			sc->targets[target]->status = TARST_SYNC_NEG;
313 			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
314 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
315 		} else {
316 			sc->targets[target]->status = TARST_OK;
317 			siop_update_xfer_mode(sc, target);
318 		}
319 	}
320 	siop_cmd->siop_tables->status =
321 	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
322 
323 	siop_cmd->siop_tables->cmd.count =
324 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
325 	siop_cmd->siop_tables->cmd.addr =
326 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
327 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
328 		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
329 			siop_cmd->siop_tables->data[i].count =
330 			    siop_htoc32(sc,
331 				siop_cmd->dmamap_data->dm_segs[i].ds_len);
332 			siop_cmd->siop_tables->data[i].addr =
333 			    siop_htoc32(sc,
334 				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
335 		}
336 	}
337 }
338 
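/*
 * handle a WDTR message: record the agreed bus width, update the cached
 * SCNTL3 value and either continue with sync negotiation or answer a
 * target-initiated WDTR. Returns SIOP_NEG_MSGOUT when a reply has been
 * set up in msg_out, SIOP_NEG_ACK when the message just needs to be acked.
 */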
339 int
340 siop_wdtr_neg(struct siop_common_cmd *siop_cmd)
341 {
342 	struct siop_common_softc *sc = siop_cmd->siop_sc;
343 	struct siop_common_target *siop_target = siop_cmd->siop_target;
344 	int target = siop_cmd->xs->xs_periph->periph_target;
345 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
346 
347 	if (siop_target->status == TARST_WIDE_NEG) {
348 		/* we initiated wide negotiation */
349 		switch (tables->msg_in[3]) {
350 		case MSG_EXT_WDTR_BUS_8_BIT:
351 			siop_target->flags &= ~TARF_ISWIDE;
352 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
353 			break;
354 		case MSG_EXT_WDTR_BUS_16_BIT:
355 			if (siop_target->flags & TARF_WIDE) {
356 				siop_target->flags |= TARF_ISWIDE;
357 				sc->targets[target]->id |= (SCNTL3_EWS << 24);
358 				break;
359 			}
360 		/* FALLTHROUGH */
361 		default:
362 			/*
363 			 * the target answered with a width we can't handle;
364 			 * this shouldn't happen. Reject, and stay async.
365 			 */
366 			siop_target->flags &= ~TARF_ISWIDE;
367 			siop_target->status = TARST_OK;
368 			siop_target->offset = siop_target->period = 0;
369 			siop_update_xfer_mode(sc, target);
370 			printf("%s: rejecting invalid wide negotiation from "
371 			    "target %d (%d)\n", device_xname(&sc->sc_dev), target,
372 			    tables->msg_in[3]);
373 			tables->t_msgout.count = siop_htoc32(sc, 1);
374 			tables->msg_out[0] = MSG_MESSAGE_REJECT;
375 			return SIOP_NEG_MSGOUT;
376 		}
377 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
378 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
379 		    SIOP_SCNTL3,
380 		    (sc->targets[target]->id >> 24) & 0xff);
381 		/* we now need to do sync */
382 		if (siop_target->flags & TARF_SYNC) {
383 			siop_target->status = TARST_SYNC_NEG;
384 			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
385 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
386 			return SIOP_NEG_MSGOUT;
387 		} else {
388 			siop_target->status = TARST_OK;
389 			siop_update_xfer_mode(sc, target);
390 			return SIOP_NEG_ACK;
391 		}
392 	} else {
393 		/* target initiated wide negotiation */
394 		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
395 		    && (siop_target->flags & TARF_WIDE)) {
396 			siop_target->flags |= TARF_ISWIDE;
397 			sc->targets[target]->id |= SCNTL3_EWS << 24;
398 		} else {
399 			siop_target->flags &= ~TARF_ISWIDE;
400 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
401 		}
402 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
403 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
404 		    (sc->targets[target]->id >> 24) & 0xff);
405 		/*
406 		 * we did reset wide parameters, so fall back to async,
407 		 * but don't schedule a sync neg, target should initiate it
408 		 */
409 		siop_target->status = TARST_OK;
410 		siop_target->offset = siop_target->period = 0;
411 		siop_update_xfer_mode(sc, target);
412 		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
413 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
414 		return SIOP_NEG_MSGOUT;
415 	}
416 }
417 
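/*
 * handle the answer to a PPR (parallel protocol request) message: validate
 * the returned period, offset, width and DT option, look up the matching
 * clock scale factor and program SCNTL3/SXFER/SCNTL4. Anything unexpected
 * is rejected and the target falls back to async.
 */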
418 int
419 siop_ppr_neg(struct siop_common_cmd *siop_cmd)
420 {
421 	struct siop_common_softc *sc = siop_cmd->siop_sc;
422 	struct siop_common_target *siop_target = siop_cmd->siop_target;
423 	int target = siop_cmd->xs->xs_periph->periph_target;
424 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
425 	int sync, offset, options, scf = 0;
426 	int i;
427 
428 #ifdef DEBUG_NEG
429 	printf("%s: answer on ppr negotiation:", device_xname(&sc->sc_dev));
430 	for (i = 0; i < 8; i++)
431 		printf(" 0x%x", tables->msg_in[i]);
432 	printf("\n");
433 #endif
434 
435 	if (siop_target->status == TARST_PPR_NEG) {
436 		/* we initiated PPR negotiation */
437 		sync = tables->msg_in[3];
438 		offset = tables->msg_in[5];
439 		options = tables->msg_in[7];
440 		if (options != MSG_EXT_PPR_DT) {
441 			/* shouldn't happen */
442 			printf("%s: ppr negotiation for target %d: "
443 			    "no DT option\n", device_xname(&sc->sc_dev), target);
444 			siop_target->status = TARST_ASYNC;
445 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
446 			siop_target->offset = 0;
447 			siop_target->period = 0;
448 			goto reject;
449 		}
450 
451 		if (offset > sc->maxoff || sync < sc->dt_minsync ||
452 		    sync > sc->dt_maxsync) {
453 			printf("%s: ppr negotiation for target %d: "
454 			    "offset (%d) or sync (%d) out of range\n",
455 			    device_xname(&sc->sc_dev), target, offset, sync);
456 			/* should not happen */
457 			siop_target->offset = 0;
458 			siop_target->period = 0;
459 			goto reject;
460 		} else {
461 			for (i = 0; i <
462 			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
463 			    i++) {
464 				if (sc->clock_period != dt_scf_period[i].clock)
465 					continue;
466 				if (dt_scf_period[i].period == sync) {
467 					/* ok, found it. we are now synced. */
468 					siop_target->offset = offset;
469 					siop_target->period = sync;
470 					scf = dt_scf_period[i].scf;
471 					siop_target->flags |= TARF_ISDT;
472 				}
473 			}
474 			if ((siop_target->flags & TARF_ISDT) == 0) {
475 				printf("%s: ppr negotiation for target %d: "
476 				    "sync (%d) incompatible with adapter\n",
477 				    device_xname(&sc->sc_dev), target, sync);
478 				/*
479 				 * we didn't find it in our table: do async,
480 				 * send a reject msg and restart SDTR/WDTR neg
481 				 */
482 				siop_target->status = TARST_ASYNC;
483 				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
484 				siop_target->offset = 0;
485 				siop_target->period = 0;
486 				goto reject;
487 			}
488 		}
489 		if (tables->msg_in[6] != 1) {
490 			printf("%s: ppr negotiation for target %d: "
491 			    "transfer width (%d) incompatible with dt\n",
492 			    device_xname(&sc->sc_dev), target, tables->msg_in[6]);
493 			/* DT mode can only be done with wide transfers */
494 			siop_target->status = TARST_ASYNC;
495 			goto reject;
496 		}
497 		siop_target->flags |= TARF_ISWIDE;
498 		sc->targets[target]->id |= (SCNTL3_EWS << 24);
499 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
500 		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
501 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
502 		sc->targets[target]->id |=
503 		    (siop_target->offset & SXFER_MO_MASK) << 8;
504 		sc->targets[target]->id &= ~0xff;
505 		sc->targets[target]->id |= SCNTL4_U3EN;
506 		siop_target->status = TARST_OK;
507 		siop_update_xfer_mode(sc, target);
508 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
509 		    (sc->targets[target]->id >> 24) & 0xff);
510 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
511 		    (sc->targets[target]->id >> 8) & 0xff);
512 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
513 		    sc->targets[target]->id & 0xff);
514 		return SIOP_NEG_ACK;
515 	} else {
516 		/* target initiated PPR negotiation, shouldn't happen */
517 		printf("%s: rejecting invalid PPR negotiation from "
518 		    "target %d\n", device_xname(&sc->sc_dev), target);
519 reject:
520 		tables->t_msgout.count = siop_htoc32(sc, 1);
521 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
522 		return SIOP_NEG_MSGOUT;
523 	}
524 }
525 
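/*
 * handle an SDTR message, either as the answer to our own negotiation or
 * target-initiated: pick the clock scale factor matching the agreed period,
 * update the cached SCNTL3/SXFER values, and fall back to async (rejecting
 * if needed) when the parameters can't be supported.
 */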
526 int
527 siop_sdtr_neg(struct siop_common_cmd *siop_cmd)
528 {
529 	struct siop_common_softc *sc = siop_cmd->siop_sc;
530 	struct siop_common_target *siop_target = siop_cmd->siop_target;
531 	int target = siop_cmd->xs->xs_periph->periph_target;
532 	int sync, maxoffset, offset, i;
533 	int send_msgout = 0;
534 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
535 
536 	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
537 	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
538 
539 	sync = tables->msg_in[3];
540 	offset = tables->msg_in[4];
541 
542 	if (siop_target->status == TARST_SYNC_NEG) {
543 		/* we initiated sync negotiation */
544 		siop_target->status = TARST_OK;
545 #ifdef DEBUG
546 		printf("sdtr: sync %d offset %d\n", sync, offset);
547 #endif
548 		if (offset > maxoffset || sync < sc->st_minsync ||
549 			sync > sc->st_maxsync)
550 			goto reject;
551 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
552 		    i++) {
553 			if (sc->clock_period != scf_period[i].clock)
554 				continue;
555 			if (scf_period[i].period == sync) {
556 				/* ok, found it. we are now synced. */
557 				siop_target->offset = offset;
558 				siop_target->period = sync;
559 				sc->targets[target]->id &=
560 				    ~(SCNTL3_SCF_MASK << 24);
561 				sc->targets[target]->id |= scf_period[i].scf
562 				    << (24 + SCNTL3_SCF_SHIFT);
563 				if (sync < 25 && /* Ultra */
564 				    (sc->features & SF_BUS_ULTRA3) == 0)
565 					sc->targets[target]->id |=
566 					    SCNTL3_ULTRA << 24;
567 				else
568 					sc->targets[target]->id &=
569 					    ~(SCNTL3_ULTRA << 24);
570 				sc->targets[target]->id &=
571 				    ~(SXFER_MO_MASK << 8);
572 				sc->targets[target]->id |=
573 				    (offset & SXFER_MO_MASK) << 8;
574 				sc->targets[target]->id &= ~0xff; /* scntl4 */
575 				goto end;
576 			}
577 		}
578 		/*
579 		 * we didn't find it in our table, do async and send reject
580 		 * msg
581 		 */
582 reject:
583 		send_msgout = 1;
584 		tables->t_msgout.count = siop_htoc32(sc, 1);
585 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
586 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
587 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
588 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
589 		sc->targets[target]->id &= ~0xff; /* scntl4 */
590 		siop_target->offset = siop_target->period = 0;
591 	} else { /* target initiated sync neg */
592 #ifdef DEBUG
593 		printf("sdtr (target): sync %d offset %d\n", sync, offset);
594 #endif
595 		if (offset == 0 || sync > sc->st_maxsync) { /* async */
596 			goto async;
597 		}
598 		if (offset > maxoffset)
599 			offset = maxoffset;
600 		if (sync < sc->st_minsync)
601 			sync = sc->st_minsync;
602 		/* look for sync period */
603 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
604 		    i++) {
605 			if (sc->clock_period != scf_period[i].clock)
606 				continue;
607 			if (scf_period[i].period == sync) {
608 				/* ok, found it. we are now synced. */
609 				siop_target->offset = offset;
610 				siop_target->period = sync;
611 				sc->targets[target]->id &=
612 				    ~(SCNTL3_SCF_MASK << 24);
613 				sc->targets[target]->id |= scf_period[i].scf
614 				    << (24 + SCNTL3_SCF_SHIFT);
615 				if (sync < 25 && /* Ultra */
616 				    (sc->features & SF_BUS_ULTRA3) == 0)
617 					sc->targets[target]->id |=
618 					    SCNTL3_ULTRA << 24;
619 				else
620 					sc->targets[target]->id &=
621 					    ~(SCNTL3_ULTRA << 24);
622 				sc->targets[target]->id &=
623 				    ~(SXFER_MO_MASK << 8);
624 				sc->targets[target]->id |=
625 				    (offset & SXFER_MO_MASK) << 8;
626 				sc->targets[target]->id &= ~0xff; /* scntl4 */
627 				siop_sdtr_msg(siop_cmd, 0, sync, offset);
628 				send_msgout = 1;
629 				goto end;
630 			}
631 		}
632 async:
633 		siop_target->offset = siop_target->period = 0;
634 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
635 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
636 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
637 		sc->targets[target]->id &= ~0xff; /* scntl4 */
638 		siop_sdtr_msg(siop_cmd, 0, 0, 0);
639 		send_msgout = 1;
640 	}
641 end:
642 	if (siop_target->status == TARST_OK)
643 		siop_update_xfer_mode(sc, target);
644 #ifdef DEBUG
645 	printf("id now 0x%x\n", sc->targets[target]->id);
646 #endif
647 	tables->id = siop_htoc32(sc, sc->targets[target]->id);
648 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
649 	    (sc->targets[target]->id >> 24) & 0xff);
650 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
651 	    (sc->targets[target]->id >> 8) & 0xff);
652 	if (send_msgout) {
653 		return SIOP_NEG_MSGOUT;
654 	} else {
655 		return SIOP_NEG_ACK;
656 	}
657 }
658 
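/*
 * build an extended SDTR message (period ssync, offset soff) at the given
 * offset in the msg_out buffer and update the message-out count
 */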
659 void
660 siop_sdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
661 {
662 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
663 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
664 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
665 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
666 	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
667 	siop_cmd->siop_tables->t_msgout.count =
668 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
669 }
670 
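/*
 * build an extended WDTR message requesting the given bus width at the
 * given offset in the msg_out buffer and update the message-out count
 */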
671 void
672 siop_wdtr_msg(struct siop_common_cmd *siop_cmd, int offset, int wide)
673 {
674 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
675 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
676 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
677 	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
678 	siop_cmd->siop_tables->t_msgout.count =
679 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
680 }
681 
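/*
 * build an extended PPR message (period ssync, offset soff, wide, DT) at
 * the given offset in the msg_out buffer and update the message-out count
 */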
682 void
683 siop_ppr_msg(struct siop_common_cmd *siop_cmd, int offset, int ssync, int soff)
684 {
685 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
686 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
687 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
688 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
689 	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
690 	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
691 	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
692 	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
693 	siop_cmd->siop_tables->t_msgout.count =
694 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
695 }
696 
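/* adapter minphys hook: no extra limit beyond the default minphys() */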
697 void
698 siop_minphys(struct buf *bp)
699 {
700 	minphys(bp);
701 }
702 
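/* adapter ioctl handler: only SCBUSIORESET is supported */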
703 int
704 siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
705     int flag, struct proc *p)
706 {
707 	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;
708 
709 	switch (cmd) {
710 	case SCBUSIORESET:
711 		/*
712 		 * abort the script. This will trigger an interrupt, which will
713 		 * trigger a bus reset.
714 		 * We can't safely trigger the reset here as we can't access
715 		 * the required register while the script is running.
716 		 */
717 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
718 		return (0);
719 	default:
720 		return (ENOTTY);
721 	}
722 }
723 
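/*
 * phase mismatch during a data transfer: compute how much of the current
 * table was not transferred (accounting for data still in the DMA and
 * SCSI FIFOs on output) and record it as the command's residual
 */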
724 void
725 siop_ma(struct siop_common_cmd *siop_cmd)
726 {
727 	int offset, dbc, sstat;
728 	struct siop_common_softc *sc = siop_cmd->siop_sc;
729 	scr_table_t *table; /* table with partial xfer */
730 
731 	/*
732 	 * compute how much of the current table didn't get handled when
733 	 * a phase mismatch occurs
734 	 */
735 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
736 	    == 0)
737 	    return; /* no valid data transfer */
738 
739 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
740 	if (offset >= SIOP_NSG) {
741 		aprint_error_dev(&sc->sc_dev, "bad offset in siop_ma (%d)\n",
742 		    offset);
743 		return;
744 	}
745 	table = &siop_cmd->siop_tables->data[offset];
746 #ifdef DEBUG_DR
747 	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
748 	    table->count, table->addr);
749 #endif
750 	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
751 	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
752 		if (sc->features & SF_CHIP_DFBC) {
753 			dbc +=
754 			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
755 		} else {
756 			/* need to account for stale data in the FIFO */
757 			int dfifo =
758 			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
759 			if (sc->features & SF_CHIP_FIFO) {
760 				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
761 				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
762 				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
763 			} else {
764 				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
765 			}
766 		}
767 		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
768 		if (sstat & SSTAT0_OLF)
769 			dbc++;
770 		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
771 			dbc++;
772 		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
773 			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
774 			    SIOP_SSTAT2);
775 			if (sstat & SSTAT2_OLF1)
776 				dbc++;
777 			if ((sstat & SSTAT2_ORF1) &&
778 			    (sc->features & SF_CHIP_DFBC) == 0)
779 				dbc++;
780 		}
781 		/* clear the FIFO */
782 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
783 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
784 		    CTEST3_CLF);
785 	}
786 	siop_cmd->flags |= CMDFL_RESID;
787 	siop_cmd->resid = dbc;
788 }
789 
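/*
 * save data pointers: update the residual and rewrite the data tables so
 * that they describe only the data not yet transferred
 */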
790 void
791 siop_sdp(struct siop_common_cmd *siop_cmd, int offset)
792 {
793 	struct siop_common_softc *sc = siop_cmd->siop_sc;
794 	scr_table_t *table;
795 
796 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
797 	    == 0)
798 	    return; /* no data pointers to save */
799 
800 	/*
801 	 * offset == SIOP_NSG may be a valid condition if we get a Save data
802 	 * pointer when the xfer is done. Just ignore the Save data pointer
803 	 * in this case
804 	 */
805 	if (offset == SIOP_NSG)
806 		return;
807 #ifdef DIAGNOSTIC
808 	if (offset > SIOP_NSG) {
809 		scsipi_printaddr(siop_cmd->xs->xs_periph);
810 		printf(": offset %d > %d\n", offset, SIOP_NSG);
811 		panic("siop_sdp: offset");
812 	}
813 #endif
814 	/*
815 	 * Save data pointer. We do this by adjusting the tables to point
816 	 * at the beginning of the data not yet transferred.
817 	 * offset points to the first table with untransferred data.
818 	 */
819 
820 	/*
821 	 * before doing that we decrease resid by the amount of data which
822 	 * has been transferred.
823 	 */
824 	siop_update_resid(siop_cmd, offset);
825 
826 	/*
827 	 * First let's see if we have a resid from a phase mismatch. If so,
828 	 * we have to adjust the table at offset to remove transferred data.
829 	 */
830 	if (siop_cmd->flags & CMDFL_RESID) {
831 		siop_cmd->flags &= ~CMDFL_RESID;
832 		table = &siop_cmd->siop_tables->data[offset];
833 		/* "cut" already transferred data from this table */
834 		table->addr =
835 		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
836 		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
837 		table->count = siop_htoc32(sc, siop_cmd->resid);
838 	}
839 
840 	/*
841 	 * now we can remove entries which have been transferred.
842 	 * We just move the entries with data left to the beginning of the
843 	 * tables.
844 	 */
845 	memmove(&siop_cmd->siop_tables->data[0],
846 	    &siop_cmd->siop_tables->data[offset],
847 	    (SIOP_NSG - offset) * sizeof(scr_table_t));
848 }
849 
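/*
 * update xs->resid with the data actually transferred: subtract the fully
 * completed tables below offset and, after a phase mismatch, the
 * transferred part of the partial table
 */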
850 void
851 siop_update_resid(struct siop_common_cmd *siop_cmd, int offset)
852 {
853 	struct siop_common_softc *sc = siop_cmd->siop_sc;
854 	scr_table_t *table;
855 	int i;
856 
857 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
858 	    == 0)
859 	    return; /* no data to transfer */
860 
861 	/*
862 	 * update resid. First account for the table entries which have
863 	 * been fully completed.
864 	 */
865 	for (i = 0; i < offset; i++)
866 		siop_cmd->xs->resid -=
867 		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
868 	/*
869 	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
870 	 * partial transfer. If not, offset points to the entry following
871 	 * the last full transfer.
872 	 */
873 	if (siop_cmd->flags & CMDFL_RESID) {
874 		table = &siop_cmd->siop_tables->data[offset];
875 		siop_cmd->xs->resid -=
876 		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
877 	}
878 }
879 
880 int
881 siop_iwr(struct siop_common_cmd *siop_cmd)
882 {
883 	int offset;
884 	scr_table_t *table; /* table with IWR */
885 	struct siop_common_softc *sc = siop_cmd->siop_sc;
886 	/* handle ignore wide residue messages */
887 
888 	/* if target isn't wide, reject */
889 	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
890 		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
891 		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
892 		return SIOP_NEG_MSGOUT;
893 	}
894 	/* get index of current command in table */
895 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
896 	/*
897 	 * if the current table did complete, we're now pointing at the
898 	 * next one. Go back one if we didn't see a phase mismatch.
899 	 */
900 	if ((siop_cmd->flags & CMDFL_RESID) == 0)
901 		offset--;
902 	table = &siop_cmd->siop_tables->data[offset];
903 
904 	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
905 		if (siop_ctoh32(sc, table->count) & 1) {
906 			/* we really got the number of bytes we expected */
907 			return SIOP_NEG_ACK;
908 		} else {
909 			/*
910 			 * now we really had a short xfer, by one byte.
911 			 * handle it just as if we had a phase mismatch
912 			 * (there is a resid of one for this table).
913 			 * Update scratcha1 to reflect the fact that
914 			 * this xfer isn't complete.
915 			 */
916 			 siop_cmd->flags |= CMDFL_RESID;
917 			 siop_cmd->resid = 1;
918 			 bus_space_write_1(sc->sc_rt, sc->sc_rh,
919 			     SIOP_SCRATCHA + 1, offset);
920 			 return SIOP_NEG_ACK;
921 		}
922 	} else {
923 		/*
924 		 * we already have a short xfer for this table; it's
925 		 * just one byte less than we thought it was
926 		 */
927 		siop_cmd->resid--;
928 		return SIOP_NEG_ACK;
929 	}
930 }
931 
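/*
 * clear the DMA FIFO via CTEST3_CLF; give up and deassert the bit if it
 * hasn't completed within about 1ms
 */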
932 void
933 siop_clearfifo(struct siop_common_softc *sc)
934 {
935 	int timeout = 0;
936 	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
937 
938 #ifdef DEBUG_INTR
939 	printf("DMA FIFO not empty!\n");
940 #endif
941 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
942 	    ctest3 | CTEST3_CLF);
943 	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
944 	    CTEST3_CLF) != 0) {
945 		delay(1);
946 		if (++timeout > 1000) {
947 			printf("clear fifo failed\n");
948 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
949 			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
950 			    SIOP_CTEST3) & ~CTEST3_CLF);
951 			return;
952 		}
953 	}
954 }
955 
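/*
 * handle a SCSI bus mode change (SBMC): wait for DIFFSENSE to stabilise,
 * then program STEST2 for single-ended, differential or LVD operation.
 * Returns 1 on success, 0 if the mode is invalid or never stabilises.
 */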
956 int
957 siop_modechange(struct siop_common_softc *sc)
958 {
959 	int retry;
960 	int sist0, sist1, stest2;
961 	for (retry = 0; retry < 5; retry++) {
962 		/*
963 		 * datasheet says to wait 100ms and re-read SIST1,
964 		 * to check that DIFFSENSE is stable.
965 		 * We may delay() 5 times for 100ms at interrupt time;
966 		 * hopefully this will not happen often.
967 		 */
968 		delay(100000);
969 		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
970 		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
971 		if (sist1 & SIEN1_SBMC)
972 			continue; /* we got an irq again */
973 		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
974 		    STEST4_MODE_MASK;
975 		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
976 		switch(sc->mode) {
977 		case STEST4_MODE_DIF:
978 			printf("%s: switching to differential mode\n",
979 			    device_xname(&sc->sc_dev));
980 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
981 			    stest2 | STEST2_DIF);
982 			break;
983 		case STEST4_MODE_SE:
984 			printf("%s: switching to single-ended mode\n",
985 			    device_xname(&sc->sc_dev));
986 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
987 			    stest2 & ~STEST2_DIF);
988 			break;
989 		case STEST4_MODE_LVD:
990 			printf("%s: switching to LVD mode\n",
991 			    device_xname(&sc->sc_dev));
992 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
993 			    stest2 & ~STEST2_DIF);
994 			break;
995 		default:
996 			aprint_error_dev(&sc->sc_dev, "invalid SCSI mode 0x%x\n",
997 			    sc->mode);
998 			return 0;
999 		}
1000 		return 1;
1001 	}
1002 	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
1003 	    device_xname(&sc->sc_dev));
1004 	return 0;
1005 }
1006 
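/* reset the SCSI bus: assert SCNTL1_RST for at least the required 25us */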
1007 void
1008 siop_resetbus(struct siop_common_softc *sc)
1009 {
1010 	int scntl1;
1011 	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
1012 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
1013 	    scntl1 | SCNTL1_RST);
1014 	/* minimum 25 us, more time won't hurt */
1015 	delay(100);
1016 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
1017 }
1018 
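/*
 * report the negotiated transfer parameters (wide, sync period/offset,
 * tagged queuing) for a target to the scsipi layer
 */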
1019 void
1020 siop_update_xfer_mode(struct siop_common_softc *sc, int target)
1021 {
1022 	struct siop_common_target *siop_target = sc->targets[target];
1023 	struct scsipi_xfer_mode xm;
1024 
1025 	xm.xm_target = target;
1026 	xm.xm_mode = 0;
1027 	xm.xm_period = 0;
1028 	xm.xm_offset = 0;
1029 
1030 
1031 	if (siop_target->flags & TARF_ISWIDE)
1032 		xm.xm_mode |= PERIPH_CAP_WIDE16;
1033 	if (siop_target->period) {
1034 		xm.xm_period = siop_target->period;
1035 		xm.xm_offset = siop_target->offset;
1036 		xm.xm_mode |= PERIPH_CAP_SYNC;
1037 	}
1038 	if (siop_target->flags & TARF_TAG) {
1039 	/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1040 		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
1041 		    (sc->targets[target]->flags & TARF_ISWIDE))
1042 			xm.xm_mode |= PERIPH_CAP_TQING;
1043 	}
1044 
1045 	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
1046 }
1047