xref: /netbsd-src/sys/dev/ic/siop_common.c (revision cac8e449158efc7261bebc8657cbb0125a2cfdde)
1 /*	$NetBSD: siop_common.c,v 1.46 2008/06/11 02:09:16 kiyohara Exp $	*/
2 
3 /*
4  * Copyright (c) 2000, 2002 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Manuel Bouyer.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: siop_common.c,v 1.46 2008/06/11 02:09:16 kiyohara Exp $");
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/buf.h>
43 #include <sys/kernel.h>
44 #include <sys/scsiio.h>
45 
46 #include <uvm/uvm_extern.h>
47 
48 #include <machine/endian.h>
49 #include <sys/bus.h>
50 
51 #include <dev/scsipi/scsi_all.h>
52 #include <dev/scsipi/scsi_message.h>
53 #include <dev/scsipi/scsipi_all.h>
54 
55 #include <dev/scsipi/scsiconf.h>
56 
57 #include <dev/ic/siopreg.h>
58 #include <dev/ic/siopvar_common.h>
59 
60 #include "opt_siop.h"
61 
62 #undef DEBUG
63 #undef DEBUG_DR
64 #undef DEBUG_NEG
65 
66 int
67 siop_common_attach(sc)
68 	struct siop_common_softc *sc;
69 {
70 	int error, i;
71 	bus_dma_segment_t seg;
72 	int rseg;
73 
74 	/*
75 	 * Allocate DMA-safe memory for the script and map it.
76 	 */
77 	if ((sc->features & SF_CHIP_RAM) == 0) {
78 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
79 		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
80 		if (error) {
81 			aprint_error_dev(&sc->sc_dev,
82 			    "unable to allocate script DMA memory, "
83 			    "error = %d\n", error);
84 			return error;
85 		}
86 		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
87 		    (void **)&sc->sc_script,
88 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
89 		if (error) {
90 			aprint_error_dev(&sc->sc_dev, "unable to map script DMA memory, "
91 			    "error = %d\n", error);
92 			return error;
93 		}
94 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
95 		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
96 		if (error) {
97 			aprint_error_dev(&sc->sc_dev, "unable to create script DMA map, "
98 			    "error = %d\n", error);
99 			return error;
100 		}
101 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
102 		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
103 		if (error) {
104 			aprint_error_dev(&sc->sc_dev, "unable to load script DMA map, "
105 			    "error = %d\n", error);
106 			return error;
107 		}
108 		sc->sc_scriptaddr =
109 		    sc->sc_scriptdma->dm_segs[0].ds_addr;
110 		sc->ram_size = PAGE_SIZE;
111 	}
112 
113 	sc->sc_adapt.adapt_dev = &sc->sc_dev;
114 	sc->sc_adapt.adapt_nchannels = 1;
115 	sc->sc_adapt.adapt_openings = 0;
116 	sc->sc_adapt.adapt_ioctl = siop_ioctl;
117 	sc->sc_adapt.adapt_minphys = minphys;
118 
119 	memset(&sc->sc_chan, 0, sizeof(sc->sc_chan));
120 	sc->sc_chan.chan_adapter = &sc->sc_adapt;
121 	sc->sc_chan.chan_bustype = &scsi_bustype;
122 	sc->sc_chan.chan_channel = 0;
123 	sc->sc_chan.chan_flags = SCSIPI_CHAN_CANGROW;
124 	sc->sc_chan.chan_ntargets =
125 	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
126 	sc->sc_chan.chan_nluns = 8;
127 	sc->sc_chan.chan_id =
128 	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
129 	if (sc->sc_chan.chan_id == 0 ||
130 	    sc->sc_chan.chan_id >= sc->sc_chan.chan_ntargets)
131 		sc->sc_chan.chan_id = SIOP_DEFAULT_TARGET;
132 
133 	for (i = 0; i < 16; i++)
134 		sc->targets[i] = NULL;
135 
136 	/* find min/max sync period for this chip */
137 	sc->st_maxsync = 0;
138 	sc->dt_maxsync = 0;
139 	sc->st_minsync = 255;
140 	sc->dt_minsync = 255;
141 	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
142 		if (sc->clock_period != scf_period[i].clock)
143 			continue;
144 		if (sc->st_maxsync < scf_period[i].period)
145 			sc->st_maxsync = scf_period[i].period;
146 		if (sc->st_minsync > scf_period[i].period)
147 			sc->st_minsync = scf_period[i].period;
148 	}
149 	if (sc->st_maxsync == 255 || sc->st_minsync == 0)
150 		panic("siop: can't find my sync parameters");
151 	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
152 		if (sc->clock_period != dt_scf_period[i].clock)
153 			continue;
154 		if (sc->dt_maxsync < dt_scf_period[i].period)
155 			sc->dt_maxsync = dt_scf_period[i].period;
156 		if (sc->dt_minsync > dt_scf_period[i].period)
157 			sc->dt_minsync = dt_scf_period[i].period;
158 	}
159 	if (sc->dt_maxsync == 255 || sc->dt_minsync == 0)
160 		panic("siop: can't find my sync parameters");
161 	return 0;
162 }
163 
164 void
165 siop_common_reset(sc)
166 	struct siop_common_softc *sc;
167 {
168 	u_int32_t stest1, stest3;
169 
170 	/* reset the chip */
171 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
172 	delay(1000);
173 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
174 
175 	/* init registers */
176 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
177 	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
178 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
179 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
180 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
181 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
182 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
183 	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
184 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
185 	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
186 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
187 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
188 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
189 	    (0xb << STIME0_SEL_SHIFT));
190 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
191 	    sc->sc_chan.chan_id | SCID_RRE);
192 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
193 	    1 << sc->sc_chan.chan_id);
194 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
195 	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
196 	if (sc->features & SF_CHIP_AAIP)
197 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
198 		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
199 
200 	/* enable clock doubler or quadruler if appropriate */
201 	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
202 		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
203 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
204 		    STEST1_DBLEN);
205 		if (sc->features & SF_CHIP_QUAD) {
206 			/* wait for PPL to lock */
207 			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
208 			    SIOP_STEST4) & STEST4_LOCK) == 0)
209 				delay(10);
210 		} else {
211 			/* data sheet says 20us - more won't hurt */
212 			delay(100);
213 		}
214 		/* halt scsi clock, select doubler/quad, restart clock */
215 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
216 		    stest3 | STEST3_HSC);
217 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
218 		    STEST1_DBLEN | STEST1_DBLSEL);
219 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
220 	} else {
221 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
222 	}
223 
224 	if (sc->features & SF_CHIP_USEPCIC) {
225 		stest1 = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_STEST1);
226 		stest1 |= STEST1_SCLK;
227 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, stest1);
228 	}
229 
230 	if (sc->features & SF_CHIP_FIFO)
231 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
232 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
233 		    CTEST5_DFS);
234 	if (sc->features & SF_CHIP_LED0) {
235 		/* Set GPIO0 as output if software LED control is required */
236 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
237 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
238 	}
239 	if (sc->features & SF_BUS_ULTRA3) {
240 		/* reset SCNTL4 */
241 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
242 	}
243 	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
244 	    STEST4_MODE_MASK;
245 
246 	/*
247 	 * initialise the RAM. Without this we may get scsi gross errors on
248 	 * the 1010
249 	 */
250 	if (sc->features & SF_CHIP_RAM)
251 		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
252 			0, 0, sc->ram_size / 4);
253 	sc->sc_reset(sc);
254 }
255 
256 /* prepare tables before sending a cmd */
257 void
258 siop_setuptables(siop_cmd)
259 	struct siop_common_cmd *siop_cmd;
260 {
261 	int i;
262 	struct siop_common_softc *sc = siop_cmd->siop_sc;
263 	struct scsipi_xfer *xs = siop_cmd->xs;
264 	int target = xs->xs_periph->periph_target;
265 	int lun = xs->xs_periph->periph_lun;
266 	int msgoffset = 1;
267 
268 	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
269 	memset(siop_cmd->siop_tables->msg_out, 0,
270 	    sizeof(siop_cmd->siop_tables->msg_out));
271 	/* request sense doesn't disconnect */
272 	if (xs->xs_control & XS_CTL_REQSENSE)
273 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
274 	else if ((sc->features & SF_CHIP_GEBUG) &&
275 	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
276 		/*
277 		 * 1010 bug: it seems that the 1010 has problems with reselect
278 		 * when not in wide mode (generate false SCSI gross error).
279 		 * The FreeBSD sym driver has comments about it but their
280 		 * workaround (disable SCSI gross error reporting) doesn't
281 		 * work with my adapter. So disable disconnect when not
282 		 * wide.
283 		 */
284 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
285 	else
286 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
287 	if (xs->xs_tag_type != 0) {
288 		if ((sc->targets[target]->flags & TARF_TAG) == 0) {
289 			scsipi_printaddr(xs->xs_periph);
290 			printf(": tagged command type %d id %d\n",
291 			    siop_cmd->xs->xs_tag_type, siop_cmd->xs->xs_tag_id);
292 			panic("tagged command for non-tagging device");
293 		}
294 		siop_cmd->flags |= CMDFL_TAG;
295 		siop_cmd->siop_tables->msg_out[1] = siop_cmd->xs->xs_tag_type;
296 		/*
297 		 * use siop_cmd->tag not xs->xs_tag_id, caller may want a
298 		 * different one
299 		 */
300 		siop_cmd->siop_tables->msg_out[2] = siop_cmd->tag;
301 		msgoffset = 3;
302 	}
303 	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
304 	if (sc->targets[target]->status == TARST_ASYNC) {
305 		if ((sc->targets[target]->flags & TARF_DT) &&
306 			(sc->mode == STEST4_MODE_LVD)) {
307 			sc->targets[target]->status = TARST_PPR_NEG;
308 			 siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
309 			    sc->maxoff);
310 		} else if (sc->targets[target]->flags & TARF_WIDE) {
311 			sc->targets[target]->status = TARST_WIDE_NEG;
312 			siop_wdtr_msg(siop_cmd, msgoffset,
313 			    MSG_EXT_WDTR_BUS_16_BIT);
314 		} else if (sc->targets[target]->flags & TARF_SYNC) {
315 			sc->targets[target]->status = TARST_SYNC_NEG;
316 			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
317 			(sc->maxoff > 31) ? 31 :  sc->maxoff);
318 		} else {
319 			sc->targets[target]->status = TARST_OK;
320 			siop_update_xfer_mode(sc, target);
321 		}
322 	}
323 	siop_cmd->siop_tables->status =
324 	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
325 
326 	siop_cmd->siop_tables->cmd.count =
327 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_len);
328 	siop_cmd->siop_tables->cmd.addr =
329 	    siop_htoc32(sc, siop_cmd->dmamap_cmd->dm_segs[0].ds_addr);
330 	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) {
331 		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
332 			siop_cmd->siop_tables->data[i].count =
333 			    siop_htoc32(sc,
334 				siop_cmd->dmamap_data->dm_segs[i].ds_len);
335 			siop_cmd->siop_tables->data[i].addr =
336 			    siop_htoc32(sc,
337 				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
338 		}
339 	}
340 }
341 
342 int
343 siop_wdtr_neg(siop_cmd)
344 	struct siop_common_cmd *siop_cmd;
345 {
346 	struct siop_common_softc *sc = siop_cmd->siop_sc;
347 	struct siop_common_target *siop_target = siop_cmd->siop_target;
348 	int target = siop_cmd->xs->xs_periph->periph_target;
349 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
350 
351 	if (siop_target->status == TARST_WIDE_NEG) {
352 		/* we initiated wide negotiation */
353 		switch (tables->msg_in[3]) {
354 		case MSG_EXT_WDTR_BUS_8_BIT:
355 			siop_target->flags &= ~TARF_ISWIDE;
356 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
357 			break;
358 		case MSG_EXT_WDTR_BUS_16_BIT:
359 			if (siop_target->flags & TARF_WIDE) {
360 				siop_target->flags |= TARF_ISWIDE;
361 				sc->targets[target]->id |= (SCNTL3_EWS << 24);
362 				break;
363 			}
364 		/* FALLTHROUGH */
365 		default:
366 			/*
367  			 * hum, we got more than what we can handle, shouldn't
368 			 * happen. Reject, and stay async
369 			 */
370 			siop_target->flags &= ~TARF_ISWIDE;
371 			siop_target->status = TARST_OK;
372 			siop_target->offset = siop_target->period = 0;
373 			siop_update_xfer_mode(sc, target);
374 			printf("%s: rejecting invalid wide negotiation from "
375 			    "target %d (%d)\n", device_xname(&sc->sc_dev), target,
376 			    tables->msg_in[3]);
377 			tables->t_msgout.count = siop_htoc32(sc, 1);
378 			tables->msg_out[0] = MSG_MESSAGE_REJECT;
379 			return SIOP_NEG_MSGOUT;
380 		}
381 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
382 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
383 		    SIOP_SCNTL3,
384 		    (sc->targets[target]->id >> 24) & 0xff);
385 		/* we now need to do sync */
386 		if (siop_target->flags & TARF_SYNC) {
387 			siop_target->status = TARST_SYNC_NEG;
388 			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
389 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
390 			return SIOP_NEG_MSGOUT;
391 		} else {
392 			siop_target->status = TARST_OK;
393 			siop_update_xfer_mode(sc, target);
394 			return SIOP_NEG_ACK;
395 		}
396 	} else {
397 		/* target initiated wide negotiation */
398 		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
399 		    && (siop_target->flags & TARF_WIDE)) {
400 			siop_target->flags |= TARF_ISWIDE;
401 			sc->targets[target]->id |= SCNTL3_EWS << 24;
402 		} else {
403 			siop_target->flags &= ~TARF_ISWIDE;
404 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
405 		}
406 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
407 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
408 		    (sc->targets[target]->id >> 24) & 0xff);
409 		/*
410 		 * we did reset wide parameters, so fall back to async,
411 		 * but don't schedule a sync neg, target should initiate it
412 		 */
413 		siop_target->status = TARST_OK;
414 		siop_target->offset = siop_target->period = 0;
415 		siop_update_xfer_mode(sc, target);
416 		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
417 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
418 		return SIOP_NEG_MSGOUT;
419 	}
420 }
421 
422 int
423 siop_ppr_neg(siop_cmd)
424 	struct siop_common_cmd *siop_cmd;
425 {
426 	struct siop_common_softc *sc = siop_cmd->siop_sc;
427 	struct siop_common_target *siop_target = siop_cmd->siop_target;
428 	int target = siop_cmd->xs->xs_periph->periph_target;
429 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
430 	int sync, offset, options, scf = 0;
431 	int i;
432 
433 #ifdef DEBUG_NEG
434 	printf("%s: answer on ppr negotiation:", device_xname(&sc->sc_dev));
435 	for (i = 0; i < 8; i++)
436 		printf(" 0x%x", tables->msg_in[i]);
437 	printf("\n");
438 #endif
439 
440 	if (siop_target->status == TARST_PPR_NEG) {
441 		/* we initiated PPR negotiation */
442 		sync = tables->msg_in[3];
443 		offset = tables->msg_in[5];
444 		options = tables->msg_in[7];
445 		if (options != MSG_EXT_PPR_DT) {
446 			/* should't happen */
447 			printf("%s: ppr negotiation for target %d: "
448 			    "no DT option\n", device_xname(&sc->sc_dev), target);
449 			siop_target->status = TARST_ASYNC;
450 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
451 			siop_target->offset = 0;
452 			siop_target->period = 0;
453 			goto reject;
454 		}
455 
456 		if (offset > sc->maxoff || sync < sc->dt_minsync ||
457 		    sync > sc->dt_maxsync) {
458 			printf("%s: ppr negotiation for target %d: "
459 			    "offset (%d) or sync (%d) out of range\n",
460 			    device_xname(&sc->sc_dev), target, offset, sync);
461 			/* should not happen */
462 			siop_target->offset = 0;
463 			siop_target->period = 0;
464 			goto reject;
465 		} else {
466 			for (i = 0; i <
467 			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
468 			    i++) {
469 				if (sc->clock_period != dt_scf_period[i].clock)
470 					continue;
471 				if (dt_scf_period[i].period == sync) {
472 					/* ok, found it. we now are sync. */
473 					siop_target->offset = offset;
474 					siop_target->period = sync;
475 					scf = dt_scf_period[i].scf;
476 					siop_target->flags |= TARF_ISDT;
477 				}
478 			}
479 			if ((siop_target->flags & TARF_ISDT) == 0) {
480 				printf("%s: ppr negotiation for target %d: "
481 				    "sync (%d) incompatible with adapter\n",
482 				    device_xname(&sc->sc_dev), target, sync);
483 				/*
484 				 * we didn't find it in our table, do async
485 				 * send reject msg, start SDTR/WDTR neg
486 				 */
487 				siop_target->status = TARST_ASYNC;
488 				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
489 				siop_target->offset = 0;
490 				siop_target->period = 0;
491 				goto reject;
492 			}
493 		}
494 		if (tables->msg_in[6] != 1) {
495 			printf("%s: ppr negotiation for target %d: "
496 			    "transfer width (%d) incompatible with dt\n",
497 			    device_xname(&sc->sc_dev), target, tables->msg_in[6]);
498 			/* DT mode can only be done with wide transfers */
499 			siop_target->status = TARST_ASYNC;
500 			goto reject;
501 		}
502 		siop_target->flags |= TARF_ISWIDE;
503 		sc->targets[target]->id |= (SCNTL3_EWS << 24);
504 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
505 		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
506 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
507 		sc->targets[target]->id |=
508 		    (siop_target->offset & SXFER_MO_MASK) << 8;
509 		sc->targets[target]->id &= ~0xff;
510 		sc->targets[target]->id |= SCNTL4_U3EN;
511 		siop_target->status = TARST_OK;
512 		siop_update_xfer_mode(sc, target);
513 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
514 		    (sc->targets[target]->id >> 24) & 0xff);
515 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
516 		    (sc->targets[target]->id >> 8) & 0xff);
517 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
518 		    sc->targets[target]->id & 0xff);
519 		return SIOP_NEG_ACK;
520 	} else {
521 		/* target initiated PPR negotiation, shouldn't happen */
522 		printf("%s: rejecting invalid PPR negotiation from "
523 		    "target %d\n", device_xname(&sc->sc_dev), target);
524 reject:
525 		tables->t_msgout.count = siop_htoc32(sc, 1);
526 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
527 		return SIOP_NEG_MSGOUT;
528 	}
529 }
530 
531 int
532 siop_sdtr_neg(siop_cmd)
533 	struct siop_common_cmd *siop_cmd;
534 {
535 	struct siop_common_softc *sc = siop_cmd->siop_sc;
536 	struct siop_common_target *siop_target = siop_cmd->siop_target;
537 	int target = siop_cmd->xs->xs_periph->periph_target;
538 	int sync, maxoffset, offset, i;
539 	int send_msgout = 0;
540 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
541 
542 	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
543 	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
544 
545 	sync = tables->msg_in[3];
546 	offset = tables->msg_in[4];
547 
548 	if (siop_target->status == TARST_SYNC_NEG) {
549 		/* we initiated sync negotiation */
550 		siop_target->status = TARST_OK;
551 #ifdef DEBUG
552 		printf("sdtr: sync %d offset %d\n", sync, offset);
553 #endif
554 		if (offset > maxoffset || sync < sc->st_minsync ||
555 			sync > sc->st_maxsync)
556 			goto reject;
557 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
558 		    i++) {
559 			if (sc->clock_period != scf_period[i].clock)
560 				continue;
561 			if (scf_period[i].period == sync) {
562 				/* ok, found it. we now are sync. */
563 				siop_target->offset = offset;
564 				siop_target->period = sync;
565 				sc->targets[target]->id &=
566 				    ~(SCNTL3_SCF_MASK << 24);
567 				sc->targets[target]->id |= scf_period[i].scf
568 				    << (24 + SCNTL3_SCF_SHIFT);
569 				if (sync < 25 && /* Ultra */
570 				    (sc->features & SF_BUS_ULTRA3) == 0)
571 					sc->targets[target]->id |=
572 					    SCNTL3_ULTRA << 24;
573 				else
574 					sc->targets[target]->id &=
575 					    ~(SCNTL3_ULTRA << 24);
576 				sc->targets[target]->id &=
577 				    ~(SXFER_MO_MASK << 8);
578 				sc->targets[target]->id |=
579 				    (offset & SXFER_MO_MASK) << 8;
580 				sc->targets[target]->id &= ~0xff; /* scntl4 */
581 				goto end;
582 			}
583 		}
584 		/*
585 		 * we didn't find it in our table, do async and send reject
586 		 * msg
587 		 */
588 reject:
589 		send_msgout = 1;
590 		tables->t_msgout.count = siop_htoc32(sc, 1);
591 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
592 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
593 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
594 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
595 		sc->targets[target]->id &= ~0xff; /* scntl4 */
596 		siop_target->offset = siop_target->period = 0;
597 	} else { /* target initiated sync neg */
598 #ifdef DEBUG
599 		printf("sdtr (target): sync %d offset %d\n", sync, offset);
600 #endif
601 		if (offset == 0 || sync > sc->st_maxsync) { /* async */
602 			goto async;
603 		}
604 		if (offset > maxoffset)
605 			offset = maxoffset;
606 		if (sync < sc->st_minsync)
607 			sync = sc->st_minsync;
608 		/* look for sync period */
609 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
610 		    i++) {
611 			if (sc->clock_period != scf_period[i].clock)
612 				continue;
613 			if (scf_period[i].period == sync) {
614 				/* ok, found it. we now are sync. */
615 				siop_target->offset = offset;
616 				siop_target->period = sync;
617 				sc->targets[target]->id &=
618 				    ~(SCNTL3_SCF_MASK << 24);
619 				sc->targets[target]->id |= scf_period[i].scf
620 				    << (24 + SCNTL3_SCF_SHIFT);
621 				if (sync < 25 && /* Ultra */
622 				    (sc->features & SF_BUS_ULTRA3) == 0)
623 					sc->targets[target]->id |=
624 					    SCNTL3_ULTRA << 24;
625 				else
626 					sc->targets[target]->id &=
627 					    ~(SCNTL3_ULTRA << 24);
628 				sc->targets[target]->id &=
629 				    ~(SXFER_MO_MASK << 8);
630 				sc->targets[target]->id |=
631 				    (offset & SXFER_MO_MASK) << 8;
632 				sc->targets[target]->id &= ~0xff; /* scntl4 */
633 				siop_sdtr_msg(siop_cmd, 0, sync, offset);
634 				send_msgout = 1;
635 				goto end;
636 			}
637 		}
638 async:
639 		siop_target->offset = siop_target->period = 0;
640 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
641 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
642 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
643 		sc->targets[target]->id &= ~0xff; /* scntl4 */
644 		siop_sdtr_msg(siop_cmd, 0, 0, 0);
645 		send_msgout = 1;
646 	}
647 end:
648 	if (siop_target->status == TARST_OK)
649 		siop_update_xfer_mode(sc, target);
650 #ifdef DEBUG
651 	printf("id now 0x%x\n", sc->targets[target]->id);
652 #endif
653 	tables->id = siop_htoc32(sc, sc->targets[target]->id);
654 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
655 	    (sc->targets[target]->id >> 24) & 0xff);
656 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
657 	    (sc->targets[target]->id >> 8) & 0xff);
658 	if (send_msgout) {
659 		return SIOP_NEG_MSGOUT;
660 	} else {
661 		return SIOP_NEG_ACK;
662 	}
663 }
664 
665 void
666 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
667 	struct siop_common_cmd *siop_cmd;
668 	int offset;
669 	int ssync, soff;
670 {
671 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
672 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
673 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
674 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
675 	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
676 	siop_cmd->siop_tables->t_msgout.count =
677 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
678 }
679 
680 void
681 siop_wdtr_msg(siop_cmd, offset, wide)
682 	struct siop_common_cmd *siop_cmd;
683 	int offset;
684 	int wide;
685 {
686 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
687 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
688 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
689 	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
690 	siop_cmd->siop_tables->t_msgout.count =
691 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
692 }
693 
694 void
695 siop_ppr_msg(siop_cmd, offset, ssync, soff)
696 	struct siop_common_cmd *siop_cmd;
697 	int offset;
698 	int ssync, soff;
699 {
700 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
701 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
702 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
703 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
704 	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
705 	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
706 	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
707 	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_DT;
708 	siop_cmd->siop_tables->t_msgout.count =
709 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
710 }
711 
712 void
713 siop_minphys(bp)
714 	struct buf *bp;
715 {
716 	minphys(bp);
717 }
718 
719 int
720 siop_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
721     int flag, struct proc *p)
722 {
723 	struct siop_common_softc *sc = (void *)chan->chan_adapter->adapt_dev;
724 
725 	switch (cmd) {
726 	case SCBUSIORESET:
727 		/*
728 		 * abort the script. This will trigger an interrupt, which will
729 		 * trigger a bus reset.
730 		 * We can't safely trigger the reset here as we can't access
731 		 * the required register while the script is running.
732 		 */
733 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_ABRT);
734 		return (0);
735 	default:
736 		return (ENOTTY);
737 	}
738 }
739 
740 void
741 siop_ma(siop_cmd)
742 	struct siop_common_cmd *siop_cmd;
743 {
744 	int offset, dbc, sstat;
745 	struct siop_common_softc *sc = siop_cmd->siop_sc;
746 	scr_table_t *table; /* table with partial xfer */
747 
748 	/*
749 	 * compute how much of the current table didn't get handled when
750 	 * a phase mismatch occurs
751 	 */
752 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
753 	    == 0)
754 	    return; /* no valid data transfer */
755 
756 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
757 	if (offset >= SIOP_NSG) {
758 		aprint_error_dev(&sc->sc_dev, "bad offset in siop_sdp (%d)\n",
759 		    offset);
760 		return;
761 	}
762 	table = &siop_cmd->siop_tables->data[offset];
763 #ifdef DEBUG_DR
764 	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
765 	    table->count, table->addr);
766 #endif
767 	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
768 	if (siop_cmd->xs->xs_control & XS_CTL_DATA_OUT) {
769 		if (sc->features & SF_CHIP_DFBC) {
770 			dbc +=
771 			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
772 		} else {
773 			/* need to account stale data in FIFO */
774 			int dfifo =
775 			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
776 			if (sc->features & SF_CHIP_FIFO) {
777 				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
778 				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
779 				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
780 			} else {
781 				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
782 			}
783 		}
784 		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
785 		if (sstat & SSTAT0_OLF)
786 			dbc++;
787 		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
788 			dbc++;
789 		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
790 			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
791 			    SIOP_SSTAT2);
792 			if (sstat & SSTAT2_OLF1)
793 				dbc++;
794 			if ((sstat & SSTAT2_ORF1) &&
795 			    (sc->features & SF_CHIP_DFBC) == 0)
796 				dbc++;
797 		}
798 		/* clear the FIFO */
799 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
800 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
801 		    CTEST3_CLF);
802 	}
803 	siop_cmd->flags |= CMDFL_RESID;
804 	siop_cmd->resid = dbc;
805 }
806 
807 void
808 siop_sdp(siop_cmd, offset)
809 	struct siop_common_cmd *siop_cmd;
810 	int offset;
811 {
812 	struct siop_common_softc *sc = siop_cmd->siop_sc;
813 	scr_table_t *table;
814 
815 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
816 	    == 0)
817 	    return; /* no data pointers to save */
818 
819 	/*
820 	 * offset == SIOP_NSG may be a valid condition if we get a Save data
821 	 * pointer when the xfer is done. Just ignore the Save data pointer
822 	 * in this case
823 	 */
824 	if (offset == SIOP_NSG)
825 		return;
826 #ifdef DIAGNOSTIC
827 	if (offset > SIOP_NSG) {
828 		scsipi_printaddr(siop_cmd->xs->xs_periph);
829 		printf(": offset %d > %d\n", offset, SIOP_NSG);
830 		panic("siop_sdp: offset");
831 	}
832 #endif
833 	/*
834 	 * Save data pointer. We do this by adjusting the tables to point
835 	 * at the begginning of the data not yet transfered.
836 	 * offset points to the first table with untransfered data.
837 	 */
838 
839 	/*
840 	 * before doing that we decrease resid from the ammount of data which
841 	 * has been transfered.
842 	 */
843 	siop_update_resid(siop_cmd, offset);
844 
845 	/*
846 	 * First let see if we have a resid from a phase mismatch. If so,
847 	 * we have to adjst the table at offset to remove transfered data.
848 	 */
849 	if (siop_cmd->flags & CMDFL_RESID) {
850 		siop_cmd->flags &= ~CMDFL_RESID;
851 		table = &siop_cmd->siop_tables->data[offset];
852 		/* "cut" already transfered data from this table */
853 		table->addr =
854 		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
855 		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
856 		table->count = siop_htoc32(sc, siop_cmd->resid);
857 	}
858 
859 	/*
860 	 * now we can remove entries which have been transfered.
861 	 * We just move the entries with data left at the beggining of the
862 	 * tables
863 	 */
864 	memmove(&siop_cmd->siop_tables->data[0],
865 	    &siop_cmd->siop_tables->data[offset],
866 	    (SIOP_NSG - offset) * sizeof(scr_table_t));
867 }
868 
869 void
870 siop_update_resid(siop_cmd, offset)
871 	struct siop_common_cmd *siop_cmd;
872 	int offset;
873 {
874 	struct siop_common_softc *sc = siop_cmd->siop_sc;
875 	scr_table_t *table;
876 	int i;
877 
878 	if ((siop_cmd->xs->xs_control & (XS_CTL_DATA_OUT | XS_CTL_DATA_IN))
879 	    == 0)
880 	    return; /* no data to transfer */
881 
882 	/*
883 	 * update resid. First account for the table entries which have
884 	 * been fully completed.
885 	 */
886 	for (i = 0; i < offset; i++)
887 		siop_cmd->xs->resid -=
888 		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
889 	/*
890 	 * if CMDFL_RESID is set, the last table (pointed by offset) is a
891 	 * partial transfers. If not, offset points to the entry folloing
892 	 * the last full transfer.
893 	 */
894 	if (siop_cmd->flags & CMDFL_RESID) {
895 		table = &siop_cmd->siop_tables->data[offset];
896 		siop_cmd->xs->resid -=
897 		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
898 	}
899 }
900 
901 int
902 siop_iwr(siop_cmd)
903 	struct siop_common_cmd *siop_cmd;
904 {
905 	int offset;
906 	scr_table_t *table; /* table with IWR */
907 	struct siop_common_softc *sc = siop_cmd->siop_sc;
908 	/* handle ignore wide residue messages */
909 
910 	/* if target isn't wide, reject */
911 	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
912 		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
913 		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
914 		return SIOP_NEG_MSGOUT;
915 	}
916 	/* get index of current command in table */
917 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
918 	/*
919 	 * if the current table did complete, we're now pointing at the
920 	 * next one. Go back one if we didn't see a phase mismatch.
921 	 */
922 	if ((siop_cmd->flags & CMDFL_RESID) == 0)
923 		offset--;
924 	table = &siop_cmd->siop_tables->data[offset];
925 
926 	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
927 		if (siop_ctoh32(sc, table->count) & 1) {
928 			/* we really got the number of bytes we expected */
929 			return SIOP_NEG_ACK;
930 		} else {
931 			/*
932 			 * now we really had a short xfer, by one byte.
933 			 * handle it just as if we had a phase mistmatch
934 			 * (there is a resid of one for this table).
935 			 * Update scratcha1 to reflect the fact that
936 			 * this xfer isn't complete.
937 			 */
938 			 siop_cmd->flags |= CMDFL_RESID;
939 			 siop_cmd->resid = 1;
940 			 bus_space_write_1(sc->sc_rt, sc->sc_rh,
941 			     SIOP_SCRATCHA + 1, offset);
942 			 return SIOP_NEG_ACK;
943 		}
944 	} else {
945 		/*
946 		 * we already have a short xfer for this table; it's
947 		 * just one byte less than we though it was
948 		 */
949 		siop_cmd->resid--;
950 		return SIOP_NEG_ACK;
951 	}
952 }
953 
954 void
955 siop_clearfifo(sc)
956 	struct siop_common_softc *sc;
957 {
958 	int timeout = 0;
959 	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
960 
961 #ifdef DEBUG_INTR
962 	printf("DMA fifo not empty !\n");
963 #endif
964 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
965 	    ctest3 | CTEST3_CLF);
966 	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
967 	    CTEST3_CLF) != 0) {
968 		delay(1);
969 		if (++timeout > 1000) {
970 			printf("clear fifo failed\n");
971 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
972 			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
973 			    SIOP_CTEST3) & ~CTEST3_CLF);
974 			return;
975 		}
976 	}
977 }
978 
979 int
980 siop_modechange(sc)
981 	struct siop_common_softc *sc;
982 {
983 	int retry;
984 	int sist0, sist1, stest2;
985 	for (retry = 0; retry < 5; retry++) {
986 		/*
987 		 * datasheet says to wait 100ms and re-read SIST1,
988 		 * to check that DIFFSENSE is stable.
989 		 * We may delay() 5 times for  100ms at interrupt time;
990 		 * hopefully this will not happen often.
991 		 */
992 		delay(100000);
993 		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
994 		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
995 		if (sist1 & SIEN1_SBMC)
996 			continue; /* we got an irq again */
997 		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
998 		    STEST4_MODE_MASK;
999 		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
1000 		switch(sc->mode) {
1001 		case STEST4_MODE_DIF:
1002 			printf("%s: switching to differential mode\n",
1003 			    device_xname(&sc->sc_dev));
1004 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
1005 			    stest2 | STEST2_DIF);
1006 			break;
1007 		case STEST4_MODE_SE:
1008 			printf("%s: switching to single-ended mode\n",
1009 			    device_xname(&sc->sc_dev));
1010 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
1011 			    stest2 & ~STEST2_DIF);
1012 			break;
1013 		case STEST4_MODE_LVD:
1014 			printf("%s: switching to LVD mode\n",
1015 			    device_xname(&sc->sc_dev));
1016 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
1017 			    stest2 & ~STEST2_DIF);
1018 			break;
1019 		default:
1020 			aprint_error_dev(&sc->sc_dev, "invalid SCSI mode 0x%x\n",
1021 			    sc->mode);
1022 			return 0;
1023 		}
1024 		return 1;
1025 	}
1026 	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
1027 	    device_xname(&sc->sc_dev));
1028 	return 0;
1029 }
1030 
1031 void
1032 siop_resetbus(sc)
1033 	struct siop_common_softc *sc;
1034 {
1035 	int scntl1;
1036 	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
1037 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
1038 	    scntl1 | SCNTL1_RST);
1039 	/* minimum 25 us, more time won't hurt */
1040 	delay(100);
1041 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
1042 }
1043 
1044 void
1045 siop_update_xfer_mode(sc, target)
1046 	struct siop_common_softc *sc;
1047 	int target;
1048 {
1049 	struct siop_common_target *siop_target = sc->targets[target];
1050 	struct scsipi_xfer_mode xm;
1051 
1052 	xm.xm_target = target;
1053 	xm.xm_mode = 0;
1054 	xm.xm_period = 0;
1055 	xm.xm_offset = 0;
1056 
1057 
1058 	if (siop_target->flags & TARF_ISWIDE)
1059 		xm.xm_mode |= PERIPH_CAP_WIDE16;
1060 	if (siop_target->period) {
1061 		xm.xm_period = siop_target->period;
1062 		xm.xm_offset = siop_target->offset;
1063 		xm.xm_mode |= PERIPH_CAP_SYNC;
1064 	}
1065 	if (siop_target->flags & TARF_TAG) {
1066 	/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1067 		if ((sc->features & SF_CHIP_GEBUG) == 0 ||
1068 		    (sc->targets[target]->flags & TARF_ISWIDE))
1069 			xm.xm_mode |= PERIPH_CAP_TQING;
1070 	}
1071 
1072 	scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, &xm);
1073 }
1074