1 /*	$OpenBSD: siop_common.c,v 1.38 2020/02/17 02:50:23 krw Exp $ */
2 /*	$NetBSD: siop_common.c,v 1.37 2005/02/27 00:27:02 perry Exp $	*/
3 
4 /*
5  * Copyright (c) 2000, 2002 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  *
27  */
28 
29 /* SYM53c7/8xx PCI-SCSI I/O Processors driver */
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/device.h>
34 #include <sys/malloc.h>
35 #include <sys/buf.h>
36 #include <sys/kernel.h>
37 #include <sys/scsiio.h>
38 #include <sys/endian.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_message.h>
44 #include <scsi/scsiconf.h>
45 
46 #define SIOP_NEEDS_PERIOD_TABLES
47 #include <dev/ic/siopreg.h>
48 #include <dev/ic/siopvar_common.h>
49 #include <dev/ic/siopvar.h>
50 
51 #undef DEBUG
52 #undef DEBUG_DR
53 #undef DEBUG_NEG
54 
55 int
56 siop_common_attach(sc)
57 	struct siop_common_softc *sc;
58 {
59 	int error, i;
60 	bus_dma_segment_t seg;
61 	int rseg;
62 
63 	/*
64 	 * Allocate DMA-safe memory for the script and map it.
65 	 */
66 	if ((sc->features & SF_CHIP_RAM) == 0) {
67 		error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE,
68 		    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
69 		if (error) {
70 			printf("%s: unable to allocate script DMA memory, "
71 			    "error = %d\n", sc->sc_dev.dv_xname, error);
72 			return error;
73 		}
74 		error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, PAGE_SIZE,
75 		    (caddr_t *)&sc->sc_script,
76 		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
77 		if (error) {
78 			printf("%s: unable to map script DMA memory, "
79 			    "error = %d\n", sc->sc_dev.dv_xname, error);
80 			return error;
81 		}
82 		error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
83 		    PAGE_SIZE, 0, BUS_DMA_NOWAIT, &sc->sc_scriptdma);
84 		if (error) {
85 			printf("%s: unable to create script DMA map, "
86 			    "error = %d\n", sc->sc_dev.dv_xname, error);
87 			return error;
88 		}
89 		error = bus_dmamap_load(sc->sc_dmat, sc->sc_scriptdma,
90 		    sc->sc_script, PAGE_SIZE, NULL, BUS_DMA_NOWAIT);
91 		if (error) {
92 			printf("%s: unable to load script DMA map, "
93 			    "error = %d\n", sc->sc_dev.dv_xname, error);
94 			return error;
95 		}
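		/* the script will be fetched by the chip at this bus address */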
96 		sc->sc_scriptaddr =
97 		    sc->sc_scriptdma->dm_segs[0].ds_addr;
98 		sc->ram_size = PAGE_SIZE;
99 	}
100 
101 	/*
102 	 * sc->sc_link is the template for the sc_link of all devices
103 	 * attached to this adapter. It is passed to
104 	 * the upper layers in config_found().
105 	 */
106 	sc->sc_link.adapter_softc = sc;
107 	sc->sc_link.adapter_buswidth =
108 	    (sc->features & SF_BUS_WIDE) ? 16 : 8;
109 	sc->sc_link.adapter_target =
110 	    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCID);
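	/* fall back to a sane ID if the one read from SCID is out of range */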
111 	if (sc->sc_link.adapter_target == 0 ||
112 	    sc->sc_link.adapter_target >=
113 	    sc->sc_link.adapter_buswidth)
114 		sc->sc_link.adapter_target = SIOP_DEFAULT_TARGET;
115 
116 	for (i = 0; i < 16; i++)
117 		sc->targets[i] = NULL;
118 
119 	/* find min/max sync period for this chip */
120 	sc->st_maxsync = 0;
121 	sc->dt_maxsync = 0;
122 	sc->st_minsync = 255;
123 	sc->dt_minsync = 255;
124 	for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]); i++) {
125 		if (sc->clock_period != scf_period[i].clock)
126 			continue;
127 		if (sc->st_maxsync < scf_period[i].period)
128 			sc->st_maxsync = scf_period[i].period;
129 		if (sc->st_minsync > scf_period[i].period)
130 			sc->st_minsync = scf_period[i].period;
131 	}
132 	if (sc->st_maxsync == 0 || sc->st_minsync == 255)
133 		panic("siop: can't find my sync parameters");
134 	for (i = 0; i < sizeof(dt_scf_period) / sizeof(dt_scf_period[0]); i++) {
135 		if (sc->clock_period != dt_scf_period[i].clock)
136 			continue;
137 		if (sc->dt_maxsync < dt_scf_period[i].period)
138 			sc->dt_maxsync = dt_scf_period[i].period;
139 		if (sc->dt_minsync > dt_scf_period[i].period)
140 			sc->dt_minsync = dt_scf_period[i].period;
141 	}
142 	if (sc->dt_maxsync == 0 || sc->dt_minsync == 255)
143 		panic("siop: can't find my sync parameters");
144 	return 0;
145 }
146 
147 void
148 siop_common_reset(sc)
149 	struct siop_common_softc *sc;
150 {
151 	u_int32_t stest3;
152 
153 	/* reset the chip */
154 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, ISTAT_SRST);
155 	delay(1000);
156 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_ISTAT, 0);
157 
158 	/* init registers */
159 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL0,
160 	    SCNTL0_ARB_MASK | SCNTL0_EPC | SCNTL0_AAP);
161 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, 0);
162 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3, sc->clock_div);
163 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER, 0);
164 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DIEN, 0xff);
165 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN0,
166 	    0xff & ~(SIEN0_CMP | SIEN0_SEL | SIEN0_RSL));
167 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SIEN1,
168 	    0xff & ~(SIEN1_HTH | SIEN1_GEN));
169 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2, 0);
170 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, STEST3_TE);
171 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STIME0,
172 	    (0xb << STIME0_SEL_SHIFT));
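	/* our own SCSI ID, and the set of IDs we answer to when reselected */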
173 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCID,
174 	    sc->sc_link.adapter_target | SCID_RRE);
175 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_RESPID0,
176 	    1 << sc->sc_link.adapter_target);
177 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_DCNTL,
178 	    (sc->features & SF_CHIP_PF) ? DCNTL_COM | DCNTL_PFEN : DCNTL_COM);
179 	if (sc->features & SF_CHIP_AAIP)
180 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
181 		    SIOP_AIPCNTL1, AIPCNTL1_DIS);
182 
183 	/* enable clock doubler or quadrupler if appropriate */
184 	if (sc->features & (SF_CHIP_DBLR | SF_CHIP_QUAD)) {
185 		stest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3);
186 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
187 		    STEST1_DBLEN);
188 		if (sc->features & SF_CHIP_QUAD) {
189 			/* wait for the PLL to lock */
190 			while ((bus_space_read_1(sc->sc_rt, sc->sc_rh,
191 			    SIOP_STEST4) & STEST4_LOCK) == 0)
192 				delay(10);
193 		} else {
194 			/* data sheet says 20us - more won't hurt */
195 			delay(100);
196 		}
197 		/* halt scsi clock, select doubler/quad, restart clock */
198 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3,
199 		    stest3 | STEST3_HSC);
200 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1,
201 		    STEST1_DBLEN | STEST1_DBLSEL);
202 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST3, stest3);
203 	} else {
204 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST1, 0);
205 	}
206 	if (sc->features & SF_CHIP_FIFO)
207 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5,
208 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST5) |
209 		    CTEST5_DFS);
210 	if (sc->features & SF_CHIP_LED0) {
211 		/* Set GPIO0 as output if software LED control is required */
212 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL,
213 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_GPCNTL) & 0xfe);
214 	}
215 	if (sc->features & SF_BUS_ULTRA3) {
216 		/* reset SCNTL4 */
217 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4, 0);
218 	}
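	/* remember the current bus mode (SE, LVD or differential) from STEST4 */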
219 	sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
220 	    STEST4_MODE_MASK;
221 
222 	/*
223 	 * initialise the RAM. Without this we may get SCSI gross errors on
224 	 * the 1010.
225 	 */
226 	if (sc->features & SF_CHIP_RAM)
227 		bus_space_set_region_4(sc->sc_ramt, sc->sc_ramh,
228 			0, 0, sc->ram_size / 4);
229 	sc->sc_reset(sc);
230 }
231 
232 /* prepare tables before sending a cmd */
233 void
234 siop_setuptables(siop_cmd)
235 	struct siop_common_cmd *siop_cmd;
236 {
237 	int i;
238 	struct siop_common_softc *sc = siop_cmd->siop_sc;
239 	struct scsi_xfer *xs = siop_cmd->xs;
240 	int target = xs->sc_link->target;
241 	int lun = xs->sc_link->lun;
242 	int msgoffset = 1;
243 	int *targ_flags = &sc->targets[target]->flags;
244 	int quirks;
245 
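	/* the per-target id word caches the SCNTL3/SXFER/SCNTL4 values to use */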
246 	siop_cmd->siop_tables->id = siop_htoc32(sc, sc->targets[target]->id);
247 	memset(siop_cmd->siop_tables->msg_out, 0,
248 	    sizeof(siop_cmd->siop_tables->msg_out));
249 	/* request sense doesn't disconnect */
250 	if (siop_cmd->status == CMDST_SENSE)
251 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
252 	else if ((sc->features & SF_CHIP_GEBUG) &&
253 	    (sc->targets[target]->flags & TARF_ISWIDE) == 0)
254 		/*
255 		 * 1010 bug: it seems that the 1010 has problems with reselect
256 		 * when not in wide mode (it generates false SCSI gross errors).
257 		 * The FreeBSD sym driver has comments about it but their
258 		 * workaround (disable SCSI gross error reporting) doesn't
259 		 * work with my adapter. So disable disconnect when not
260 		 * wide.
261 		 */
262 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 0);
263 	else
264 		siop_cmd->siop_tables->msg_out[0] = MSG_IDENTIFY(lun, 1);
265 	siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, msgoffset);
266 	if (sc->targets[target]->status == TARST_ASYNC) {
267 		*targ_flags &= TARF_DT; /* Save TARF_DT 'cuz we don't set it here */
268 		quirks = xs->sc_link->quirks;
269 
270 		if ((quirks & SDEV_NOTAGS) == 0)
271 			*targ_flags |= TARF_TAG;
272 		if (((quirks & SDEV_NOWIDE) == 0) &&
273 		    (sc->features & SF_BUS_WIDE))
274 			*targ_flags |= TARF_WIDE;
275 		if ((quirks & SDEV_NOSYNC) == 0)
276 			*targ_flags |= TARF_SYNC;
277 
278 		if ((sc->features & SF_CHIP_GEBUG) &&
279 		    (*targ_flags & TARF_WIDE) == 0)
280 			/*
281 			 * 1010 workaround: can't do disconnect if not wide,
282 			 * so can't do tag
283 			 */
284 			*targ_flags &= ~TARF_TAG;
285 
286 		/* Safe to call siop_add_dev() multiple times */
287 		siop_add_dev((struct siop_softc *)sc, target, lun);
288 
289 		if ((*targ_flags & TARF_DT) &&
290 		    (sc->mode == STEST4_MODE_LVD)) {
291 			sc->targets[target]->status = TARST_PPR_NEG;
292 			siop_ppr_msg(siop_cmd, msgoffset, sc->dt_minsync,
293 			    sc->maxoff);
294 		} else if (*targ_flags & TARF_WIDE) {
295 			sc->targets[target]->status = TARST_WIDE_NEG;
296 			siop_wdtr_msg(siop_cmd, msgoffset,
297 			    MSG_EXT_WDTR_BUS_16_BIT);
298 		} else if (*targ_flags & TARF_SYNC) {
299 			sc->targets[target]->status = TARST_SYNC_NEG;
300 			siop_sdtr_msg(siop_cmd, msgoffset, sc->st_minsync,
301 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
302 		} else {
303 			sc->targets[target]->status = TARST_OK;
304 			siop_update_xfer_mode(sc, target);
305 		}
306 	} else if (sc->targets[target]->status == TARST_OK &&
307 	    (*targ_flags & TARF_TAG) &&
308 	    siop_cmd->status != CMDST_SENSE) {
309 		siop_cmd->flags |= CMDFL_TAG;
310 	}
311 	siop_cmd->siop_tables->status =
312 	    siop_htoc32(sc, SCSI_SIOP_NOSTATUS); /* set invalid status */
313 
314 	if ((xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) ||
315 	    siop_cmd->status == CMDST_SENSE) {
316 		bzero(siop_cmd->siop_tables->data,
317 		    sizeof(siop_cmd->siop_tables->data));
318 		for (i = 0; i < siop_cmd->dmamap_data->dm_nsegs; i++) {
319 			siop_cmd->siop_tables->data[i].count =
320 			    siop_htoc32(sc,
321 				siop_cmd->dmamap_data->dm_segs[i].ds_len);
322 			siop_cmd->siop_tables->data[i].addr =
323 			    siop_htoc32(sc,
324 				siop_cmd->dmamap_data->dm_segs[i].ds_addr);
325 		}
326 	}
327 }
328 
329 int
330 siop_wdtr_neg(siop_cmd)
331 	struct siop_common_cmd *siop_cmd;
332 {
333 	struct siop_common_softc *sc = siop_cmd->siop_sc;
334 	struct siop_common_target *siop_target = siop_cmd->siop_target;
335 	int target = siop_cmd->xs->sc_link->target;
336 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
337 
338 	if (siop_target->status == TARST_WIDE_NEG) {
339 		/* we initiated wide negotiation */
340 		switch (tables->msg_in[3]) {
341 		case MSG_EXT_WDTR_BUS_8_BIT:
342 			siop_target->flags &= ~TARF_ISWIDE;
343 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
344 			break;
345 		case MSG_EXT_WDTR_BUS_16_BIT:
346 			if (siop_target->flags & TARF_WIDE) {
347 				siop_target->flags |= TARF_ISWIDE;
348 				sc->targets[target]->id |= (SCNTL3_EWS << 24);
349 				break;
350 			}
351 		/* FALLTHROUGH */
352 		default:
353 			/*
354 			 * hmm, we got more than we can handle; this shouldn't
355 			 * happen. Reject, and stay async.
356 			 */
357 			siop_target->flags &= ~TARF_ISWIDE;
358 			siop_target->status = TARST_OK;
359 			siop_target->offset = siop_target->period = 0;
360 			siop_update_xfer_mode(sc, target);
361 			printf("%s: rejecting invalid wide negotiation from "
362 			    "target %d (%d)\n", sc->sc_dev.dv_xname, target,
363 			    tables->msg_in[3]);
364 			tables->t_msgout.count = siop_htoc32(sc, 1);
365 			tables->msg_out[0] = MSG_MESSAGE_REJECT;
366 			return SIOP_NEG_MSGOUT;
367 		}
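		/* the top byte of the cached id word is the SCNTL3 value */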
368 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
369 		bus_space_write_1(sc->sc_rt, sc->sc_rh,
370 		    SIOP_SCNTL3,
371 		    (sc->targets[target]->id >> 24) & 0xff);
372 		/* we now need to do sync */
373 		if (siop_target->flags & TARF_SYNC) {
374 			siop_target->status = TARST_SYNC_NEG;
375 			siop_sdtr_msg(siop_cmd, 0, sc->st_minsync,
376 			    (sc->maxoff > 31) ? 31 : sc->maxoff);
377 			return SIOP_NEG_MSGOUT;
378 		} else {
379 			siop_target->status = TARST_OK;
380 			siop_update_xfer_mode(sc, target);
381 			return SIOP_NEG_ACK;
382 		}
383 	} else {
384 		/* target initiated wide negotiation */
385 		if (tables->msg_in[3] >= MSG_EXT_WDTR_BUS_16_BIT
386 		    && (siop_target->flags & TARF_WIDE)) {
387 			siop_target->flags |= TARF_ISWIDE;
388 			sc->targets[target]->id |= SCNTL3_EWS << 24;
389 		} else {
390 			siop_target->flags &= ~TARF_ISWIDE;
391 			sc->targets[target]->id &= ~(SCNTL3_EWS << 24);
392 		}
393 		tables->id = siop_htoc32(sc, sc->targets[target]->id);
394 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
395 		    (sc->targets[target]->id >> 24) & 0xff);
396 		/*
397 		 * we did reset wide parameters, so fall back to async,
398 		 * but don't schedule a sync neg, target should initiate it
399 		 */
400 		siop_target->status = TARST_OK;
401 		siop_target->offset = siop_target->period = 0;
402 		siop_update_xfer_mode(sc, target);
403 		siop_wdtr_msg(siop_cmd, 0, (siop_target->flags & TARF_ISWIDE) ?
404 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT);
405 		return SIOP_NEG_MSGOUT;
406 	}
407 }
408 
409 int
410 siop_ppr_neg(siop_cmd)
411 	struct siop_common_cmd *siop_cmd;
412 {
413 	struct siop_common_softc *sc = siop_cmd->siop_sc;
414 	struct siop_common_target *siop_target = siop_cmd->siop_target;
415 	int target = siop_cmd->xs->sc_link->target;
416 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
417 	int sync, offset, options, scf = 0;
418 	int i;
419 
420 #ifdef DEBUG_NEG
421 	printf("%s: answer on ppr negotiation:", sc->sc_dev.dv_xname);
422 	for (i = 0; i < 8; i++)
423 		printf(" 0x%x", tables->msg_in[i]);
424 	printf("\n");
425 #endif
426 
427 	if (siop_target->status == TARST_PPR_NEG) {
428 		/* we initiated PPR negotiation */
429 		sync = tables->msg_in[3];
430 		offset = tables->msg_in[5];
431 		options = tables->msg_in[7];
432 		if (options != MSG_EXT_PPR_PROT_DT) {
433 			/* shouldn't happen */
434 			printf("%s: ppr negotiation for target %d: "
435 			    "no DT option\n", sc->sc_dev.dv_xname, target);
436 			siop_target->status = TARST_ASYNC;
437 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
438 			siop_target->offset = 0;
439 			siop_target->period = 0;
440 			goto reject;
441 		}
442 
443 		if (offset > sc->maxoff || sync < sc->dt_minsync ||
444 		    sync > sc->dt_maxsync) {
445 			printf("%s: ppr negotiation for target %d: "
446 			    "offset (%d) or sync (%d) out of range\n",
447 			    sc->sc_dev.dv_xname, target, offset, sync);
448 			/* should not happen */
449 			siop_target->status = TARST_ASYNC;
450 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
451 			siop_target->offset = 0;
452 			siop_target->period = 0;
453 			goto reject;
454 		} else {
455 			for (i = 0; i <
456 			    sizeof(dt_scf_period) / sizeof(dt_scf_period[0]);
457 			    i++) {
458 				if (sc->clock_period != dt_scf_period[i].clock)
459 					continue;
460 				if (dt_scf_period[i].period == sync) {
461 					/* ok, found it. we are now in sync. */
462 					siop_target->offset = offset;
463 					siop_target->period = sync;
464 					scf = dt_scf_period[i].scf;
465 					siop_target->flags |= TARF_ISDT;
466 				}
467 			}
468 			if ((siop_target->flags & TARF_ISDT) == 0) {
469 				printf("%s: ppr negotiation for target %d: "
470 				    "sync (%d) incompatible with adapter\n",
471 				    sc->sc_dev.dv_xname, target, sync);
472 				/*
473 				 * we didn't find it in our table: go async,
474 				 * send a reject msg and start SDTR/WDTR neg
475 				 */
476 				siop_target->status = TARST_ASYNC;
477 				siop_target->flags &= ~(TARF_DT | TARF_ISDT);
478 				siop_target->offset = 0;
479 				siop_target->period = 0;
480 				goto reject;
481 			}
482 		}
483 		if (tables->msg_in[6] != 1) {
484 			printf("%s: ppr negotiation for target %d: "
485 			    "transfer width (%d) incompatible with dt\n",
486 			    sc->sc_dev.dv_xname, target, tables->msg_in[6]);
487 			/* DT mode can only be done with wide transfers */
488 			siop_target->status = TARST_ASYNC;
489 			siop_target->flags &= ~(TARF_DT | TARF_ISDT);
490 			siop_target->offset = 0;
491 			siop_target->period = 0;
492 			goto reject;
493 		}
494 		siop_target->flags |= TARF_ISWIDE;
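		/*
		 * update the cached id word: SCNTL3 lives in byte 3, SXFER in
		 * byte 1 and SCNTL4 in byte 0 (they are written to the chip
		 * below).
		 */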
495 		sc->targets[target]->id |= (SCNTL3_EWS << 24);
496 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
497 		sc->targets[target]->id |= scf << (24 + SCNTL3_SCF_SHIFT);
498 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
499 		sc->targets[target]->id |=
500 		    (siop_target->offset & SXFER_MO_MASK) << 8;
501 		sc->targets[target]->id &= ~0xff;
502 		sc->targets[target]->id |= SCNTL4_U3EN;
503 		siop_target->status = TARST_OK;
504 		siop_update_xfer_mode(sc, target);
505 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
506 		    (sc->targets[target]->id >> 24) & 0xff);
507 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
508 		    (sc->targets[target]->id >> 8) & 0xff);
509 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL4,
510 		    sc->targets[target]->id & 0xff);
511 		return SIOP_NEG_ACK;
512 	} else {
513 		/* target initiated PPR negotiation, shouldn't happen */
514 		printf("%s: rejecting invalid PPR negotiation from "
515 		    "target %d\n", sc->sc_dev.dv_xname, target);
516 reject:
517 		tables->t_msgout.count = siop_htoc32(sc, 1);
518 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
519 		return SIOP_NEG_MSGOUT;
520 	}
521 }
522 
523 int
524 siop_sdtr_neg(siop_cmd)
525 	struct siop_common_cmd *siop_cmd;
526 {
527 	struct siop_common_softc *sc = siop_cmd->siop_sc;
528 	struct siop_common_target *siop_target = siop_cmd->siop_target;
529 	int target = siop_cmd->xs->sc_link->target;
530 	int sync, maxoffset, offset, i;
531 	int send_msgout = 0;
532 	struct siop_common_xfer *tables = siop_cmd->siop_tables;
533 
534 	/* limit to Ultra/2 parameters, need PPR for Ultra/3 */
535 	maxoffset = (sc->maxoff > 31) ? 31 : sc->maxoff;
536 
537 	sync = tables->msg_in[3];
538 	offset = tables->msg_in[4];
539 
540 	if (siop_target->status == TARST_SYNC_NEG) {
541 		/* we initiated sync negotiation */
542 		siop_target->status = TARST_OK;
543 #ifdef DEBUG
544 		printf("sdtr: sync %d offset %d\n", sync, offset);
545 #endif
546 		if (offset > maxoffset || sync < sc->st_minsync ||
547 			sync > sc->st_maxsync)
548 			goto reject;
549 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
550 		    i++) {
551 			if (sc->clock_period != scf_period[i].clock)
552 				continue;
553 			if (scf_period[i].period == sync) {
554 				/* ok, found it. we are now in sync. */
555 				siop_target->offset = offset;
556 				siop_target->period = sync;
557 				sc->targets[target]->id &=
558 				    ~(SCNTL3_SCF_MASK << 24);
559 				sc->targets[target]->id |= scf_period[i].scf
560 				    << (24 + SCNTL3_SCF_SHIFT);
561 				if (sync < 25 && /* Ultra */
562 				    (sc->features & SF_BUS_ULTRA3) == 0)
563 					sc->targets[target]->id |=
564 					    SCNTL3_ULTRA << 24;
565 				else
566 					sc->targets[target]->id &=
567 					    ~(SCNTL3_ULTRA << 24);
568 				sc->targets[target]->id &=
569 				    ~(SXFER_MO_MASK << 8);
570 				sc->targets[target]->id |=
571 				    (offset & SXFER_MO_MASK) << 8;
572 				sc->targets[target]->id &= ~0xff; /* scntl4 */
573 				goto end;
574 			}
575 		}
576 		/*
577 		 * we didn't find it in our table: go async and send a reject
578 		 * msg
579 		 */
580 reject:
581 		send_msgout = 1;
582 		tables->t_msgout.count = siop_htoc32(sc, 1);
583 		tables->msg_out[0] = MSG_MESSAGE_REJECT;
584 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
585 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
586 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
587 		sc->targets[target]->id &= ~0xff; /* scntl4 */
588 		siop_target->offset = siop_target->period = 0;
589 	} else { /* target initiated sync neg */
590 #ifdef DEBUG
591 		printf("sdtr (target): sync %d offset %d\n", sync, offset);
592 #endif
593 		if (offset == 0 || sync > sc->st_maxsync) { /* async */
594 			goto async;
595 		}
596 		if (offset > maxoffset)
597 			offset = maxoffset;
598 		if (sync < sc->st_minsync)
599 			sync = sc->st_minsync;
600 		/* look for sync period */
601 		for (i = 0; i < sizeof(scf_period) / sizeof(scf_period[0]);
602 		    i++) {
603 			if (sc->clock_period != scf_period[i].clock)
604 				continue;
605 			if (scf_period[i].period == sync) {
606 				/* ok, found it. we are now in sync. */
607 				siop_target->offset = offset;
608 				siop_target->period = sync;
609 				sc->targets[target]->id &=
610 				    ~(SCNTL3_SCF_MASK << 24);
611 				sc->targets[target]->id |= scf_period[i].scf
612 				    << (24 + SCNTL3_SCF_SHIFT);
613 				if (sync < 25 && /* Ultra */
614 				    (sc->features & SF_BUS_ULTRA3) == 0)
615 					sc->targets[target]->id |=
616 					    SCNTL3_ULTRA << 24;
617 				else
618 					sc->targets[target]->id &=
619 					    ~(SCNTL3_ULTRA << 24);
620 				sc->targets[target]->id &=
621 				    ~(SXFER_MO_MASK << 8);
622 				sc->targets[target]->id |=
623 				    (offset & SXFER_MO_MASK) << 8;
624 				sc->targets[target]->id &= ~0xff; /* scntl4 */
625 				siop_sdtr_msg(siop_cmd, 0, sync, offset);
626 				send_msgout = 1;
627 				goto end;
628 			}
629 		}
630 async:
631 		siop_target->offset = siop_target->period = 0;
632 		sc->targets[target]->id &= ~(SCNTL3_SCF_MASK << 24);
633 		sc->targets[target]->id &= ~(SCNTL3_ULTRA << 24);
634 		sc->targets[target]->id &= ~(SXFER_MO_MASK << 8);
635 		sc->targets[target]->id &= ~0xff; /* scntl4 */
636 		siop_sdtr_msg(siop_cmd, 0, 0, 0);
637 		send_msgout = 1;
638 	}
639 end:
640 	if (siop_target->status == TARST_OK)
641 		siop_update_xfer_mode(sc, target);
642 #ifdef DEBUG
643 	printf("id now 0x%x\n", sc->targets[target]->id);
644 #endif
645 	tables->id = siop_htoc32(sc, sc->targets[target]->id);
646 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL3,
647 	    (sc->targets[target]->id >> 24) & 0xff);
648 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SXFER,
649 	    (sc->targets[target]->id >> 8) & 0xff);
650 	if (send_msgout) {
651 		return SIOP_NEG_MSGOUT;
652 	} else {
653 		return SIOP_NEG_ACK;
654 	}
655 }
656 
657 void
658 siop_sdtr_msg(siop_cmd, offset, ssync, soff)
659 	struct siop_common_cmd *siop_cmd;
660 	int offset;
661 	int ssync, soff;
662 {
663 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
664 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_SDTR_LEN;
665 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_SDTR;
666 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
667 	siop_cmd->siop_tables->msg_out[offset + 4] = soff;
668 	siop_cmd->siop_tables->t_msgout.count =
669 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_SDTR_LEN + 2);
670 }
671 
672 void
673 siop_wdtr_msg(siop_cmd, offset, wide)
674 	struct siop_common_cmd *siop_cmd;
675 	int offset;
676 	int wide;
677 {
678 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
679 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_WDTR_LEN;
680 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_WDTR;
681 	siop_cmd->siop_tables->msg_out[offset + 3] = wide;
682 	siop_cmd->siop_tables->t_msgout.count =
683 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_WDTR_LEN + 2);
684 }
685 
686 void
687 siop_ppr_msg(siop_cmd, offset, ssync, soff)
688 	struct siop_common_cmd *siop_cmd;
689 	int offset;
690 	int ssync, soff;
691 {
692 	siop_cmd->siop_tables->msg_out[offset + 0] = MSG_EXTENDED;
693 	siop_cmd->siop_tables->msg_out[offset + 1] = MSG_EXT_PPR_LEN;
694 	siop_cmd->siop_tables->msg_out[offset + 2] = MSG_EXT_PPR;
695 	siop_cmd->siop_tables->msg_out[offset + 3] = ssync;
696 	siop_cmd->siop_tables->msg_out[offset + 4] = 0; /* reserved */
697 	siop_cmd->siop_tables->msg_out[offset + 5] = soff;
698 	siop_cmd->siop_tables->msg_out[offset + 6] = 1; /* wide */
699 	siop_cmd->siop_tables->msg_out[offset + 7] = MSG_EXT_PPR_PROT_DT;
700 	siop_cmd->siop_tables->t_msgout.count =
701 	    siop_htoc32(siop_cmd->siop_sc, offset + MSG_EXT_PPR_LEN + 2);
702 }
703 
704 void
705 siop_ma(siop_cmd)
706 	struct siop_common_cmd *siop_cmd;
707 {
708 	int offset, dbc, sstat;
709 	struct siop_common_softc *sc = siop_cmd->siop_sc;
710 	scr_table_t *table; /* table with partial xfer */
711 
712 	/*
713 	 * compute how much of the current table didn't get handled when
714 	 * a phase mismatch occurs
715 	 */
716 	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
717 	    == 0)
718 	    return; /* no valid data transfer */
719 
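	/* SCRATCHA + 1 holds the index of the data table the script was using */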
720 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
721 	if (offset >= SIOP_NSG) {
722 		printf("%s: bad offset in siop_ma (%d)\n",
723 		    sc->sc_dev.dv_xname, offset);
724 		return;
725 	}
726 	table = &siop_cmd->siop_tables->data[offset];
727 #ifdef DEBUG_DR
728 	printf("siop_ma: offset %d count=%d addr=0x%x ", offset,
729 	    table->count, table->addr);
730 #endif
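	/* the low 24 bits of DBC hold the untransferred part of the current move */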
731 	dbc = bus_space_read_4(sc->sc_rt, sc->sc_rh, SIOP_DBC) & 0x00ffffff;
732 	if (siop_cmd->xs->flags & SCSI_DATA_OUT) {
733 		if (sc->features & SF_CHIP_DFBC) {
734 			dbc +=
735 			    bus_space_read_2(sc->sc_rt, sc->sc_rh, SIOP_DFBC);
736 		} else {
737 			/* need to account for stale data in the FIFO */
738 			int dfifo =
739 			    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_DFIFO);
740 			if (sc->features & SF_CHIP_FIFO) {
741 				dfifo |= (bus_space_read_1(sc->sc_rt, sc->sc_rh,
742 				    SIOP_CTEST5) & CTEST5_BOMASK) << 8;
743 				dbc += (dfifo - (dbc & 0x3ff)) & 0x3ff;
744 			} else {
745 				dbc += (dfifo - (dbc & 0x7f)) & 0x7f;
746 			}
747 		}
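		/* data still held in the SCSI output latch/register was not sent */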
748 		sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SSTAT0);
749 		if (sstat & SSTAT0_OLF)
750 			dbc++;
751 		if ((sstat & SSTAT0_ORF) && (sc->features & SF_CHIP_DFBC) == 0)
752 			dbc++;
753 		if (siop_cmd->siop_target->flags & TARF_ISWIDE) {
754 			sstat = bus_space_read_1(sc->sc_rt, sc->sc_rh,
755 			    SIOP_SSTAT2);
756 			if (sstat & SSTAT2_OLF1)
757 				dbc++;
758 			if ((sstat & SSTAT2_ORF1) &&
759 			    (sc->features & SF_CHIP_DFBC) == 0)
760 				dbc++;
761 		}
762 		/* clear the FIFO */
763 		bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
764 		    bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) |
765 		    CTEST3_CLF);
766 	}
767 	siop_cmd->flags |= CMDFL_RESID;
768 	siop_cmd->resid = dbc;
769 }
770 
771 void
772 siop_sdp(siop_cmd, offset)
773 	struct siop_common_cmd *siop_cmd;
774 	int offset;
775 {
776 	struct siop_common_softc *sc = siop_cmd->siop_sc;
777 	scr_table_t *table;
778 
779 	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
780 	    == 0)
781 	    return; /* no data pointers to save */
782 
783 	/*
784 	 * offset == SIOP_NSG may be a valid condition if we get a Save data
785 	 * pointer when the xfer is done. Just ignore the Save data pointer
786 	 * in this case
787 	 */
788 	if (offset == SIOP_NSG)
789 		return;
790 #ifdef DIAGNOSTIC
791 	if (offset > SIOP_NSG) {
792 		sc_print_addr(siop_cmd->xs->sc_link);
793 		printf("offset %d > %d\n", offset, SIOP_NSG);
794 		panic("siop_sdp: offset");
795 	}
796 #endif
797 	/*
798 	 * Save data pointer. We do this by adjusting the tables to point
799 	 * at the beginning of the data not yet transferred.
800 	 * offset points to the first table with untransferred data.
801 	 */
802 
803 	/*
804 	 * before doing that we decrease resid by the amount of data which
805 	 * has already been transferred.
806 	 */
807 	siop_update_resid(siop_cmd, offset);
808 
809 	/*
810 	 * First let's see if we have a resid from a phase mismatch. If so,
811 	 * we have to adjust the table at offset to remove transferred data.
812 	 */
813 	if (siop_cmd->flags & CMDFL_RESID) {
814 		siop_cmd->flags &= ~CMDFL_RESID;
815 		table = &siop_cmd->siop_tables->data[offset];
816 		/* "cut" already transferred data from this table */
817 		table->addr =
818 		    siop_htoc32(sc, siop_ctoh32(sc, table->addr) +
819 		    siop_ctoh32(sc, table->count) - siop_cmd->resid);
820 		table->count = siop_htoc32(sc, siop_cmd->resid);
821 	}
822 
823 	/*
824 	 * now we can remove entries which have been transferred.
825 	 * We just move the entries with data left to the beginning of the
826 	 * tables.
827 	 */
828 	bcopy(&siop_cmd->siop_tables->data[offset],
829 	    &siop_cmd->siop_tables->data[0],
830 	    (SIOP_NSG - offset) * sizeof(scr_table_t));
831 }
832 
833 void
834 siop_update_resid(siop_cmd, offset)
835 	struct siop_common_cmd *siop_cmd;
836 	int offset;
837 {
838 	struct siop_common_softc *sc = siop_cmd->siop_sc;
839 	scr_table_t *table;
840 	int i;
841 
842 	if ((siop_cmd->xs->flags & (SCSI_DATA_OUT | SCSI_DATA_IN))
843 	    == 0)
844 	    return; /* no data to transfer */
845 
846 	/*
847 	 * update resid. First account for the table entries which have
848 	 * been fully completed.
849 	 */
850 	for (i = 0; i < offset; i++)
851 		siop_cmd->xs->resid -=
852 		    siop_ctoh32(sc, siop_cmd->siop_tables->data[i].count);
853 	/*
854 	 * if CMDFL_RESID is set, the last table (pointed to by offset) is a
855 	 * partial transfer. If not, offset points to the entry following
856 	 * the last full transfer.
857 	 */
858 	if (siop_cmd->flags & CMDFL_RESID) {
859 		table = &siop_cmd->siop_tables->data[offset];
860 		siop_cmd->xs->resid -=
861 		    siop_ctoh32(sc, table->count) - siop_cmd->resid;
862 	}
863 }
864 
865 int
866 siop_iwr(siop_cmd)
867 	struct siop_common_cmd *siop_cmd;
868 {
869 	int offset;
870 	scr_table_t *table; /* table with IWR */
871 	struct siop_common_softc *sc = siop_cmd->siop_sc;
872 	/* handle ignore wide residue messages */
873 
874 	/* if target isn't wide, reject */
875 	if ((siop_cmd->siop_target->flags & TARF_ISWIDE) == 0) {
876 		siop_cmd->siop_tables->t_msgout.count = siop_htoc32(sc, 1);
877 		siop_cmd->siop_tables->msg_out[0] = MSG_MESSAGE_REJECT;
878 		return SIOP_NEG_MSGOUT;
879 	}
880 	/* get index of current command in table */
881 	offset = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCRATCHA + 1);
882 	/*
883 	 * if the current table did complete, we're now pointing at the
884 	 * next one. Go back one if we didn't see a phase mismatch.
885 	 */
886 	if ((siop_cmd->flags & CMDFL_RESID) == 0)
887 		offset--;
888 	table = &siop_cmd->siop_tables->data[offset];
889 
890 	if ((siop_cmd->flags & CMDFL_RESID) == 0) {
891 		if (siop_ctoh32(sc, table->count) & 1) {
892 			/* we really got the number of bytes we expected */
893 			return SIOP_NEG_ACK;
894 		} else {
895 			/*
896 			 * now we really had a short xfer, by one byte.
897 			 * handle it just as if we had a phase mismatch
898 			 * (there is a resid of one for this table).
899 			 * Update scratcha1 to reflect the fact that
900 			 * this xfer isn't complete.
901 			 */
902 			siop_cmd->flags |= CMDFL_RESID;
903 			siop_cmd->resid = 1;
904 			bus_space_write_1(sc->sc_rt, sc->sc_rh,
905 			    SIOP_SCRATCHA + 1, offset);
906 			return SIOP_NEG_ACK;
907 		}
908 	} else {
909 		/*
910 		 * we already have a short xfer for this table; it's
911 		 * just one byte less than we thought it was
912 		 */
913 		siop_cmd->resid--;
914 		return SIOP_NEG_ACK;
915 	}
916 }
917 
918 void
919 siop_clearfifo(sc)
920 	struct siop_common_softc *sc;
921 {
922 	int timeout = 0;
923 	int ctest3 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3);
924 
925 #ifdef DEBUG_INTR
926 	printf("DMA FIFO not empty!\n");
927 #endif
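	/* set CLF and wait for the chip to clear it once the FIFO is flushed */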
928 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
929 	    ctest3 | CTEST3_CLF);
930 	while ((bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3) &
931 	    CTEST3_CLF) != 0) {
932 		delay(1);
933 		if (++timeout > 1000) {
934 			printf("clear fifo failed\n");
935 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_CTEST3,
936 			    bus_space_read_1(sc->sc_rt, sc->sc_rh,
937 			    SIOP_CTEST3) & ~CTEST3_CLF);
938 			return;
939 		}
940 	}
941 }
942 
943 int
944 siop_modechange(sc)
945 	struct siop_common_softc *sc;
946 {
947 	int retry;
948 	int sist0, sist1, stest2;
949 	for (retry = 0; retry < 5; retry++) {
950 		/*
951 		 * the datasheet says to wait 100ms and re-read SIST1,
952 		 * to check that DIFFSENSE is stable.
953 		 * We may delay() 5 times for 100ms at interrupt time;
954 		 * hopefully this will not happen often.
955 		 */
956 		delay(100000);
957 		sist0 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST0);
958 		sist1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SIST1);
959 		if (sist1 & SIEN1_SBMC)
960 			continue; /* we got an irq again */
961 		sc->mode = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST4) &
962 		    STEST4_MODE_MASK;
963 		stest2 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2);
964 		switch(sc->mode) {
965 		case STEST4_MODE_DIF:
966 			printf("%s: switching to differential mode\n",
967 			    sc->sc_dev.dv_xname);
968 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
969 			    stest2 | STEST2_DIF);
970 			break;
971 		case STEST4_MODE_SE:
972 			printf("%s: switching to single-ended mode\n",
973 			    sc->sc_dev.dv_xname);
974 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
975 			    stest2 & ~STEST2_DIF);
976 			break;
977 		case STEST4_MODE_LVD:
978 			printf("%s: switching to LVD mode\n",
979 			    sc->sc_dev.dv_xname);
980 			bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_STEST2,
981 			    stest2 & ~STEST2_DIF);
982 			break;
983 		default:
984 			printf("%s: invalid SCSI mode 0x%x\n",
985 			    sc->sc_dev.dv_xname, sc->mode);
986 			return 0;
987 		}
988 		return 1;
989 	}
990 	printf("%s: timeout waiting for DIFFSENSE to stabilise\n",
991 	    sc->sc_dev.dv_xname);
992 	return 0;
993 }
994 
995 void
996 siop_resetbus(sc)
997 	struct siop_common_softc *sc;
998 {
999 	int scntl1;
1000 	scntl1 = bus_space_read_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1);
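	/* assert SCSI RST on the bus, hold it briefly, then release it */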
1001 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1,
1002 	    scntl1 | SCNTL1_RST);
1003 	/* minimum 25 us, more time won't hurt */
1004 	delay(100);
1005 	bus_space_write_1(sc->sc_rt, sc->sc_rh, SIOP_SCNTL1, scntl1);
1006 }
1007 
1008 void
1009 siop_update_xfer_mode(sc, target)
1010         struct siop_common_softc *sc;
1011 	struct siop_common_softc *sc;
1012 	int target;
1013 	struct siop_common_target *siop_target;
1014 
1015 	siop_target = sc->targets[target];
1016 
1017 	printf("%s: target %d now using %s%s%d bit ",
1018 	    sc->sc_dev.dv_xname, target,
1019 	    (siop_target->flags & TARF_TAG) ? "tagged " : "",
1020 	    (siop_target->flags & TARF_ISDT) ? "DT " : "",
1021 	    (siop_target->flags & TARF_ISWIDE) ? 16 : 8);
1022 
1023 	if (siop_target->offset == 0)
1024 		printf("async ");
1025 	else {
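		/* period holds the negotiated transfer period factor; print the matching rate */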
1026 		switch (siop_target->period) {
1027 		case 9: /*   12.5ns cycle */
1028 			printf("80.0");
1029 			break;
1030 		case 10: /*  25  ns cycle */
1031 			printf("40.0");
1032 			break;
1033 		case 12: /*  48  ns cycle */
1034 			printf("20.0");
1035 			break;
1036 		case 18: /*  72  ns cycle */
1037 			printf("13.3");
1038 			break;
1039 		case 25: /* 100  ns cycle */
1040 			printf("10.0");
1041 			break;
1042 		case 37: /* 148  ns cycle */
1043 			printf("6.67");
1044 			break;
1045 		case 50: /* 200  ns cycle */
1046 			printf("5.0");
1047 			break;
1048 		case 75: /* 300  ns cycle */
1049 			printf("3.33");
1050 			break;
1051 		default:
1052 			printf("??");
1053 			break;
1054 		}
1055 		printf(" MHz %d REQ/ACK offset ", siop_target->offset);
1056 	}
1057 
1058 	printf("xfers\n");
1059 
1060 	if ((sc->features & SF_CHIP_GEBUG) &&
1061 	    (siop_target->flags & TARF_ISWIDE) == 0)
1062 		/* 1010 workaround: can't do disconnect if not wide, so can't do tag */
1063 		siop_target->flags &= ~TARF_TAG;
1064 }
1065