xref: /openbsd-src/sys/dev/ic/gdt_common.c (revision 505ee9ea3b177e2387d907a91ca7da069f3f14d8)
1 /*	$OpenBSD: gdt_common.c,v 1.76 2020/07/20 14:41:13 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This driver would not have been written without the hardware donations
29  * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45 
46 #include <dev/biovar.h>
47 #include <dev/ic/gdtreg.h>
48 #include <dev/ic/gdtvar.h>
49 
50 #include "bio.h"
51 
52 #ifdef GDT_DEBUG
53 int gdt_maxcmds = GDT_MAXCMDS;
54 #undef GDT_MAXCMDS
55 #define GDT_MAXCMDS gdt_maxcmds
56 #endif
57 
58 #define GDT_DRIVER_VERSION 1
59 #define GDT_DRIVER_SUBVERSION 2
60 
61 int	gdt_async_event(struct gdt_softc *, int);
62 void	gdt_chain(struct gdt_softc *);
63 void	gdt_clear_events(struct gdt_softc *);
64 void	gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t);
65 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
66 void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
67 void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
68 void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
69 int	gdt_exec_ccb(struct gdt_ccb *);
70 void	gdt_ccb_free(void *, void *);
71 void   *gdt_ccb_alloc(void *);
72 void	gdt_internal_cache_cmd(struct scsi_xfer *);
73 int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
74     u_int32_t, u_int32_t, u_int32_t);
75 #if NBIO > 0
76 int	gdt_ioctl(struct device *, u_long, caddr_t);
77 int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
78 int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
79 int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
80 int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
81 int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
82 #endif /* NBIO > 0 */
83 void	gdt_scsi_cmd(struct scsi_xfer *);
84 void	gdt_start_ccbs(struct gdt_softc *);
85 int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
86     struct scsi_xfer *);
87 void	gdt_timeout(void *);
88 int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
89 void	gdt_watchdog(void *);
90 
91 struct cfdriver gdt_cd = {
92 	NULL, "gdt", DV_DULL
93 };
94 
95 struct scsi_adapter gdt_switch = {
96 	gdt_scsi_cmd, NULL, NULL, NULL, NULL
97 };
98 
99 int gdt_cnt = 0;
100 u_int8_t gdt_polling;
101 u_int8_t gdt_from_wait;
102 struct gdt_softc *gdt_wait_gdt;
103 int	gdt_wait_index;
104 #ifdef GDT_DEBUG
105 int	gdt_debug = GDT_DEBUG;
106 #endif
107 
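/*
 * Common controller attachment, shared by the bus front-ends.  Allocates
 * and maps the DMA scratch buffer, sets up the ccb free list and iopool,
 * initializes the screen, cache and raw firmware services, probes the bus
 * configuration, cache parameters and board info, scans the cache (host)
 * drives and finally attaches the scsibus.  Runs polled; returns 0 on
 * success and 1 on failure.
 */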
108 int
109 gdt_attach(struct gdt_softc *sc)
110 {
111 	struct scsibus_attach_args saa;
112 	u_int16_t cdev_cnt;
113 	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
114 
115 	gdt_polling = 1;
116 	gdt_from_wait = 0;
117 
118 	if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
119 	    &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
120 	    panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
121 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
122 	    GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
123 	    panic("%s: bus_dmamem_map failed", DEVNAME(sc));
124 
125 	gdt_clear_events(sc);
126 
127 	TAILQ_INIT(&sc->sc_free_ccb);
128 	TAILQ_INIT(&sc->sc_ccbq);
129 	TAILQ_INIT(&sc->sc_ucmdq);
130 	SIMPLEQ_INIT(&sc->sc_queue);
131 
132 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
133 	scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free);
134 
135 	/* Initialize the ccbs */
136 	for (i = 0; i < GDT_MAXCMDS; i++) {
137 		sc->sc_ccbs[i].gc_cmd_index = i + 2;
138 		error = bus_dmamap_create(sc->sc_dmat,
139 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
140 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
141 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
142 		    &sc->sc_ccbs[i].gc_dmamap_xfer);
143 		if (error) {
144 			printf("%s: cannot create ccb dmamap (%d)",
145 			    DEVNAME(sc), error);
146 			return (1);
147 		}
148 		(void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
149 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
150 		    gc_chain);
151 	}
152 
153 	if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
154 		printf("screen service initialization error %d\n",
155 		     sc->sc_status);
156 		return (1);
157 	}
158 
159 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
160 	    0)) {
161 		printf("cache service initialization error %d\n",
162 		    sc->sc_status);
163 		return (1);
164 	}
165 
166 	cdev_cnt = (u_int16_t)sc->sc_info;
167 
168 	/* Detect number of busses */
169 	gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
170 	sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
171 	sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
172 	sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
173 	gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
174 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
175 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
176 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
177 		sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
178 		for (i = 0; i < sc->sc_bus_cnt; i++) {
179 			id = sc->sc_scratch[GDT_IOC_HDR_SZ +
180 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
181 			sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
182 		}
183 
184 	} else {
185 		/* New method failed, use fallback. */
186 		for (i = 0; i < GDT_MAXBUS; i++) {
187 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
188 			if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
189 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
190 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
191 			    GDT_GETCH_SZ)) {
192 				if (i == 0) {
193 					printf("cannot get channel count, "
194 					    "error %d\n", sc->sc_status);
195 					return (1);
196 				}
197 				break;
198 			}
199 			sc->sc_bus_id[i] =
200 			    (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
201 			    sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
202 		}
203 		sc->sc_bus_cnt = i;
204 	}
205 
206 	/* Read cache configuration */
207 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
208 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
209 		printf("cannot get cache info, error %d\n", sc->sc_status);
210 		return (1);
211 	}
212 	sc->sc_cpar.cp_version =
213 	    gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
214 	sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
215 	sc->sc_cpar.cp_strategy =
216 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
217 	sc->sc_cpar.cp_write_back =
218 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
219 	sc->sc_cpar.cp_block_size =
220 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
221 
222 	/* Read board information and features */
223 	sc->sc_more_proc = 0;
224 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
225 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
226 		/* XXX A lot of these assignments can probably go later */
227 		sc->sc_binfo.bi_ser_no =
228 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
229 		bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
230 		    sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
231 		sc->sc_binfo.bi_ep_flags =
232 		    gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
233 		sc->sc_binfo.bi_proc_id =
234 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
235 		sc->sc_binfo.bi_memsize =
236 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
237 		sc->sc_binfo.bi_mem_banks =
238 		    sc->sc_scratch[GDT_BINFO_MEM_BANKS];
239 		sc->sc_binfo.bi_chan_type =
240 		    sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
241 		sc->sc_binfo.bi_chan_count =
242 		    sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
243 		sc->sc_binfo.bi_rdongle_pres =
244 		    sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
245 		sc->sc_binfo.bi_epr_fw_ver =
246 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
247 		sc->sc_binfo.bi_upd_fw_ver =
248 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
249 		sc->sc_binfo.bi_upd_revision =
250 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
251 		bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
252 		    sc->sc_binfo.bi_type_string,
253 		    sizeof sc->sc_binfo.bi_type_string);
254 		bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
255 		    sc->sc_binfo.bi_raid_string,
256 		    sizeof sc->sc_binfo.bi_raid_string);
257 		sc->sc_binfo.bi_update_pres =
258 		    sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
259 		sc->sc_binfo.bi_xor_pres =
260 		    sc->sc_scratch[GDT_BINFO_XOR_PRES];
261 		sc->sc_binfo.bi_prom_type =
262 		    sc->sc_scratch[GDT_BINFO_PROM_TYPE];
263 		sc->sc_binfo.bi_prom_count =
264 		    sc->sc_scratch[GDT_BINFO_PROM_COUNT];
265 		sc->sc_binfo.bi_dup_pres =
266 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
267 		sc->sc_binfo.bi_chan_pres =
268 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
269 		sc->sc_binfo.bi_mem_pres =
270 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
271 		sc->sc_binfo.bi_ft_bus_system =
272 		    sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
273 		sc->sc_binfo.bi_subtype_valid =
274 		    sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
275 		sc->sc_binfo.bi_board_subtype =
276 		    sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
277 		sc->sc_binfo.bi_rampar_pres =
278 		    sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
279 
280 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
281 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
282 			sc->sc_bfeat.bf_chaining =
283 			    sc->sc_scratch[GDT_BFEAT_CHAINING];
284 			sc->sc_bfeat.bf_striping =
285 			    sc->sc_scratch[GDT_BFEAT_STRIPING];
286 			sc->sc_bfeat.bf_mirroring =
287 			    sc->sc_scratch[GDT_BFEAT_MIRRORING];
288 			sc->sc_bfeat.bf_raid =
289 			    sc->sc_scratch[GDT_BFEAT_RAID];
290 			sc->sc_more_proc = 1;
291 		}
292 	} else {
293 		/* XXX Not implemented yet */
294 	}
295 
296 	/* Read more information */
297 	if (sc->sc_more_proc) {
298 		int bus, j;
299 		/* physical drives, channel addresses */
300 		/* step 1: get magical bus number from firmware */
301 		gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
302 		sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
303 		sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
304 		sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
305 		gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
306 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
307 		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
308 		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
309 			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
310 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
311 				sc->sc_raw[bus].ra_address =
312 				    gdt_dec32(sc->sc_scratch +
313 				    GDT_IOC_HDR_SZ +
314 				    GDT_IOC_SZ * bus +
315 				    GDT_IOC_ADDRESS);
316 				sc->sc_raw[bus].ra_local_no =
317 				    gdt_dec8(sc->sc_scratch +
318 				    GDT_IOC_HDR_SZ +
319 				    GDT_IOC_SZ * bus +
320 				    GDT_IOC_LOCAL_NO);
321 				GDT_DPRINTF(GDT_D_INFO, (
322 				    "bus: %d address: %x local: %x\n",
323 				    bus,
324 				    sc->sc_raw[bus].ra_address,
325 				    sc->sc_raw[bus].ra_local_no));
326 			}
327 		} else {
328 			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
329 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
330 				sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
331 				sc->sc_raw[bus].ra_local_no = bus;
332 				GDT_DPRINTF(GDT_D_INFO, (
333 				    "bus: %d address: %x local: %x\n",
334 				    bus,
335 				    sc->sc_raw[bus].ra_address,
336 				    sc->sc_raw[bus].ra_local_no));
337 			}
338 		}
339 		/* step 2: use magical bus number to get nr of phys disks */
340 		for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
341 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
342 			    sc->sc_raw[bus].ra_local_no);
343 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
344 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
345 			    sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
346 			    GDT_GETCH_SZ)) {
347 				sc->sc_raw[bus].ra_phys_cnt =
348 				    gdt_dec32(sc->sc_scratch +
349 				    GDT_GETCH_DRIVE_CNT);
350 				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
351 				    bus, sc->sc_raw[bus].ra_phys_cnt));
352 			}
353 
354 			/* step 3: get scsi disk nr */
355 			if (sc->sc_raw[bus].ra_phys_cnt > 0) {
356 				gdt_enc32(sc->sc_scratch +
357 				    GDT_GETSCSI_CHAN,
358 				    sc->sc_raw[bus].ra_local_no);
359 				gdt_enc32(sc->sc_scratch +
360 				    GDT_GETSCSI_CNT,
361 				    sc->sc_raw[bus].ra_phys_cnt);
362 				if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
363 				    GDT_IOCTL,
364 				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
365 				    sc->sc_raw[bus].ra_address |
366 				    GDT_INVALID_CHANNEL,
367 				    GDT_GETSCSI_SZ))
368 					for (j = 0;
369 					    j < sc->sc_raw[bus].ra_phys_cnt;
370 					    j++) {
371 						sc->sc_raw[bus].ra_id_list[j] =
372 						    gdt_dec32(sc->sc_scratch +
373 						    GDT_GETSCSI_LIST +
374 						    GDT_GETSCSI_LIST_SZ * j);
375 						GDT_DPRINTF(GDT_D_INFO,
376 						    ("  diskid: %d\n",
377 						    sc->sc_raw[bus].ra_id_list[j]));
378 					}
379 				else
380 					sc->sc_raw[bus].ra_phys_cnt = 0;
381 			}
382 			/* add found disks to grand total */
383 			sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
384 		}
385 	} /* if (sc->sc_more_proc) */
386 
387 	if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
388 		printf("raw service initialization error %d\n",
389 		    sc->sc_status);
390 		return (1);
391 	}
392 
393 	/* Set/get features raw service (scatter/gather) */
394 	sc->sc_raw_feat = 0;
395 	if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
396 	    GDT_SCATTER_GATHER, 0, 0))
397 		if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
398 		    0, 0))
399 			sc->sc_raw_feat = sc->sc_info;
400 
401 	/* Set/get features cache service (scatter/gather) */
402 	sc->sc_cache_feat = 0;
403 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
404 	    GDT_SCATTER_GATHER, 0))
405 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
406 		    0))
407 			sc->sc_cache_feat = sc->sc_info;
408 
409 	/* XXX Linux reserve drives here, potentially */
410 
411 	sc->sc_ndevs = 0;
412 	/* Scan for cache devices */
413 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
414 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
415 		    0)) {
416 			sc->sc_hdr[i].hd_present = 1;
417 			sc->sc_hdr[i].hd_size = sc->sc_info;
418 
419 			if (sc->sc_hdr[i].hd_size > 0)
420 				sc->sc_ndevs++;
421 
422 			/*
423 			 * Evaluate mapping (sectors per head, heads per cyl)
424 			 */
425 			sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
426 			if (sc->sc_info2 == 0)
427 				gdt_eval_mapping(sc->sc_hdr[i].hd_size,
428 				    &drv_cyls, &drv_hds, &drv_secs);
429 			else {
430 				drv_hds = sc->sc_info2 & 0xff;
431 				drv_secs = (sc->sc_info2 >> 8) & 0xff;
432 				drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
433 				    drv_secs;
434 			}
435 			sc->sc_hdr[i].hd_heads = drv_hds;
436 			sc->sc_hdr[i].hd_secs = drv_secs;
437 			/* Round the size */
438 			sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
439 
440 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
441 			    GDT_DEVTYPE, i, 0, 0))
442 				sc->sc_hdr[i].hd_devtype = sc->sc_info;
443 		}
444 
445 	printf("dpmem %llx %d-bus %d cache device%s\n",
446 	    (long long)sc->sc_dpmembase,
447 	    sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
448 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
449 	    DEVNAME(sc), sc->sc_cpar.cp_version,
450 	    sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
451 	    sc->sc_cpar.cp_write_back ? "on" : "off",
452 	    sc->sc_cpar.cp_block_size);
453 #if 1
454 	printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
455 	    sc->sc_raw_feat, sc->sc_cache_feat);
456 #endif
457 
458 #if NBIO > 0
459 	if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
460 		panic("%s: controller registration failed", DEVNAME(sc));
461 #endif
462 	gdt_cnt++;
463 
464 	saa.saa_adapter_softc = sc;
465 	saa.saa_adapter = &gdt_switch;
466 	saa.saa_adapter_buswidth =
467 	    (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
468 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
469 	saa.saa_luns = 8;
470 	if (sc->sc_ndevs == 0)
471 		saa.saa_openings = 0;
472 	else
473 		saa.saa_openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
474 		    sc->sc_ndevs;
475 	saa.saa_pool = &sc->sc_iopool;
476 	saa.saa_quirks = saa.saa_flags = 0;
477 	saa.saa_wwpn = saa.saa_wwnn = 0;
478 
479 	config_found(&sc->sc_dev, &saa, scsiprint);
480 
481 	gdt_polling = 0;
482 	return (0);
483 }
484 
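/*
 * Derive a C/H/S geometry for a host drive from its size in sectors; only
 * used when the firmware did not report one (sc_info2 == 0).  Try 64 heads
 * by 32 sectors first, then 127 by 63, and fall back to the big geometry
 * (GDT_BIGHEADS * GDT_BIGSECS) once the cylinder count would exceed
 * GDT_MAXCYLS.
 */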
485 void
486 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
487 {
488 	*cyls = size / GDT_HEADS / GDT_SECS;
489 	if (*cyls < GDT_MAXCYLS) {
490 		*heads = GDT_HEADS;
491 		*secs = GDT_SECS;
492 	} else {
493 		/* Too high for 64 * 32 */
494 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
495 		if (*cyls < GDT_MAXCYLS) {
496 			*heads = GDT_MEDHEADS;
497 			*secs = GDT_MEDSECS;
498 		} else {
499 			/* Too high for 127 * 63 */
500 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
501 			*heads = GDT_BIGHEADS;
502 			*secs = GDT_BIGSECS;
503 		}
504 	}
505 }
506 
507 /*
508  * Insert a command into the driver queue, either at the front or at the tail.
509  * It's ok to overload the freelist link as these structures are never on
510  * the freelist at this time.
511  */
512 void
513 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
514 {
515 	if (infront)
516 		SIMPLEQ_INSERT_HEAD(&sc->sc_queue, xs, xfer_list);
517 	else
518 		SIMPLEQ_INSERT_TAIL(&sc->sc_queue, xs, xfer_list);
519 }
520 
521 /*
522  * Pull a command off the front of the driver queue.
523  */
524 struct scsi_xfer *
525 gdt_dequeue(struct gdt_softc *sc)
526 {
527 	struct scsi_xfer *xs;
528 
529 	xs = SIMPLEQ_FIRST(&sc->sc_queue);
530 	if (xs != NULL)
531 		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, xfer_list);
532 
533 	return (xs);
534 }
535 
536 /*
537  * Start a SCSI operation on a cache device.
538  * XXX Polled operation is not yet complete.  What kind of locking do we need?
539  */
540 void
541 gdt_scsi_cmd(struct scsi_xfer *xs)
542 {
543 	struct scsi_link *link = xs->sc_link;
544 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
545 	u_int8_t target = link->target;
546 	struct gdt_ccb *ccb;
547 	u_int32_t blockno, blockcnt;
548 	struct scsi_rw *rw;
549 	struct scsi_rw_big *rwb;
550 	bus_dmamap_t xfer;
551 	int error;
552 	int s;
553 	int polled;
554 
555 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
556 
557 	s = splbio();
558 
559 	xs->error = XS_NOERROR;
560 
561 	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
562 	    link->lun != 0) {
563 		/*
564 		 * XXX Should be XS_SENSE but that would require setting up a
565 		 * faked sense too.
566 		 */
567 		xs->error = XS_DRIVER_STUFFUP;
568 		scsi_done(xs);
569 		splx(s);
570 		return;
571 	}
572 
573 	/* Don't double enqueue if we came from gdt_chain. */
574 	if (xs != SIMPLEQ_FIRST(&sc->sc_queue))
575 		gdt_enqueue(sc, xs, 0);
576 
577 	while ((xs = gdt_dequeue(sc)) != NULL) {
578 		xs->error = XS_NOERROR;
579 		ccb = NULL;
580 		link = xs->sc_link;
581 		target = link->target;
582 		polled = ISSET(xs->flags, SCSI_POLL);
583 
584 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
585 		    sc->sc_test_busy(sc)) {
586 			/*
587 			 * Put it back in front.  XXX Should we instead
588 			 * set xs->error to XS_BUSY?
589 			 */
590 			gdt_enqueue(sc, xs, 1);
591 			break;
592 		}
593 
594 		switch (xs->cmd->opcode) {
595 		case TEST_UNIT_READY:
596 		case REQUEST_SENSE:
597 		case INQUIRY:
598 		case MODE_SENSE:
599 		case START_STOP:
600 		case READ_CAPACITY:
601 #if 0
602 		case VERIFY:
603 #endif
604 			gdt_internal_cache_cmd(xs);
605 			scsi_done(xs);
606 			goto ready;
607 
608 		case PREVENT_ALLOW:
609 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
610 			/* XXX Not yet implemented */
611 			xs->error = XS_NOERROR;
612 			scsi_done(xs);
613 			goto ready;
614 
615 		default:
616 			GDT_DPRINTF(GDT_D_CMD,
617 			    ("unknown opc %d ", xs->cmd->opcode));
618 			/* XXX Not yet implemented */
619 			xs->error = XS_DRIVER_STUFFUP;
620 			scsi_done(xs);
621 			goto ready;
622 
623 		case READ_COMMAND:
624 		case READ_BIG:
625 		case WRITE_COMMAND:
626 		case WRITE_BIG:
627 		case SYNCHRONIZE_CACHE:
628 			/*
629 			 * A new command chain, start from the beginning.
630 			 */
631 			sc->sc_cmd_off = 0;
632 
633 			if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
634 				 blockno = blockcnt = 0;
635 			} else {
636 				/* A read or write operation. */
637 				if (xs->cmdlen == 6) {
638 					rw = (struct scsi_rw *)xs->cmd;
639 					blockno = _3btol(rw->addr) &
640 					    (SRW_TOPADDR << 16 | 0xffff);
641 					blockcnt =
642 					    rw->length ? rw->length : 0x100;
643 				} else {
644 					rwb = (struct scsi_rw_big *)xs->cmd;
645 					blockno = _4btol(rwb->addr);
646 					blockcnt = _2btol(rwb->length);
647 				}
648 				if (blockno >= sc->sc_hdr[target].hd_size ||
649 				    blockno + blockcnt >
650 				    sc->sc_hdr[target].hd_size) {
651 					printf(
652 					    "%s: out of bounds %u-%u >= %u\n",
653 					    DEVNAME(sc), blockno,
654 					    blockcnt,
655 					    sc->sc_hdr[target].hd_size);
656 					/*
657 					 * XXX Should be XS_SENSE but that
658 					 * would require setting up a faked
659 					 * sense too.
660 					 */
661 					xs->error = XS_DRIVER_STUFFUP;
662 					scsi_done(xs);
663 					goto ready;
664 				}
665 			}
666 
667 			ccb = xs->io;
668 			ccb->gc_blockno = blockno;
669 			ccb->gc_blockcnt = blockcnt;
670 			ccb->gc_xs = xs;
671 			ccb->gc_timeout = xs->timeout;
672 			ccb->gc_service = GDT_CACHESERVICE;
673 			ccb->gc_flags = 0;
674 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
675 
676 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
677 				xfer = ccb->gc_dmamap_xfer;
678 				error = bus_dmamap_load(sc->sc_dmat, xfer,
679 				    xs->data, xs->datalen, NULL,
680 				    (xs->flags & SCSI_NOSLEEP) ?
681 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
682 				if (error) {
683 					printf("%s: gdt_scsi_cmd: ",
684 					    DEVNAME(sc));
685 					if (error == EFBIG)
686 						printf(
687 						    "more than %d dma segs\n",
688 						    GDT_MAXOFFSETS);
689 					else
690 						printf("error %d "
691 						    "loading dma map\n",
692 						    error);
693 
694 					xs->error = XS_DRIVER_STUFFUP;
695 					scsi_done(xs);
696 					goto ready;
697 				}
698 				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
699 				    xfer->dm_mapsize,
700 				    (xs->flags & SCSI_DATA_IN) ?
701 				    BUS_DMASYNC_PREREAD :
702 				    BUS_DMASYNC_PREWRITE);
703 			}
704 
705 			gdt_enqueue_ccb(sc, ccb);
706 			/* XXX what if enqueue did not start a transfer? */
707 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
708 				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
709 					printf("%s: command %d timed out\n",
710 					    DEVNAME(sc),
711 					    ccb->gc_cmd_index);
712 					xs->error = XS_TIMEOUT;
713 					scsi_done(xs);
714 					splx(s);
715 					return;
716 				}
717 			}
718 		}
719 
720 	ready:
721 		/*
722 		 * Don't process the queue if we are polling.
723 		 */
724 		if (polled) {
725 			break;
726 		}
727 	}
728 
729 	splx(s);
730 }
731 
732 /* XXX Currently only for cacheservice, returns 0 if busy */
733 int
734 gdt_exec_ccb(struct gdt_ccb *ccb)
735 {
736 	struct scsi_xfer *xs = ccb->gc_xs;
737 	struct scsi_link *link = xs->sc_link;
738 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
739 	u_int8_t target = link->target;
740 	u_int32_t sg_canz;
741 	bus_dmamap_t xfer;
742 	int i;
743 #if 1 /* XXX */
744 	static int __level = 0;
745 
746 	if (__level++ > 0)
747 		panic("level > 0");
748 #endif
749 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
750 
751 	sc->sc_cmd_cnt = 0;
752 
753 	/*
754 	 * XXX Yeah I know it's an always-true condition, but that may change
755 	 * later.
756 	 */
757 	if (sc->sc_cmd_cnt == 0)
758 		sc->sc_set_sema0(sc);
759 
760 	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
761 	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
762 	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
763 	    target);
764 
765 	switch (xs->cmd->opcode) {
766 	case PREVENT_ALLOW:
767 	case SYNCHRONIZE_CACHE:
768 		if (xs->cmd->opcode == PREVENT_ALLOW) {
769 			/* XXX PREVENT_ALLOW support goes here */
770 		} else {
771 			GDT_DPRINTF(GDT_D_CMD,
772 			    ("SYNCHRONIZE CACHE tgt %d ", target));
773 			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
774 		}
775 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
776 		    1);
777 		sg_canz = 0;
778 		break;
779 
780 	case WRITE_COMMAND:
781 	case WRITE_BIG:
782 		/* XXX WRITE_THR could be supported too */
783 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
784 		break;
785 
786 	case READ_COMMAND:
787 	case READ_BIG:
788 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
789 		break;
790 	}
791 
792 	if (xs->cmd->opcode != PREVENT_ALLOW &&
793 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
794 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
795 		    ccb->gc_blockno);
796 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
797 		    ccb->gc_blockcnt);
798 
799 		xfer = ccb->gc_dmamap_xfer;
800 		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
801 			gdt_enc32(
802 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
803 			    0xffffffff);
804 			for (i = 0; i < xfer->dm_nsegs; i++) {
805 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
806 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
807 				    GDT_SG_PTR,
808 				    xfer->dm_segs[i].ds_addr);
809 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
810 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
811 				    GDT_SG_LEN,
812 				    xfer->dm_segs[i].ds_len);
813 				GDT_DPRINTF(GDT_D_IO,
814 				    ("#%d va %p pa %p len %x\n", i, buf,
815 				    xfer->dm_segs[i].ds_addr,
816 				    xfer->dm_segs[i].ds_len));
817 			}
818 			sg_canz = xfer->dm_nsegs;
819 			gdt_enc32(
820 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
821 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
822 		} else {
823 			/* XXX Hardly correct */
824 			gdt_enc32(
825 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
826 			    xfer->dm_segs[0].ds_addr);
827 			sg_canz = 0;
828 		}
829 	}
830 	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
831 
832 	sc->sc_cmd_len =
833 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
834 	    sizeof (u_int32_t));
835 
836 	if (sc->sc_cmd_cnt > 0 &&
837 	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
838 	    sc->sc_ic_all_size) {
839 		printf("%s: DPMEM overflow\n", DEVNAME(sc));
840 		xs->error = XS_BUSY;
841 #if 1 /* XXX */
842 		__level--;
843 #endif
844 		return (0);
845 	}
846 
847 	sc->sc_copy_cmd(sc, ccb);
848 	sc->sc_release_event(sc, ccb);
849 
850 	xs->error = XS_NOERROR;
851 	xs->resid = 0;
852 #if 1 /* XXX */
853 	__level--;
854 #endif
855 	return (1);
856 }
857 
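/*
 * Copy the result of an emulated command into the xfer's data buffer,
 * truncated to xs->datalen.  Transfers without a plain data buffer would
 * need a uio move, which is not supported.
 */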
858 void
859 gdt_copy_internal_data(struct scsi_xfer *xs, u_int8_t *data, size_t size)
860 {
861 	size_t copy_cnt;
862 
863 	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));
864 
865 	if (!xs->datalen)
866 		printf("uio move not yet supported\n");
867 	else {
868 		copy_cnt = MIN(size, xs->datalen);
869 		bcopy(data, xs->data, copy_cnt);
870 	}
871 }
872 
873 /* Emulated SCSI operation on cache device */
874 void
875 gdt_internal_cache_cmd(struct scsi_xfer *xs)
876 {
877 	struct scsi_link *link = xs->sc_link;
878 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
879 	struct scsi_inquiry_data inq;
880 	struct scsi_sense_data sd;
881 	struct scsi_read_cap_data rcd;
882 	u_int8_t target = link->target;
883 
884 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
885 
886 	switch (xs->cmd->opcode) {
887 	case TEST_UNIT_READY:
888 	case START_STOP:
889 #if 0
890 	case VERIFY:
891 #endif
892 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
893 		    target));
894 		break;
895 
896 	case REQUEST_SENSE:
897 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
898 		bzero(&sd, sizeof sd);
899 		sd.error_code = SSD_ERRCODE_CURRENT;
900 		sd.segment = 0;
901 		sd.flags = SKEY_NO_SENSE;
902 		gdt_enc32(sd.info, 0);
903 		sd.extra_len = 0;
904 		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
905 		break;
906 
907 	case INQUIRY:
908 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
909 		    sc->sc_hdr[target].hd_devtype));
910 		bzero(&inq, sizeof inq);
911 		inq.device =
912 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
913 		inq.dev_qual2 =
914 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
915 		inq.version = 2;
916 		inq.response_format = 2;
917 		inq.additional_length = 32;
918 		inq.flags |= SID_CmdQue;
919 		strlcpy(inq.vendor, "ICP	   ", sizeof inq.vendor);
920 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
921 		    target);
922 		strlcpy(inq.revision, "	 ", sizeof inq.revision);
923 		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
924 		break;
925 
926 	case READ_CAPACITY:
927 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
928 		bzero(&rcd, sizeof rcd);
929 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
930 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
931 		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
932 		break;
933 
934 	default:
935 		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
936 		    xs->cmd->opcode, target));
937 		xs->error = XS_DRIVER_STUFFUP;
938 		return;
939 	}
940 
941 	xs->error = XS_NOERROR;
942 }
943 
944 void
945 gdt_clear_events(struct gdt_softc *sc)
946 {
947 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));
948 
949 	/* XXX To be implemented */
950 }
951 
952 int
953 gdt_async_event(struct gdt_softc *sc, int service)
954 {
955 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));
956 
957 	if (service == GDT_SCREENSERVICE) {
958 		/* XXX To be implemented */
959 	} else {
960 		/* XXX To be implemented */
961 	}
962 
963 	return (0);
964 }
965 
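/*
 * Handle the completion status of a regular command.  Returns 1 when the
 * xfer should be completed, 2 when the controller reported busy and the
 * xfer should be requeued, and 0 when nothing more needs to be done; most
 * of the per-status handling is still unimplemented.
 */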
966 int
967 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
968     struct scsi_xfer *xs)
969 {
970 	GDT_DPRINTF(GDT_D_INTR,
971 	    ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
972 
973 	if (service == GDT_SCREENSERVICE) {
974 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
975 		/* XXX To be implemented */
976 		return (0);
977 	} else {
978 		switch (sc->sc_status) {
979 		case GDT_S_OK:
980 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
981 			/* XXX To be implemented */
982 			break;
983 		case GDT_S_BSY:
984 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
985 			/* XXX To be implemented */
986 			return (2);
987 		default:
988 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
989 			    sc->sc_status));
990 			/* XXX To be implemented */
991 			return (0);
992 		}
993 	}
994 
995 	return (1);
996 }
997 
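/*
 * Interrupt handler, also called directly from gdt_wait() while polling.
 * Reads the controller status, dispatches asynchronous and special events,
 * syncs and unloads the data DMA map of the completed ccb, finishes the
 * corresponding scsi_xfer and then restarts the driver queue via
 * gdt_chain().
 */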
998 int
999 gdt_intr(void *arg)
1000 {
1001 	struct gdt_softc *sc = arg;
1002 	struct gdt_intr_ctx ctx;
1003 	int chain = 1;
1004 	int sync_val = 0;
1005 	struct scsi_xfer *xs = NULL;
1006 	int prev_cmd;
1007 	struct gdt_ccb *ccb;
1008 
1009 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));
1010 
1011 	/* If polling and we were not called from gdt_wait, just return */
1012 	if (gdt_polling && !gdt_from_wait)
1013 		return (0);
1014 
1015 	ctx.istatus = sc->sc_get_status(sc);
1016 	if (!ctx.istatus) {
1017 		sc->sc_status = GDT_S_NO_STATUS;
1018 		return (0);
1019 	}
1020 
1021 	gdt_wait_index = 0;
1022 	ctx.service = ctx.info2 = 0;
1023 
1024 	sc->sc_intr(sc, &ctx);
1025 
1026 	sc->sc_status = ctx.cmd_status;
1027 	sc->sc_info = ctx.info;
1028 	sc->sc_info2 = ctx.info2;
1029 
1030 	if (gdt_from_wait) {
1031 		gdt_wait_gdt = sc;
1032 		gdt_wait_index = ctx.istatus;
1033 	}
1034 
1035 	switch (ctx.istatus) {
1036 	case GDT_ASYNCINDEX:
1037 		gdt_async_event(sc, ctx.service);
1038 		goto finish;
1039 
1040 	case GDT_SPEZINDEX:
1041 		printf("%s: uninitialized or unknown service (%d %d)\n",
1042 		    DEVNAME(sc), ctx.info, ctx.info2);
1043 		chain = 0;
1044 		goto finish;
1045 	}
1046 
1047 	ccb = &sc->sc_ccbs[ctx.istatus - 2];
1048 	xs = ccb->gc_xs;
1049 	if (!gdt_polling)
1050 		timeout_del(&xs->stimeout);
1051 	ctx.service = ccb->gc_service;
1052 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1053 	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
1054 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
1055 		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
1056 		    ccb->gc_dmamap_xfer->dm_mapsize,
1057 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1058 		    BUS_DMASYNC_POSTWRITE);
1059 		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
1060 	}
1061 	switch (prev_cmd) {
1062 	case GDT_GCF_UNUSED:
1063 		/* XXX Not yet implemented */
1064 		chain = 0;
1065 		goto finish;
1066 	case GDT_GCF_INTERNAL:
1067 		chain = 0;
1068 		goto finish;
1069 	}
1070 
1071 	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);
1072 
1073  finish:
1074 	switch (sync_val) {
1075 	case 0:
1076 		if (xs && gdt_from_wait)
1077 			scsi_done(xs);
1078 		break;
1079 	case 1:
1080 		scsi_done(xs);
1081 		break;
1082 
1083 	case 2:
1084 		gdt_enqueue(sc, xs, 0);
1085 	}
1086 
1087 	if (chain)
1088 		gdt_chain(sc);
1089 
1090 	return (1);
1091 }
1092 
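/*
 * Poll for completion of the given ccb by calling gdt_intr() directly,
 * waiting up to 'timeout' milliseconds.  Returns 1 if the ccb completed in
 * time, 0 otherwise; afterwards it spins until the controller is no longer
 * busy.
 */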
1093 int
1094 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1095 {
1096 	int s, rslt, rv = 0;
1097 
1098 	GDT_DPRINTF(GDT_D_MISC,
1099 	    ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1100 
1101 	gdt_from_wait = 1;
1102 	do {
1103 		s = splbio();
1104 		rslt = gdt_intr(sc);
1105 		splx(s);
1106 		if (rslt && sc == gdt_wait_gdt &&
1107 		    ccb->gc_cmd_index == gdt_wait_index) {
1108 			rv = 1;
1109 			break;
1110 		}
1111 		DELAY(1000); /* 1 millisecond */
1112 	} while (--timeout);
1113 	gdt_from_wait = 0;
1114 
1115 	while (sc->sc_test_busy(sc))
1116 		DELAY(0);		/* XXX correct? */
1117 
1118 	return (rv);
1119 }
1120 
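/*
 * Issue an internal command (no scsi_xfer attached) to the given firmware
 * service and poll for its completion, retrying while the controller
 * reports busy.  Returns nonzero if the command finished with GDT_S_OK.
 * Used mainly during attach, with results returned in sc_status, sc_info
 * and the scratch buffer.
 */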
1121 int
1122 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1123     u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1124 {
1125 	int retries, rslt;
1126 	struct gdt_ccb *ccb;
1127 
1128 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1129 	    sc, service, opcode, arg1, arg2, arg3));
1130 
1131 	bzero(sc->sc_cmd, GDT_CMD_SZ);
1132 
1133 	for (retries = GDT_RETRIES; ; ) {
1134 		ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1135 		if (ccb == NULL) {
1136 			printf("%s: no free command index found\n",
1137 			    DEVNAME(sc));
1138 			return (0);
1139 		}
1140 		ccb->gc_service = service;
1141 		ccb->gc_xs = NULL;
1142 		ccb->gc_blockno = ccb->gc_blockcnt = 0;
1143 		ccb->gc_timeout = ccb->gc_flags = 0;
1144 		ccb->gc_service = GDT_CACHESERVICE;
1145 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1146 
1147 		sc->sc_set_sema0(sc);
1148 		gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1149 		    ccb->gc_cmd_index);
1150 		gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1151 		gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1152 
1153 		switch (service) {
1154 		case GDT_CACHESERVICE:
1155 			if (opcode == GDT_IOCTL) {
1156 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1157 				    GDT_IOCTL_SUBFUNC, arg1);
1158 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1159 				    GDT_IOCTL_CHANNEL, arg2);
1160 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1161 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1162 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1163 				    GDT_IOCTL_P_PARAM,
1164 				    sc->sc_scratch_seg.ds_addr);
1165 			} else {
1166 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1167 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1168 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1169 				    GDT_CACHE_BLOCKNO, arg2);
1170 			}
1171 			break;
1172 
1173 		case GDT_SCSIRAWSERVICE:
1174 			gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1175 			    GDT_RAW_DIRECTION, arg1);
1176 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1177 			    (u_int8_t)arg2;
1178 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1179 			    (u_int8_t)arg3;
1180 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1181 			    (u_int8_t)(arg3 >> 8);
1182 		}
1183 
1184 		sc->sc_cmd_len = GDT_CMD_SZ;
1185 		sc->sc_cmd_off = 0;
1186 		sc->sc_cmd_cnt = 0;
1187 		sc->sc_copy_cmd(sc, ccb);
1188 		sc->sc_release_event(sc, ccb);
1189 		DELAY(20);
1190 
1191 		rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT);
1192 		scsi_io_put(&sc->sc_iopool, ccb);
1193 
1194 		if (!rslt)
1195 			return (0);
1196 		if (sc->sc_status != GDT_S_BSY || --retries == 0)
1197 			break;
1198 		DELAY(1);
1199 	}
1200 	return (sc->sc_status == GDT_S_OK);
1201 }
1202 
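/*
 * scsi_iopool backends: gdt_ccb_alloc() takes a ccb off the free list and
 * gdt_ccb_free() puts it back, both under sc_ccb_mtx.  Returning a ccb to
 * an empty list wakes up potential waiters.
 */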
1203 void *
1204 gdt_ccb_alloc(void *xsc)
1205 {
1206 	struct gdt_softc *sc = xsc;
1207 	struct gdt_ccb *ccb;
1208 
1209 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc));
1210 
1211 	mtx_enter(&sc->sc_ccb_mtx);
1212 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1213 	if (ccb != NULL)
1214 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1215 	mtx_leave(&sc->sc_ccb_mtx);
1216 
1217 	return (ccb);
1218 }
1219 
1220 void
1221 gdt_ccb_free(void *xsc, void *xccb)
1222 {
1223 	struct gdt_softc *sc = xsc;
1224 	struct gdt_ccb *ccb = xccb;
1225 	int wake = 0;
1226 
1227 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb));
1228 
1229 	mtx_enter(&sc->sc_ccb_mtx);
1230 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1231 	/* If the free list was empty, wake up potential waiters. */
1232 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1233 		wake = 1;
1234 	mtx_leave(&sc->sc_ccb_mtx);
1235 
1236 	if (wake)
1237 		wakeup(&sc->sc_free_ccb);
1238 }
1239 
1240 void
1241 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1242 {
1243 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1244 
1245 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1246 	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1247 	gdt_start_ccbs(sc);
1248 }
1249 
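/*
 * Hand queued ccbs to the controller.  If gdt_exec_ccb() reports the
 * controller busy, the ccb stays at the head of the queue and the watchdog
 * timeout is armed to retry later; otherwise the per-command timeout is
 * started for non-polled transfers.
 */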
1250 void
1251 gdt_start_ccbs(struct gdt_softc *sc)
1252 {
1253 	struct gdt_ccb *ccb;
1254 	struct scsi_xfer *xs;
1255 
1256 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1257 
1258 	while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1259 
1260 		xs = ccb->gc_xs;
1261 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1262 			timeout_del(&xs->stimeout);
1263 
1264 		if (gdt_exec_ccb(ccb) == 0) {
1265 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1266 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1267 			timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1268 			break;
1269 		}
1270 		TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1271 
1272 		if ((xs->flags & SCSI_POLL) == 0) {
1273 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1274 			timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1275 		}
1276 	}
1277 }
1278 
1279 void
1280 gdt_chain(struct gdt_softc *sc)
1281 {
1282 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1283 
1284 	if (!SIMPLEQ_EMPTY(&sc->sc_queue))
1285 		gdt_scsi_cmd(SIMPLEQ_FIRST(&sc->sc_queue));
1286 }
1287 
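/*
 * Per-command timeout handler: mark the xfer as timed out and requeue the
 * ccb so it goes through gdt_start_ccbs() again.
 */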
1288 void
1289 gdt_timeout(void *arg)
1290 {
1291 	struct gdt_ccb *ccb = arg;
1292 	struct scsi_link *link = ccb->gc_xs->sc_link;
1293 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
1294 	int s;
1295 
1296 	sc_print_addr(link);
1297 	printf("timed out\n");
1298 
1299 	/* XXX Test for multiple timeouts */
1300 
1301 	ccb->gc_xs->error = XS_TIMEOUT;
1302 	s = splbio();
1303 	gdt_enqueue_ccb(sc, ccb);
1304 	splx(s);
1305 }
1306 
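/*
 * Watchdog armed by gdt_start_ccbs() when the controller was busy: clear
 * the flag and try to start the queued ccbs again.
 */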
1307 void
1308 gdt_watchdog(void *arg)
1309 {
1310 	struct gdt_ccb *ccb = arg;
1311 	struct scsi_link *link = ccb->gc_xs->sc_link;
1312 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
1313 	int s;
1314 
1315 	s = splbio();
1316 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1317 	gdt_start_ccbs(sc);
1318 	splx(s);
1319 }
1320 
1321 #if NBIO > 0
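/*
 * bio(4) ioctl dispatcher registered in gdt_attach().  Only BIOCINQ is
 * implemented so far; the volume, disk, alarm and setstate sub-ioctls
 * still return an error.
 */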
1322 int
1323 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1324 {
1325 	struct gdt_softc *sc = (struct gdt_softc *)dev;
1326 	int error = 0;
1327 
1328 	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1329 
1330 	switch (cmd) {
1331 	case BIOCINQ:
1332 		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1333 		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1334 		break;
1335 
1336 	case BIOCVOL:
1337 		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1338 		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1339 		break;
1340 
1341 	case BIOCDISK:
1342 		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1343 		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1344 		break;
1345 
1346 	case BIOCALARM:
1347 		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1348 		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1349 		break;
1350 
1351 	case BIOCSETSTATE:
1352 		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1353 		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1354 		break;
1355 
1356 	default:
1357 		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1358 		error = ENOTTY;
1359 	}
1360 
1361 	return (error);
1362 }
1363 
1364 int
1365 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1366 {
1367 	bi->bi_novol = sc->sc_ndevs;
1368 	bi->bi_nodisk = sc->sc_total_disks;
1369 
1370 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1371 
1372 	return (0);
1373 }
1374 
1375 int
1376 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
1377 {
1378 	return (1); /* XXX not yet */
1379 }
1380 
1381 int
1382 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
1383 {
1384 	return (1); /* XXX not yet */
1385 }
1386 
1387 int
1388 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
1389 {
1390 	return (1); /* XXX not yet */
1391 }
1392 
1393 int
1394 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
1395 {
1396 	return (1); /* XXX not yet */
1397 }
1398 #endif /* NBIO > 0 */
1399