xref: /openbsd-src/sys/dev/ic/gdt_common.c (revision 99fd087599a8791921855f21bd7e36130f39aadc)
1 /*	$OpenBSD: gdt_common.c,v 1.68 2020/02/15 01:58:01 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This driver would not have been written if it was not for the hardware
29  * donations from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45 
46 #include <dev/biovar.h>
47 #include <dev/ic/gdtreg.h>
48 #include <dev/ic/gdtvar.h>
49 
50 #include "bio.h"
51 
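/*
 * With GDT_DEBUG defined, GDT_MAXCMDS expands to a patchable global so the
 * command count can be adjusted without recompiling the driver.
 */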
52 #ifdef GDT_DEBUG
53 int gdt_maxcmds = GDT_MAXCMDS;
54 #undef GDT_MAXCMDS
55 #define GDT_MAXCMDS gdt_maxcmds
56 #endif
57 
58 #define GDT_DRIVER_VERSION 1
59 #define GDT_DRIVER_SUBVERSION 2
60 
61 int	gdt_async_event(struct gdt_softc *, int);
62 void	gdt_chain(struct gdt_softc *);
63 void	gdt_clear_events(struct gdt_softc *);
64 void	gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t);
65 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
66 void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
67 void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
68 void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
69 int	gdt_exec_ccb(struct gdt_ccb *);
70 void	gdt_ccb_free(void *, void *);
71 void   *gdt_ccb_alloc(void *);
72 void	gdt_internal_cache_cmd(struct scsi_xfer *);
73 int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
74     u_int32_t, u_int32_t, u_int32_t);
75 #if NBIO > 0
76 int	gdt_ioctl(struct device *, u_long, caddr_t);
77 int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
78 int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
79 int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
80 int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
81 int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
82 #endif /* NBIO > 0 */
83 void	gdt_scsi_cmd(struct scsi_xfer *);
84 void	gdt_start_ccbs(struct gdt_softc *);
85 int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
86     struct scsi_xfer *);
87 void	gdt_timeout(void *);
88 int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
89 void	gdt_watchdog(void *);
90 
91 struct cfdriver gdt_cd = {
92 	NULL, "gdt", DV_DULL
93 };
94 
95 struct scsi_adapter gdt_switch = {
96 	gdt_scsi_cmd, NULL, NULL, NULL, NULL
97 };
98 
99 int gdt_cnt = 0;
100 u_int8_t gdt_polling;
101 u_int8_t gdt_from_wait;
102 struct gdt_softc *gdt_wait_gdt;
103 int	gdt_wait_index;
104 #ifdef GDT_DEBUG
105 int	gdt_debug = GDT_DEBUG;
106 #endif
107 
108 int
109 gdt_attach(struct gdt_softc *sc)
110 {
111 	struct scsibus_attach_args saa;
112 	u_int16_t cdev_cnt;
113 	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
114 
115 	gdt_polling = 1;
116 	gdt_from_wait = 0;
117 
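	/*
	 * Allocate and map the scratch buffer that is handed to the firmware
	 * as the parameter/result area for internal ioctl commands (see
	 * gdt_internal_cmd()).
	 */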
118 	if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
119 	    &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
120 	    panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
121 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
122 	    GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
123 	    panic("%s: bus_dmamem_map failed", DEVNAME(sc));
124 
125 	gdt_clear_events(sc);
126 
127 	TAILQ_INIT(&sc->sc_free_ccb);
128 	TAILQ_INIT(&sc->sc_ccbq);
129 	TAILQ_INIT(&sc->sc_ucmdq);
130 	SIMPLEQ_INIT(&sc->sc_queue);
131 
132 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
133 	scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free);
134 
135 	/* Initialize the ccbs */
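	/*
	 * The firmware reserves the lowest command indices for its special
	 * replies (GDT_ASYNCINDEX/GDT_SPEZINDEX), so ccbs are numbered from 2
	 * and gdt_intr() subtracts 2 to map a status back to its ccb.
	 */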
136 	for (i = 0; i < GDT_MAXCMDS; i++) {
137 		sc->sc_ccbs[i].gc_cmd_index = i + 2;
138 		error = bus_dmamap_create(sc->sc_dmat,
139 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
140 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
141 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
142 		    &sc->sc_ccbs[i].gc_dmamap_xfer);
143 		if (error) {
144 			printf("%s: cannot create ccb dmamap (%d)",
145 			    DEVNAME(sc), error);
146 			return (1);
147 		}
148 		(void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
149 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
150 		    gc_chain);
151 	}
152 
153 	/* Fill in the prototype scsi_link. */
154 	sc->sc_link.adapter_softc = sc;
155 	sc->sc_link.adapter = &gdt_switch;
156 	/* openings will be filled in later. */
157 	sc->sc_link.adapter_buswidth =
158 	    (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
159 	sc->sc_link.adapter_target = sc->sc_link.adapter_buswidth;
160 	sc->sc_link.pool = &sc->sc_iopool;
161 
162 	if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
163 		printf("screen service initialization error %d\n",
164 		     sc->sc_status);
165 		return (1);
166 	}
167 
168 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
169 	    0)) {
170 		printf("cache service initialization error %d\n",
171 		    sc->sc_status);
172 		return (1);
173 	}
174 
175 	cdev_cnt = (u_int16_t)sc->sc_info;
176 
177 	/* Detect number of busses */
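	/*
	 * Try the GDT_IOCHAN_RAW_DESC ioctl first; if the firmware does not
	 * support it, fall back to querying each channel individually below.
	 */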
178 	gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
179 	sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
180 	sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
181 	sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
182 	gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
183 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
184 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
185 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
186 		sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
187 		for (i = 0; i < sc->sc_bus_cnt; i++) {
188 			id = sc->sc_scratch[GDT_IOC_HDR_SZ +
189 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
190 			sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
191 		}
192 
193 	} else {
194 		/* New method failed, use fallback. */
195 		for (i = 0; i < GDT_MAXBUS; i++) {
196 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
197 			if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
198 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
199 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
200 			    GDT_GETCH_SZ)) {
201 				if (i == 0) {
202 					printf("cannot get channel count, "
203 					    "error %d\n", sc->sc_status);
204 					return (1);
205 				}
206 				break;
207 			}
208 			sc->sc_bus_id[i] =
209 			    (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
210 			    sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
211 		}
212 		sc->sc_bus_cnt = i;
213 	}
214 
215 	/* Read cache configuration */
216 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
217 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
218 		printf("cannot get cache info, error %d\n", sc->sc_status);
219 		return (1);
220 	}
221 	sc->sc_cpar.cp_version =
222 	    gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
223 	sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
224 	sc->sc_cpar.cp_strategy =
225 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
226 	sc->sc_cpar.cp_write_back =
227 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
228 	sc->sc_cpar.cp_block_size =
229 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
230 
231 	/* Read board information and features */
232 	sc->sc_more_proc = 0;
233 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
234 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
235 		/* XXX A lot of these assignments can probably go later */
236 		sc->sc_binfo.bi_ser_no =
237 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
238 		bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
239 		    sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
240 		sc->sc_binfo.bi_ep_flags =
241 		    gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
242 		sc->sc_binfo.bi_proc_id =
243 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
244 		sc->sc_binfo.bi_memsize =
245 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
246 		sc->sc_binfo.bi_mem_banks =
247 		    sc->sc_scratch[GDT_BINFO_MEM_BANKS];
248 		sc->sc_binfo.bi_chan_type =
249 		    sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
250 		sc->sc_binfo.bi_chan_count =
251 		    sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
252 		sc->sc_binfo.bi_rdongle_pres =
253 		    sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
254 		sc->sc_binfo.bi_epr_fw_ver =
255 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
256 		sc->sc_binfo.bi_upd_fw_ver =
257 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
258 		sc->sc_binfo.bi_upd_revision =
259 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
260 		bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
261 		    sc->sc_binfo.bi_type_string,
262 		    sizeof sc->sc_binfo.bi_type_string);
263 		bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
264 		    sc->sc_binfo.bi_raid_string,
265 		    sizeof sc->sc_binfo.bi_raid_string);
266 		sc->sc_binfo.bi_update_pres =
267 		    sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
268 		sc->sc_binfo.bi_xor_pres =
269 		    sc->sc_scratch[GDT_BINFO_XOR_PRES];
270 		sc->sc_binfo.bi_prom_type =
271 		    sc->sc_scratch[GDT_BINFO_PROM_TYPE];
272 		sc->sc_binfo.bi_prom_count =
273 		    sc->sc_scratch[GDT_BINFO_PROM_COUNT];
274 		sc->sc_binfo.bi_dup_pres =
275 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
276 		sc->sc_binfo.bi_chan_pres =
277 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
278 		sc->sc_binfo.bi_mem_pres =
279 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
280 		sc->sc_binfo.bi_ft_bus_system =
281 		    sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
282 		sc->sc_binfo.bi_subtype_valid =
283 		    sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
284 		sc->sc_binfo.bi_board_subtype =
285 		    sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
286 		sc->sc_binfo.bi_rampar_pres =
287 		    sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
288 
289 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
290 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
291 			sc->sc_bfeat.bf_chaining =
292 			    sc->sc_scratch[GDT_BFEAT_CHAINING];
293 			sc->sc_bfeat.bf_striping =
294 			    sc->sc_scratch[GDT_BFEAT_STRIPING];
295 			sc->sc_bfeat.bf_mirroring =
296 			    sc->sc_scratch[GDT_BFEAT_MIRRORING];
297 			sc->sc_bfeat.bf_raid =
298 			    sc->sc_scratch[GDT_BFEAT_RAID];
299 			sc->sc_more_proc = 1;
300 		}
301 	} else {
302 		/* XXX Not implemented yet */
303 	}
304 
305 	/* Read more information */
306 	if (sc->sc_more_proc) {
307 		int bus, j;
308 		/* physical drives, channel addresses */
309 		/* step 1: get magical bus number from firmware */
310 		gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
311 		sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
312 		sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
313 		sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
314 		gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
315 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
316 		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
317 		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
318 			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
319 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
320 				sc->sc_raw[bus].ra_address =
321 				    gdt_dec32(sc->sc_scratch +
322 				    GDT_IOC_HDR_SZ +
323 				    GDT_IOC_SZ * bus +
324 				    GDT_IOC_ADDRESS);
325 				sc->sc_raw[bus].ra_local_no =
326 				    gdt_dec8(sc->sc_scratch +
327 				    GDT_IOC_HDR_SZ +
328 				    GDT_IOC_SZ * bus +
329 				    GDT_IOC_LOCAL_NO);
330 				GDT_DPRINTF(GDT_D_INFO, (
331 				    "bus: %d address: %x local: %x\n",
332 				    bus,
333 				    sc->sc_raw[bus].ra_address,
334 				    sc->sc_raw[bus].ra_local_no));
335 			}
336 		} else {
337 			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
338 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
339 				sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
340 				sc->sc_raw[bus].ra_local_no = bus;
341 				GDT_DPRINTF(GDT_D_INFO, (
342 				    "bus: %d address: %x local: %x\n",
343 				    bus,
344 				    sc->sc_raw[bus].ra_address,
345 				    sc->sc_raw[bus].ra_local_no));
346 			}
347 		}
348 		/* step 2: use magical bus number to get nr of phys disks */
349 		for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
350 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
351 			    sc->sc_raw[bus].ra_local_no);
352 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
353 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
354 			    sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
355 			    GDT_GETCH_SZ)) {
356 				sc->sc_raw[bus].ra_phys_cnt =
357 				    gdt_dec32(sc->sc_scratch +
358 				    GDT_GETCH_DRIVE_CNT);
359 				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
360 				    bus, sc->sc_raw[bus].ra_phys_cnt));
361 			}
362 
363 			/* step 3: get scsi disk nr */
364 			if (sc->sc_raw[bus].ra_phys_cnt > 0) {
365 				gdt_enc32(sc->sc_scratch +
366 				    GDT_GETSCSI_CHAN,
367 				    sc->sc_raw[bus].ra_local_no);
368 				gdt_enc32(sc->sc_scratch +
369 				    GDT_GETSCSI_CNT,
370 				    sc->sc_raw[bus].ra_phys_cnt);
371 				if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
372 				    GDT_IOCTL,
373 				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
374 				    sc->sc_raw[bus].ra_address |
375 				    GDT_INVALID_CHANNEL,
376 				    GDT_GETSCSI_SZ))
377 					for (j = 0;
378 					    j < sc->sc_raw[bus].ra_phys_cnt;
379 					    j++) {
380 						sc->sc_raw[bus].ra_id_list[j] =
381 						    gdt_dec32(sc->sc_scratch +
382 						    GDT_GETSCSI_LIST +
383 						    GDT_GETSCSI_LIST_SZ * j);
384 						GDT_DPRINTF(GDT_D_INFO,
385 						    ("  diskid: %d\n",
386 						    sc->sc_raw[bus].ra_id_list[j]));
387 					}
388 				else
389 					sc->sc_raw[bus].ra_phys_cnt = 0;
390 			}
391 			/* add found disks to grand total */
392 			sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
393 		}
394 	} /* if (sc->sc_more_proc) */
395 
396 	if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
397 		printf("raw service initialization error %d\n",
398 		    sc->sc_status);
399 		return (1);
400 	}
401 
402 	/* Set/get features raw service (scatter/gather) */
403 	sc->sc_raw_feat = 0;
404 	if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
405 	    GDT_SCATTER_GATHER, 0, 0))
406 		if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
407 		    0, 0))
408 			sc->sc_raw_feat = sc->sc_info;
409 
410 	/* Set/get features cache service (scatter/gather) */
411 	sc->sc_cache_feat = 0;
412 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
413 	    GDT_SCATTER_GATHER, 0))
414 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
415 		    0))
416 			sc->sc_cache_feat = sc->sc_info;
417 
418 	/* XXX Linux reserve drives here, potentially */
419 
420 	sc->sc_ndevs = 0;
421 	/* Scan for cache devices */
422 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
423 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
424 		    0)) {
425 			sc->sc_hdr[i].hd_present = 1;
426 			sc->sc_hdr[i].hd_size = sc->sc_info;
427 
428 			if (sc->sc_hdr[i].hd_size > 0)
429 				sc->sc_ndevs++;
430 
431 			/*
432 			 * Evaluate mapping (sectors per head, heads per cyl)
433 			 */
434 			sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
435 			if (sc->sc_info2 == 0)
436 				gdt_eval_mapping(sc->sc_hdr[i].hd_size,
437 				    &drv_cyls, &drv_hds, &drv_secs);
438 			else {
439 				drv_hds = sc->sc_info2 & 0xff;
440 				drv_secs = (sc->sc_info2 >> 8) & 0xff;
441 				drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
442 				    drv_secs;
443 			}
444 			sc->sc_hdr[i].hd_heads = drv_hds;
445 			sc->sc_hdr[i].hd_secs = drv_secs;
446 			/* Round the size */
447 			sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
448 
449 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
450 			    GDT_DEVTYPE, i, 0, 0))
451 				sc->sc_hdr[i].hd_devtype = sc->sc_info;
452 		}
453 
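	/*
	 * Divide the command slots that remain after GDT_CMD_RESERVE evenly
	 * among the cache devices to set the per-device openings.
	 */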
454 	if (sc->sc_ndevs == 0)
455 		sc->sc_link.openings = 0;
456 	else
457 		sc->sc_link.openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
458 		    sc->sc_ndevs;
459 
460 	printf("dpmem %llx %d-bus %d cache device%s\n",
461 	    (long long)sc->sc_dpmembase,
462 	    sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
463 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
464 	    DEVNAME(sc), sc->sc_cpar.cp_version,
465 	    sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
466 	    sc->sc_cpar.cp_write_back ? "on" : "off",
467 	    sc->sc_cpar.cp_block_size);
468 #if 1
469 	printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
470 	    sc->sc_raw_feat, sc->sc_cache_feat);
471 #endif
472 
473 #if NBIO > 0
474 	if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
475 		panic("%s: controller registration failed", DEVNAME(sc));
476 #endif
477 	gdt_cnt++;
478 
479 	bzero(&saa, sizeof(saa));
480 	saa.saa_sc_link = &sc->sc_link;
481 
482 	config_found(&sc->sc_dev, &saa, scsiprint);
483 
484 	gdt_polling = 0;
485 	return (0);
486 }
487 
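/*
 * Derive a synthetic C/H/S geometry for a host drive of `size' sectors:
 * use the smallest of the standard, medium and big head/sector combinations
 * that keeps the cylinder count below GDT_MAXCYLS, falling back to the big
 * geometry otherwise.
 */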
488 void
489 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
490 {
491 	*cyls = size / GDT_HEADS / GDT_SECS;
492 	if (*cyls < GDT_MAXCYLS) {
493 		*heads = GDT_HEADS;
494 		*secs = GDT_SECS;
495 	} else {
496 		/* Too high for 64 * 32 */
497 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
498 		if (*cyls < GDT_MAXCYLS) {
499 			*heads = GDT_MEDHEADS;
500 			*secs = GDT_MEDSECS;
501 		} else {
502 			/* Too high for 127 * 63 */
503 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
504 			*heads = GDT_BIGHEADS;
505 			*secs = GDT_BIGSECS;
506 		}
507 	}
508 }
509 
510 /*
511  * Insert a command into the driver queue, either at the front or at the tail.
512  * It's ok to overload the freelist link as these structures are never on
513  * the freelist at this time.
514  */
515 void
516 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
517 {
518 	if (infront)
519 		SIMPLEQ_INSERT_HEAD(&sc->sc_queue, xs, xfer_list);
520 	else
521 		SIMPLEQ_INSERT_TAIL(&sc->sc_queue, xs, xfer_list);
522 }
523 
524 /*
525  * Pull a command off the front of the driver queue.
526  */
527 struct scsi_xfer *
528 gdt_dequeue(struct gdt_softc *sc)
529 {
530 	struct scsi_xfer *xs;
531 
532 	xs = SIMPLEQ_FIRST(&sc->sc_queue);
533 	if (xs != NULL)
534 		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, xfer_list);
535 
536 	return (xs);
537 }
538 
539 /*
540  * Start a SCSI operation on a cache device.
541  * XXX Polled operation is not yet complete.  What kind of locking do we need?
542  */
543 void
544 gdt_scsi_cmd(struct scsi_xfer *xs)
545 {
546 	struct scsi_link *link = xs->sc_link;
547 	struct gdt_softc *sc = link->adapter_softc;
548 	u_int8_t target = link->target;
549 	struct gdt_ccb *ccb;
550 	u_int32_t blockno, blockcnt;
551 	struct scsi_rw *rw;
552 	struct scsi_rw_big *rwb;
553 	bus_dmamap_t xfer;
554 	int error;
555 	int s;
556 	int polled;
557 
558 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
559 
560 	s = splbio();
561 
562 	xs->error = XS_NOERROR;
563 
564 	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
565 	    link->lun != 0) {
566 		/*
567 		 * XXX Should be XS_SENSE but that would require setting up a
568 		 * faked sense too.
569 		 */
570 		xs->error = XS_DRIVER_STUFFUP;
571 		scsi_done(xs);
572 		splx(s);
573 		return;
574 	}
575 
576 	/* Don't double enqueue if we came from gdt_chain. */
577 	if (xs != SIMPLEQ_FIRST(&sc->sc_queue))
578 		gdt_enqueue(sc, xs, 0);
579 
580 	while ((xs = gdt_dequeue(sc)) != NULL) {
581 		xs->error = XS_NOERROR;
582 		ccb = NULL;
583 		link = xs->sc_link;
584 		target = link->target;
585 		polled = ISSET(xs->flags, SCSI_POLL);
586 
587 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
588 		    sc->sc_test_busy(sc)) {
589 			/*
590 			 * Put it back in front.  XXX Should we instead
591 			 * set xs->error to XS_BUSY?
592 			 */
593 			gdt_enqueue(sc, xs, 1);
594 			break;
595 		}
596 
597 		switch (xs->cmd->opcode) {
598 		case TEST_UNIT_READY:
599 		case REQUEST_SENSE:
600 		case INQUIRY:
601 		case MODE_SENSE:
602 		case START_STOP:
603 		case READ_CAPACITY:
604 #if 0
605 		case VERIFY:
606 #endif
607 			gdt_internal_cache_cmd(xs);
608 			scsi_done(xs);
609 			goto ready;
610 
611 		case PREVENT_ALLOW:
612 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
613 			/* XXX Not yet implemented */
614 			xs->error = XS_NOERROR;
615 			scsi_done(xs);
616 			goto ready;
617 
618 		default:
619 			GDT_DPRINTF(GDT_D_CMD,
620 			    ("unknown opc %d ", xs->cmd->opcode));
621 			/* XXX Not yet implemented */
622 			xs->error = XS_DRIVER_STUFFUP;
623 			scsi_done(xs);
624 			goto ready;
625 
626 		case READ_COMMAND:
627 		case READ_BIG:
628 		case WRITE_COMMAND:
629 		case WRITE_BIG:
630 		case SYNCHRONIZE_CACHE:
631 			/*
632 			 * A new command chain, start from the beginning.
633 			 */
634 			sc->sc_cmd_off = 0;
635 
636 			if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
637 				blockno = blockcnt = 0;
638 			} else {
639 				/* A read or write operation. */
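				/*
				 * 6-byte CDBs carry a 21-bit block address
				 * and a length where 0 means 256 blocks;
				 * 10-byte CDBs carry both fields in full.
				 */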
640 				if (xs->cmdlen == 6) {
641 					rw = (struct scsi_rw *)xs->cmd;
642 					blockno = _3btol(rw->addr) &
643 					    (SRW_TOPADDR << 16 | 0xffff);
644 					blockcnt =
645 					    rw->length ? rw->length : 0x100;
646 				} else {
647 					rwb = (struct scsi_rw_big *)xs->cmd;
648 					blockno = _4btol(rwb->addr);
649 					blockcnt = _2btol(rwb->length);
650 				}
651 				if (blockno >= sc->sc_hdr[target].hd_size ||
652 				    blockno + blockcnt >
653 				    sc->sc_hdr[target].hd_size) {
654 					printf(
655 					    "%s: out of bounds %u-%u >= %u\n",
656 					    DEVNAME(sc), blockno,
657 					    blockcnt,
658 					    sc->sc_hdr[target].hd_size);
659 					/*
660 					 * XXX Should be XS_SENSE but that
661 					 * would require setting up a faked
662 					 * sense too.
663 					 */
664 					xs->error = XS_DRIVER_STUFFUP;
665 					scsi_done(xs);
666 					goto ready;
667 				}
668 			}
669 
670 			ccb = xs->io;
671 			ccb->gc_blockno = blockno;
672 			ccb->gc_blockcnt = blockcnt;
673 			ccb->gc_xs = xs;
674 			ccb->gc_timeout = xs->timeout;
675 			ccb->gc_service = GDT_CACHESERVICE;
676 			ccb->gc_flags = 0;
677 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
678 
679 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
680 				xfer = ccb->gc_dmamap_xfer;
681 				error = bus_dmamap_load(sc->sc_dmat, xfer,
682 				    xs->data, xs->datalen, NULL,
683 				    (xs->flags & SCSI_NOSLEEP) ?
684 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
685 				if (error) {
686 					printf("%s: gdt_scsi_cmd: ",
687 					    DEVNAME(sc));
688 					if (error == EFBIG)
689 						printf(
690 						    "more than %d dma segs\n",
691 						    GDT_MAXOFFSETS);
692 					else
693 						printf("error %d "
694 						    "loading dma map\n",
695 						    error);
696 
697 					xs->error = XS_DRIVER_STUFFUP;
698 					scsi_done(xs);
699 					goto ready;
700 				}
701 				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
702 				    xfer->dm_mapsize,
703 				    (xs->flags & SCSI_DATA_IN) ?
704 				    BUS_DMASYNC_PREREAD :
705 				    BUS_DMASYNC_PREWRITE);
706 			}
707 
708 			gdt_enqueue_ccb(sc, ccb);
709 			/* XXX what if enqueue did not start a transfer? */
710 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
711 				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
712 					printf("%s: command %d timed out\n",
713 					    DEVNAME(sc),
714 					    ccb->gc_cmd_index);
715 					xs->error = XS_TIMEOUT;
716 					scsi_done(xs);
717 					splx(s);
718 					return;
719 				}
720 			}
721 		}
722 
723 	ready:
724 		/*
725 		 * Don't process the queue if we are polling.
726 		 */
727 		if (polled) {
728 			break;
729 		}
730 	}
731 
732 	splx(s);
733 }
734 
735 /* XXX Currently only for cacheservice, returns 0 if busy */
736 int
737 gdt_exec_ccb(struct gdt_ccb *ccb)
738 {
739 	struct scsi_xfer *xs = ccb->gc_xs;
740 	struct scsi_link *link = xs->sc_link;
741 	struct gdt_softc *sc = link->adapter_softc;
742 	u_int8_t target = link->target;
743 	u_int32_t sg_canz;
744 	bus_dmamap_t xfer;
745 	int i;
746 #if 1 /* XXX */
747 	static int __level = 0;
748 
749 	if (__level++ > 0)
750 		panic("level > 0");
751 #endif
752 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
753 
754 	sc->sc_cmd_cnt = 0;
755 
756 	/*
757 	 * XXX Yeah I know it's an always-true condition, but that may change
758 	 * later.
759 	 */
760 	if (sc->sc_cmd_cnt == 0)
761 		sc->sc_set_sema0(sc);
762 
763 	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
764 	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
765 	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
766 	    target);
767 
768 	switch (xs->cmd->opcode) {
769 	case PREVENT_ALLOW:
770 	case SYNCHRONIZE_CACHE:
771 		if (xs->cmd->opcode == PREVENT_ALLOW) {
772 			/* XXX PREVENT_ALLOW support goes here */
773 		} else {
774 			GDT_DPRINTF(GDT_D_CMD,
775 			    ("SYNCHRONIZE CACHE tgt %d ", target));
776 			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
777 		}
778 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
779 		    1);
780 		sg_canz = 0;
781 		break;
782 
783 	case WRITE_COMMAND:
784 	case WRITE_BIG:
785 		/* XXX WRITE_THR could be supported too */
786 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
787 		break;
788 
789 	case READ_COMMAND:
790 	case READ_BIG:
791 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
792 		break;
793 	}
794 
795 	if (xs->cmd->opcode != PREVENT_ALLOW &&
796 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
797 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
798 		    ccb->gc_blockno);
799 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
800 		    ccb->gc_blockcnt);
801 
802 		xfer = ccb->gc_dmamap_xfer;
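		/*
		 * With scatter/gather support the destination address is set
		 * to 0xffffffff and a list of bus address/length pairs is
		 * appended, terminated by a zero-length entry; otherwise only
		 * the first DMA segment's address is passed.
		 */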
803 		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
804 			gdt_enc32(
805 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
806 			    0xffffffff);
807 			for (i = 0; i < xfer->dm_nsegs; i++) {
808 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
809 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
810 				    GDT_SG_PTR,
811 				    xfer->dm_segs[i].ds_addr);
812 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
813 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
814 				    GDT_SG_LEN,
815 				    xfer->dm_segs[i].ds_len);
816 				GDT_DPRINTF(GDT_D_IO,
817 				    ("#%d va %p pa %p len %x\n", i, xs->data,
818 				    xfer->dm_segs[i].ds_addr,
819 				    xfer->dm_segs[i].ds_len));
820 			}
821 			sg_canz = xfer->dm_nsegs;
822 			gdt_enc32(
823 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
824 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
825 		} else {
826 			/* XXX Hardly correct */
827 			gdt_enc32(
828 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
829 			    xfer->dm_segs[0].ds_addr);
830 			sg_canz = 0;
831 		}
832 	}
833 	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
834 
835 	sc->sc_cmd_len =
836 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
837 	    sizeof (u_int32_t));
838 
839 	if (sc->sc_cmd_cnt > 0 &&
840 	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
841 	    sc->sc_ic_all_size) {
842 		printf("%s: DPMEM overflow\n", DEVNAME(sc));
843 		xs->error = XS_BUSY;
844 #if 1 /* XXX */
845 		__level--;
846 #endif
847 		return (0);
848 	}
849 
850 	sc->sc_copy_cmd(sc, ccb);
851 	sc->sc_release_event(sc, ccb);
852 
853 	xs->error = XS_NOERROR;
854 	xs->resid = 0;
855 #if 1 /* XXX */
856 	__level--;
857 #endif
858 	return (1);
859 }
860 
861 void
862 gdt_copy_internal_data(struct scsi_xfer *xs, u_int8_t *data, size_t size)
863 {
864 	size_t copy_cnt;
865 
866 	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));
867 
868 	if (!xs->datalen)
869 		printf("uio move not yet supported\n");
870 	else {
871 		copy_cnt = MIN(size, xs->datalen);
872 		bcopy(data, xs->data, copy_cnt);
873 	}
874 }
875 
876 /* Emulated SCSI operation on cache device */
877 void
878 gdt_internal_cache_cmd(struct scsi_xfer *xs)
879 {
880 	struct scsi_link *link = xs->sc_link;
881 	struct gdt_softc *sc = link->adapter_softc;
882 	struct scsi_inquiry_data inq;
883 	struct scsi_sense_data sd;
884 	struct scsi_read_cap_data rcd;
885 	u_int8_t target = link->target;
886 
887 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
888 
889 	switch (xs->cmd->opcode) {
890 	case TEST_UNIT_READY:
891 	case START_STOP:
892 #if 0
893 	case VERIFY:
894 #endif
895 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
896 		    target));
897 		break;
898 
899 	case REQUEST_SENSE:
900 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
901 		bzero(&sd, sizeof sd);
902 		sd.error_code = SSD_ERRCODE_CURRENT;
903 		sd.segment = 0;
904 		sd.flags = SKEY_NO_SENSE;
905 		gdt_enc32(sd.info, 0);
906 		sd.extra_len = 0;
907 		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
908 		break;
909 
910 	case INQUIRY:
911 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
912 		    sc->sc_hdr[target].hd_devtype));
913 		bzero(&inq, sizeof inq);
914 		inq.device =
915 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
916 		inq.dev_qual2 =
917 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
918 		inq.version = 2;
919 		inq.response_format = 2;
920 		inq.additional_length = 32;
921 		inq.flags |= SID_CmdQue;
922 		strlcpy(inq.vendor, "ICP	   ", sizeof inq.vendor);
923 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
924 		    target);
925 		strlcpy(inq.revision, "	 ", sizeof inq.revision);
926 		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
927 		break;
928 
929 	case READ_CAPACITY:
930 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
931 		bzero(&rcd, sizeof rcd);
932 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
933 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
934 		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
935 		break;
936 
937 	default:
938 		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
939 		    xs->cmd->opcode, target));
940 		xs->error = XS_DRIVER_STUFFUP;
941 		return;
942 	}
943 
944 	xs->error = XS_NOERROR;
945 }
946 
947 void
948 gdt_clear_events(struct gdt_softc *sc)
949 {
950 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));
951 
952 	/* XXX To be implemented */
953 }
954 
955 int
956 gdt_async_event(struct gdt_softc *sc, int service)
957 {
958 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));
959 
960 	if (service == GDT_SCREENSERVICE) {
961 		/* XXX To be implemented */
962 	} else {
963 		/* XXX To be implemented */
964 	}
965 
966 	return (0);
967 }
968 
969 int
970 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
971     struct scsi_xfer *xs)
972 {
973 	GDT_DPRINTF(GDT_D_INTR,
974 	    ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
975 
976 	if (service == GDT_SCREENSERVICE) {
977 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
978 		/* XXX To be implemented */
979 		return (0);
980 	} else {
981 		switch (sc->sc_status) {
982 		case GDT_S_OK:
983 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
984 			/* XXX To be implemented */
985 			break;
986 		case GDT_S_BSY:
987 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
988 			/* XXX To be implemented */
989 			return (2);
990 		default:
991 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
992 			    sc->sc_status));
993 			/* XXX To be implemented */
994 			return (0);
995 		}
996 	}
997 
998 	return (1);
999 }
1000 
1001 int
1002 gdt_intr(void *arg)
1003 {
1004 	struct gdt_softc *sc = arg;
1005 	struct gdt_intr_ctx ctx;
1006 	int chain = 1;
1007 	int sync_val = 0;
1008 	struct scsi_xfer *xs = NULL;
1009 	int prev_cmd;
1010 	struct gdt_ccb *ccb;
1011 
1012 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));
1013 
1014 	/* If polling and we were not called from gdt_wait, just return */
1015 	if (gdt_polling && !gdt_from_wait)
1016 		return (0);
1017 
1018 	ctx.istatus = sc->sc_get_status(sc);
1019 	if (!ctx.istatus) {
1020 		sc->sc_status = GDT_S_NO_STATUS;
1021 		return (0);
1022 	}
1023 
1024 	gdt_wait_index = 0;
1025 	ctx.service = ctx.info2 = 0;
1026 
1027 	sc->sc_intr(sc, &ctx);
1028 
1029 	sc->sc_status = ctx.cmd_status;
1030 	sc->sc_info = ctx.info;
1031 	sc->sc_info2 = ctx.info2;
1032 
1033 	if (gdt_from_wait) {
1034 		gdt_wait_gdt = sc;
1035 		gdt_wait_index = ctx.istatus;
1036 	}
1037 
1038 	switch (ctx.istatus) {
1039 	case GDT_ASYNCINDEX:
1040 		gdt_async_event(sc, ctx.service);
1041 		goto finish;
1042 
1043 	case GDT_SPEZINDEX:
1044 		printf("%s: uninitialized or unknown service (%d %d)\n",
1045 		    DEVNAME(sc), ctx.info, ctx.info2);
1046 		chain = 0;
1047 		goto finish;
1048 	}
1049 
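	/* Any other index identifies a driver ccb; see gdt_attach(). */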
1050 	ccb = &sc->sc_ccbs[ctx.istatus - 2];
1051 	xs = ccb->gc_xs;
1052 	if (!gdt_polling)
1053 		timeout_del(&xs->stimeout);
1054 	ctx.service = ccb->gc_service;
1055 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1056 	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
1057 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
1058 		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
1059 		    ccb->gc_dmamap_xfer->dm_mapsize,
1060 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1061 		    BUS_DMASYNC_POSTWRITE);
1062 		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
1063 	}
1064 	switch (prev_cmd) {
1065 	case GDT_GCF_UNUSED:
1066 		/* XXX Not yet implemented */
1067 		chain = 0;
1068 		goto finish;
1069 	case GDT_GCF_INTERNAL:
1070 		chain = 0;
1071 		goto finish;
1072 	}
1073 
1074 	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);
1075 
1076  finish:
1077 	switch (sync_val) {
1078 	case 0:
1079 		if (xs && gdt_from_wait)
1080 			scsi_done(xs);
1081 		break;
1082 	case 1:
1083 		scsi_done(xs);
1084 		break;
1085 
1086 	case 2:
1087 		gdt_enqueue(sc, xs, 0);
1088 	}
1089 
1090 	if (chain)
1091 		gdt_chain(sc);
1092 
1093 	return (1);
1094 }
1095 
1096 int
1097 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1098 {
1099 	int s, rslt, rv = 0;
1100 
1101 	GDT_DPRINTF(GDT_D_MISC,
1102 	    ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1103 
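	/*
	 * Poll gdt_intr() by hand until our command index is reported back
	 * or the timeout (in milliseconds) expires.
	 */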
1104 	gdt_from_wait = 1;
1105 	do {
1106 		s = splbio();
1107 		rslt = gdt_intr(sc);
1108 		splx(s);
1109 		if (rslt && sc == gdt_wait_gdt &&
1110 		    ccb->gc_cmd_index == gdt_wait_index) {
1111 			rv = 1;
1112 			break;
1113 		}
1114 		DELAY(1000); /* 1 millisecond */
1115 	} while (--timeout);
1116 	gdt_from_wait = 0;
1117 
1118 	while (sc->sc_test_busy(sc))
1119 		DELAY(0);		/* XXX correct? */
1120 
1121 	return (rv);
1122 }
1123 
1124 int
1125 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1126     u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1127 {
1128 	int retries, rslt;
1129 	struct gdt_ccb *ccb;
1130 
1131 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1132 	    sc, service, opcode, arg1, arg2, arg3));
1133 
1134 	bzero(sc->sc_cmd, GDT_CMD_SZ);
1135 
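	/*
	 * Build and issue the command synchronously, retrying up to
	 * GDT_RETRIES times while the controller reports busy.
	 */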
1136 	for (retries = GDT_RETRIES; ; ) {
1137 		ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1138 		if (ccb == NULL) {
1139 			printf("%s: no free command index found\n",
1140 			    DEVNAME(sc));
1141 			return (0);
1142 		}
1143 		ccb->gc_service = service;
1144 		ccb->gc_xs = NULL;
1145 		ccb->gc_blockno = ccb->gc_blockcnt = 0;
1146 		ccb->gc_timeout = ccb->gc_flags = 0;
1148 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1149 
1150 		sc->sc_set_sema0(sc);
1151 		gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1152 		    ccb->gc_cmd_index);
1153 		gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1154 		gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1155 
1156 		switch (service) {
1157 		case GDT_CACHESERVICE:
1158 			if (opcode == GDT_IOCTL) {
1159 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1160 				    GDT_IOCTL_SUBFUNC, arg1);
1161 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1162 				    GDT_IOCTL_CHANNEL, arg2);
1163 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1164 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1165 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1166 				    GDT_IOCTL_P_PARAM,
1167 				    sc->sc_scratch_seg.ds_addr);
1168 			} else {
1169 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1170 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1171 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1172 				    GDT_CACHE_BLOCKNO, arg2);
1173 			}
1174 			break;
1175 
1176 		case GDT_SCSIRAWSERVICE:
1177 			gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1178 			    GDT_RAW_DIRECTION, arg1);
1179 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1180 			    (u_int8_t)arg2;
1181 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1182 			    (u_int8_t)arg3;
1183 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1184 			    (u_int8_t)(arg3 >> 8);
1185 		}
1186 
1187 		sc->sc_cmd_len = GDT_CMD_SZ;
1188 		sc->sc_cmd_off = 0;
1189 		sc->sc_cmd_cnt = 0;
1190 		sc->sc_copy_cmd(sc, ccb);
1191 		sc->sc_release_event(sc, ccb);
1192 		DELAY(20);
1193 
1194 		rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT);
1195 		scsi_io_put(&sc->sc_iopool, ccb);
1196 
1197 		if (!rslt)
1198 			return (0);
1199 		if (sc->sc_status != GDT_S_BSY || --retries == 0)
1200 			break;
1201 		DELAY(1);
1202 	}
1203 	return (sc->sc_status == GDT_S_OK);
1204 }
1205 
1206 void *
1207 gdt_ccb_alloc(void *xsc)
1208 {
1209 	struct gdt_softc *sc = xsc;
1210 	struct gdt_ccb *ccb;
1211 
1212 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc));
1213 
1214 	mtx_enter(&sc->sc_ccb_mtx);
1215 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1216 	if (ccb != NULL)
1217 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1218 	mtx_leave(&sc->sc_ccb_mtx);
1219 
1220 	return (ccb);
1221 }
1222 
1223 void
1224 gdt_ccb_free(void *xsc, void *xccb)
1225 {
1226 	struct gdt_softc *sc = xsc;
1227 	struct gdt_ccb *ccb = xccb;
1228 	int wake = 0;
1229 
1230 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb));
1231 
1232 	mtx_enter(&sc->sc_ccb_mtx);
1233 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1234 	/* If the free list was empty, wake up potential waiters. */
1235 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1236 		wake = 1;
1237 	mtx_leave(&sc->sc_ccb_mtx);
1238 
1239 	if (wake)
1240 		wakeup(&sc->sc_free_ccb);
1241 }
1242 
1243 void
1244 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1245 {
1246 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1247 
1248 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1249 	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1250 	gdt_start_ccbs(sc);
1251 }
1252 
1253 void
1254 gdt_start_ccbs(struct gdt_softc *sc)
1255 {
1256 	struct gdt_ccb *ccb;
1257 	struct scsi_xfer *xs;
1258 
1259 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1260 
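	/*
	 * Hand queued ccbs to the controller; if gdt_exec_ccb() reports it
	 * busy, leave the ccb queued and arm the watchdog to retry later.
	 */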
1261 	while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1262 
1263 		xs = ccb->gc_xs;
1264 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1265 			timeout_del(&xs->stimeout);
1266 
1267 		if (gdt_exec_ccb(ccb) == 0) {
1268 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1269 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1270 			timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1271 			break;
1272 		}
1273 		TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1274 
1275 		if ((xs->flags & SCSI_POLL) == 0) {
1276 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1277 			timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1278 		}
1279 	}
1280 }
1281 
1282 void
1283 gdt_chain(struct gdt_softc *sc)
1284 {
1285 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1286 
1287 	if (!SIMPLEQ_EMPTY(&sc->sc_queue))
1288 		gdt_scsi_cmd(SIMPLEQ_FIRST(&sc->sc_queue));
1289 }
1290 
1291 void
1292 gdt_timeout(void *arg)
1293 {
1294 	struct gdt_ccb *ccb = arg;
1295 	struct scsi_link *link = ccb->gc_xs->sc_link;
1296 	struct gdt_softc *sc = link->adapter_softc;
1297 	int s;
1298 
1299 	sc_print_addr(link);
1300 	printf("timed out\n");
1301 
1302 	/* XXX Test for multiple timeouts */
1303 
1304 	ccb->gc_xs->error = XS_TIMEOUT;
1305 	s = splbio();
1306 	gdt_enqueue_ccb(sc, ccb);
1307 	splx(s);
1308 }
1309 
1310 void
1311 gdt_watchdog(void *arg)
1312 {
1313 	struct gdt_ccb *ccb = arg;
1314 	struct scsi_link *link = ccb->gc_xs->sc_link;
1315 	struct gdt_softc *sc = link->adapter_softc;
1316 	int s;
1317 
1318 	s = splbio();
1319 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1320 	gdt_start_ccbs(sc);
1321 	splx(s);
1322 }
1323 
1324 #if NBIO > 0
1325 int
1326 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1327 {
1328 	struct gdt_softc *sc = (struct gdt_softc *)dev;
1329 	int error = 0;
1330 
1331 	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1332 
1333 	switch (cmd) {
1334 	case BIOCINQ:
1335 		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1336 		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1337 		break;
1338 
1339 	case BIOCVOL:
1340 		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1341 		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1342 		break;
1343 
1344 	case BIOCDISK:
1345 		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1346 		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1347 		break;
1348 
1349 	case BIOCALARM:
1350 		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1351 		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1352 		break;
1353 
1354 	case BIOCSETSTATE:
1355 		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1356 		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1357 		break;
1358 
1359 	default:
1360 		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1361 		error = ENOTTY;
1362 	}
1363 
1364 	return (error);
1365 }
1366 
1367 int
1368 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1369 {
1370 	bi->bi_novol = sc->sc_ndevs;
1371 	bi->bi_nodisk = sc->sc_total_disks;
1372 
1373 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1374 
1375 	return (0);
1376 }
1377 
1378 int
1379 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
1380 {
1381 	return (1); /* XXX not yet */
1382 }
1383 
1384 int
1385 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
1386 {
1387 	return (1); /* XXX not yet */
1388 }
1389 
1390 int
1391 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
1392 {
1393 	return (1); /* XXX not yet */
1394 }
1395 
1396 int
1397 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
1398 {
1399 	return (1); /* XXX not yet */
1400 }
1401 #endif /* NBIO > 0 */
1402