1 /*	$OpenBSD: gdt_common.c,v 1.62 2013/03/04 00:41:54 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This driver would not have been written if not for the hardware donations
29  * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 
40 #include <machine/bus.h>
41 
42 #include <uvm/uvm_extern.h>
43 
44 #include <scsi/scsi_all.h>
45 #include <scsi/scsi_disk.h>
46 #include <scsi/scsiconf.h>
47 
48 #include <dev/biovar.h>
49 #include <dev/ic/gdtreg.h>
50 #include <dev/ic/gdtvar.h>
51 
52 #include "bio.h"
53 
54 #ifdef GDT_DEBUG
55 int gdt_maxcmds = GDT_MAXCMDS;
56 #undef GDT_MAXCMDS
57 #define GDT_MAXCMDS gdt_maxcmds
58 #endif
59 
60 #define GDT_DRIVER_VERSION 1
61 #define GDT_DRIVER_SUBVERSION 2
62 
63 int	gdt_async_event(struct gdt_softc *, int);
64 void	gdt_chain(struct gdt_softc *);
65 void	gdt_clear_events(struct gdt_softc *);
66 void	gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t);
67 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
68 void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
69 void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
70 void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
71 int	gdt_exec_ccb(struct gdt_ccb *);
72 void	gdt_ccb_free(void *, void *);
73 void   *gdt_ccb_alloc(void *);
74 void	gdt_internal_cache_cmd(struct scsi_xfer *);
75 int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
76     u_int32_t, u_int32_t, u_int32_t);
77 #if NBIO > 0
78 int	gdt_ioctl(struct device *, u_long, caddr_t);
79 int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
80 int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
81 int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
82 int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
83 int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
84 #endif /* NBIO > 0 */
85 void	gdt_scsi_cmd(struct scsi_xfer *);
86 void	gdt_start_ccbs(struct gdt_softc *);
87 int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
88     struct scsi_xfer *);
89 void	gdt_timeout(void *);
90 int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
91 void	gdt_watchdog(void *);
92 
93 struct cfdriver gdt_cd = {
94 	NULL, "gdt", DV_DULL
95 };
96 
97 struct scsi_adapter gdt_switch = {
98 	gdt_scsi_cmd, gdtminphys, 0, 0,
99 };
100 
101 int gdt_cnt = 0;
102 u_int8_t gdt_polling;
103 u_int8_t gdt_from_wait;
104 struct gdt_softc *gdt_wait_gdt;
105 int	gdt_wait_index;
106 #ifdef GDT_DEBUG
107 int	gdt_debug = GDT_DEBUG;
108 #endif
109 
110 int
111 gdt_attach(struct gdt_softc *sc)
112 {
113 	struct scsibus_attach_args saa;
114 	u_int16_t cdev_cnt;
115 	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
116 
117 	gdt_polling = 1;
118 	gdt_from_wait = 0;
119 
120 	if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
121 	    &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
122 	    panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
123 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
124 	    GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
125 	    panic("%s: bus_dmamem_map failed", DEVNAME(sc));
126 
127 	gdt_clear_events(sc);
128 
129 	TAILQ_INIT(&sc->sc_free_ccb);
130 	TAILQ_INIT(&sc->sc_ccbq);
131 	TAILQ_INIT(&sc->sc_ucmdq);
132 	SIMPLEQ_INIT(&sc->sc_queue);
133 
134 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
135 	scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free);
136 
137 	/* Initialize the ccbs */
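	/*
	 * Command indices 0 and 1 are never used for ccbs: gdt_intr() maps a
	 * returned istatus back to its ccb as sc_ccbs[istatus - 2] after
	 * filtering out the GDT_ASYNCINDEX/GDT_SPEZINDEX values, hence the
	 * i + 2 below.
	 */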
138 	for (i = 0; i < GDT_MAXCMDS; i++) {
139 		sc->sc_ccbs[i].gc_cmd_index = i + 2;
140 		error = bus_dmamap_create(sc->sc_dmat,
141 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
142 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
143 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
144 		    &sc->sc_ccbs[i].gc_dmamap_xfer);
145 		if (error) {
146 			printf("%s: cannot create ccb dmamap (%d)",
147 			    DEVNAME(sc), error);
148 			return (1);
149 		}
150 		(void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
151 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
152 		    gc_chain);
153 	}
154 
155 	/* Fill in the prototype scsi_link. */
156 	sc->sc_link.adapter_softc = sc;
157 	sc->sc_link.adapter = &gdt_switch;
158 	/* openings will be filled in later. */
159 	sc->sc_link.adapter_buswidth =
160 	    (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
161 	sc->sc_link.adapter_target = sc->sc_link.adapter_buswidth;
162 	sc->sc_link.pool = &sc->sc_iopool;
163 
164 	if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
165 		printf("screen service initialization error %d\n",
166 		     sc->sc_status);
167 		return (1);
168 	}
169 
170 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
171 	    0)) {
172 		printf("cache service initialization error %d\n",
173 		    sc->sc_status);
174 		return (1);
175 	}
176 
177 	cdev_cnt = (u_int16_t)sc->sc_info;
178 
179 	/* Detect number of busses */
180 	gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
181 	sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
182 	sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
183 	sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
184 	gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
185 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
186 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
187 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
188 		sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
189 		for (i = 0; i < sc->sc_bus_cnt; i++) {
190 			id = sc->sc_scratch[GDT_IOC_HDR_SZ +
191 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
192 			sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
193 		}
194 
195 	} else {
196 		/* New method failed, use fallback. */
197 		for (i = 0; i < GDT_MAXBUS; i++) {
198 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
199 			if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
200 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
201 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
202 			    GDT_GETCH_SZ)) {
203 				if (i == 0) {
204 					printf("cannot get channel count, "
205 					    "error %d\n", sc->sc_status);
206 					return (1);
207 				}
208 				break;
209 			}
210 			sc->sc_bus_id[i] =
211 			    (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
212 			    sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
213 		}
214 		sc->sc_bus_cnt = i;
215 	}
216 
217 	/* Read cache configuration */
218 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
219 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
220 		printf("cannot get cache info, error %d\n", sc->sc_status);
221 		return (1);
222 	}
223 	sc->sc_cpar.cp_version =
224 	    gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
225 	sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
226 	sc->sc_cpar.cp_strategy =
227 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
228 	sc->sc_cpar.cp_write_back =
229 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
230 	sc->sc_cpar.cp_block_size =
231 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
232 
233 	/* Read board information and features */
234 	sc->sc_more_proc = 0;
235 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
236 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
237 		/* XXX A lot of these assignments can probably go later */
238 		sc->sc_binfo.bi_ser_no =
239 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
240 		bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
241 		    sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
242 		sc->sc_binfo.bi_ep_flags =
243 		    gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
244 		sc->sc_binfo.bi_proc_id =
245 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
246 		sc->sc_binfo.bi_memsize =
247 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
248 		sc->sc_binfo.bi_mem_banks =
249 		    sc->sc_scratch[GDT_BINFO_MEM_BANKS];
250 		sc->sc_binfo.bi_chan_type =
251 		    sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
252 		sc->sc_binfo.bi_chan_count =
253 		    sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
254 		sc->sc_binfo.bi_rdongle_pres =
255 		    sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
256 		sc->sc_binfo.bi_epr_fw_ver =
257 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
258 		sc->sc_binfo.bi_upd_fw_ver =
259 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
260 		sc->sc_binfo.bi_upd_revision =
261 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
262 		bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
263 		    sc->sc_binfo.bi_type_string,
264 		    sizeof sc->sc_binfo.bi_type_string);
265 		bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
266 		    sc->sc_binfo.bi_raid_string,
267 		    sizeof sc->sc_binfo.bi_raid_string);
268 		sc->sc_binfo.bi_update_pres =
269 		    sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
270 		sc->sc_binfo.bi_xor_pres =
271 		    sc->sc_scratch[GDT_BINFO_XOR_PRES];
272 		sc->sc_binfo.bi_prom_type =
273 		    sc->sc_scratch[GDT_BINFO_PROM_TYPE];
274 		sc->sc_binfo.bi_prom_count =
275 		    sc->sc_scratch[GDT_BINFO_PROM_COUNT];
276 		sc->sc_binfo.bi_dup_pres =
277 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
278 		sc->sc_binfo.bi_chan_pres =
279 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
280 		sc->sc_binfo.bi_mem_pres =
281 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
282 		sc->sc_binfo.bi_ft_bus_system =
283 		    sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
284 		sc->sc_binfo.bi_subtype_valid =
285 		    sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
286 		sc->sc_binfo.bi_board_subtype =
287 		    sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
288 		sc->sc_binfo.bi_rampar_pres =
289 		    sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
290 
291 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
292 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
293 			sc->sc_bfeat.bf_chaining =
294 			    sc->sc_scratch[GDT_BFEAT_CHAINING];
295 			sc->sc_bfeat.bf_striping =
296 			    sc->sc_scratch[GDT_BFEAT_STRIPING];
297 			sc->sc_bfeat.bf_mirroring =
298 			    sc->sc_scratch[GDT_BFEAT_MIRRORING];
299 			sc->sc_bfeat.bf_raid =
300 			    sc->sc_scratch[GDT_BFEAT_RAID];
301 			sc->sc_more_proc = 1;
302 		}
303 	} else {
304 		/* XXX Not implemented yet */
305 	}
306 
307 	/* Read more information */
308 	if (sc->sc_more_proc) {
309 		int bus, j;
310 		/* physical drives, channel addresses */
311 		/* step 1: get magical bus number from firmware */
312 		gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
313 		sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
314 		sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
315 		sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
316 		gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
317 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
318 		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
319 		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
320 			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
321 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
322 				sc->sc_raw[bus].ra_address =
323 				    gdt_dec32(sc->sc_scratch +
324 				    GDT_IOC_HDR_SZ +
325 				    GDT_IOC_SZ * bus +
326 				    GDT_IOC_ADDRESS);
327 				sc->sc_raw[bus].ra_local_no =
328 				    gdt_dec8(sc->sc_scratch +
329 				    GDT_IOC_HDR_SZ +
330 				    GDT_IOC_SZ * bus +
331 				    GDT_IOC_LOCAL_NO);
332 				GDT_DPRINTF(GDT_D_INFO, (
333 				    "bus: %d address: %x local: %x\n",
334 				    bus,
335 				    sc->sc_raw[bus].ra_address,
336 				    sc->sc_raw[bus].ra_local_no));
337 			}
338 		} else {
339 			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
340 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
341 				sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
342 				sc->sc_raw[bus].ra_local_no = bus;
343 				GDT_DPRINTF(GDT_D_INFO, (
344 				    "bus: %d address: %x local: %x\n",
345 				    bus,
346 				    sc->sc_raw[bus].ra_address,
347 				    sc->sc_raw[bus].ra_local_no));
348 			}
349 		}
350 		/* step 2: use magical bus number to get nr of phys disks */
351 		for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
352 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
353 			    sc->sc_raw[bus].ra_local_no);
354 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
355 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
356 			    sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
357 			    GDT_GETCH_SZ)) {
358 				sc->sc_raw[bus].ra_phys_cnt =
359 				    gdt_dec32(sc->sc_scratch +
360 				    GDT_GETCH_DRIVE_CNT);
361 				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
362 				    bus, sc->sc_raw[bus].ra_phys_cnt));
363 			}
364 
365 			/* step 3: get scsi disk nr */
366 			if (sc->sc_raw[bus].ra_phys_cnt > 0) {
367 				gdt_enc32(sc->sc_scratch +
368 				    GDT_GETSCSI_CHAN,
369 				    sc->sc_raw[bus].ra_local_no);
370 				gdt_enc32(sc->sc_scratch +
371 				    GDT_GETSCSI_CNT,
372 				    sc->sc_raw[bus].ra_phys_cnt);
373 				if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
374 				    GDT_IOCTL,
375 				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
376 				    sc->sc_raw[bus].ra_address |
377 				    GDT_INVALID_CHANNEL,
378 				    GDT_GETSCSI_SZ))
379 					for (j = 0;
380 					    j < sc->sc_raw[bus].ra_phys_cnt;
381 					    j++) {
382 						sc->sc_raw[bus].ra_id_list[j] =
383 						    gdt_dec32(sc->sc_scratch +
384 						    GDT_GETSCSI_LIST +
385 						    GDT_GETSCSI_LIST_SZ * j);
386 						GDT_DPRINTF(GDT_D_INFO,
387 						    ("  diskid: %d\n",
388 						    sc->sc_raw[bus].ra_id_list[j]));
389 					}
390 				else
391 					sc->sc_raw[bus].ra_phys_cnt = 0;
392 			}
393 			/* add found disks to grand total */
394 			sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
395 		}
396 	} /* if (sc->sc_more_proc) */
397 
398 	if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
399 		printf("raw service initialization error %d\n",
400 		    sc->sc_status);
401 		return (1);
402 	}
403 
404 	/* Set/get features raw service (scatter/gather) */
405 	sc->sc_raw_feat = 0;
406 	if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
407 	    GDT_SCATTER_GATHER, 0, 0))
408 		if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
409 		    0, 0))
410 			sc->sc_raw_feat = sc->sc_info;
411 
412 	/* Set/get features cache service (scatter/gather) */
413 	sc->sc_cache_feat = 0;
414 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
415 	    GDT_SCATTER_GATHER, 0))
416 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
417 		    0))
418 			sc->sc_cache_feat = sc->sc_info;
419 
420 	/* XXX Linux reserve drives here, potentially */
421 
422 	sc->sc_ndevs = 0;
423 	/* Scan for cache devices */
424 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
425 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
426 		    0)) {
427 			sc->sc_hdr[i].hd_present = 1;
428 			sc->sc_hdr[i].hd_size = sc->sc_info;
429 
430 			if (sc->sc_hdr[i].hd_size > 0)
431 				sc->sc_ndevs++;
432 
433 			/*
434 			 * Evaluate mapping (sectors per head, heads per cyl)
435 			 */
436 			sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
437 			if (sc->sc_info2 == 0)
438 				gdt_eval_mapping(sc->sc_hdr[i].hd_size,
439 				    &drv_cyls, &drv_hds, &drv_secs);
440 			else {
441 				drv_hds = sc->sc_info2 & 0xff;
442 				drv_secs = (sc->sc_info2 >> 8) & 0xff;
443 				drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
444 				    drv_secs;
445 			}
446 			sc->sc_hdr[i].hd_heads = drv_hds;
447 			sc->sc_hdr[i].hd_secs = drv_secs;
448 			/* Round the size */
449 			sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
450 
451 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
452 			    GDT_DEVTYPE, i, 0, 0))
453 				sc->sc_hdr[i].hd_devtype = sc->sc_info;
454 		}
455 
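	/*
	 * Split the command slots, less the internal reserve, evenly among
	 * the cache devices found; with no devices there is nothing to
	 * queue, so openings stays 0.
	 */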
456 	if (sc->sc_ndevs == 0)
457 		sc->sc_link.openings = 0;
458 	else
459 		sc->sc_link.openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
460 		    sc->sc_ndevs;
461 
462 	printf("dpmem %llx %d-bus %d cache device%s\n",
463 	    (long long)sc->sc_dpmembase,
464 	    sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
465 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
466 	    DEVNAME(sc), sc->sc_cpar.cp_version,
467 	    sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
468 	    sc->sc_cpar.cp_write_back ? "on" : "off",
469 	    sc->sc_cpar.cp_block_size);
470 #if 1
471 	printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
472 	    sc->sc_raw_feat, sc->sc_cache_feat);
473 #endif
474 
475 #if NBIO > 0
476 	if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
477 		panic("%s: controller registration failed", DEVNAME(sc));
478 #endif
479 	gdt_cnt++;
480 
481 	bzero(&saa, sizeof(saa));
482 	saa.saa_sc_link = &sc->sc_link;
483 
484 	config_found(&sc->sc_dev, &saa, scsiprint);
485 
486 	gdt_polling = 0;
487 	return (0);
488 }
489 
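/*
 * Pick a fake C/H/S geometry for a cache drive of the given size (in
 * sectors): try 64 heads and 32 sectors first, then 127 and 63, and fall
 * back to the big geometry if the cylinder count still exceeds GDT_MAXCYLS.
 */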
490 void
491 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
492 {
493 	*cyls = size / GDT_HEADS / GDT_SECS;
494 	if (*cyls < GDT_MAXCYLS) {
495 		*heads = GDT_HEADS;
496 		*secs = GDT_SECS;
497 	} else {
498 		/* Too high for 64 * 32 */
499 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
500 		if (*cyls < GDT_MAXCYLS) {
501 			*heads = GDT_MEDHEADS;
502 			*secs = GDT_MEDSECS;
503 		} else {
504 			/* Too high for 127 * 63 */
505 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
506 			*heads = GDT_BIGHEADS;
507 			*secs = GDT_BIGSECS;
508 		}
509 	}
510 }
511 
512 /*
513  * Insert a command into the driver queue, either at the front or at the tail.
514  * It's ok to overload the freelist link as these structures are never on
515  * the freelist at this time.
516  */
517 void
518 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
519 {
520 	if (infront)
521 		SIMPLEQ_INSERT_HEAD(&sc->sc_queue, xs, xfer_list);
522 	else
523 		SIMPLEQ_INSERT_TAIL(&sc->sc_queue, xs, xfer_list);
524 }
525 
526 /*
527  * Pull a command off the front of the driver queue.
528  */
529 struct scsi_xfer *
530 gdt_dequeue(struct gdt_softc *sc)
531 {
532 	struct scsi_xfer *xs;
533 
534 	xs = SIMPLEQ_FIRST(&sc->sc_queue);
535 	if (xs != NULL)
536 		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, xfer_list);
537 
538 	return (xs);
539 }
540 
541 /*
542  * Start a SCSI operation on a cache device.
543  * XXX Polled operation is not yet complete.  What kind of locking do we need?
544  */
545 void
546 gdt_scsi_cmd(struct scsi_xfer *xs)
547 {
548 	struct scsi_link *link = xs->sc_link;
549 	struct gdt_softc *sc = link->adapter_softc;
550 	u_int8_t target = link->target;
551 	struct gdt_ccb *ccb;
552 	u_int32_t blockno, blockcnt;
553 	struct scsi_rw *rw;
554 	struct scsi_rw_big *rwb;
555 	bus_dmamap_t xfer;
556 	int error;
557 	int s;
558 	int polled;
559 
560 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
561 
562 	s = splbio();
563 
564 	xs->error = XS_NOERROR;
565 
566 	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
567 	    link->lun != 0) {
568 		/*
569 		 * XXX Should be XS_SENSE but that would require setting up a
570 		 * faked sense too.
571 		 */
572 		xs->error = XS_DRIVER_STUFFUP;
573 		scsi_done(xs);
574 		splx(s);
575 		return;
576 	}
577 
578 	/* Don't double enqueue if we came from gdt_chain. */
579 	if (xs != SIMPLEQ_FIRST(&sc->sc_queue))
580 		gdt_enqueue(sc, xs, 0);
581 
582 	while ((xs = gdt_dequeue(sc)) != NULL) {
583 		xs->error = XS_NOERROR;
584 		ccb = NULL;
585 		link = xs->sc_link;
586 		target = link->target;
587 		polled = ISSET(xs->flags, SCSI_POLL);
588 
589 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
590 		    sc->sc_test_busy(sc)) {
591 			/*
592 			 * Put it back in front.  XXX Should we instead
593 			 * set xs->error to XS_BUSY?
594 			 */
595 			gdt_enqueue(sc, xs, 1);
596 			break;
597 		}
598 
599 		switch (xs->cmd->opcode) {
600 		case TEST_UNIT_READY:
601 		case REQUEST_SENSE:
602 		case INQUIRY:
603 		case MODE_SENSE:
604 		case START_STOP:
605 		case READ_CAPACITY:
606 #if 0
607 		case VERIFY:
608 #endif
609 			gdt_internal_cache_cmd(xs);
610 			scsi_done(xs);
611 			goto ready;
612 
613 		case PREVENT_ALLOW:
614 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
615 			/* XXX Not yet implemented */
616 			xs->error = XS_NOERROR;
617 			scsi_done(xs);
618 			goto ready;
619 
620 		default:
621 			GDT_DPRINTF(GDT_D_CMD,
622 			    ("unknown opc %d ", xs->cmd->opcode));
623 			/* XXX Not yet implemented */
624 			xs->error = XS_DRIVER_STUFFUP;
625 			scsi_done(xs);
626 			goto ready;
627 
628 		case READ_COMMAND:
629 		case READ_BIG:
630 		case WRITE_COMMAND:
631 		case WRITE_BIG:
632 		case SYNCHRONIZE_CACHE:
633 			/*
634 			 * A new command chain, start from the beginning.
635 			 */
636 			sc->sc_cmd_off = 0;
637 
638 			if (xs->cmd->opcode == SYNCHRONIZE_CACHE) {
639 				 blockno = blockcnt = 0;
640 			} else {
641 				/* A read or write operation. */
642 				if (xs->cmdlen == 6) {
643 					rw = (struct scsi_rw *)xs->cmd;
644 					blockno = _3btol(rw->addr) &
645 					    (SRW_TOPADDR << 16 | 0xffff);
646 					blockcnt =
647 					    rw->length ? rw->length : 0x100;
648 				} else {
649 					rwb = (struct scsi_rw_big *)xs->cmd;
650 					blockno = _4btol(rwb->addr);
651 					blockcnt = _2btol(rwb->length);
652 				}
653 				if (blockno >= sc->sc_hdr[target].hd_size ||
654 				    blockno + blockcnt >
655 				    sc->sc_hdr[target].hd_size) {
656 					printf(
657 					    "%s: out of bounds %u-%u >= %u\n",
658 					    DEVNAME(sc), blockno,
659 					    blockcnt,
660 					    sc->sc_hdr[target].hd_size);
661 					/*
662 					 * XXX Should be XS_SENSE but that
663 					 * would require setting up a faked
664 					 * sense too.
665 					 */
666 					xs->error = XS_DRIVER_STUFFUP;
667 					scsi_done(xs);
668 					goto ready;
669 				}
670 			}
671 
672 			ccb = xs->io;
673 			ccb->gc_blockno = blockno;
674 			ccb->gc_blockcnt = blockcnt;
675 			ccb->gc_xs = xs;
676 			ccb->gc_timeout = xs->timeout;
677 			ccb->gc_service = GDT_CACHESERVICE;
678 			ccb->gc_flags = 0;
679 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
680 
681 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
682 				xfer = ccb->gc_dmamap_xfer;
683 				error = bus_dmamap_load(sc->sc_dmat, xfer,
684 				    xs->data, xs->datalen, NULL,
685 				    (xs->flags & SCSI_NOSLEEP) ?
686 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
687 				if (error) {
688 					printf("%s: gdt_scsi_cmd: ",
689 					    DEVNAME(sc));
690 					if (error == EFBIG)
691 						printf(
692 						    "more than %d dma segs\n",
693 						    GDT_MAXOFFSETS);
694 					else
695 						printf("error %d "
696 						    "loading dma map\n",
697 						    error);
698 
699 					xs->error = XS_DRIVER_STUFFUP;
700 					scsi_done(xs);
701 					goto ready;
702 				}
703 				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
704 				    xfer->dm_mapsize,
705 				    (xs->flags & SCSI_DATA_IN) ?
706 				    BUS_DMASYNC_PREREAD :
707 				    BUS_DMASYNC_PREWRITE);
708 			}
709 
710 			gdt_enqueue_ccb(sc, ccb);
711 			/* XXX what if enqueue did not start a transfer? */
712 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
713 				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
714 					printf("%s: command %d timed out\n",
715 					    DEVNAME(sc),
716 					    ccb->gc_cmd_index);
717 					xs->error = XS_TIMEOUT;
718 					scsi_done(xs);
719 					splx(s);
720 					return;
721 				}
722 			}
723 		}
724 
725 	ready:
726 		/*
727 		 * Don't process the queue if we are polling.
728 		 */
729 		if (polled) {
730 			break;
731 		}
732 	}
733 
734 	splx(s);
735 }
736 
737 /* XXX Currently only for cacheservice, returns 0 if busy */
738 int
739 gdt_exec_ccb(struct gdt_ccb *ccb)
740 {
741 	struct scsi_xfer *xs = ccb->gc_xs;
742 	struct scsi_link *link = xs->sc_link;
743 	struct gdt_softc *sc = link->adapter_softc;
744 	u_int8_t target = link->target;
745 	u_int32_t sg_canz;
746 	bus_dmamap_t xfer;
747 	int i;
748 #if 1 /* XXX */
749 	static int __level = 0;
750 
751 	if (__level++ > 0)
752 		panic("level > 0");
753 #endif
754 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
755 
756 	sc->sc_cmd_cnt = 0;
757 
758 	/*
759 	 * XXX Yeah I know it's an always-true condition, but that may change
760 	 * later.
761 	 */
762 	if (sc->sc_cmd_cnt == 0)
763 		sc->sc_set_sema0(sc);
764 
765 	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
766 	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
767 	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
768 	    target);
769 
770 	switch (xs->cmd->opcode) {
771 	case PREVENT_ALLOW:
772 	case SYNCHRONIZE_CACHE:
773 		if (xs->cmd->opcode == PREVENT_ALLOW) {
774 			/* XXX PREVENT_ALLOW support goes here */
775 		} else {
776 			GDT_DPRINTF(GDT_D_CMD,
777 			    ("SYNCHRONIZE CACHE tgt %d ", target));
778 			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
779 		}
780 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
781 		    1);
782 		sg_canz = 0;
783 		break;
784 
785 	case WRITE_COMMAND:
786 	case WRITE_BIG:
787 		/* XXX WRITE_THR could be supported too */
788 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
789 		break;
790 
791 	case READ_COMMAND:
792 	case READ_BIG:
793 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
794 		break;
795 	}
796 
797 	if (xs->cmd->opcode != PREVENT_ALLOW &&
798 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
799 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
800 		    ccb->gc_blockno);
801 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
802 		    ccb->gc_blockcnt);
803 
804 		xfer = ccb->gc_dmamap_xfer;
805 		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
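			/*
			 * Emit one firmware scatter/gather entry per DMA
			 * segment and terminate the list with a zero-length
			 * entry; sg_canz ends up holding the segment count.
			 */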
806 			gdt_enc32(
807 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
808 			    0xffffffff);
809 			for (i = 0; i < xfer->dm_nsegs; i++) {
810 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
811 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
812 				    GDT_SG_PTR,
813 				    xfer->dm_segs[i].ds_addr);
814 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
815 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
816 				    GDT_SG_LEN,
817 				    xfer->dm_segs[i].ds_len);
818 				GDT_DPRINTF(GDT_D_IO,
819 				    ("#%d va %p pa %p len %x\n", i, buf,
820 				    xfer->dm_segs[i].ds_addr,
821 				    xfer->dm_segs[i].ds_len));
822 			}
823 			sg_canz = xfer->dm_nsegs;
824 			gdt_enc32(
825 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
826 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
827 		} else {
828 			/* XXX Hardly correct */
829 			gdt_enc32(
830 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
831 			    xfer->dm_segs[0].ds_addr);
832 			sg_canz = 0;
833 		}
834 	}
835 	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
836 
837 	sc->sc_cmd_len =
838 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
839 	    sizeof (u_int32_t));
840 
841 	if (sc->sc_cmd_cnt > 0 &&
842 	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
843 	    sc->sc_ic_all_size) {
844 		printf("%s: DPMEM overflow\n", DEVNAME(sc));
845 		xs->error = XS_BUSY;
846 #if 1 /* XXX */
847 		__level--;
848 #endif
849 		return (0);
850 	}
851 
852 	sc->sc_copy_cmd(sc, ccb);
853 	sc->sc_release_event(sc, ccb);
854 
855 	xs->error = XS_NOERROR;
856 	xs->resid = 0;
857 #if 1 /* XXX */
858 	__level--;
859 #endif
860 	return (1);
861 }
862 
863 void
864 gdt_copy_internal_data(struct scsi_xfer *xs, u_int8_t *data, size_t size)
865 {
866 	size_t copy_cnt;
867 
868 	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));
869 
870 	if (!xs->datalen)
871 		printf("uio move not yet supported\n");
872 	else {
873 		copy_cnt = MIN(size, xs->datalen);
874 		bcopy(data, xs->data, copy_cnt);
875 	}
876 }
877 
878 /* Emulated SCSI operation on cache device */
879 void
880 gdt_internal_cache_cmd(struct scsi_xfer *xs)
881 {
882 	struct scsi_link *link = xs->sc_link;
883 	struct gdt_softc *sc = link->adapter_softc;
884 	struct scsi_inquiry_data inq;
885 	struct scsi_sense_data sd;
886 	struct scsi_read_cap_data rcd;
887 	u_int8_t target = link->target;
888 
889 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
890 
891 	switch (xs->cmd->opcode) {
892 	case TEST_UNIT_READY:
893 	case START_STOP:
894 #if 0
895 	case VERIFY:
896 #endif
897 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
898 		    target));
899 		break;
900 
901 	case REQUEST_SENSE:
902 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
903 		bzero(&sd, sizeof sd);
904 		sd.error_code = SSD_ERRCODE_CURRENT;
905 		sd.segment = 0;
906 		sd.flags = SKEY_NO_SENSE;
907 		gdt_enc32(sd.info, 0);
908 		sd.extra_len = 0;
909 		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
910 		break;
911 
912 	case INQUIRY:
913 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
914 		    sc->sc_hdr[target].hd_devtype));
915 		bzero(&inq, sizeof inq);
916 		inq.device =
917 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
918 		inq.dev_qual2 =
919 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
920 		inq.version = 2;
921 		inq.response_format = 2;
922 		inq.additional_length = 32;
923 		inq.flags |= SID_CmdQue;
924 		strlcpy(inq.vendor, "ICP	   ", sizeof inq.vendor);
925 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
926 		    target);
927 		strlcpy(inq.revision, "	 ", sizeof inq.revision);
928 		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
929 		break;
930 
931 	case READ_CAPACITY:
932 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
933 		bzero(&rcd, sizeof rcd);
934 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
935 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
936 		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
937 		break;
938 
939 	default:
940 		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
941 		    xs->cmd->opcode, target));
942 		xs->error = XS_DRIVER_STUFFUP;
943 		return;
944 	}
945 
946 	xs->error = XS_NOERROR;
947 }
948 
949 void
950 gdt_clear_events(struct gdt_softc *sc)
951 {
952 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));
953 
954 	/* XXX To be implemented */
955 }
956 
957 int
958 gdt_async_event(struct gdt_softc *sc, int service)
959 {
960 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));
961 
962 	if (service == GDT_SCREENSERVICE) {
963 		/* XXX To be implemented */
964 	} else {
965 		/* XXX To be implemented */
966 	}
967 
968 	return (0);
969 }
970 
971 int
972 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
973     struct scsi_xfer *xs)
974 {
975 	GDT_DPRINTF(GDT_D_INTR,
976 	    ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
977 
978 	if (service == GDT_SCREENSERVICE) {
979 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
980 		/* XXX To be implemented */
981 		return (0);
982 	} else {
983 		switch (sc->sc_status) {
984 		case GDT_S_OK:
985 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
986 			/* XXX To be implemented */
987 			break;
988 		case GDT_S_BSY:
989 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
990 			/* XXX To be implemented */
991 			return (2);
992 		default:
993 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
994 			    sc->sc_status));
995 			/* XXX To be implemented */
996 			return (0);
997 		}
998 	}
999 
1000 	return (1);
1001 }
1002 
1003 int
1004 gdt_intr(void *arg)
1005 {
1006 	struct gdt_softc *sc = arg;
1007 	struct gdt_intr_ctx ctx;
1008 	int chain = 1;
1009 	int sync_val = 0;
1010 	struct scsi_xfer *xs = NULL;
1011 	int prev_cmd;
1012 	struct gdt_ccb *ccb;
1013 
1014 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));
1015 
1016 	/* If polling and we were not called from gdt_wait, just return */
1017 	if (gdt_polling && !gdt_from_wait)
1018 		return (0);
1019 
1020 	ctx.istatus = sc->sc_get_status(sc);
1021 	if (!ctx.istatus) {
1022 		sc->sc_status = GDT_S_NO_STATUS;
1023 		return (0);
1024 	}
1025 
1026 	gdt_wait_index = 0;
1027 	ctx.service = ctx.info2 = 0;
1028 
1029 	sc->sc_intr(sc, &ctx);
1030 
1031 	sc->sc_status = ctx.cmd_status;
1032 	sc->sc_info = ctx.info;
1033 	sc->sc_info2 = ctx.info2;
1034 
1035 	if (gdt_from_wait) {
1036 		gdt_wait_gdt = sc;
1037 		gdt_wait_index = ctx.istatus;
1038 	}
1039 
1040 	switch (ctx.istatus) {
1041 	case GDT_ASYNCINDEX:
1042 		gdt_async_event(sc, ctx.service);
1043 		goto finish;
1044 
1045 	case GDT_SPEZINDEX:
1046 		printf("%s: uninitialized or unknown service (%d %d)\n",
1047 		    DEVNAME(sc), ctx.info, ctx.info2);
1048 		chain = 0;
1049 		goto finish;
1050 	}
1051 
1052 	ccb = &sc->sc_ccbs[ctx.istatus - 2];
1053 	xs = ccb->gc_xs;
1054 	if (!gdt_polling)
1055 		timeout_del(&xs->stimeout);
1056 	ctx.service = ccb->gc_service;
1057 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1058 	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
1059 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
1060 		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
1061 		    ccb->gc_dmamap_xfer->dm_mapsize,
1062 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1063 		    BUS_DMASYNC_POSTWRITE);
1064 		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
1065 	}
1066 	switch (prev_cmd) {
1067 	case GDT_GCF_UNUSED:
1068 		/* XXX Not yet implemented */
1069 		chain = 0;
1070 		goto finish;
1071 	case GDT_GCF_INTERNAL:
1072 		chain = 0;
1073 		goto finish;
1074 	}
1075 
1076 	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);
1077 
1078  finish:
1079 	switch (sync_val) {
1080 	case 0:
1081 		if (xs && gdt_from_wait)
1082 			scsi_done(xs);
1083 		break;
1084 	case 1:
1085 		scsi_done(xs);
1086 		break;
1087 
1088 	case 2:
1089 		gdt_enqueue(sc, xs, 0);
1090 	}
1091 
1092 	if (chain)
1093 		gdt_chain(sc);
1094 
1095 	return (1);
1096 }
1097 
1098 void
1099 gdtminphys(struct buf *bp, struct scsi_link *sl)
1100 {
1101 	GDT_DPRINTF(GDT_D_MISC, ("gdtminphys(0x%x) ", bp));
1102 
1103 	/* As this is way more than MAXPHYS it's really not necessary. */
1104 	if ((GDT_MAXOFFSETS - 1) * PAGE_SIZE < MAXPHYS &&
1105 	    bp->b_bcount > ((GDT_MAXOFFSETS - 1) * PAGE_SIZE))
1106 		bp->b_bcount = ((GDT_MAXOFFSETS - 1) * PAGE_SIZE);
1107 
1108 	minphys(bp);
1109 }
1110 
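/*
 * Poll for completion of the given ccb by calling gdt_intr() directly,
 * one attempt per millisecond for up to `timeout' milliseconds.  Returns
 * 1 if the ccb completed, 0 on timeout.
 */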
1111 int
1112 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1113 {
1114 	int s, rslt, rv = 0;
1115 
1116 	GDT_DPRINTF(GDT_D_MISC,
1117 	    ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1118 
1119 	gdt_from_wait = 1;
1120 	do {
1121 		s = splbio();
1122 		rslt = gdt_intr(sc);
1123 		splx(s);
1124 		if (rslt && sc == gdt_wait_gdt &&
1125 		    ccb->gc_cmd_index == gdt_wait_index) {
1126 			rv = 1;
1127 			break;
1128 		}
1129 		DELAY(1000); /* 1 millisecond */
1130 	} while (--timeout);
1131 	gdt_from_wait = 0;
1132 
1133 	while (sc->sc_test_busy(sc))
1134 		DELAY(0);		/* XXX correct? */
1135 
1136 	return (rv);
1137 }
1138 
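/*
 * Issue a driver-generated command to a firmware service and poll for the
 * result with gdt_wait(), retrying up to GDT_RETRIES times while the
 * controller answers GDT_S_BSY.  Returns 1 if the firmware reported
 * GDT_S_OK, 0 otherwise.
 */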
1139 int
1140 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1141     u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1142 {
1143 	int retries, rslt;
1144 	struct gdt_ccb *ccb;
1145 
1146 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1147 	    sc, service, opcode, arg1, arg2, arg3));
1148 
1149 	bzero(sc->sc_cmd, GDT_CMD_SZ);
1150 
1151 	for (retries = GDT_RETRIES; ; ) {
1152 		ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1153 		if (ccb == NULL) {
1154 			printf("%s: no free command index found\n",
1155 			    DEVNAME(sc));
1156 			return (0);
1157 		}
1158 		ccb->gc_service = service;
1159 		ccb->gc_xs = NULL;
1160 		ccb->gc_blockno = ccb->gc_blockcnt = 0;
1161 		ccb->gc_timeout = ccb->gc_flags = 0;
1162 		ccb->gc_service = GDT_CACHESERVICE;
1163 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1164 
1165 		sc->sc_set_sema0(sc);
1166 		gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1167 		    ccb->gc_cmd_index);
1168 		gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1169 		gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1170 
1171 		switch (service) {
1172 		case GDT_CACHESERVICE:
1173 			if (opcode == GDT_IOCTL) {
1174 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1175 				    GDT_IOCTL_SUBFUNC, arg1);
1176 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1177 				    GDT_IOCTL_CHANNEL, arg2);
1178 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1179 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1180 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1181 				    GDT_IOCTL_P_PARAM,
1182 				    sc->sc_scratch_seg.ds_addr);
1183 			} else {
1184 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1185 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1186 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1187 				    GDT_CACHE_BLOCKNO, arg2);
1188 			}
1189 			break;
1190 
1191 		case GDT_SCSIRAWSERVICE:
1192 			gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1193 			    GDT_RAW_DIRECTION, arg1);
1194 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1195 			    (u_int8_t)arg2;
1196 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1197 			    (u_int8_t)arg3;
1198 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1199 			    (u_int8_t)(arg3 >> 8);
1200 		}
1201 
1202 		sc->sc_cmd_len = GDT_CMD_SZ;
1203 		sc->sc_cmd_off = 0;
1204 		sc->sc_cmd_cnt = 0;
1205 		sc->sc_copy_cmd(sc, ccb);
1206 		sc->sc_release_event(sc, ccb);
1207 		DELAY(20);
1208 
1209 		rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT);
1210 		scsi_io_put(&sc->sc_iopool, ccb);
1211 
1212 		if (!rslt)
1213 			return (0);
1214 		if (sc->sc_status != GDT_S_BSY || --retries == 0)
1215 			break;
1216 		DELAY(1);
1217 	}
1218 	return (sc->sc_status == GDT_S_OK);
1219 }
1220 
1221 void *
1222 gdt_ccb_alloc(void *xsc)
1223 {
1224 	struct gdt_softc *sc = xsc;
1225 	struct gdt_ccb *ccb;
1226 
1227 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc));
1228 
1229 	mtx_enter(&sc->sc_ccb_mtx);
1230 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1231 	if (ccb != NULL)
1232 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1233 	mtx_leave(&sc->sc_ccb_mtx);
1234 
1235 	return (ccb);
1236 }
1237 
1238 void
1239 gdt_ccb_free(void *xsc, void *xccb)
1240 {
1241 	struct gdt_softc *sc = xsc;
1242 	struct gdt_ccb *ccb = xccb;
1243 	int wake = 0;
1244 
1245 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb));
1246 
1247 	mtx_enter(&sc->sc_ccb_mtx);
1248 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1249 	/* If the free list was empty, wake up potential waiters. */
1250 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1251 		wake = 1;
1252 	mtx_leave(&sc->sc_ccb_mtx);
1253 
1254 	if (wake)
1255 		wakeup(&sc->sc_free_ccb);
1256 }
1257 
1258 void
1259 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1260 {
1261 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1262 
1263 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1264 	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1265 	gdt_start_ccbs(sc);
1266 }
1267 
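/*
 * Hand queued ccbs to the controller.  If gdt_exec_ccb() reports the
 * controller busy (returns 0), leave the ccb at the head of the queue and
 * arm the watchdog timeout so the queue is retried later.
 */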
1268 void
1269 gdt_start_ccbs(struct gdt_softc *sc)
1270 {
1271 	struct gdt_ccb *ccb;
1272 	struct scsi_xfer *xs;
1273 
1274 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1275 
1276 	while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1277 
1278 		xs = ccb->gc_xs;
1279 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1280 			timeout_del(&xs->stimeout);
1281 
1282 		if (gdt_exec_ccb(ccb) == 0) {
1283 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1284 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1285 			timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1286 			break;
1287 		}
1288 		TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1289 
1290 		if ((xs->flags & SCSI_POLL) == 0) {
1291 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1292 			timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1293 		}
1294 	}
1295 }
1296 
1297 void
1298 gdt_chain(struct gdt_softc *sc)
1299 {
1300 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1301 
1302 	if (!SIMPLEQ_EMPTY(&sc->sc_queue))
1303 		gdt_scsi_cmd(SIMPLEQ_FIRST(&sc->sc_queue));
1304 }
1305 
1306 void
1307 gdt_timeout(void *arg)
1308 {
1309 	struct gdt_ccb *ccb = arg;
1310 	struct scsi_link *link = ccb->gc_xs->sc_link;
1311 	struct gdt_softc *sc = link->adapter_softc;
1312 	int s;
1313 
1314 	sc_print_addr(link);
1315 	printf("timed out\n");
1316 
1317 	/* XXX Test for multiple timeouts */
1318 
1319 	ccb->gc_xs->error = XS_TIMEOUT;
1320 	s = splbio();
1321 	gdt_enqueue_ccb(sc, ccb);
1322 	splx(s);
1323 }
1324 
1325 void
1326 gdt_watchdog(void *arg)
1327 {
1328 	struct gdt_ccb *ccb = arg;
1329 	struct scsi_link *link = ccb->gc_xs->sc_link;
1330 	struct gdt_softc *sc = link->adapter_softc;
1331 	int s;
1332 
1333 	s = splbio();
1334 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1335 	gdt_start_ccbs(sc);
1336 	splx(s);
1337 }
1338 
1339 #if NBIO > 0
1340 int
1341 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1342 {
1343 	struct gdt_softc *sc = (struct gdt_softc *)dev;
1344 	int error = 0;
1345 
1346 	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1347 
1348 	switch (cmd) {
1349 	case BIOCINQ:
1350 		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1351 		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1352 		break;
1353 
1354 	case BIOCVOL:
1355 		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1356 		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1357 		break;
1358 
1359 	case BIOCDISK:
1360 		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1361 		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1362 		break;
1363 
1364 	case BIOCALARM:
1365 		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1366 		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1367 		break;
1368 
1369 	case BIOCSETSTATE:
1370 		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1371 		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1372 		break;
1373 
1374 	default:
1375 		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1376 		error = EINVAL;
1377 	}
1378 
1379 	return (error);
1380 }
1381 
1382 int
1383 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1384 {
1385 	bi->bi_novol = sc->sc_ndevs;
1386 	bi->bi_nodisk = sc->sc_total_disks;
1387 
1388 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1389 
1390 	return (0);
1391 }
1392 
1393 int
1394 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
1395 {
1396 	return (1); /* XXX not yet */
1397 }
1398 
1399 int
1400 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
1401 {
1402 	return (1); /* XXX not yet */
1403 }
1404 
1405 int
1406 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
1407 {
1408 	return (1); /* XXX not yet */
1409 }
1410 
1411 int
1412 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
1413 {
1414 	return (1); /* XXX not yet */
1415 }
1416 #endif /* NBIO > 0 */
1417