1 /*	$OpenBSD: gdt_common.c,v 1.83 2020/10/15 00:01:24 krw Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This driver would not have been written if it were not for the hardware
29  * donations from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 
40 #include <machine/bus.h>
41 
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45 
46 #include <dev/biovar.h>
47 #include <dev/ic/gdtreg.h>
48 #include <dev/ic/gdtvar.h>
49 
50 #include "bio.h"
51 
52 #ifdef GDT_DEBUG
53 int gdt_maxcmds = GDT_MAXCMDS;
54 #undef GDT_MAXCMDS
55 #define GDT_MAXCMDS gdt_maxcmds
56 #endif
57 
58 #define GDT_DRIVER_VERSION 1
59 #define GDT_DRIVER_SUBVERSION 2
60 
61 int	gdt_async_event(struct gdt_softc *, int);
62 void	gdt_chain(struct gdt_softc *);
63 void	gdt_clear_events(struct gdt_softc *);
64 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
65 void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
66 void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
67 void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
68 int	gdt_exec_ccb(struct gdt_ccb *);
69 void	gdt_ccb_free(void *, void *);
70 void   *gdt_ccb_alloc(void *);
71 void	gdt_internal_cache_cmd(struct scsi_xfer *);
72 int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
73     u_int32_t, u_int32_t, u_int32_t);
74 #if NBIO > 0
75 int	gdt_ioctl(struct device *, u_long, caddr_t);
76 int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
77 int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
78 int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
79 int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
80 int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
81 #endif /* NBIO > 0 */
82 void	gdt_scsi_cmd(struct scsi_xfer *);
83 void	gdt_start_ccbs(struct gdt_softc *);
84 int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
85     struct scsi_xfer *);
86 void	gdt_timeout(void *);
87 int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
88 void	gdt_watchdog(void *);
89 
90 struct cfdriver gdt_cd = {
91 	NULL, "gdt", DV_DULL
92 };
93 
94 struct scsi_adapter gdt_switch = {
95 	gdt_scsi_cmd, NULL, NULL, NULL, NULL
96 };
97 
98 int gdt_cnt = 0;
99 u_int8_t gdt_polling;
100 u_int8_t gdt_from_wait;
101 struct gdt_softc *gdt_wait_gdt;
102 int	gdt_wait_index;
103 #ifdef GDT_DEBUG
104 int	gdt_debug = GDT_DEBUG;
105 #endif
106 
107 int
108 gdt_attach(struct gdt_softc *sc)
109 {
110 	struct scsibus_attach_args saa;
111 	u_int16_t cdev_cnt;
112 	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
113 
114 	gdt_polling = 1;
115 	gdt_from_wait = 0;
116 
117 	if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
118 	    &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
119 	    panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
120 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
121 	    GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
122 	    panic("%s: bus_dmamem_map failed", DEVNAME(sc));
123 
124 	gdt_clear_events(sc);
125 
126 	TAILQ_INIT(&sc->sc_free_ccb);
127 	TAILQ_INIT(&sc->sc_ccbq);
128 	TAILQ_INIT(&sc->sc_ucmdq);
129 	SIMPLEQ_INIT(&sc->sc_queue);
130 
131 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
132 	scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free);
133 
134 	/* Initialize the ccbs */
135 	for (i = 0; i < GDT_MAXCMDS; i++) {
136 		sc->sc_ccbs[i].gc_cmd_index = i + 2;
137 		error = bus_dmamap_create(sc->sc_dmat,
138 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
139 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
140 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
141 		    &sc->sc_ccbs[i].gc_dmamap_xfer);
142 		if (error) {
143 			printf("%s: cannot create ccb dmamap (%d)\n",
144 			    DEVNAME(sc), error);
145 			return (1);
146 		}
147 		(void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
148 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
149 		    gc_chain);
150 	}
151 
152 	if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
153 		printf("screen service initialization error %d\n",
154 		     sc->sc_status);
155 		return (1);
156 	}
157 
158 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
159 	    0)) {
160 		printf("cache service initialization error %d\n",
161 		    sc->sc_status);
162 		return (1);
163 	}
164 
165 	cdev_cnt = (u_int16_t)sc->sc_info;
166 
167 	/* Detect number of busses */
168 	gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
169 	sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
170 	sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
171 	sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
172 	gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
173 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
174 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
175 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
176 		sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
177 		for (i = 0; i < sc->sc_bus_cnt; i++) {
178 			id = sc->sc_scratch[GDT_IOC_HDR_SZ +
179 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
180 			sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
181 		}
182 
183 	} else {
184 		/* New method failed, use fallback. */
185 		for (i = 0; i < GDT_MAXBUS; i++) {
186 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
187 			if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
188 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
189 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
190 			    GDT_GETCH_SZ)) {
191 				if (i == 0) {
192 					printf("cannot get channel count, "
193 					    "error %d\n", sc->sc_status);
194 					return (1);
195 				}
196 				break;
197 			}
198 			sc->sc_bus_id[i] =
199 			    (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
200 			    sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
201 		}
202 		sc->sc_bus_cnt = i;
203 	}
204 
205 	/* Read cache configuration */
206 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
207 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
208 		printf("cannot get cache info, error %d\n", sc->sc_status);
209 		return (1);
210 	}
211 	sc->sc_cpar.cp_version =
212 	    gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
213 	sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
214 	sc->sc_cpar.cp_strategy =
215 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
216 	sc->sc_cpar.cp_write_back =
217 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
218 	sc->sc_cpar.cp_block_size =
219 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
220 
221 	/* Read board information and features */
222 	sc->sc_more_proc = 0;
223 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
224 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
225 		/* XXX A lot of these assignments can probably go later */
226 		sc->sc_binfo.bi_ser_no =
227 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
228 		bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
229 		    sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
230 		sc->sc_binfo.bi_ep_flags =
231 		    gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
232 		sc->sc_binfo.bi_proc_id =
233 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
234 		sc->sc_binfo.bi_memsize =
235 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
236 		sc->sc_binfo.bi_mem_banks =
237 		    sc->sc_scratch[GDT_BINFO_MEM_BANKS];
238 		sc->sc_binfo.bi_chan_type =
239 		    sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
240 		sc->sc_binfo.bi_chan_count =
241 		    sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
242 		sc->sc_binfo.bi_rdongle_pres =
243 		    sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
244 		sc->sc_binfo.bi_epr_fw_ver =
245 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
246 		sc->sc_binfo.bi_upd_fw_ver =
247 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
248 		sc->sc_binfo.bi_upd_revision =
249 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
250 		bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
251 		    sc->sc_binfo.bi_type_string,
252 		    sizeof sc->sc_binfo.bi_type_string);
253 		bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
254 		    sc->sc_binfo.bi_raid_string,
255 		    sizeof sc->sc_binfo.bi_raid_string);
256 		sc->sc_binfo.bi_update_pres =
257 		    sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
258 		sc->sc_binfo.bi_xor_pres =
259 		    sc->sc_scratch[GDT_BINFO_XOR_PRES];
260 		sc->sc_binfo.bi_prom_type =
261 		    sc->sc_scratch[GDT_BINFO_PROM_TYPE];
262 		sc->sc_binfo.bi_prom_count =
263 		    sc->sc_scratch[GDT_BINFO_PROM_COUNT];
264 		sc->sc_binfo.bi_dup_pres =
265 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
266 		sc->sc_binfo.bi_chan_pres =
267 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
268 		sc->sc_binfo.bi_mem_pres =
269 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
270 		sc->sc_binfo.bi_ft_bus_system =
271 		    sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
272 		sc->sc_binfo.bi_subtype_valid =
273 		    sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
274 		sc->sc_binfo.bi_board_subtype =
275 		    sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
276 		sc->sc_binfo.bi_rampar_pres =
277 		    sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
278 
279 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
280 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
281 			sc->sc_bfeat.bf_chaining =
282 			    sc->sc_scratch[GDT_BFEAT_CHAINING];
283 			sc->sc_bfeat.bf_striping =
284 			    sc->sc_scratch[GDT_BFEAT_STRIPING];
285 			sc->sc_bfeat.bf_mirroring =
286 			    sc->sc_scratch[GDT_BFEAT_MIRRORING];
287 			sc->sc_bfeat.bf_raid =
288 			    sc->sc_scratch[GDT_BFEAT_RAID];
289 			sc->sc_more_proc = 1;
290 		}
291 	} else {
292 		/* XXX Not implemented yet */
293 	}
294 
295 	/* Read more information */
296 	if (sc->sc_more_proc) {
297 		int bus, j;
298 		/* physical drives, channel addresses */
299 		/* step 1: get magical bus number from firmware */
300 		gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
301 		sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
302 		sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
303 		sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
304 		gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
305 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
306 		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
307 		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
308 			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
309 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
310 				sc->sc_raw[bus].ra_address =
311 				    gdt_dec32(sc->sc_scratch +
312 				    GDT_IOC_HDR_SZ +
313 				    GDT_IOC_SZ * bus +
314 				    GDT_IOC_ADDRESS);
315 				sc->sc_raw[bus].ra_local_no =
316 				    gdt_dec8(sc->sc_scratch +
317 				    GDT_IOC_HDR_SZ +
318 				    GDT_IOC_SZ * bus +
319 				    GDT_IOC_LOCAL_NO);
320 				GDT_DPRINTF(GDT_D_INFO, (
321 				    "bus: %d address: %x local: %x\n",
322 				    bus,
323 				    sc->sc_raw[bus].ra_address,
324 				    sc->sc_raw[bus].ra_local_no));
325 			}
326 		} else {
327 			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
328 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
329 				sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
330 				sc->sc_raw[bus].ra_local_no = bus;
331 				GDT_DPRINTF(GDT_D_INFO, (
332 				    "bus: %d address: %x local: %x\n",
333 				    bus,
334 				    sc->sc_raw[bus].ra_address,
335 				    sc->sc_raw[bus].ra_local_no));
336 			}
337 		}
338 		/* step 2: use magical bus number to get nr of phys disks */
339 		for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
340 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
341 			    sc->sc_raw[bus].ra_local_no);
342 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
343 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
344 			    sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
345 			    GDT_GETCH_SZ)) {
346 				sc->sc_raw[bus].ra_phys_cnt =
347 				    gdt_dec32(sc->sc_scratch +
348 				    GDT_GETCH_DRIVE_CNT);
349 				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
350 				    bus, sc->sc_raw[bus].ra_phys_cnt));
351 			}
352 
353 			/* step 3: get scsi disk nr */
354 			if (sc->sc_raw[bus].ra_phys_cnt > 0) {
355 				gdt_enc32(sc->sc_scratch +
356 				    GDT_GETSCSI_CHAN,
357 				    sc->sc_raw[bus].ra_local_no);
358 				gdt_enc32(sc->sc_scratch +
359 				    GDT_GETSCSI_CNT,
360 				    sc->sc_raw[bus].ra_phys_cnt);
361 				if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
362 				    GDT_IOCTL,
363 				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
364 				    sc->sc_raw[bus].ra_address |
365 				    GDT_INVALID_CHANNEL,
366 				    GDT_GETSCSI_SZ))
367 					for (j = 0;
368 					    j < sc->sc_raw[bus].ra_phys_cnt;
369 					    j++) {
370 						sc->sc_raw[bus].ra_id_list[j] =
371 						    gdt_dec32(sc->sc_scratch +
372 						    GDT_GETSCSI_LIST +
373 						    GDT_GETSCSI_LIST_SZ * j);
374 						GDT_DPRINTF(GDT_D_INFO,
375 						    ("  diskid: %d\n",
376 						    sc->sc_raw[bus].ra_id_list[j]));
377 					}
378 				else
379 					sc->sc_raw[bus].ra_phys_cnt = 0;
380 			}
381 			/* add found disks to grand total */
382 			sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
383 		}
384 	} /* if (sc->sc_more_proc) */
385 
386 	if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
387 		printf("raw service initialization error %d\n",
388 		    sc->sc_status);
389 		return (1);
390 	}
391 
392 	/* Set/get features raw service (scatter/gather) */
393 	sc->sc_raw_feat = 0;
394 	if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
395 	    GDT_SCATTER_GATHER, 0, 0))
396 		if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
397 		    0, 0))
398 			sc->sc_raw_feat = sc->sc_info;
399 
400 	/* Set/get features cache service (scatter/gather) */
401 	sc->sc_cache_feat = 0;
402 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
403 	    GDT_SCATTER_GATHER, 0))
404 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
405 		    0))
406 			sc->sc_cache_feat = sc->sc_info;
407 
408 	/* XXX The Linux driver reserves drives here, potentially */
409 
410 	sc->sc_ndevs = 0;
411 	/* Scan for cache devices */
412 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
413 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
414 		    0)) {
415 			sc->sc_hdr[i].hd_present = 1;
416 			sc->sc_hdr[i].hd_size = sc->sc_info;
417 
418 			if (sc->sc_hdr[i].hd_size > 0)
419 				sc->sc_ndevs++;
420 
421 			/*
422 			 * Evaluate mapping (sectors per head, heads per cyl)
423 			 */
424 			sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
425 			if (sc->sc_info2 == 0)
426 				gdt_eval_mapping(sc->sc_hdr[i].hd_size,
427 				    &drv_cyls, &drv_hds, &drv_secs);
428 			else {
429 				drv_hds = sc->sc_info2 & 0xff;
430 				drv_secs = (sc->sc_info2 >> 8) & 0xff;
431 				drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
432 				    drv_secs;
433 			}
434 			sc->sc_hdr[i].hd_heads = drv_hds;
435 			sc->sc_hdr[i].hd_secs = drv_secs;
436 			/* Round the size */
437 			sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
438 
439 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
440 			    GDT_DEVTYPE, i, 0, 0))
441 				sc->sc_hdr[i].hd_devtype = sc->sc_info;
442 		}
443 
444 	printf("dpmem %llx %d-bus %d cache device%s\n",
445 	    (long long)sc->sc_dpmembase,
446 	    sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
447 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
448 	    DEVNAME(sc), sc->sc_cpar.cp_version,
449 	    sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
450 	    sc->sc_cpar.cp_write_back ? "on" : "off",
451 	    sc->sc_cpar.cp_block_size);
452 #if 1
453 	printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
454 	    sc->sc_raw_feat, sc->sc_cache_feat);
455 #endif
456 
457 #if NBIO > 0
458 	if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
459 		panic("%s: controller registration failed", DEVNAME(sc));
460 #endif
461 	gdt_cnt++;
462 
463 	saa.saa_adapter_softc = sc;
464 	saa.saa_adapter = &gdt_switch;
465 	saa.saa_adapter_buswidth =
466 	    (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
467 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
468 	saa.saa_luns = 8;
469 	if (sc->sc_ndevs == 0)
470 		saa.saa_openings = 0;
471 	else
472 		saa.saa_openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
473 		    sc->sc_ndevs;
474 	saa.saa_pool = &sc->sc_iopool;
475 	saa.saa_quirks = saa.saa_flags = 0;
476 	saa.saa_wwpn = saa.saa_wwnn = 0;
477 
478 	config_found(&sc->sc_dev, &saa, scsiprint);
479 
480 	gdt_polling = 0;
481 	return (0);
482 }
483 
484 void
485 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
486 {
487 	*cyls = size / GDT_HEADS / GDT_SECS;
488 	if (*cyls < GDT_MAXCYLS) {
489 		*heads = GDT_HEADS;
490 		*secs = GDT_SECS;
491 	} else {
492 		/* Too high for 64 * 32 */
493 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
494 		if (*cyls < GDT_MAXCYLS) {
495 			*heads = GDT_MEDHEADS;
496 			*secs = GDT_MEDSECS;
497 		} else {
498 			/* Too high for 127 * 63 */
499 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
500 			*heads = GDT_BIGHEADS;
501 			*secs = GDT_BIGSECS;
502 		}
503 	}
504 }
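
/*
 * Illustrative sketch, not part of the driver: a worked example of the
 * geometry fallback above.  The first two tiers match the thresholds named
 * in the comments ("64 * 32" and "127 * 63"); the exact GDT_MAXCYLS and
 * GDT_BIG* values live in gdtreg.h, so the 1024-cylinder limit used here
 * is an assumption.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	const unsigned int maxcyls = 1024;	/* assumed GDT_MAXCYLS */
	unsigned int size = 4194304;		/* 2GB drive in 512-byte sectors */
	unsigned int cyls, heads, secs;

	cyls = size / 64 / 32;			/* 2048: too high for 64 * 32 */
	if (cyls < maxcyls) {
		heads = 64;
		secs = 32;
	} else {
		cyls = size / 127 / 63;		/* 524: fits below 1024 cyls */
		heads = 127;
		secs = 63;
	}
	printf("%u sectors -> C/H/S %u/%u/%u\n", size, cyls, heads, secs);
	return (0);
}
#endif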
505 
506 /*
507  * Insert a command into the driver queue, either at the front or at the tail.
508  * It's ok to overload the freelist link as these structures are never on
509  * the freelist at this time.
510  */
511 void
512 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
513 {
514 	if (infront)
515 		SIMPLEQ_INSERT_HEAD(&sc->sc_queue, xs, xfer_list);
516 	else
517 		SIMPLEQ_INSERT_TAIL(&sc->sc_queue, xs, xfer_list);
518 }
519 
520 /*
521  * Pull a command off the front of the driver queue.
522  */
523 struct scsi_xfer *
524 gdt_dequeue(struct gdt_softc *sc)
525 {
526 	struct scsi_xfer *xs;
527 
528 	xs = SIMPLEQ_FIRST(&sc->sc_queue);
529 	if (xs != NULL)
530 		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, xfer_list);
531 
532 	return (xs);
533 }
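
/*
 * Illustrative sketch, not part of the driver: the enqueue/dequeue pair
 * above is a plain FIFO built on the queue(3) SIMPLEQ macros, with the
 * "infront" flag used to push a deferred transfer back to the head.  A
 * minimal userland model of the same pattern, using a made-up "job"
 * record in place of struct scsi_xfer:
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>

struct job {
	int			id;
	SIMPLEQ_ENTRY(job)	link;
};
SIMPLEQ_HEAD(jobq, job);

int
main(void)
{
	struct jobq q = SIMPLEQ_HEAD_INITIALIZER(q);
	struct job a = { 1 }, b = { 2 }, *j;

	SIMPLEQ_INSERT_TAIL(&q, &a, link);	/* normal submission */
	SIMPLEQ_INSERT_TAIL(&q, &b, link);

	j = SIMPLEQ_FIRST(&q);			/* hand the oldest job out */
	SIMPLEQ_REMOVE_HEAD(&q, link);

	SIMPLEQ_INSERT_HEAD(&q, j, link);	/* adapter busy: requeue in front */

	while (!SIMPLEQ_EMPTY(&q)) {
		j = SIMPLEQ_FIRST(&q);
		SIMPLEQ_REMOVE_HEAD(&q, link);
		printf("job %d\n", j->id);	/* prints 1, then 2 */
	}
	return (0);
}
#endif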
534 
535 /*
536  * Start a SCSI operation on a cache device.
537  * XXX Polled operation is not yet complete.  What kind of locking do we need?
538  */
539 void
540 gdt_scsi_cmd(struct scsi_xfer *xs)
541 {
542 	struct scsi_link *link = xs->sc_link;
543 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
544 	u_int8_t target = link->target;
545 	struct gdt_ccb *ccb;
546 	u_int32_t blockno, blockcnt;
547 	struct scsi_rw *rw;
548 	struct scsi_rw_10 *rw10;
549 	bus_dmamap_t xfer;
550 	int error;
551 	int s;
552 	int polled;
553 
554 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
555 
556 	s = splbio();
557 
558 	xs->error = XS_NOERROR;
559 
560 	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
561 	    link->lun != 0) {
562 		/*
563 		 * XXX Should be XS_SENSE but that would require setting up a
564 		 * faked sense too.
565 		 */
566 		xs->error = XS_DRIVER_STUFFUP;
567 		scsi_done(xs);
568 		splx(s);
569 		return;
570 	}
571 
572 	/* Don't double enqueue if we came from gdt_chain. */
573 	if (xs != SIMPLEQ_FIRST(&sc->sc_queue))
574 		gdt_enqueue(sc, xs, 0);
575 
576 	while ((xs = gdt_dequeue(sc)) != NULL) {
577 		xs->error = XS_NOERROR;
578 		ccb = NULL;
579 		link = xs->sc_link;
580 		target = link->target;
581 		polled = ISSET(xs->flags, SCSI_POLL);
582 
583 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
584 		    sc->sc_test_busy(sc)) {
585 			/*
586 			 * Put it back in front.  XXX Should we instead
587 			 * set xs->error to XS_BUSY?
588 			 */
589 			gdt_enqueue(sc, xs, 1);
590 			break;
591 		}
592 
593 		switch (xs->cmd.opcode) {
594 		case TEST_UNIT_READY:
595 		case REQUEST_SENSE:
596 		case INQUIRY:
597 		case MODE_SENSE:
598 		case START_STOP:
599 		case READ_CAPACITY:
600 #if 0
601 		case VERIFY:
602 #endif
603 			gdt_internal_cache_cmd(xs);
604 			scsi_done(xs);
605 			goto ready;
606 
607 		case PREVENT_ALLOW:
608 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
609 			/* XXX Not yet implemented */
610 			xs->error = XS_NOERROR;
611 			scsi_done(xs);
612 			goto ready;
613 
614 		default:
615 			GDT_DPRINTF(GDT_D_CMD,
616 			    ("unknown opc %d ", xs->cmd.opcode));
617 			/* XXX Not yet implemented */
618 			xs->error = XS_DRIVER_STUFFUP;
619 			scsi_done(xs);
620 			goto ready;
621 
622 		case READ_COMMAND:
623 		case READ_10:
624 		case WRITE_COMMAND:
625 		case WRITE_10:
626 		case SYNCHRONIZE_CACHE:
627 			/*
628 			 * A new command chain, start from the beginning.
629 			 */
630 			sc->sc_cmd_off = 0;
631 
632 			if (xs->cmd.opcode == SYNCHRONIZE_CACHE) {
633 				 blockno = blockcnt = 0;
634 			} else {
635 				/* A read or write operation. */
636 				if (xs->cmdlen == 6) {
637 					rw = (struct scsi_rw *)&xs->cmd;
638 					blockno = _3btol(rw->addr) &
639 					    (SRW_TOPADDR << 16 | 0xffff);
640 					blockcnt =
641 					    rw->length ? rw->length : 0x100;
642 				} else {
643 					rw10 = (struct scsi_rw_10 *)&xs->cmd;
644 					blockno = _4btol(rw10->addr);
645 					blockcnt = _2btol(rw10->length);
646 				}
647 				if (blockno >= sc->sc_hdr[target].hd_size ||
648 				    blockno + blockcnt >
649 				    sc->sc_hdr[target].hd_size) {
650 					printf(
651 					    "%s: out of bounds %u-%u >= %u\n",
652 					    DEVNAME(sc), blockno,
653 					    blockcnt,
654 					    sc->sc_hdr[target].hd_size);
655 					/*
656 					 * XXX Should be XS_SENSE but that
657 					 * would require setting up a faked
658 					 * sense too.
659 					 */
660 					xs->error = XS_DRIVER_STUFFUP;
661 					scsi_done(xs);
662 					goto ready;
663 				}
664 			}
665 
666 			ccb = xs->io;
667 			ccb->gc_blockno = blockno;
668 			ccb->gc_blockcnt = blockcnt;
669 			ccb->gc_xs = xs;
670 			ccb->gc_timeout = xs->timeout;
671 			ccb->gc_service = GDT_CACHESERVICE;
672 			ccb->gc_flags = 0;
673 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
674 
675 			if (xs->cmd.opcode != SYNCHRONIZE_CACHE) {
676 				xfer = ccb->gc_dmamap_xfer;
677 				error = bus_dmamap_load(sc->sc_dmat, xfer,
678 				    xs->data, xs->datalen, NULL,
679 				    (xs->flags & SCSI_NOSLEEP) ?
680 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
681 				if (error) {
682 					printf("%s: gdt_scsi_cmd: ",
683 					    DEVNAME(sc));
684 					if (error == EFBIG)
685 						printf(
686 						    "more than %d dma segs\n",
687 						    GDT_MAXOFFSETS);
688 					else
689 						printf("error %d "
690 						    "loading dma map\n",
691 						    error);
692 
693 					xs->error = XS_DRIVER_STUFFUP;
694 					scsi_done(xs);
695 					goto ready;
696 				}
697 				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
698 				    xfer->dm_mapsize,
699 				    (xs->flags & SCSI_DATA_IN) ?
700 				    BUS_DMASYNC_PREREAD :
701 				    BUS_DMASYNC_PREWRITE);
702 			}
703 
704 			gdt_enqueue_ccb(sc, ccb);
705 			/* XXX what if enqueue did not start a transfer? */
706 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
707 				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
708 					printf("%s: command %d timed out\n",
709 					    DEVNAME(sc),
710 					    ccb->gc_cmd_index);
711 					xs->error = XS_TIMEOUT;
712 					scsi_done(xs);
713 					splx(s);
714 					return;
715 				}
716 			}
717 		}
718 
719 	ready:
720 		/*
721 		 * Don't process the queue if we are polling.
722 		 */
723 		if (polled) {
724 			break;
725 		}
726 	}
727 
728 	splx(s);
729 }
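
/*
 * Illustrative sketch, not part of the driver: how the READ/WRITE dispatch
 * above recovers the LBA and transfer length from the two CDB layouts.
 * The 6-byte form carries a 21-bit LBA (hence the SRW_TOPADDR mask) and a
 * one-byte length where 0 means 256 blocks; the 10-byte form carries a
 * 32-bit LBA and a 16-bit length.  The byte values below are made up.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* READ(6): opcode, LBA[20:16], LBA[15:8], LBA[7:0], length, control */
	uint8_t rw6[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };
	/* READ(10): opcode, flags, 4-byte LBA, group, 2-byte length, control */
	uint8_t rw10[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0x00, 0x10, 0 };
	uint32_t blockno, blockcnt;

	blockno = ((uint32_t)(rw6[1] & 0x1f) << 16) | (rw6[2] << 8) | rw6[3];
	blockcnt = rw6[4] ? rw6[4] : 0x100;	/* length 0 means 256 blocks */
	printf("rw6:  lba %u, %u blocks\n", blockno, blockcnt);

	blockno = ((uint32_t)rw10[2] << 24) | (rw10[3] << 16) |
	    (rw10[4] << 8) | rw10[5];
	blockcnt = (rw10[7] << 8) | rw10[8];
	printf("rw10: lba %u, %u blocks\n", blockno, blockcnt);
	return (0);
}
#endif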
730 
731 /* XXX Currently only for cacheservice, returns 0 if busy */
732 int
733 gdt_exec_ccb(struct gdt_ccb *ccb)
734 {
735 	struct scsi_xfer *xs = ccb->gc_xs;
736 	struct scsi_link *link = xs->sc_link;
737 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
738 	u_int8_t target = link->target;
739 	u_int32_t sg_canz;
740 	bus_dmamap_t xfer;
741 	int i;
742 #if 1 /* XXX */
743 	static int __level = 0;
744 
745 	if (__level++ > 0)
746 		panic("level > 0");
747 #endif
748 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
749 
750 	sc->sc_cmd_cnt = 0;
751 
752 	/*
753 	 * XXX Yeah I know it's an always-true condition, but that may change
754 	 * later.
755 	 */
756 	if (sc->sc_cmd_cnt == 0)
757 		sc->sc_set_sema0(sc);
758 
759 	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
760 	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
761 	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
762 	    target);
763 
764 	switch (xs->cmd.opcode) {
765 	case PREVENT_ALLOW:
766 	case SYNCHRONIZE_CACHE:
767 		if (xs->cmd.opcode == PREVENT_ALLOW) {
768 			/* XXX PREVENT_ALLOW support goes here */
769 		} else {
770 			GDT_DPRINTF(GDT_D_CMD,
771 			    ("SYNCHRONIZE CACHE tgt %d ", target));
772 			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
773 		}
774 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
775 		    1);
776 		sg_canz = 0;
777 		break;
778 
779 	case WRITE_COMMAND:
780 	case WRITE_10:
781 		/* XXX WRITE_THR could be supported too */
782 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
783 		break;
784 
785 	case READ_COMMAND:
786 	case READ_10:
787 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
788 		break;
789 	}
790 
791 	if (xs->cmd.opcode != PREVENT_ALLOW &&
792 	    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
793 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
794 		    ccb->gc_blockno);
795 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
796 		    ccb->gc_blockcnt);
797 
798 		xfer = ccb->gc_dmamap_xfer;
799 		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
800 			gdt_enc32(
801 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
802 			    0xffffffff);
803 			for (i = 0; i < xfer->dm_nsegs; i++) {
804 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
805 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
806 				    GDT_SG_PTR,
807 				    xfer->dm_segs[i].ds_addr);
808 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
809 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
810 				    GDT_SG_LEN,
811 				    xfer->dm_segs[i].ds_len);
812 				GDT_DPRINTF(GDT_D_IO,
813 				    ("#%d pa %lx len %lx\n", i,
814 				    xfer->dm_segs[i].ds_addr,
815 				    xfer->dm_segs[i].ds_len));
816 			}
817 			sg_canz = xfer->dm_nsegs;
818 			gdt_enc32(
819 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
820 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
821 		} else {
822 			/* XXX Hardly correct */
823 			gdt_enc32(
824 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
825 			    xfer->dm_segs[0].ds_addr);
826 			sg_canz = 0;
827 		}
828 	}
829 	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
830 
831 	sc->sc_cmd_len =
832 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
833 	    sizeof (u_int32_t));
834 
835 	if (sc->sc_cmd_cnt > 0 &&
836 	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
837 	    sc->sc_ic_all_size) {
838 		printf("%s: DPMEM overflow\n", DEVNAME(sc));
839 		xs->error = XS_BUSY;
840 #if 1 /* XXX */
841 		__level--;
842 #endif
843 		return (0);
844 	}
845 
846 	sc->sc_copy_cmd(sc, ccb);
847 	sc->sc_release_event(sc, ccb);
848 
849 	xs->error = XS_NOERROR;
850 	xs->resid = 0;
851 #if 1 /* XXX */
852 	__level--;
853 #endif
854 	return (1);
855 }
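
/*
 * Illustrative sketch, not part of the driver: the scatter/gather branch
 * above lays the list out as (address, length) pairs behind the cache
 * command header, writes a terminating zero length, and sets the
 * destination address to 0xffffffff so the firmware takes the SG list
 * instead of a single buffer.  gdt_enc32() is assumed to store 32-bit
 * values little-endian into the command buffer; the offsets and segment
 * values below are made up.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void
enc32(uint8_t *p, uint32_t v)
{
	/* little-endian store, mirroring the assumed gdt_enc32() behaviour */
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int
main(void)
{
	uint8_t cmd[64];
	uint32_t addr[2] = { 0x00100000, 0x00208000 };	/* two DMA segments */
	uint32_t len[2] = { 0x1000, 0x800 };
	size_t destaddr = 8, sg_lst = 16, sg_sz = 8;	/* made-up offsets */
	int i;

	memset(cmd, 0, sizeof cmd);
	enc32(cmd + destaddr, 0xffffffff);	/* flag: SG list follows */
	for (i = 0; i < 2; i++) {
		enc32(cmd + sg_lst + i * sg_sz, addr[i]);	/* SG_PTR */
		enc32(cmd + sg_lst + i * sg_sz + 4, len[i]);	/* SG_LEN */
	}
	enc32(cmd + sg_lst + 2 * sg_sz + 4, 0);	/* terminating zero length */
	printf("built %d SG entries\n", 2);
	return (0);
}
#endif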
856 
857 /* Emulated SCSI operation on cache device */
858 void
859 gdt_internal_cache_cmd(struct scsi_xfer *xs)
860 {
861 	struct scsi_link *link = xs->sc_link;
862 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
863 	struct scsi_inquiry_data inq;
864 	struct scsi_sense_data sd;
865 	struct scsi_read_cap_data rcd;
866 	u_int8_t target = link->target;
867 
868 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
869 
870 	switch (xs->cmd.opcode) {
871 	case TEST_UNIT_READY:
872 	case START_STOP:
873 #if 0
874 	case VERIFY:
875 #endif
876 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd.opcode,
877 		    target));
878 		break;
879 
880 	case REQUEST_SENSE:
881 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
882 		bzero(&sd, sizeof sd);
883 		sd.error_code = SSD_ERRCODE_CURRENT;
884 		sd.segment = 0;
885 		sd.flags = SKEY_NO_SENSE;
886 		gdt_enc32(sd.info, 0);
887 		sd.extra_len = 0;
888 		scsi_copy_internal_data(xs, &sd, sizeof(sd));
889 		break;
890 
891 	case INQUIRY:
892 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
893 		    sc->sc_hdr[target].hd_devtype));
894 		bzero(&inq, sizeof inq);
895 		inq.device =
896 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
897 		inq.dev_qual2 =
898 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
899 		inq.version = SCSI_REV_2;
900 		inq.response_format = SID_SCSI2_RESPONSE;
901 		inq.additional_length = SID_SCSI2_ALEN;
902 		inq.flags |= SID_CmdQue;
903 		strlcpy(inq.vendor, "ICP	   ", sizeof inq.vendor);
904 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
905 		    target);
906 		strlcpy(inq.revision, "	 ", sizeof inq.revision);
907 		scsi_copy_internal_data(xs, &inq, sizeof(inq));
908 		break;
909 
910 	case READ_CAPACITY:
911 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
912 		bzero(&rcd, sizeof rcd);
913 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
914 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
915 		scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
916 		break;
917 
918 	default:
919 		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
920 		    xs->cmd.opcode, target));
921 		xs->error = XS_DRIVER_STUFFUP;
922 		return;
923 	}
924 
925 	xs->error = XS_NOERROR;
926 }
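
/*
 * Illustrative sketch, not part of the driver: the READ CAPACITY case
 * above reports the last addressable block (hd_size - 1) and the block
 * length as big-endian 32-bit fields, which is what _lto4b() produces.
 * The 8-sector size below is made up; GDT_SECTOR_SIZE is assumed to be
 * 512.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
lto4b(uint32_t v, uint8_t *p)
{
	/* big-endian store, mirroring _lto4b() */
	p[0] = (v >> 24) & 0xff;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff;
	p[3] = v & 0xff;
}

int
main(void)
{
	uint8_t rcd[8];
	uint32_t size = 8, blksz = 512;

	lto4b(size - 1, rcd);		/* last LBA, not the block count */
	lto4b(blksz, rcd + 4);		/* bytes per block */
	printf("last lba %u, block size %u\n",
	    ((uint32_t)rcd[0] << 24) | (rcd[1] << 16) | (rcd[2] << 8) | rcd[3],
	    ((uint32_t)rcd[4] << 24) | (rcd[5] << 16) | (rcd[6] << 8) | rcd[7]);
	return (0);
}
#endif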
927 
928 void
929 gdt_clear_events(struct gdt_softc *sc)
930 {
931 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));
932 
933 	/* XXX To be implemented */
934 }
935 
936 int
937 gdt_async_event(struct gdt_softc *sc, int service)
938 {
939 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));
940 
941 	if (service == GDT_SCREENSERVICE) {
942 		/* XXX To be implemented */
943 	} else {
944 		/* XXX To be implemented */
945 	}
946 
947 	return (0);
948 }
949 
950 int
951 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
952     struct scsi_xfer *xs)
953 {
954 	GDT_DPRINTF(GDT_D_INTR,
955 	    ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
956 
957 	if (service == GDT_SCREENSERVICE) {
958 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
959 		/* XXX To be implemented */
960 		return (0);
961 	} else {
962 		switch (sc->sc_status) {
963 		case GDT_S_OK:
964 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
965 			/* XXX To be implemented */
966 			break;
967 		case GDT_S_BSY:
968 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
969 			/* XXX To be implemented */
970 			return (2);
971 		default:
972 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
973 			    sc->sc_status));
974 			/* XXX To be implemented */
975 			return (0);
976 		}
977 	}
978 
979 	return (1);
980 }
981 
982 int
983 gdt_intr(void *arg)
984 {
985 	struct gdt_softc *sc = arg;
986 	struct gdt_intr_ctx ctx;
987 	int chain = 1;
988 	int sync_val = 0;
989 	struct scsi_xfer *xs = NULL;
990 	int prev_cmd;
991 	struct gdt_ccb *ccb;
992 
993 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));
994 
995 	/* If polling and we were not called from gdt_wait, just return */
996 	if (gdt_polling && !gdt_from_wait)
997 		return (0);
998 
999 	ctx.istatus = sc->sc_get_status(sc);
1000 	if (!ctx.istatus) {
1001 		sc->sc_status = GDT_S_NO_STATUS;
1002 		return (0);
1003 	}
1004 
1005 	gdt_wait_index = 0;
1006 	ctx.service = ctx.info2 = 0;
1007 
1008 	sc->sc_intr(sc, &ctx);
1009 
1010 	sc->sc_status = ctx.cmd_status;
1011 	sc->sc_info = ctx.info;
1012 	sc->sc_info2 = ctx.info2;
1013 
1014 	if (gdt_from_wait) {
1015 		gdt_wait_gdt = sc;
1016 		gdt_wait_index = ctx.istatus;
1017 	}
1018 
1019 	switch (ctx.istatus) {
1020 	case GDT_ASYNCINDEX:
1021 		gdt_async_event(sc, ctx.service);
1022 		goto finish;
1023 
1024 	case GDT_SPEZINDEX:
1025 		printf("%s: uninitialized or unknown service (%d %d)\n",
1026 		    DEVNAME(sc), ctx.info, ctx.info2);
1027 		chain = 0;
1028 		goto finish;
1029 	}
1030 
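	/*
	 * Command indices were handed out starting at 2 in gdt_attach()
	 * (gc_cmd_index = i + 2), so subtract 2 to get back to the ccb.
	 */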
1031 	ccb = &sc->sc_ccbs[ctx.istatus - 2];
1032 	xs = ccb->gc_xs;
1033 	if (!gdt_polling)
1034 		timeout_del(&xs->stimeout);
1035 	ctx.service = ccb->gc_service;
1036 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1037 	if (xs && xs->cmd.opcode != PREVENT_ALLOW &&
1038 	    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
1039 		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
1040 		    ccb->gc_dmamap_xfer->dm_mapsize,
1041 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1042 		    BUS_DMASYNC_POSTWRITE);
1043 		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
1044 	}
1045 	switch (prev_cmd) {
1046 	case GDT_GCF_UNUSED:
1047 		/* XXX Not yet implemented */
1048 		chain = 0;
1049 		goto finish;
1050 	case GDT_GCF_INTERNAL:
1051 		chain = 0;
1052 		goto finish;
1053 	}
1054 
1055 	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);
1056 
1057  finish:
1058 	switch (sync_val) {
1059 	case 0:
1060 		if (xs && gdt_from_wait)
1061 			scsi_done(xs);
1062 		break;
1063 	case 1:
1064 		scsi_done(xs);
1065 		break;
1066 
1067 	case 2:
1068 		gdt_enqueue(sc, xs, 0);
1069 	}
1070 
1071 	if (chain)
1072 		gdt_chain(sc);
1073 
1074 	return (1);
1075 }
1076 
1077 int
1078 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1079 {
1080 	int s, rslt, rv = 0;
1081 
1082 	GDT_DPRINTF(GDT_D_MISC,
1083 	    ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1084 
1085 	gdt_from_wait = 1;
1086 	do {
1087 		s = splbio();
1088 		rslt = gdt_intr(sc);
1089 		splx(s);
1090 		if (rslt && sc == gdt_wait_gdt &&
1091 		    ccb->gc_cmd_index == gdt_wait_index) {
1092 			rv = 1;
1093 			break;
1094 		}
1095 		DELAY(1000); /* 1 millisecond */
1096 	} while (--timeout);
1097 	gdt_from_wait = 0;
1098 
1099 	while (sc->sc_test_busy(sc))
1100 		DELAY(0);		/* XXX correct? */
1101 
1102 	return (rv);
1103 }
1104 
1105 int
1106 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1107     u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1108 {
1109 	int retries, rslt;
1110 	struct gdt_ccb *ccb;
1111 
1112 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1113 	    sc, service, opcode, arg1, arg2, arg3));
1114 
1115 	bzero(sc->sc_cmd, GDT_CMD_SZ);
1116 
1117 	for (retries = GDT_RETRIES; ; ) {
1118 		ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1119 		if (ccb == NULL) {
1120 			printf("%s: no free command index found\n",
1121 			    DEVNAME(sc));
1122 			return (0);
1123 		}
1124 		ccb->gc_service = service;
1125 		ccb->gc_xs = NULL;
1126 		ccb->gc_blockno = ccb->gc_blockcnt = 0;
1127 		ccb->gc_timeout = ccb->gc_flags = 0;
1128 		ccb->gc_service = GDT_CACHESERVICE;
1129 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1130 
1131 		sc->sc_set_sema0(sc);
1132 		gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1133 		    ccb->gc_cmd_index);
1134 		gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1135 		gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1136 
1137 		switch (service) {
1138 		case GDT_CACHESERVICE:
1139 			if (opcode == GDT_IOCTL) {
1140 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1141 				    GDT_IOCTL_SUBFUNC, arg1);
1142 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1143 				    GDT_IOCTL_CHANNEL, arg2);
1144 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1145 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1146 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1147 				    GDT_IOCTL_P_PARAM,
1148 				    sc->sc_scratch_seg.ds_addr);
1149 			} else {
1150 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1151 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1152 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1153 				    GDT_CACHE_BLOCKNO, arg2);
1154 			}
1155 			break;
1156 
1157 		case GDT_SCSIRAWSERVICE:
1158 			gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1159 			    GDT_RAW_DIRECTION, arg1);
1160 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1161 			    (u_int8_t)arg2;
1162 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1163 			    (u_int8_t)arg3;
1164 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1165 			    (u_int8_t)(arg3 >> 8);
1166 		}
1167 
1168 		sc->sc_cmd_len = GDT_CMD_SZ;
1169 		sc->sc_cmd_off = 0;
1170 		sc->sc_cmd_cnt = 0;
1171 		sc->sc_copy_cmd(sc, ccb);
1172 		sc->sc_release_event(sc, ccb);
1173 		DELAY(20);
1174 
1175 		rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT);
1176 		scsi_io_put(&sc->sc_iopool, ccb);
1177 
1178 		if (!rslt)
1179 			return (0);
1180 		if (sc->sc_status != GDT_S_BSY || --retries == 0)
1181 			break;
1182 		DELAY(1);
1183 	}
1184 	return (sc->sc_status == GDT_S_OK);
1185 }
1186 
1187 void *
1188 gdt_ccb_alloc(void *xsc)
1189 {
1190 	struct gdt_softc *sc = xsc;
1191 	struct gdt_ccb *ccb;
1192 
1193 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc));
1194 
1195 	mtx_enter(&sc->sc_ccb_mtx);
1196 	ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1197 	if (ccb != NULL)
1198 		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1199 	mtx_leave(&sc->sc_ccb_mtx);
1200 
1201 	return (ccb);
1202 }
1203 
1204 void
1205 gdt_ccb_free(void *xsc, void *xccb)
1206 {
1207 	struct gdt_softc *sc = xsc;
1208 	struct gdt_ccb *ccb = xccb;
1209 	int wake = 0;
1210 
1211 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb));
1212 
1213 	mtx_enter(&sc->sc_ccb_mtx);
1214 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1215 	/* If the free list was empty, wake up potential waiters. */
1216 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1217 		wake = 1;
1218 	mtx_leave(&sc->sc_ccb_mtx);
1219 
1220 	if (wake)
1221 		wakeup(&sc->sc_free_ccb);
1222 }
1223 
1224 void
1225 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1226 {
1227 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1228 
1229 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1230 	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1231 	gdt_start_ccbs(sc);
1232 }
1233 
1234 void
1235 gdt_start_ccbs(struct gdt_softc *sc)
1236 {
1237 	struct gdt_ccb *ccb;
1238 	struct scsi_xfer *xs;
1239 
1240 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1241 
1242 	while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1243 
1244 		xs = ccb->gc_xs;
1245 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1246 			timeout_del(&xs->stimeout);
1247 
1248 		if (gdt_exec_ccb(ccb) == 0) {
1249 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1250 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1251 			timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1252 			break;
1253 		}
1254 		TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1255 
1256 		if ((xs->flags & SCSI_POLL) == 0) {
1257 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1258 			timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1259 		}
1260 	}
1261 }
1262 
1263 void
1264 gdt_chain(struct gdt_softc *sc)
1265 {
1266 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1267 
1268 	if (!SIMPLEQ_EMPTY(&sc->sc_queue))
1269 		gdt_scsi_cmd(SIMPLEQ_FIRST(&sc->sc_queue));
1270 }
1271 
1272 void
1273 gdt_timeout(void *arg)
1274 {
1275 	struct gdt_ccb *ccb = arg;
1276 	struct scsi_link *link = ccb->gc_xs->sc_link;
1277 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
1278 	int s;
1279 
1280 	sc_print_addr(link);
1281 	printf("timed out\n");
1282 
1283 	/* XXX Test for multiple timeouts */
1284 
1285 	ccb->gc_xs->error = XS_TIMEOUT;
1286 	s = splbio();
1287 	gdt_enqueue_ccb(sc, ccb);
1288 	splx(s);
1289 }
1290 
1291 void
1292 gdt_watchdog(void *arg)
1293 {
1294 	struct gdt_ccb *ccb = arg;
1295 	struct scsi_link *link = ccb->gc_xs->sc_link;
1296 	struct gdt_softc *sc = link->bus->sb_adapter_softc;
1297 	int s;
1298 
1299 	s = splbio();
1300 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1301 	gdt_start_ccbs(sc);
1302 	splx(s);
1303 }
1304 
1305 #if NBIO > 0
1306 int
1307 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1308 {
1309 	struct gdt_softc *sc = (struct gdt_softc *)dev;
1310 	int error = 0;
1311 
1312 	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1313 
1314 	switch (cmd) {
1315 	case BIOCINQ:
1316 		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1317 		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1318 		break;
1319 
1320 	case BIOCVOL:
1321 		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1322 		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1323 		break;
1324 
1325 	case BIOCDISK:
1326 		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1327 		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1328 		break;
1329 
1330 	case BIOCALARM:
1331 		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1332 		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1333 		break;
1334 
1335 	case BIOCSETSTATE:
1336 		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1337 		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1338 		break;
1339 
1340 	default:
1341 		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1342 		error = ENOTTY;
1343 	}
1344 
1345 	return (error);
1346 }
1347 
1348 int
1349 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1350 {
1351 	bi->bi_novol = sc->sc_ndevs;
1352 	bi->bi_nodisk = sc->sc_total_disks;
1353 
1354 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1355 
1356 	return (0);
1357 }
1358 
1359 int
1360 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
1361 {
1362 	return (1); /* XXX not yet */
1363 }
1364 
1365 int
1366 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
1367 {
1368 	return (1); /* XXX not yet */
1369 }
1370 
1371 int
1372 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
1373 {
1374 	return (1); /* XXX not yet */
1375 }
1376 
1377 int
1378 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
1379 {
1380 	return (1); /* XXX not yet */
1381 }
1382 #endif /* NBIO > 0 */
1383