1 /*	$OpenBSD: gdt_common.c,v 1.16 2001/11/06 19:53:18 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Niklas Hallqvist.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * This driver would not have been written if it was not for the hardware donations
34  * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
35  */
36 
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/buf.h>
40 #include <sys/device.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 
44 #include <machine/bus.h>
45 
46 #include <uvm/uvm_extern.h>
47 
48 #include <scsi/scsi_all.h>
49 #include <scsi/scsi_disk.h>
50 #include <scsi/scsiconf.h>
51 
52 #include <dev/ic/gdtreg.h>
53 #include <dev/ic/gdtvar.h>
54 
55 #ifdef GDT_DEBUG
56 int gdt_maxcmds = GDT_MAXCMDS;
57 #undef GDT_MAXCMDS
58 #define GDT_MAXCMDS gdt_maxcmds
59 #endif
60 
61 int	gdt_async_event __P((struct gdt_softc *, int));
62 void	gdt_chain __P((struct gdt_softc *));
63 void	gdt_clear_events __P((struct gdt_softc *));
64 void	gdt_copy_internal_data __P((struct scsi_xfer *, u_int8_t *, size_t));
65 struct scsi_xfer *gdt_dequeue __P((struct gdt_softc *));
66 void	gdt_enqueue __P((struct gdt_softc *, struct scsi_xfer *, int));
67 void	gdt_enqueue_ccb __P((struct gdt_softc *, struct gdt_ccb *));
68 void	gdt_eval_mapping __P((u_int32_t, int *, int *, int *));
69 int	gdt_exec_ccb __P((struct gdt_ccb *));
70 void	gdt_free_ccb __P((struct gdt_softc *, struct gdt_ccb *));
71 struct gdt_ccb *gdt_get_ccb __P((struct gdt_softc *, int));
72 int	gdt_internal_cache_cmd __P((struct scsi_xfer *));
73 int	gdt_internal_cmd __P((struct gdt_softc *, u_int8_t, u_int16_t,
74     u_int32_t, u_int32_t, u_int32_t));
75 int	gdt_raw_scsi_cmd __P((struct scsi_xfer *));
76 int	gdt_scsi_cmd __P((struct scsi_xfer *));
77 void	gdt_start_ccbs __P((struct gdt_softc *));
78 int	gdt_sync_event __P((struct gdt_softc *, int, u_int8_t,
79     struct scsi_xfer *));
80 void	gdt_timeout __P((void *));
81 int	gdt_wait __P((struct gdt_softc *, struct gdt_ccb *, int));
82 void	gdt_watchdog __P((void *));
83 
84 struct cfdriver gdt_cd = {
85 	NULL, "gdt", DV_DULL
86 };
87 
88 struct scsi_adapter gdt_switch = {
89 	gdt_scsi_cmd, gdtminphys, 0, 0,
90 };
91 
92 struct scsi_adapter gdt_raw_switch = {
93 	gdt_raw_scsi_cmd, gdtminphys, 0, 0,
94 };
95 
96 struct scsi_device gdt_dev = {
97 	NULL, NULL, NULL, NULL
98 };
99 
100 u_int8_t gdt_polling;
101 u_int8_t gdt_from_wait;
102 struct gdt_softc *gdt_wait_gdt;
103 int	gdt_wait_index;
104 #ifdef GDT_DEBUG
105 int	gdt_debug = GDT_DEBUG;
106 #endif
107 
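/*
 * Controller-independent attachment: set up the ccb pool, initialize the
 * screen, cache and raw SCSI services on the controller, read the cache
 * and board configuration, scan for host drives, and attach one scsibus
 * for the cache service plus one per physical channel.
 */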
108 int
109 gdt_attach(gdt)
110 	struct gdt_softc *gdt;
111 {
112 	u_int16_t cdev_cnt;
113 	int i, id, drv_cyls, drv_hds, drv_secs, error;
114 
115 	gdt_polling = 1;
116 	gdt_from_wait = 0;
117 	gdt_clear_events(gdt);
118 
119 	TAILQ_INIT(&gdt->sc_free_ccb);
120 	TAILQ_INIT(&gdt->sc_ccbq);
121 	LIST_INIT(&gdt->sc_queue);
122 
123 	/* Initialize the ccbs */
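	/*
	 * Command indices handed to the firmware start at 2 (gc_cmd_index
	 * below); gdt_intr() relies on this offset when it maps a returned
	 * index back to its ccb.
	 */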
124 	for (i = 0; i < GDT_MAXCMDS; i++) {
125 		gdt->sc_ccbs[i].gc_cmd_index = i + 2;
126 		error = bus_dmamap_create(gdt->sc_dmat,
127 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
128 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
129 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
130 		    &gdt->sc_ccbs[i].gc_dmamap_xfer);
131 		if (error) {
132 			printf("%s: cannot create ccb dmamap (%d)\n",
133 			    gdt->sc_dev.dv_xname, error);
134 			return (1);
135 		}
136 		(void)gdt_ccb_set_cmd(gdt->sc_ccbs + i, GDT_GCF_UNUSED);
137 		TAILQ_INSERT_TAIL(&gdt->sc_free_ccb, &gdt->sc_ccbs[i],
138 		    gc_chain);
139 	}
140 
141 	/* Fill in the prototype scsi_link. */
142 	gdt->sc_link.adapter_softc = gdt;
143 	gdt->sc_link.adapter = &gdt_switch;
144 	gdt->sc_link.device = &gdt_dev;
145 	/* XXX what is optimal? */
146 	gdt->sc_link.openings = GDT_MAXCMDS;
147 	gdt->sc_link.adapter_buswidth =
148 	    (gdt->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
149 	gdt->sc_link.adapter_target = gdt->sc_link.adapter_buswidth;
150 
151 	if (!gdt_internal_cmd(gdt, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
152 		printf("screen service initialization error %d\n",
153 		     gdt->sc_status);
154 		return (1);
155 	}
156 
157 	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
158 	    0)) {
159 		printf("cache service initialization error %d\n",
160 		    gdt->sc_status);
161 		return (1);
162 	}
163 
164 	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_MOUNT, 0xffff, 1,
165 	    0)) {
166 		printf("cache service mount error %d\n",
167 		    gdt->sc_status);
168 		return (1);
169 	}
170 
171 	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
172 	    0)) {
173 		printf("cache service post-mount initialization error %d\n",
174 		    gdt->sc_status);
175 		return (1);
176 	}
177 	cdev_cnt = (u_int16_t)gdt->sc_info;
178 
179 	/* Detect number of busses */
180 	gdt_enc32(gdt->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
181 	gdt->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
182 	gdt->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
183 	gdt->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
184 	gdt_enc32(gdt->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
185 	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
186 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
187 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
188 		gdt->sc_bus_cnt = gdt->sc_scratch[GDT_IOC_CHAN_COUNT];
189 		for (i = 0; i < gdt->sc_bus_cnt; i++) {
190 			id = gdt->sc_scratch[GDT_IOC_HDR_SZ +
191 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
192 			gdt->sc_bus_id[id] = id < GDT_MAXID ? id : 0xff;
193 		}
194 
195 	} else {
196 		/* New method failed; fall back to querying each channel. */
197 		for (i = 0; i < GDT_MAXBUS; i++) {
198 			gdt_enc32(gdt->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
199 			if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
200 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
201 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
202 			    GDT_GETCH_SZ)) {
203 				if (i == 0) {
204 					printf("cannot get channel count, "
205 					    "error %d\n", gdt->sc_status);
206 					return (1);
207 				}
208 				break;
209 			}
210 			gdt->sc_bus_id[i] =
211 			    (gdt->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
212 			    gdt->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
213 		}
214 		gdt->sc_bus_cnt = i;
215 	}
216 
217 	/* Read cache configuration */
218 	if (!gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
219 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
220 		printf("cannot get cache info, error %d\n", gdt->sc_status);
221 		return (1);
222 	}
223 	gdt->sc_cpar.cp_version =
224 	    gdt_dec32(gdt->sc_scratch + GDT_CPAR_VERSION);
225 	gdt->sc_cpar.cp_state = gdt_dec16(gdt->sc_scratch + GDT_CPAR_STATE);
226 	gdt->sc_cpar.cp_strategy =
227 	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_STRATEGY);
228 	gdt->sc_cpar.cp_write_back =
229 	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_WRITE_BACK);
230 	gdt->sc_cpar.cp_block_size =
231 	    gdt_dec16(gdt->sc_scratch + GDT_CPAR_BLOCK_SIZE);
232 
233 	/* Read board information and features */
234 	gdt->sc_more_proc = 0;
235 	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
236 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
237 		/* XXX A lot of these assignments can probably go later */
238 		gdt->sc_binfo.bi_ser_no =
239 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_SER_NO);
240 		bcopy(gdt->sc_scratch + GDT_BINFO_OEM_ID,
241 		    gdt->sc_binfo.bi_oem_id, sizeof gdt->sc_binfo.bi_oem_id);
242 		gdt->sc_binfo.bi_ep_flags =
243 		    gdt_dec16(gdt->sc_scratch + GDT_BINFO_EP_FLAGS);
244 		gdt->sc_binfo.bi_proc_id =
245 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_PROC_ID);
246 		gdt->sc_binfo.bi_memsize =
247 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_MEMSIZE);
248 		gdt->sc_binfo.bi_mem_banks =
249 		    gdt->sc_scratch[GDT_BINFO_MEM_BANKS];
250 		gdt->sc_binfo.bi_chan_type =
251 		    gdt->sc_scratch[GDT_BINFO_CHAN_TYPE];
252 		gdt->sc_binfo.bi_chan_count =
253 		    gdt->sc_scratch[GDT_BINFO_CHAN_COUNT];
254 		gdt->sc_binfo.bi_rdongle_pres =
255 		    gdt->sc_scratch[GDT_BINFO_RDONGLE_PRES];
256 		gdt->sc_binfo.bi_epr_fw_ver =
257 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_EPR_FW_VER);
258 		gdt->sc_binfo.bi_upd_fw_ver =
259 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_UPD_FW_VER);
260 		gdt->sc_binfo.bi_upd_revision =
261 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_UPD_REVISION);
262 		bcopy(gdt->sc_scratch + GDT_BINFO_TYPE_STRING,
263 		    gdt->sc_binfo.bi_type_string,
264 		    sizeof gdt->sc_binfo.bi_type_string);
265 		bcopy(gdt->sc_scratch + GDT_BINFO_RAID_STRING,
266 		    gdt->sc_binfo.bi_raid_string,
267 		    sizeof gdt->sc_binfo.bi_raid_string);
268 		gdt->sc_binfo.bi_update_pres =
269 		    gdt->sc_scratch[GDT_BINFO_UPDATE_PRES];
270 		gdt->sc_binfo.bi_xor_pres =
271 		    gdt->sc_scratch[GDT_BINFO_XOR_PRES];
272 		gdt->sc_binfo.bi_prom_type =
273 		    gdt->sc_scratch[GDT_BINFO_PROM_TYPE];
274 		gdt->sc_binfo.bi_prom_count =
275 		    gdt->sc_scratch[GDT_BINFO_PROM_COUNT];
276 		gdt->sc_binfo.bi_dup_pres =
277 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_DUP_PRES);
278 		gdt->sc_binfo.bi_chan_pres =
279 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_CHAN_PRES);
280 		gdt->sc_binfo.bi_mem_pres =
281 		    gdt_dec32(gdt->sc_scratch + GDT_BINFO_MEM_PRES);
282 		gdt->sc_binfo.bi_ft_bus_system =
283 		    gdt->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
284 		gdt->sc_binfo.bi_subtype_valid =
285 		    gdt->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
286 		gdt->sc_binfo.bi_board_subtype =
287 		    gdt->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
288 		gdt->sc_binfo.bi_rampar_pres =
289 		    gdt->sc_scratch[GDT_BINFO_RAMPAR_PRES];
290 
291 		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_IOCTL,
292 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
293 			gdt->sc_bfeat.bf_chaining =
294 			    gdt->sc_scratch[GDT_BFEAT_CHAINING];
295 			gdt->sc_bfeat.bf_striping =
296 			    gdt->sc_scratch[GDT_BFEAT_STRIPING];
297 			gdt->sc_bfeat.bf_mirroring =
298 			    gdt->sc_scratch[GDT_BFEAT_MIRRORING];
299 			gdt->sc_bfeat.bf_raid =
300 			    gdt->sc_scratch[GDT_BFEAT_RAID];
301 			gdt->sc_more_proc = 1;
302 		}
303 	} else {
304 		/* XXX Not implemented yet */
305 	}
306 
307 	/* Read more information */
308 	if (gdt->sc_more_proc) {
309 		/* XXX Not implemented yet */
310 	}
311 
312 	if (!gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
313 		printf("raw service initialization error %d\n",
314 		    gdt->sc_status);
315 		return (1);
316 	}
317 
318 	/* Set/get features raw service (scatter/gather) */
319 	gdt->sc_raw_feat = 0;
320 	if (gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
321 	    GDT_SCATTER_GATHER, 0, 0))
322 		if (gdt_internal_cmd(gdt, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
323 		    0, 0))
324 			gdt->sc_raw_feat = gdt->sc_info;
325 
326 	/* Set/get features cache service (scatter/gather) */
327 	gdt->sc_cache_feat = 0;
328 	if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
329 	    GDT_SCATTER_GATHER, 0))
330 		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
331 		    0))
332 			gdt->sc_cache_feat = gdt->sc_info;
333 
334 	/* XXX Linux reserve drives here, potentially */
335 
336 	/* Scan for cache devices */
337 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
338 		if (gdt_internal_cmd(gdt, GDT_CACHESERVICE, GDT_INFO, i, 0,
339 		    0)) {
340 			gdt->sc_hdr[i].hd_present = 1;
341 			gdt->sc_hdr[i].hd_size = gdt->sc_info;
342 
343 			/*
344 			 * Evaluate mapping (sectors per head, heads per cyl)
345 			 */
346 			gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
347 			if (gdt->sc_info2 == 0)
348 				gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
349 				    &drv_cyls, &drv_hds, &drv_secs);
350 			else {
351 				drv_hds = gdt->sc_info2 & 0xff;
352 				drv_secs = (gdt->sc_info2 >> 8) & 0xff;
353 				drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
354 				    drv_secs;
355 			}
356 			gdt->sc_hdr[i].hd_heads = drv_hds;
357 			gdt->sc_hdr[i].hd_secs = drv_secs;
358 			/* Round the size */
359 			gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
360 
361 			if (gdt_internal_cmd(gdt, GDT_CACHESERVICE,
362 			    GDT_DEVTYPE, i, 0, 0))
363 				gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
364 		}
365 
366 	printf("dpmem %x %d-bus %d cache device%s\n", gdt->sc_dpmembase,
367 	    gdt->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
368 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
369 	    gdt->sc_dev.dv_xname, gdt->sc_cpar.cp_version,
370 	    gdt->sc_cpar.cp_state ? "on" : "off", gdt->sc_cpar.cp_strategy,
371 	    gdt->sc_cpar.cp_write_back ? "on" : "off",
372 	    gdt->sc_cpar.cp_block_size);
373 #if 1
374 	printf("%s: raw feat %x cache feat %x\n", gdt->sc_dev.dv_xname,
375 	    gdt->sc_raw_feat, gdt->sc_cache_feat);
376 #endif
377 
378 	config_found(&gdt->sc_dev, &gdt->sc_link, scsiprint);
379 
380 	gdt->sc_raw_link = malloc(gdt->sc_bus_cnt * sizeof (struct scsi_link),
381 				  M_DEVBUF, M_NOWAIT);
	if (gdt->sc_raw_link == NULL)
		panic("%s: cannot allocate raw scsi_link array",
		    gdt->sc_dev.dv_xname);
382 	bzero(gdt->sc_raw_link, gdt->sc_bus_cnt * sizeof (struct scsi_link));
383 
384 	for (i = 0; i < gdt->sc_bus_cnt; i++) {
385 		/* Fill in the prototype scsi_link. */
386 		gdt->sc_raw_link[i].adapter_softc = gdt;
387 		gdt->sc_raw_link[i].adapter = &gdt_raw_switch;
388 		gdt->sc_raw_link[i].adapter_target = 7;
389 		gdt->sc_raw_link[i].device = &gdt_dev;
390 		gdt->sc_raw_link[i].openings = 4;	/* XXX a guess */
391 		gdt->sc_raw_link[i].adapter_buswidth =
392 		    (gdt->sc_class & GDT_FC) ? GDT_MAXID : 16;	/* XXX */
393 
394 		config_found(&gdt->sc_dev, &gdt->sc_raw_link[i], scsiprint);
395 	}
396 
397 	gdt_polling = 0;
398 	return (0);
399 }
400 
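/*
 * Choose a fake C/H/S geometry for a host drive of the given size (in
 * sectors): try increasingly dense heads/sectors layouts until the
 * cylinder count drops below GDT_MAXCYLS; the largest layout is used
 * unconditionally as a last resort.
 */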
401 void
402 gdt_eval_mapping(size, cyls, heads, secs)
403 	u_int32_t size;
404 	int *cyls, *heads, *secs;
405 {
406 	*cyls = size / GDT_HEADS / GDT_SECS;
407 	if (*cyls < GDT_MAXCYLS) {
408 		*heads = GDT_HEADS;
409 		*secs = GDT_SECS;
410 	} else {
411 		/* Too high for 64 * 32 */
412 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
413 		if (*cyls < GDT_MAXCYLS) {
414 			*heads = GDT_MEDHEADS;
415 			*secs = GDT_MEDSECS;
416 		} else {
417 			/* Too high for 127 * 63 */
418 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
419 			*heads = GDT_BIGHEADS;
420 			*secs = GDT_BIGSECS;
421 		}
422 	}
423 }
424 
425 /*
426  * Insert a command into the driver queue, either at the front or at the tail.
427  * It's ok to overload the freelist link as these structures are never on
428  * the freelist at this time.
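 *
 * sc_queuelast caches the tail of the queue, as LIST(3) lists keep no
 * tail pointer of their own.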
429  */
430 void
431 gdt_enqueue(gdt, xs, infront)
432 	struct gdt_softc *gdt;
433 	struct scsi_xfer *xs;
434 	int infront;
435 {
436 	if (infront || LIST_FIRST(&gdt->sc_queue) == NULL) {
437 		if (LIST_FIRST(&gdt->sc_queue) == NULL)
438 			gdt->sc_queuelast = xs;
439 		LIST_INSERT_HEAD(&gdt->sc_queue, xs, free_list);
440 		return;
441 	}
442 	LIST_INSERT_AFTER(gdt->sc_queuelast, xs, free_list);
443 	gdt->sc_queuelast = xs;
444 }
445 
446 /*
447  * Pull a command off the front of the driver queue.
448  */
449 struct scsi_xfer *
450 gdt_dequeue(gdt)
451 	struct gdt_softc *gdt;
452 {
453 	struct scsi_xfer *xs;
454 
455 	xs = LIST_FIRST(&gdt->sc_queue);
456 	if (xs == NULL)
457 		return (NULL);
458 	LIST_REMOVE(xs, free_list);
459 
460 	if (LIST_FIRST(&gdt->sc_queue) == NULL)
461 		gdt->sc_queuelast = NULL;
462 
463 	return (xs);
464 }
465 
466 /*
467  * Start a SCSI operation on a cache device.
468  * XXX Polled operation is not yet complete.  What kind of locking do we need?
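 *
 * The transfer is appended to the driver's wait queue, and the queue is
 * then drained for as long as the controller accepts new commands.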
469  */
470 int
471 gdt_scsi_cmd(xs)
472 	struct scsi_xfer *xs;
473 {
474 	struct scsi_link *link = xs->sc_link;
475 	struct gdt_softc *gdt = link->adapter_softc;
476 	u_int8_t target = link->target;
477 	struct gdt_ccb *ccb;
478 	u_int32_t blockno, blockcnt;
479 	struct scsi_rw *rw;
480 	struct scsi_rw_big *rwb;
481 	bus_dmamap_t xfer;
482 	int error, retval = SUCCESSFULLY_QUEUED;
483 	gdt_lock_t lock;
484 
485 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
486 
487 	xs->error = XS_NOERROR;
488 
489 	if (target >= GDT_MAX_HDRIVES || !gdt->sc_hdr[target].hd_present ||
490 	    link->lun != 0) {
491 		/*
492 		 * XXX Should be XS_SENSE but that would require setting up a
493 		 * faked sense too.
494 		 */
495 		xs->error = XS_DRIVER_STUFFUP;
496 		xs->flags |= ITSDONE;
497 		scsi_done(xs);
498 		return (COMPLETE);
499 	}
500 
501 	lock = GDT_LOCK_GDT(gdt);
502 
503 	/* Don't double enqueue if we came from gdt_chain. */
504 	if (xs != LIST_FIRST(&gdt->sc_queue))
505 		gdt_enqueue(gdt, xs, 0);
506 
507 	while ((xs = gdt_dequeue(gdt))) {
508 		xs->error = XS_NOERROR;
509 		ccb = NULL;
510 		link = xs->sc_link;
511 		target = link->target;
512 
513 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
514 		    gdt->sc_test_busy(gdt)) {
515 			/*
516 			 * Put it back in front.  XXX Should we instead
517 			 * set xs->error to XS_BUSY?
518 			 */
519 			gdt_enqueue(gdt, xs, 1);
520 			break;
521 		}
522 
523 		switch (xs->cmd->opcode) {
524 		case TEST_UNIT_READY:
525 		case REQUEST_SENSE:
526 		case INQUIRY:
527 		case MODE_SENSE:
528 		case START_STOP:
529 		case READ_CAPACITY:
530 #if 0
531 		case VERIFY:
532 #endif
533 			if (!gdt_internal_cache_cmd(xs)) {
534 				GDT_UNLOCK_GDT(gdt, lock);
535 				return (TRY_AGAIN_LATER);
536 			}
537 			xs->flags |= ITSDONE;
538 			scsi_done(xs);
539 			goto ready;
540 
541 		case PREVENT_ALLOW:
542 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
543 			/* XXX Not yet implemented */
544 			xs->error = XS_NOERROR;
545 			xs->flags |= ITSDONE;
546 			scsi_done(xs);
547 			goto ready;
548 
549 		default:
550 			GDT_DPRINTF(GDT_D_CMD,
551 			    ("unknown opc %d ", xs->cmd->opcode));
552 			/* XXX Not yet implemented */
553 			xs->error = XS_DRIVER_STUFFUP;
554 			xs->flags |= ITSDONE;
555 			scsi_done(xs);
556 			goto ready;
557 
558 		case READ_COMMAND:
559 		case READ_BIG:
560 		case WRITE_COMMAND:
561 		case WRITE_BIG:
562 		case SYNCHRONIZE_CACHE:
563 			/*
564 			 * A new command chain, start from the beginning.
565 			 */
566 			gdt->sc_cmd_off = 0;
567 
568 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
569 				/* A read or write operation. */
570 				if (xs->cmdlen == 6) {
571 					rw = (struct scsi_rw *)xs->cmd;
572 					blockno = _3btol(rw->addr) &
573 					    (SRW_TOPADDR << 16 | 0xffff);
574 					blockcnt =
575 					    rw->length ? rw->length : 0x100;
576 				} else {
577 					rwb = (struct scsi_rw_big *)xs->cmd;
578 					blockno = _4btol(rwb->addr);
579 					blockcnt = _2btol(rwb->length);
580 				}
581 				if (blockno >= gdt->sc_hdr[target].hd_size ||
582 				    blockno + blockcnt >
583 				    gdt->sc_hdr[target].hd_size) {
584 					printf(
585 					    "%s: out of bounds %u-%u >= %u\n",
586 					    gdt->sc_dev.dv_xname, blockno,
587 					    blockcnt,
588 					    gdt->sc_hdr[target].hd_size);
589 					/*
590 					 * XXX Should be XS_SENSE but that
591 					 * would require setting up a faked
592 					 * sense too.
593 					 */
594 					xs->error = XS_DRIVER_STUFFUP;
595 					xs->flags |= ITSDONE;
596 					scsi_done(xs);
597 					goto ready;
598 				}
599 			}
600 
601 			ccb = gdt_get_ccb(gdt, xs->flags);
602 			/*
603 			 * If we are out of commands, something is wrong.
605 			 */
606 			if (ccb == NULL) {
607 				printf("%s: no ccb in gdt_scsi_cmd\n",
608 				    gdt->sc_dev.dv_xname);
609 				xs->error = XS_DRIVER_STUFFUP;
610 				xs->flags |= ITSDONE;
611 				scsi_done(xs);
612 				goto ready;
613 			}
614 
615 			ccb->gc_blockno = blockno;
616 			ccb->gc_blockcnt = blockcnt;
617 			ccb->gc_xs = xs;
618 			ccb->gc_timeout = xs->timeout;
619 			ccb->gc_service = GDT_CACHESERVICE;
620 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
621 
622 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
623 				xfer = ccb->gc_dmamap_xfer;
624 				error = bus_dmamap_load(gdt->sc_dmat, xfer,
625 				    xs->data, xs->datalen, NULL,
626 				    (xs->flags & SCSI_NOSLEEP) ?
627 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
628 				if (error) {
629 					printf("%s: gdt_scsi_cmd: ",
630 					    gdt->sc_dev.dv_xname);
631 					if (error == EFBIG)
632 						printf(
633 						    "more than %d dma segs\n",
634 						    GDT_MAXOFFSETS);
635 					else
636 						printf("error %d "
637 						    "loading dma map\n",
638 						    error);
639 
640 					gdt_free_ccb(gdt, ccb);
641 					xs->error = XS_DRIVER_STUFFUP;
642 					xs->flags |= ITSDONE;
643 					scsi_done(xs);
644 					goto ready;
645 				}
646 				bus_dmamap_sync(gdt->sc_dmat, xfer, 0,
647 				    xfer->dm_mapsize,
648 				    (xs->flags & SCSI_DATA_IN) ?
649 				    BUS_DMASYNC_PREREAD :
650 				    BUS_DMASYNC_PREWRITE);
651 			}
652 
653 			gdt_enqueue_ccb(gdt, ccb);
654 			/* XXX what if enqueue did not start a transfer? */
655 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
656 				if (!gdt_wait(gdt, ccb, ccb->gc_timeout)) {
657 					GDT_UNLOCK_GDT(gdt, lock);
658 					printf("%s: command %d timed out\n",
659 					    gdt->sc_dev.dv_xname,
660 					    ccb->gc_cmd_index);
661 					xs->error = XS_TIMEOUT;
662 					return (TRY_AGAIN_LATER);
663 				}
664 				xs->flags |= ITSDONE;
665 				scsi_done(xs);
666 			}
667 		}
668 
669 	ready:
670 		/*
671 		 * Don't process the queue if we are polling.
672 		 */
673 		if (xs->flags & SCSI_POLL) {
674 			retval = COMPLETE;
675 			break;
676 		}
677 	}
678 
679 	GDT_UNLOCK_GDT(gdt, lock);
680 	return (retval);
681 }
682 
683 /* XXX Currently only for cacheservice, returns 0 if busy */
684 int
685 gdt_exec_ccb(ccb)
686 	struct gdt_ccb *ccb;
687 {
688 	struct scsi_xfer *xs = ccb->gc_xs;
689 	struct scsi_link *link = xs->sc_link;
690 	struct gdt_softc *gdt = link->adapter_softc;
691 	u_int8_t target = link->target;
692 	u_int32_t sg_canz;
693 	bus_dmamap_t xfer;
694 	int i;
695 
696 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
697 
698 	gdt->sc_cmd_cnt = 0;
699 
700 	/*
701 	 * XXX Yeah I know it's an always-true condition, but that may change
702 	 * later.
703 	 */
704 	if (gdt->sc_cmd_cnt == 0)
705 		gdt->sc_set_sema0(gdt);
706 
707 	gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
708 	gdt_enc32(gdt->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
709 	gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
710 	    target);
711 
712 	switch (xs->cmd->opcode) {
713 	case PREVENT_ALLOW:
714 	case SYNCHRONIZE_CACHE:
715 		if (xs->cmd->opcode == PREVENT_ALLOW) {
716 			/* XXX PREVENT_ALLOW support goes here */
717 		} else {
718 			GDT_DPRINTF(GDT_D_CMD,
719 			    ("SYNCHRONIZE CACHE tgt %d ", target));
720 			gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
721 		}
722 		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
723 		    1);
724 		sg_canz = 0;
725 		break;
726 
727 	case WRITE_COMMAND:
728 	case WRITE_BIG:
729 		/* XXX WRITE_THR could be supported too */
730 		gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
731 		break;
732 
733 	case READ_COMMAND:
734 	case READ_BIG:
735 		gdt->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
736 		break;
737 	}
738 
739 	if (xs->cmd->opcode != PREVENT_ALLOW &&
740 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
741 		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
742 		    ccb->gc_blockno);
743 		gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
744 		    ccb->gc_blockcnt);
745 
746 		xfer = ccb->gc_dmamap_xfer;
747 		if (gdt->sc_cache_feat & GDT_SCATTER_GATHER) {
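			/*
			 * A destination address of all ones apparently tells
			 * the firmware to take the scatter/gather list
			 * encoded below instead of a single physical address.
			 */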
748 			gdt_enc32(
749 			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
750 			    0xffffffff);
751 			for (i = 0; i < xfer->dm_nsegs; i++) {
752 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
753 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
754 				    GDT_SG_PTR,
755 				    xfer->dm_segs[i].ds_addr);
756 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
757 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
758 				    GDT_SG_LEN,
759 				    xfer->dm_segs[i].ds_len);
760 				GDT_DPRINTF(GDT_D_IO,
761 				    ("#%d pa %p len %x\n", i,
762 				    xfer->dm_segs[i].ds_addr,
763 				    xfer->dm_segs[i].ds_len));
764 			}
765 			sg_canz = xfer->dm_nsegs;
766 			gdt_enc32(
767 			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
768 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
769 		} else {
770 			/* XXX Hardly correct */
771 			gdt_enc32(
772 			    gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
773 			    xfer->dm_segs[0].ds_addr);
774 			sg_canz = 0;
775 		}
776 	}
777 	gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
778 
779 	gdt->sc_cmd_len =
780 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
781 	    sizeof (u_int32_t));
782 
783 	if (gdt->sc_cmd_cnt > 0 &&
784 	    gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
785 	    gdt->sc_ic_all_size) {
786 		printf("%s: DPMEM overflow\n", gdt->sc_dev.dv_xname);
787 		gdt_free_ccb(gdt, ccb);
788 		xs->error = XS_BUSY;
789 		return (0);
790 	}
791 
792 	gdt->sc_copy_cmd(gdt, ccb);
793 	gdt->sc_release_event(gdt, ccb);
794 
795 	xs->error = XS_NOERROR;
796 	xs->resid = 0;
797 	return (1);
798 }
799 
800 void
801 gdt_copy_internal_data(xs, data, size)
802 	struct scsi_xfer *xs;
803 	u_int8_t *data;
804 	size_t size;
805 {
806 	size_t copy_cnt;
807 
808 	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));
809 
810 	if (!xs->datalen)
811 		printf("uio move not yet supported\n");
812 	else {
813 		copy_cnt = MIN(size, xs->datalen);
814 		bcopy(data, xs->data, copy_cnt);
815 	}
816 }
817 
818 /* Emulated SCSI operation on cache device */
819 int
820 gdt_internal_cache_cmd(xs)
821 	struct scsi_xfer *xs;
822 {
823 	struct scsi_link *link = xs->sc_link;
824 	struct gdt_softc *gdt = link->adapter_softc;
825 	struct scsi_inquiry_data inq;
826 	struct scsi_sense_data sd;
827 	struct {
828 		struct scsi_mode_header hd;
829 		struct scsi_blk_desc bd;
830 		union scsi_disk_pages dp;
831 	} mpd;
832 	struct scsi_read_cap_data rcd;
833 	u_int8_t target = link->target;
834 
835 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
836 
837 	switch (xs->cmd->opcode) {
838 	case TEST_UNIT_READY:
839 	case START_STOP:
840 #if 0
841 	case VERIFY:
842 #endif
843 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
844 		    target));
845 		break;
846 
847 	case REQUEST_SENSE:
848 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
849 		bzero(&sd, sizeof sd);
850 		sd.error_code = 0x70;
851 		sd.segment = 0;
852 		sd.flags = SKEY_NO_SENSE;
853 		gdt_enc32(sd.info, 0);
854 		sd.extra_len = 0;
855 		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
856 		break;
857 
858 	case INQUIRY:
859 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
860 		    gdt->sc_hdr[target].hd_devtype));
861 		bzero(&inq, sizeof inq);
862 		inq.device =
863 		    (gdt->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
864 		inq.dev_qual2 =
865 		    (gdt->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
866 		inq.version = 2;
867 		inq.response_format = 2;
868 		inq.additional_length = 32;
869 		strcpy(inq.vendor, "ICP    ");
870 		sprintf(inq.product, "Host drive  #%02d", target);
871 		strcpy(inq.revision, "   ");
872 		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
873 		break;
874 
875 	case MODE_SENSE:
876 		GDT_DPRINTF(GDT_D_CMD, ("MODE SENSE tgt %d ", target));
877 
878 		bzero(&mpd, sizeof mpd);
879 		switch (((struct scsi_mode_sense *)xs->cmd)->page) {
880 		case 4:
881 			/* scsi_disk.h says this should be 0x16 */
882 			mpd.dp.rigid_geometry.pg_length = 0x16;
883 			mpd.hd.data_length = sizeof mpd.hd + sizeof mpd.bd +
884 			    mpd.dp.rigid_geometry.pg_length;
885 			mpd.hd.blk_desc_len = sizeof mpd.bd;
886 
887 			/* XXX */
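			/*
			 * hd_devtype bit 1 apparently flags a read-only
			 * drive; bit 7 of the mode header's device-specific
			 * byte is the standard SCSI write-protect (WP) bit.
			 */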
888 			mpd.hd.dev_spec =
889 			    (gdt->sc_hdr[target].hd_devtype & 2) ? 0x80 : 0;
890 			_lto3b(GDT_SECTOR_SIZE, mpd.bd.blklen);
891 			mpd.dp.rigid_geometry.pg_code = 4;
892 			_lto3b(gdt->sc_hdr[target].hd_size /
893 			    gdt->sc_hdr[target].hd_heads /
894 			    gdt->sc_hdr[target].hd_secs,
895 			    mpd.dp.rigid_geometry.ncyl);
896 			mpd.dp.rigid_geometry.nheads =
897 			    gdt->sc_hdr[target].hd_heads;
898 			gdt_copy_internal_data(xs, (u_int8_t *)&mpd,
899 			    sizeof mpd);
900 			break;
901 
902 		default:
903 			printf("%s: mode sense page %d not simulated\n",
904 			    gdt->sc_dev.dv_xname,
905 			    ((struct scsi_mode_sense *)xs->cmd)->page);
906 			xs->error = XS_DRIVER_STUFFUP;
907 			return (0);
908 		}
909 		break;
910 
911 	case READ_CAPACITY:
912 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
913 		bzero(&rcd, sizeof rcd);
914 		_lto4b(gdt->sc_hdr[target].hd_size - 1, rcd.addr);
915 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
916 		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
917 		break;
918 
919 	default:
920 		printf("gdt_internal_cache_cmd got bad opcode: %d\n",
921 		    xs->cmd->opcode);
922 		xs->error = XS_DRIVER_STUFFUP;
923 		return (0);
924 	}
925 
926 	xs->error = XS_NOERROR;
927 	return (1);
928 }
929 
930 /* Start a raw SCSI operation */
931 int
932 gdt_raw_scsi_cmd(xs)
933 	struct scsi_xfer *xs;
934 {
935 	GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_scsi_cmd "));
936 
937 	/* XXX Not yet implemented */
938 	xs->error = XS_DRIVER_STUFFUP;
939 	return (COMPLETE);
940 }
941 
942 void
943 gdt_clear_events(gdt)
944 	struct gdt_softc *gdt;
945 {
946 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", gdt));
947 
948 	/* XXX To be implemented */
949 }
950 
951 int
952 gdt_async_event(gdt, service)
953 	struct gdt_softc *gdt;
954 	int service;
955 {
956 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", gdt, service));
957 
958 	if (service == GDT_SCREENSERVICE) {
959 		/* XXX To be implemented */
960 	} else {
961 		/* XXX To be implemented */
962 	}
963 
964 	return (0);
965 }
966 
967 int
968 gdt_sync_event(gdt, service, index, xs)
969 	struct gdt_softc *gdt;
970 	int service;
971 	u_int8_t index;
972 	struct scsi_xfer *xs;
973 {
974 	GDT_DPRINTF(GDT_D_INTR,
975 	    ("gdt_sync_event(%p, %d, %d, %p) ", gdt, service, index, xs));
976 
977 	if (service == GDT_SCREENSERVICE) {
978 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
979 		/* XXX To be implemented */
980 		return (0);
981 	} else {
982 		switch (gdt->sc_status) {
983 		case GDT_S_OK:
984 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
985 			/* XXX To be implemented */
986 			break;
987 		case GDT_S_BSY:
988 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
989 			/* XXX To be implemented */
990 			return (2);
991 		default:
992 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
993 			    gdt->sc_status));
994 			/* XXX To be implemented */
995 			return (0);
996 		}
997 	}
998 
999 	return (1);
1000 }
1001 
1002 int
1003 gdt_intr(arg)
1004 	void *arg;
1005 {
1006 	struct gdt_softc *gdt = arg;
1007 	struct gdt_intr_ctx ctx;
1008 	int chain = 1;
1009 	int sync_val = 0;
1010 	struct scsi_xfer *xs;
1011 	int prev_cmd;
1012 	struct gdt_ccb *ccb;
1013 	gdt_lock_t lock;
1014 
1015 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", gdt));
1016 
1017 	/* If polling and we were not called from gdt_wait, just return */
1018 	if (gdt_polling && !gdt_from_wait)
1019 		return (0);
1020 
1021 	if (!gdt_polling)
1022 		lock = GDT_LOCK_GDT(gdt);
1023 
1024 	ctx.istatus = gdt->sc_get_status(gdt);
1025 	if (!ctx.istatus) {
1026 		if (!gdt_polling)
1027 			GDT_UNLOCK_GDT(gdt, lock);
1028 		gdt->sc_status = GDT_S_NO_STATUS;
1029 		return (0);
1030 	}
1031 
1032 	gdt_wait_index = 0;
1033 	ctx.service = ctx.info2 = 0;
1034 
1035 	gdt->sc_intr(gdt, &ctx);
1036 
1037 	gdt->sc_status = ctx.cmd_status;
1038 	gdt->sc_info = ctx.info;
1039 	gdt->sc_info2 = ctx.info2;
1040 
1041 	if (gdt_from_wait) {
1042 		gdt_wait_gdt = gdt;
1043 		gdt_wait_index = ctx.istatus;
1044 	}
1045 
1046 	switch (ctx.istatus) {
1047 	case GDT_ASYNCINDEX:
1048 		gdt_async_event(gdt, ctx.service);
1049 		goto finish;
1050 
1051 	case GDT_SPEZINDEX:
1052 		printf("%s: uninitialized or unknown service (%d %d)\n",
1053 		    gdt->sc_dev.dv_xname, ctx.info, ctx.info2);
1054 		chain = 0;
1055 		goto finish;
1056 	}
1057 
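	/* A normal completion: istatus is the ccb's command index (slot + 2). */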
1058 	ccb = &gdt->sc_ccbs[ctx.istatus - 2];
1059 	xs = ccb->gc_xs;
1060 	if (!gdt_polling)
1061 		timeout_del(&xs->stimeout);
1062 	ctx.service = ccb->gc_service;
1063 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1064 	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
1065 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
1066 		bus_dmamap_sync(gdt->sc_dmat, ccb->gc_dmamap_xfer, 0,
1067 		    ccb->gc_dmamap_xfer->dm_mapsize,
1068 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1069 		    BUS_DMASYNC_POSTWRITE);
1070 		bus_dmamap_unload(gdt->sc_dmat, ccb->gc_dmamap_xfer);
1071 	}
1072 	gdt_free_ccb(gdt, ccb);
1073 	switch (prev_cmd) {
1074 	case GDT_GCF_UNUSED:
1075 		/* XXX Not yet implemented */
1076 		chain = 0;
1077 		goto finish;
1078 	case GDT_GCF_INTERNAL:
1079 		chain = 0;
1080 		goto finish;
1081 	}
1082 
1083 	sync_val = gdt_sync_event(gdt, ctx.service, ctx.istatus, xs);
1084 
1085  finish:
1086 	if (!gdt_polling)
1087 		GDT_UNLOCK_GDT(gdt, lock);
1088 
1089 	switch (sync_val) {
1090 	case 1:
1091 		xs->flags |= ITSDONE;
1092 		scsi_done(xs);
1093 		break;
1094 
1095 	case 2:
1096 		gdt_enqueue(gdt, xs, 0);
1097 	}
1098 
1099 	if (chain)
1100 		gdt_chain(gdt);
1101 	return (1);
1102 }
1103 
1104 void
1105 gdtminphys(bp)
1106 	struct buf *bp;
1107 {
1108 #if 0
1109 	u_int8_t *buf = bp->b_data;
1110 	paddr_t pa;
1111 	long off;
1112 #endif
1113 
1114 	GDT_DPRINTF(GDT_D_MISC, ("gdtminphys(%p) ", bp));
1115 
1116 #if 1
1117 	/* As this is way more than MAXPHYS it's really not necessary. */
1118 	if (bp->b_bcount > ((GDT_MAXOFFSETS - 1) * PAGE_SIZE))
1119 		bp->b_bcount = ((GDT_MAXOFFSETS - 1) * PAGE_SIZE);
1120 #else
1121 	for (off = PAGE_SIZE, pa = vtophys(buf); off < bp->b_bcount;
1122 	    off += PAGE_SIZE)
1123 		if (pa + off != vtophys(buf + off)) {
1124 			bp->b_bcount = off;
1125 			break;
1126 		}
1127 #endif
1128 	minphys(bp);
1129 }
1130 
1131 int
1132 gdt_wait(gdt, ccb, timeout)
1133 	struct gdt_softc *gdt;
1134 	struct gdt_ccb *ccb;
1135 	int timeout;
1136 {
1137 	int rv = 0;
1138 
1139 	GDT_DPRINTF(GDT_D_MISC,
1140 	    ("gdt_wait(%p, %p, %d) ", gdt, ccb, timeout));
1141 
1142 	gdt_from_wait = 1;
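	/*
	 * XXX Callers such as gdt_scsi_cmd() pass a timeout in milliseconds
	 * (xs->timeout), yet each iteration below only adds a one
	 * microsecond delay on top of the gdt_intr() call, so the effective
	 * wait can be much shorter than that value suggests.
	 */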
1143 	do {
1144 		if (gdt_intr(gdt) && gdt == gdt_wait_gdt &&
1145 		    ccb->gc_cmd_index == gdt_wait_index) {
1146 			rv = 1;
1147 			break;
1148 		}
1149 		DELAY(1);
1150 	} while (--timeout);
1151 	gdt_from_wait = 0;
1152 
1153 	while (gdt->sc_test_busy(gdt))
1154 		DELAY(0);		/* XXX correct? */
1155 
1156 	return (rv);
1157 }
1158 
1159 int
1160 gdt_internal_cmd(gdt, service, opcode, arg1, arg2, arg3)
1161 	struct gdt_softc *gdt;
1162 	u_int8_t service;
1163 	u_int16_t opcode;
1164 	u_int32_t arg1, arg2, arg3;
1165 {
1166 	int retries;
1167 	struct gdt_ccb *ccb;
1168 
1169 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1170 	    gdt, service, opcode, arg1, arg2, arg3));
1171 
1172 	bzero(gdt->sc_cmd, GDT_CMD_SZ);
1173 
1174 	for (retries = GDT_RETRIES; ; ) {
1175 		ccb = gdt_get_ccb(gdt, SCSI_NOSLEEP);
1176 		if (ccb == NULL) {
1177 			printf("%s: no free command index found\n",
1178 			    gdt->sc_dev.dv_xname);
1179 			return (0);
1180 		}
1181 		ccb->gc_service = service;
1182 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1183 
1184 		gdt->sc_set_sema0(gdt);
1185 		gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1186 		    ccb->gc_cmd_index);
1187 		gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
1188 		gdt_enc32(gdt->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1189 
1190 		switch (service) {
1191 		case GDT_CACHESERVICE:
1192 			if (opcode == GDT_IOCTL) {
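				/*
				 * IOCTL-style commands hand the firmware the
				 * physical address of the softc scratch
				 * buffer; parameters go in and results come
				 * back through that buffer.
				 */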
1193 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
1194 				    GDT_IOCTL_SUBFUNC, arg1);
1195 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
1196 				    GDT_IOCTL_CHANNEL, arg2);
1197 				gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
1198 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1199 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
1200 				    GDT_IOCTL_P_PARAM,
1201 				    vtophys(gdt->sc_scratch));
1202 			} else {
1203 				gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
1204 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1205 				gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
1206 				    GDT_CACHE_BLOCKNO, arg2);
1207 			}
1208 			break;
1209 
1210 		case GDT_SCSIRAWSERVICE:
1211 			gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
1212 			    GDT_RAW_DIRECTION, arg1);
1213 			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1214 			    (u_int8_t)arg2;
1215 			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1216 			    (u_int8_t)arg3;
1217 			gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1218 			    (u_int8_t)(arg3 >> 8);
1219 		}
1220 
1221 		gdt->sc_cmd_len = GDT_CMD_SZ;
1222 		gdt->sc_cmd_off = 0;
1223 		gdt->sc_cmd_cnt = 0;
1224 		gdt->sc_copy_cmd(gdt, ccb);
1225 		gdt->sc_release_event(gdt, ccb);
1226 		DELAY(20);
1227 		if (!gdt_wait(gdt, ccb, GDT_POLL_TIMEOUT))
1228 			return (0);
1229 		if (gdt->sc_status != GDT_S_BSY || --retries == 0)
1230 			break;
1231 		DELAY(1);
1232 	}
1233 	return (gdt->sc_status == GDT_S_OK);
1234 }
1235 
1236 struct gdt_ccb *
1237 gdt_get_ccb(gdt, flags)
1238 	struct gdt_softc *gdt;
1239 	int flags;
1240 {
1241 	struct gdt_ccb *ccb;
1242 	gdt_lock_t lock;
1243 
1244 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p, 0x%x) ", gdt, flags));
1245 
1246 	lock = GDT_LOCK_GDT(gdt);
1247 
1248 	for (;;) {
1249 		ccb = TAILQ_FIRST(&gdt->sc_free_ccb);
1250 		if (ccb != NULL)
1251 			break;
1252 		if (flags & SCSI_NOSLEEP)
1253 			goto bail_out;
1254 		tsleep(&gdt->sc_free_ccb, PRIBIO, "gdt_ccb", 0);
1255 	}
1256 
1257 	TAILQ_REMOVE(&gdt->sc_free_ccb, ccb, gc_chain);
1258 
1259  bail_out:
1260 	GDT_UNLOCK_GDT(gdt, lock);
1261 	return (ccb);
1262 }
1263 
1264 void
1265 gdt_free_ccb(gdt, ccb)
1266 	struct gdt_softc *gdt;
1267 	struct gdt_ccb *ccb;
1268 {
1269 	gdt_lock_t lock;
1270 
1271 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p) ", gdt, ccb));
1272 
1273 	lock = GDT_LOCK_GDT(gdt);
1274 
1275 	TAILQ_INSERT_HEAD(&gdt->sc_free_ccb, ccb, gc_chain);
1276 
1277 	/* If the free list was empty, wake up potential waiters. */
1278 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1279 		wakeup(&gdt->sc_free_ccb);
1280 
1281 	GDT_UNLOCK_GDT(gdt, lock);
1282 }
1283 
1284 void
1285 gdt_enqueue_ccb(gdt, ccb)
1286 	struct gdt_softc *gdt;
1287 	struct gdt_ccb *ccb;
1288 {
1289 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", gdt, ccb));
1290 
1291 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1292 	TAILQ_INSERT_TAIL(&gdt->sc_ccbq, ccb, gc_chain);
1293 	gdt_start_ccbs(gdt);
1294 }
1295 
1296 void
1297 gdt_start_ccbs(gdt)
1298 	struct gdt_softc *gdt;
1299 {
1300 	struct gdt_ccb *ccb;
1301 	struct scsi_xfer *xs;
1302 
1303 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", gdt));
1304 
1305 	while ((ccb = TAILQ_FIRST(&gdt->sc_ccbq)) != NULL) {
1306 
1307 		xs = ccb->gc_xs;
1308 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1309 			timeout_del(&xs->stimeout);
1310 
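		/*
		 * If the controller cannot accept the command right now,
		 * leave the ccb at the head of the queue and let the
		 * watchdog timeout retry it shortly.
		 */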
1311 		if (gdt_exec_ccb(ccb) == 0) {
1312 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1313 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1314 			timeout_add(&xs->stimeout,
1315 			    (GDT_WATCH_TIMEOUT * hz) / 1000);
1316 			break;
1317 		}
1318 		TAILQ_REMOVE(&gdt->sc_ccbq, ccb, gc_chain);
1319 
1320 		if ((xs->flags & SCSI_POLL) == 0) {
1321 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1322 			timeout_add(&xs->stimeout,
1323 			    (ccb->gc_timeout * hz) / 1000);
1324 		}
1325 	}
1326 }
1327 
1328 void
1329 gdt_chain(gdt)
1330 	struct gdt_softc *gdt;
1331 {
1332 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", gdt));
1333 
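	/*
	 * Re-enter gdt_scsi_cmd() with the head of the wait queue; it
	 * recognizes that transfer and will not enqueue it a second time.
	 */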
1334 	if (LIST_FIRST(&gdt->sc_queue))
1335 		gdt_scsi_cmd(LIST_FIRST(&gdt->sc_queue));
1336 }
1337 
1338 void
1339 gdt_timeout(arg)
1340 	void *arg;
1341 {
1342 	struct gdt_ccb *ccb = arg;
1343 	struct scsi_link *link = ccb->gc_xs->sc_link;
1344 	struct gdt_softc *gdt = link->adapter_softc;
1345 	gdt_lock_t lock;
1346 
1347 	sc_print_addr(link);
1348 	printf("timed out\n");
1349 
1350 	/* XXX Test for multiple timeouts */
1351 
1352 	ccb->gc_xs->error = XS_TIMEOUT;
1353 	lock = GDT_LOCK_GDT(gdt);
1354 	gdt_enqueue_ccb(gdt, ccb);
1355 	GDT_UNLOCK_GDT(gdt, lock);
1356 }
1357 
1358 void
1359 gdt_watchdog(arg)
1360 	void *arg;
1361 {
1362 	struct gdt_ccb *ccb = arg;
1363 	struct scsi_link *link = ccb->gc_xs->sc_link;
1364 	struct gdt_softc *gdt = link->adapter_softc;
1365 	gdt_lock_t lock;
1366 
1367 	lock = GDT_LOCK_GDT(gdt);
1368 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1369 	gdt_start_ccbs(gdt);
1370 	GDT_UNLOCK_GDT(gdt, lock);
1371 }
1372