1 /*	$OpenBSD: gdt_common.c,v 1.45 2009/02/16 21:19:06 miod Exp $	*/
2 
3 /*
4  * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * This driver would not have been written without the hardware donations
29  * from both ICP-Vortex and Öko.neT.  I want to thank them for their support.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39 
40 #include <machine/bus.h>
41 
42 #include <uvm/uvm_extern.h>
43 
44 #include <scsi/scsi_all.h>
45 #include <scsi/scsi_disk.h>
46 #include <scsi/scsiconf.h>
47 
48 #include <dev/biovar.h>
49 #include <dev/ic/gdtreg.h>
50 #include <dev/ic/gdtvar.h>
51 
52 #include "bio.h"
53 
54 #ifdef GDT_DEBUG
55 int gdt_maxcmds = GDT_MAXCMDS;
56 #undef GDT_MAXCMDS
57 #define GDT_MAXCMDS gdt_maxcmds
58 #endif
59 
60 #define GDT_DRIVER_VERSION 1
61 #define GDT_DRIVER_SUBVERSION 2
62 
63 int	gdt_async_event(struct gdt_softc *, int);
64 void	gdt_chain(struct gdt_softc *);
65 void	gdt_clear_events(struct gdt_softc *);
66 void	gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t);
67 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
68 void	gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
69 void	gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
70 void	gdt_eval_mapping(u_int32_t, int *, int *, int *);
71 int	gdt_exec_ccb(struct gdt_ccb *);
72 void	gdt_free_ccb(struct gdt_softc *, struct gdt_ccb *);
73 struct gdt_ccb *gdt_get_ccb(struct gdt_softc *, int);
74 void	gdt_internal_cache_cmd(struct scsi_xfer *);
75 int	gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
76     u_int32_t, u_int32_t, u_int32_t);
77 #if NBIO > 0
78 int	gdt_ioctl(struct device *, u_long, caddr_t);
79 int	gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
80 int	gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
81 int	gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
82 int	gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
83 int	gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
84 #endif /* NBIO > 0 */
85 int	gdt_raw_scsi_cmd(struct scsi_xfer *);
86 int	gdt_scsi_cmd(struct scsi_xfer *);
87 void	gdt_start_ccbs(struct gdt_softc *);
88 int	gdt_sync_event(struct gdt_softc *, int, u_int8_t,
89     struct scsi_xfer *);
90 void	gdt_timeout(void *);
91 int	gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
92 void	gdt_watchdog(void *);
93 
94 struct cfdriver gdt_cd = {
95 	NULL, "gdt", DV_DULL
96 };
97 
98 struct scsi_adapter gdt_switch = {
99 	gdt_scsi_cmd, gdtminphys, 0, 0,
100 };
101 
102 struct scsi_adapter gdt_raw_switch = {
103 	gdt_raw_scsi_cmd, gdtminphys, 0, 0,
104 };
105 
106 struct scsi_device gdt_dev = {
107 	NULL, NULL, NULL, NULL
108 };
109 
110 int gdt_cnt = 0;
111 u_int8_t gdt_polling;
112 u_int8_t gdt_from_wait;
113 struct gdt_softc *gdt_wait_gdt;
114 int	gdt_wait_index;
115 #ifdef GDT_DEBUG
116 int	gdt_debug = GDT_DEBUG;
117 #endif
118 
119 int
120 gdt_attach(struct gdt_softc *sc)
121 {
122 	struct scsibus_attach_args saa;
123 	u_int16_t cdev_cnt;
124 	int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
125 
126 	gdt_polling = 1;
127 	gdt_from_wait = 0;
128 
129 	if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
130 	    &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
131 	    panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
132 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
133 	    GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
134 	    panic("%s: bus_dmamem_map failed", DEVNAME(sc));
135 
136 	gdt_clear_events(sc);
137 
138 	TAILQ_INIT(&sc->sc_free_ccb);
139 	TAILQ_INIT(&sc->sc_ccbq);
140 	TAILQ_INIT(&sc->sc_ucmdq);
141 	LIST_INIT(&sc->sc_queue);
142 
143 	/* Initialize the ccbs */
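	/*
	 * Command indices handed to the firmware start at 2: gdt_intr()
	 * handles GDT_ASYNCINDEX and GDT_SPEZINDEX before looking up a
	 * ccb and maps a completion back with sc_ccbs[istatus - 2].
	 */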
144 	for (i = 0; i < GDT_MAXCMDS; i++) {
145 		sc->sc_ccbs[i].gc_cmd_index = i + 2;
146 		error = bus_dmamap_create(sc->sc_dmat,
147 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
148 		    (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
149 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
150 		    &sc->sc_ccbs[i].gc_dmamap_xfer);
151 		if (error) {
152 			printf("%s: cannot create ccb dmamap (%d)",
153 			    DEVNAME(sc), error);
154 			return (1);
155 		}
156 		(void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
157 		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
158 		    gc_chain);
159 	}
160 
161 	/* Fill in the prototype scsi_link. */
162 	sc->sc_link.adapter_softc = sc;
163 	sc->sc_link.adapter = &gdt_switch;
164 	sc->sc_link.device = &gdt_dev;
165 	/* openings will be filled in later. */
166 	sc->sc_link.adapter_buswidth =
167 	    (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
168 	sc->sc_link.adapter_target = sc->sc_link.adapter_buswidth;
169 
170 	if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
171 		printf("screen service initialization error %d\n",
172 		     sc->sc_status);
173 		return (1);
174 	}
175 
176 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
177 	    0)) {
178 		printf("cache service initialization error %d\n",
179 		    sc->sc_status);
180 		return (1);
181 	}
182 
183 	cdev_cnt = (u_int16_t)sc->sc_info;
184 
185 	/* Detect number of busses */
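	/*
	 * Try the newer GDT_IOCHAN_RAW_DESC ioctl first, which returns one
	 * descriptor per channel in a single call; if the firmware rejects
	 * it, fall back to asking for GDT_SCSI_CHAN_CNT one channel at a
	 * time below.
	 */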
186 	gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
187 	sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
188 	sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
189 	sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
190 	gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
191 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
192 	    GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
193 	    GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
194 		sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
195 		for (i = 0; i < sc->sc_bus_cnt; i++) {
196 			id = sc->sc_scratch[GDT_IOC_HDR_SZ +
197 			    i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
198 			sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
199 		}
200 
201 	} else {
202 		/* New method failed, use fallback. */
203 		for (i = 0; i < GDT_MAXBUS; i++) {
204 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
205 			if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
206 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
207 			    GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
208 			    GDT_GETCH_SZ)) {
209 				if (i == 0) {
210 					printf("cannot get channel count, "
211 					    "error %d\n", sc->sc_status);
212 					return (1);
213 				}
214 				break;
215 			}
216 			sc->sc_bus_id[i] =
217 			    (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
218 			    sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
219 		}
220 		sc->sc_bus_cnt = i;
221 	}
222 
223 	/* Read cache configuration */
224 	if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
225 	    GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
226 		printf("cannot get cache info, error %d\n", sc->sc_status);
227 		return (1);
228 	}
229 	sc->sc_cpar.cp_version =
230 	    gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
231 	sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
232 	sc->sc_cpar.cp_strategy =
233 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
234 	sc->sc_cpar.cp_write_back =
235 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
236 	sc->sc_cpar.cp_block_size =
237 	    gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
238 
239 	/* Read board information and features */
240 	sc->sc_more_proc = 0;
241 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
242 	    GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
243 		/* XXX A lot of these assignments can probably go later */
244 		sc->sc_binfo.bi_ser_no =
245 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
246 		bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
247 		    sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
248 		sc->sc_binfo.bi_ep_flags =
249 		    gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
250 		sc->sc_binfo.bi_proc_id =
251 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
252 		sc->sc_binfo.bi_memsize =
253 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
254 		sc->sc_binfo.bi_mem_banks =
255 		    sc->sc_scratch[GDT_BINFO_MEM_BANKS];
256 		sc->sc_binfo.bi_chan_type =
257 		    sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
258 		sc->sc_binfo.bi_chan_count =
259 		    sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
260 		sc->sc_binfo.bi_rdongle_pres =
261 		    sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
262 		sc->sc_binfo.bi_epr_fw_ver =
263 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
264 		sc->sc_binfo.bi_upd_fw_ver =
265 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
266 		sc->sc_binfo.bi_upd_revision =
267 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
268 		bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
269 		    sc->sc_binfo.bi_type_string,
270 		    sizeof sc->sc_binfo.bi_type_string);
271 		bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
272 		    sc->sc_binfo.bi_raid_string,
273 		    sizeof sc->sc_binfo.bi_raid_string);
274 		sc->sc_binfo.bi_update_pres =
275 		    sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
276 		sc->sc_binfo.bi_xor_pres =
277 		    sc->sc_scratch[GDT_BINFO_XOR_PRES];
278 		sc->sc_binfo.bi_prom_type =
279 		    sc->sc_scratch[GDT_BINFO_PROM_TYPE];
280 		sc->sc_binfo.bi_prom_count =
281 		    sc->sc_scratch[GDT_BINFO_PROM_COUNT];
282 		sc->sc_binfo.bi_dup_pres =
283 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
284 		sc->sc_binfo.bi_chan_pres =
285 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
286 		sc->sc_binfo.bi_mem_pres =
287 		    gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
288 		sc->sc_binfo.bi_ft_bus_system =
289 		    sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
290 		sc->sc_binfo.bi_subtype_valid =
291 		    sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
292 		sc->sc_binfo.bi_board_subtype =
293 		    sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
294 		sc->sc_binfo.bi_rampar_pres =
295 		    sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
296 
297 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
298 		    GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
299 			sc->sc_bfeat.bf_chaining =
300 			    sc->sc_scratch[GDT_BFEAT_CHAINING];
301 			sc->sc_bfeat.bf_striping =
302 			    sc->sc_scratch[GDT_BFEAT_STRIPING];
303 			sc->sc_bfeat.bf_mirroring =
304 			    sc->sc_scratch[GDT_BFEAT_MIRRORING];
305 			sc->sc_bfeat.bf_raid =
306 			    sc->sc_scratch[GDT_BFEAT_RAID];
307 			sc->sc_more_proc = 1;
308 		}
309 	} else {
310 		/* XXX Not implemented yet */
311 	}
312 
313 	/* Read more information */
314 	if (sc->sc_more_proc) {
315 		int bus, j;
316 		/* physical drives, channel addresses */
317 		/* step 1: get magical bus number from firmware */
318 		gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
319 		sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
320 		sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
321 		sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
322 		gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
323 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
324 		    GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
325 		    GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
326 			GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
327 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
328 				sc->sc_raw[bus].ra_address =
329 				    gdt_dec32(sc->sc_scratch +
330 				    GDT_IOC_HDR_SZ +
331 				    GDT_IOC_SZ * bus +
332 				    GDT_IOC_ADDRESS);
333 				sc->sc_raw[bus].ra_local_no =
334 				    gdt_dec8(sc->sc_scratch +
335 				    GDT_IOC_HDR_SZ +
336 				    GDT_IOC_SZ * bus +
337 				    GDT_IOC_LOCAL_NO);
338 				GDT_DPRINTF(GDT_D_INFO, (
339 				    "bus: %d address: %x local: %x\n",
340 				    bus,
341 				    sc->sc_raw[bus].ra_address,
342 				    sc->sc_raw[bus].ra_local_no));
343 			}
344 		} else {
345 			GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
346 			for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
347 				sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
348 				sc->sc_raw[bus].ra_local_no = bus;
349 				GDT_DPRINTF(GDT_D_INFO, (
350 				    "bus: %d address: %x local: %x\n",
351 				    bus,
352 				    sc->sc_raw[bus].ra_address,
353 				    sc->sc_raw[bus].ra_local_no));
354 			}
355 		}
356 		/* step 2: use magical bus number to get nr of phys disks */
357 		for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
358 			gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
359 			    sc->sc_raw[bus].ra_local_no);
360 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
361 			    GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
362 			    sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
363 			    GDT_GETCH_SZ)) {
364 				sc->sc_raw[bus].ra_phys_cnt =
365 				    gdt_dec32(sc->sc_scratch +
366 				    GDT_GETCH_DRIVE_CNT);
367 				GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
368 				    bus, sc->sc_raw[bus].ra_phys_cnt));
369 			}
370 
371 			/* step 3: get scsi disk nr */
372 			if (sc->sc_raw[bus].ra_phys_cnt > 0) {
373 				gdt_enc32(sc->sc_scratch +
374 				    GDT_GETSCSI_CHAN,
375 				    sc->sc_raw[bus].ra_local_no);
376 				gdt_enc32(sc->sc_scratch +
377 				    GDT_GETSCSI_CNT,
378 				    sc->sc_raw[bus].ra_phys_cnt);
379 				if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
380 				    GDT_IOCTL,
381 				    GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
382 				    sc->sc_raw[bus].ra_address |
383 				    GDT_INVALID_CHANNEL,
384 				    GDT_GETSCSI_SZ))
385 					for (j = 0;
386 					    j < sc->sc_raw[bus].ra_phys_cnt;
387 					    j++) {
388 						sc->sc_raw[bus].ra_id_list[j] =
389 						    gdt_dec32(sc->sc_scratch +
390 						    GDT_GETSCSI_LIST +
391 						    GDT_GETSCSI_LIST_SZ * j);
392 						GDT_DPRINTF(GDT_D_INFO,
393 						    ("  diskid: %d\n",
394 						    sc->sc_raw[bus].ra_id_list[j]));
395 					}
396 				else
397 					sc->sc_raw[bus].ra_phys_cnt = 0;
398 			}
399 			/* add found disks to grand total */
400 			sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
401 		}
402 	} /* if (sc->sc_more_proc) */
403 
404 	if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
405 		printf("raw service initialization error %d\n",
406 		    sc->sc_status);
407 		return (1);
408 	}
409 
410 	/* Set/get features raw service (scatter/gather) */
411 	sc->sc_raw_feat = 0;
412 	if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
413 	    GDT_SCATTER_GATHER, 0, 0))
414 		if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
415 		    0, 0))
416 			sc->sc_raw_feat = sc->sc_info;
417 
418 	/* Set/get features cache service (scatter/gather) */
419 	sc->sc_cache_feat = 0;
420 	if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
421 	    GDT_SCATTER_GATHER, 0))
422 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
423 		    0))
424 			sc->sc_cache_feat = sc->sc_info;
425 
426 	/* XXX Linux reserve drives here, potentially */
427 
428 	sc->sc_ndevs = 0;
429 	/* Scan for cache devices */
430 	for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
431 		if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
432 		    0)) {
433 			sc->sc_hdr[i].hd_present = 1;
434 			sc->sc_hdr[i].hd_size = sc->sc_info;
435 
436 			if (sc->sc_hdr[i].hd_size > 0)
437 				sc->sc_ndevs++;
438 
439 			/*
440 			 * Evaluate mapping (sectors per head, heads per cyl)
441 			 */
442 			sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
443 			if (sc->sc_info2 == 0)
444 				gdt_eval_mapping(sc->sc_hdr[i].hd_size,
445 				    &drv_cyls, &drv_hds, &drv_secs);
446 			else {
447 				drv_hds = sc->sc_info2 & 0xff;
448 				drv_secs = (sc->sc_info2 >> 8) & 0xff;
449 				drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
450 				    drv_secs;
451 			}
452 			sc->sc_hdr[i].hd_heads = drv_hds;
453 			sc->sc_hdr[i].hd_secs = drv_secs;
454 			/* Round the size */
455 			sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
456 
457 			if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
458 			    GDT_DEVTYPE, i, 0, 0))
459 				sc->sc_hdr[i].hd_devtype = sc->sc_info;
460 		}
461 
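	/*
	 * Split the command budget evenly across the host drives found
	 * above; GDT_CMD_RESERVE ccbs are held back, presumably for
	 * internal and raw-service commands.
	 */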
462 	if (sc->sc_ndevs == 0)
463 		sc->sc_link.openings = 0;
464 	else
465 		sc->sc_link.openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
466 		    sc->sc_ndevs;
467 
468 	printf("dpmem %llx %d-bus %d cache device%s\n",
469 	    (long long)sc->sc_dpmembase,
470 	    sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
471 	printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
472 	    DEVNAME(sc), sc->sc_cpar.cp_version,
473 	    sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
474 	    sc->sc_cpar.cp_write_back ? "on" : "off",
475 	    sc->sc_cpar.cp_block_size);
476 #if 1
477 	printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
478 	    sc->sc_raw_feat, sc->sc_cache_feat);
479 #endif
480 
481 #if NBIO > 0
482 	if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
483 		panic("%s: controller registration failed", DEVNAME(sc));
484 #endif
485 	gdt_cnt++;
486 
487 	bzero(&saa, sizeof(saa));
488 	saa.saa_sc_link = &sc->sc_link;
489 
490 	config_found(&sc->sc_dev, &saa, scsiprint);
491 
492 	sc->sc_raw_link = malloc(sc->sc_bus_cnt * sizeof (struct scsi_link),
493 	    M_DEVBUF, M_NOWAIT | M_ZERO);
494 	if (sc->sc_raw_link == NULL)
495 		panic("gdt_attach");
496 
497 	for (i = 0; i < sc->sc_bus_cnt; i++) {
498 		/* Fill in the prototype scsi_link. */
499 		sc->sc_raw_link[i].adapter_softc = sc;
500 		sc->sc_raw_link[i].adapter = &gdt_raw_switch;
501 		sc->sc_raw_link[i].adapter_target = 7;
502 		sc->sc_raw_link[i].device = &gdt_dev;
503 		sc->sc_raw_link[i].openings = 4;	/* XXX a guess */
504 		sc->sc_raw_link[i].adapter_buswidth =
505 		    (sc->sc_class & GDT_FC) ? GDT_MAXID : 16;	/* XXX */
506 
507 		bzero(&saa, sizeof(saa));
508 		saa.saa_sc_link = &sc->sc_raw_link[i];
509 
510 		config_found(&sc->sc_dev, &saa, scsiprint);
511 	}
512 
513 	gdt_polling = 0;
514 	return (0);
515 }
516 
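/*
 * Make up a BIOS-style C/H/S geometry for a drive of `size' sectors:
 * try 64 heads x 32 sectors first, then 127 x 63, and finally fall back
 * to the "big" geometry, preferring the first attempt whose cylinder
 * count stays below GDT_MAXCYLS.  For example, assuming the usual
 * 1024-cylinder limit, a 2000000-sector drive is accepted on the first
 * attempt: 2000000 / 64 / 32 = 976 cylinders.
 */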
517 void
518 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
519 {
520 	*cyls = size / GDT_HEADS / GDT_SECS;
521 	if (*cyls < GDT_MAXCYLS) {
522 		*heads = GDT_HEADS;
523 		*secs = GDT_SECS;
524 	} else {
525 		/* Too high for 64 * 32 */
526 		*cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
527 		if (*cyls < GDT_MAXCYLS) {
528 			*heads = GDT_MEDHEADS;
529 			*secs = GDT_MEDSECS;
530 		} else {
531 			/* Too high for 127 * 63 */
532 			*cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
533 			*heads = GDT_BIGHEADS;
534 			*secs = GDT_BIGSECS;
535 		}
536 	}
537 }
538 
539 /*
540  * Insert a command into the driver queue, either at the front or at the tail.
541  * It's ok to overload the freelist link as these structures are never on
542  * the freelist at this time.
543  */
544 void
545 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
546 {
547 	if (infront || LIST_FIRST(&sc->sc_queue) == NULL) {
548 		if (LIST_FIRST(&sc->sc_queue) == NULL)
549 			sc->sc_queuelast = xs;
550 		LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list);
551 		return;
552 	}
553 	LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list);
554 	sc->sc_queuelast = xs;
555 }
556 
557 /*
558  * Pull a command off the front of the driver queue.
559  */
560 struct scsi_xfer *
561 gdt_dequeue(struct gdt_softc *sc)
562 {
563 	struct scsi_xfer *xs;
564 
565 	xs = LIST_FIRST(&sc->sc_queue);
566 	if (xs == NULL)
567 		return (NULL);
568 	LIST_REMOVE(xs, free_list);
569 
570 	if (LIST_FIRST(&sc->sc_queue) == NULL)
571 		sc->sc_queuelast = NULL;
572 
573 	return (xs);
574 }
575 
576 /*
577  * Start a SCSI operation on a cache device.
578  * XXX Polled operation is not yet complete.  What kind of locking do we need?
579  */
580 int
581 gdt_scsi_cmd(struct scsi_xfer *xs)
582 {
583 	struct scsi_link *link = xs->sc_link;
584 	struct gdt_softc *sc = link->adapter_softc;
585 	u_int8_t target = link->target;
586 	struct gdt_ccb *ccb;
587 #if 0
588 	struct gdt_ucmd *ucmd;
589 #endif
590 	u_int32_t blockno, blockcnt;
591 	struct scsi_rw *rw;
592 	struct scsi_rw_big *rwb;
593 	bus_dmamap_t xfer;
594 	int error, retval = SUCCESSFULLY_QUEUED;
595 	int s;
596 
597 	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));
598 
599 	s = splbio();
600 
601 	xs->error = XS_NOERROR;
602 
603 	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
604 	    link->lun != 0) {
605 		/*
606 		 * XXX Should be XS_SENSE but that would require setting up a
607 		 * faked sense too.
608 		 */
609 		xs->error = XS_DRIVER_STUFFUP;
610 		xs->flags |= ITSDONE;
611 		scsi_done(xs);
612 		splx(s);
613 		return (COMPLETE);
614 	}
615 
616 	/* Don't double enqueue if we came from gdt_chain. */
617 	if (xs != LIST_FIRST(&sc->sc_queue))
618 		gdt_enqueue(sc, xs, 0);
619 
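	/*
	 * Drain the driver queue one command at a time.  If the controller
	 * is busy the command is put back at the head and the loop stops;
	 * gdt_chain() restarts it from the interrupt handler later.
	 */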
620 	while ((xs = gdt_dequeue(sc)) != NULL) {
621 		xs->error = XS_NOERROR;
622 		ccb = NULL;
623 		link = xs->sc_link;
624 		target = link->target;
625 
626 		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
627 		    sc->sc_test_busy(sc)) {
628 			/*
629 			 * Put it back in front.  XXX Should we instead
630 			 * set xs->error to XS_BUSY?
631 			 */
632 			gdt_enqueue(sc, xs, 1);
633 			break;
634 		}
635 
636 		switch (xs->cmd->opcode) {
637 		case TEST_UNIT_READY:
638 		case REQUEST_SENSE:
639 		case INQUIRY:
640 		case MODE_SENSE:
641 		case START_STOP:
642 		case READ_CAPACITY:
643 #if 0
644 		case VERIFY:
645 #endif
646 			gdt_internal_cache_cmd(xs);
647 			xs->flags |= ITSDONE;
648 			scsi_done(xs);
649 			goto ready;
650 
651 		case PREVENT_ALLOW:
652 			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
653 			/* XXX Not yet implemented */
654 			xs->error = XS_NOERROR;
655 			xs->flags |= ITSDONE;
656 			scsi_done(xs);
657 			goto ready;
658 
659 		default:
660 			GDT_DPRINTF(GDT_D_CMD,
661 			    ("unknown opc %d ", xs->cmd->opcode));
662 			/* XXX Not yet implemented */
663 			xs->error = XS_DRIVER_STUFFUP;
664 			xs->flags |= ITSDONE;
665 			scsi_done(xs);
666 			goto ready;
667 
668 		case READ_COMMAND:
669 		case READ_BIG:
670 		case WRITE_COMMAND:
671 		case WRITE_BIG:
672 		case SYNCHRONIZE_CACHE:
673 			/*
674 			 * A new command chain, start from the beginning.
675 			 */
676 			sc->sc_cmd_off = 0;
677 
678 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
679 				/* A read or write operation. */
680 				if (xs->cmdlen == 6) {
681 					rw = (struct scsi_rw *)xs->cmd;
682 					blockno = _3btol(rw->addr) &
683 					    (SRW_TOPADDR << 16 | 0xffff);
684 					blockcnt =
685 					    rw->length ? rw->length : 0x100;
686 				} else {
687 					rwb = (struct scsi_rw_big *)xs->cmd;
688 					blockno = _4btol(rwb->addr);
689 					blockcnt = _2btol(rwb->length);
690 				}
691 				if (blockno >= sc->sc_hdr[target].hd_size ||
692 				    blockno + blockcnt >
693 				    sc->sc_hdr[target].hd_size) {
694 					printf(
695 					    "%s: out of bounds %u-%u >= %u\n",
696 					    DEVNAME(sc), blockno,
697 					    blockcnt,
698 					    sc->sc_hdr[target].hd_size);
699 					/*
700 					 * XXX Should be XS_SENSE but that
701 					 * would require setting up a faked
702 					 * sense too.
703 					 */
704 					xs->error = XS_DRIVER_STUFFUP;
705 					xs->flags |= ITSDONE;
706 					scsi_done(xs);
707 					goto ready;
708 				}
709 			}
710 
711 			ccb = gdt_get_ccb(sc, xs->flags);
712 			/*
713 			 * We are out of commands, try again in a little while.
714 			 */
715 			if (ccb == NULL) {
716 				splx(s);
717 				return (NO_CCB);
718 			}
719 
720 			ccb->gc_blockno = blockno;
721 			ccb->gc_blockcnt = blockcnt;
722 			ccb->gc_xs = xs;
723 			ccb->gc_timeout = xs->timeout;
724 			ccb->gc_service = GDT_CACHESERVICE;
725 			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);
726 
727 			if (xs->cmd->opcode != SYNCHRONIZE_CACHE) {
728 				xfer = ccb->gc_dmamap_xfer;
729 				error = bus_dmamap_load(sc->sc_dmat, xfer,
730 				    xs->data, xs->datalen, NULL,
731 				    (xs->flags & SCSI_NOSLEEP) ?
732 				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
733 				if (error) {
734 					printf("%s: gdt_scsi_cmd: ",
735 					    DEVNAME(sc));
736 					if (error == EFBIG)
737 						printf(
738 						    "more than %d dma segs\n",
739 						    GDT_MAXOFFSETS);
740 					else
741 						printf("error %d "
742 						    "loading dma map\n",
743 						    error);
744 
745 					gdt_free_ccb(sc, ccb);
746 					xs->error = XS_DRIVER_STUFFUP;
747 					xs->flags |= ITSDONE;
748 					scsi_done(xs);
749 					goto ready;
750 				}
751 				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
752 				    xfer->dm_mapsize,
753 				    (xs->flags & SCSI_DATA_IN) ?
754 				    BUS_DMASYNC_PREREAD :
755 				    BUS_DMASYNC_PREWRITE);
756 			}
757 
758 			gdt_enqueue_ccb(sc, ccb);
759 			/* XXX what if enqueue did not start a transfer? */
760 			if (gdt_polling || (xs->flags & SCSI_POLL)) {
761 				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
762 					splx(s);
763 					printf("%s: command %d timed out\n",
764 					    DEVNAME(sc),
765 					    ccb->gc_cmd_index);
766 					return (TRY_AGAIN_LATER);
767 				}
768 				xs->flags |= ITSDONE;
769 				scsi_done(xs);
770 			}
771 		}
772 
773 	ready:
774 		/*
775 		 * Don't process the queue if we are polling.
776 		 */
777 		if (xs->flags & SCSI_POLL) {
778 			retval = COMPLETE;
779 			break;
780 		}
781 	}
782 
783 	splx(s);
784 	return (retval);
785 }
786 
787 /* XXX Currently only for cacheservice, returns 0 if busy */
788 int
789 gdt_exec_ccb(struct gdt_ccb *ccb)
790 {
791 	struct scsi_xfer *xs = ccb->gc_xs;
792 	struct scsi_link *link = xs->sc_link;
793 	struct gdt_softc *sc = link->adapter_softc;
794 	u_int8_t target = link->target;
795 	u_int32_t sg_canz;
796 	bus_dmamap_t xfer;
797 	int i;
798 #if 1 /* XXX */
799 	static int __level = 0;
800 
801 	if (__level++ > 0)
802 		panic("level > 0");
803 #endif
804 	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));
805 
806 	sc->sc_cmd_cnt = 0;
807 
808 	/*
809 	 * XXX Yeah I know it's an always-true condition, but that may change
810 	 * later.
811 	 */
812 	if (sc->sc_cmd_cnt == 0)
813 		sc->sc_set_sema0(sc);
814 
815 	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
816 	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
817 	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
818 	    target);
819 
820 	switch (xs->cmd->opcode) {
821 	case PREVENT_ALLOW:
822 	case SYNCHRONIZE_CACHE:
823 		if (xs->cmd->opcode == PREVENT_ALLOW) {
824 			/* XXX PREVENT_ALLOW support goes here */
825 		} else {
826 			GDT_DPRINTF(GDT_D_CMD,
827 			    ("SYNCHRONIZE CACHE tgt %d ", target));
828 			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
829 		}
830 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
831 		    1);
832 		sg_canz = 0;
833 		break;
834 
835 	case WRITE_COMMAND:
836 	case WRITE_BIG:
837 		/* XXX WRITE_THR could be supported too */
838 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
839 		break;
840 
841 	case READ_COMMAND:
842 	case READ_BIG:
843 		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
844 		break;
845 	}
846 
847 	if (xs->cmd->opcode != PREVENT_ALLOW &&
848 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
849 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
850 		    ccb->gc_blockno);
851 		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
852 		    ccb->gc_blockcnt);
853 
854 		xfer = ccb->gc_dmamap_xfer;
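		/*
		 * With scatter/gather, DESTADDR is set to the magic
		 * 0xffffffff and a list of (address, length) pairs,
		 * terminated by a zero-length entry, is appended to the
		 * command; otherwise only the first DMA segment can be
		 * passed (hence the XXX below).
		 */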
855 		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
856 			gdt_enc32(
857 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
858 			    0xffffffff);
859 			for (i = 0; i < xfer->dm_nsegs; i++) {
860 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
861 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
862 				    GDT_SG_PTR,
863 				    xfer->dm_segs[i].ds_addr);
864 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
865 				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
866 				    GDT_SG_LEN,
867 				    xfer->dm_segs[i].ds_len);
868 				GDT_DPRINTF(GDT_D_IO,
869 				    ("#%d va %p pa %p len %x\n", i, buf,
870 				    xfer->dm_segs[i].ds_addr,
871 				    xfer->dm_segs[i].ds_len));
872 			}
873 			sg_canz = xfer->dm_nsegs;
874 			gdt_enc32(
875 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
876 			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
877 		} else {
878 			/* XXX Hardly correct */
879 			gdt_enc32(
880 			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
881 			    xfer->dm_segs[0].ds_addr);
882 			sg_canz = 0;
883 		}
884 	}
885 	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);
886 
887 	sc->sc_cmd_len =
888 	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
889 	    sizeof (u_int32_t));
890 
891 	if (sc->sc_cmd_cnt > 0 &&
892 	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
893 	    sc->sc_ic_all_size) {
894 		printf("%s: DPMEM overflow\n", DEVNAME(sc));
895 		gdt_free_ccb(sc, ccb);
896 		xs->error = XS_BUSY;
897 #if 1 /* XXX */
898 		__level--;
899 #endif
900 		return (0);
901 	}
902 
903 	sc->sc_copy_cmd(sc, ccb);
904 	sc->sc_release_event(sc, ccb);
905 
906 	xs->error = XS_NOERROR;
907 	xs->resid = 0;
908 #if 1 /* XXX */
909 	__level--;
910 #endif
911 	return (1);
912 }
913 
914 void
915 gdt_copy_internal_data(struct scsi_xfer *xs, u_int8_t *data, size_t size)
916 {
917 	size_t copy_cnt;
918 
919 	GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data "));
920 
921 	if (!xs->datalen)
922 		printf("uio move not yet supported\n");
923 	else {
924 		copy_cnt = MIN(size, xs->datalen);
925 		bcopy(data, xs->data, copy_cnt);
926 	}
927 }
928 
929 /* Emulated SCSI operation on cache device */
930 void
931 gdt_internal_cache_cmd(struct scsi_xfer *xs)
932 {
933 	struct scsi_link *link = xs->sc_link;
934 	struct gdt_softc *sc = link->adapter_softc;
935 	struct scsi_inquiry_data inq;
936 	struct scsi_sense_data sd;
937 	struct scsi_read_cap_data rcd;
938 	u_int8_t target = link->target;
939 
940 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
941 
942 	switch (xs->cmd->opcode) {
943 	case TEST_UNIT_READY:
944 	case START_STOP:
945 #if 0
946 	case VERIFY:
947 #endif
948 		GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode,
949 		    target));
950 		break;
951 
952 	case REQUEST_SENSE:
953 		GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
954 		bzero(&sd, sizeof sd);
955 		sd.error_code = 0x70;
956 		sd.segment = 0;
957 		sd.flags = SKEY_NO_SENSE;
958 		gdt_enc32(sd.info, 0);
959 		sd.extra_len = 0;
960 		gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd);
961 		break;
962 
963 	case INQUIRY:
964 		GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
965 		    sc->sc_hdr[target].hd_devtype));
966 		bzero(&inq, sizeof inq);
967 		inq.device =
968 		    (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
969 		inq.dev_qual2 =
970 		    (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
971 		inq.version = 2;
972 		inq.response_format = 2;
973 		inq.additional_length = 32;
974 		strlcpy(inq.vendor, "ICP	   ", sizeof inq.vendor);
975 		snprintf(inq.product, sizeof inq.product, "Host drive  #%02d",
976 		    target);
977 		strlcpy(inq.revision, "	 ", sizeof inq.revision);
978 		gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq);
979 		break;
980 
981 	case READ_CAPACITY:
982 		GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
983 		bzero(&rcd, sizeof rcd);
984 		_lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
985 		_lto4b(GDT_SECTOR_SIZE, rcd.length);
986 		gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd);
987 		break;
988 
989 	default:
990 		GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
991 		    xs->cmd->opcode, target));
992 		xs->error = XS_DRIVER_STUFFUP;
993 		return;
994 	}
995 
996 	xs->error = XS_NOERROR;
997 }
998 
999 /* Start a raw SCSI operation */
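/*
 * Raw commands are currently not passed to the controller at all; every
 * request is failed immediately (XS_SENSE for an oversized CDB,
 * XS_DRIVER_STUFFUP otherwise).
 */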
1000 int
1001 gdt_raw_scsi_cmd(struct scsi_xfer *xs)
1002 {
1003 	struct scsi_link *link = xs->sc_link;
1004 	struct gdt_softc *sc = link->adapter_softc;
1005 	struct gdt_ccb *ccb;
1006 	int s;
1007 
1008 	GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_scsi_cmd "));
1009 
1010 	if (xs->cmdlen > 12 /* XXX create #define */) {
1011 		GDT_DPRINTF(GDT_D_CMD, ("CDB too big %p ", xs));
1012 		bzero(&xs->sense, sizeof(xs->sense));
1013 		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
1014 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1015 		xs->sense.add_sense_code = 0x20; /* illcmd, 0x24 illfield */
1016 		xs->error = XS_SENSE;
1017 		s = splbio();
1018 		scsi_done(xs);
1019 		splx(s);
1020 		return (COMPLETE);
1021 	}
1022 
1023 	if ((ccb = gdt_get_ccb(sc, xs->flags)) == NULL) {
1024 		GDT_DPRINTF(GDT_D_CMD, ("no ccb available for %p ", xs));
1025 		xs->error = XS_DRIVER_STUFFUP;
1026 		s = splbio();
1027 		scsi_done(xs);
1028 		splx(s);
1029 		return (COMPLETE);
1030 	}
1031 
1032 	xs->error = XS_DRIVER_STUFFUP;
1033 	xs->flags |= ITSDONE;
1034 	s = splbio();
1035 	scsi_done(xs);
1036 	gdt_free_ccb(sc, ccb);
1037 
1038 	splx(s);
1039 
1040 	return (COMPLETE);
1041 }
1042 
1043 void
1044 gdt_clear_events(struct gdt_softc *sc)
1045 {
1046 	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));
1047 
1048 	/* XXX To be implemented */
1049 }
1050 
1051 int
1052 gdt_async_event(struct gdt_softc *sc, int service)
1053 {
1054 	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));
1055 
1056 	if (service == GDT_SCREENSERVICE) {
1057 		/* XXX To be implemented */
1058 	} else {
1059 		/* XXX To be implemented */
1060 	}
1061 
1062 	return (0);
1063 }
1064 
1065 int
1066 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
1067     struct scsi_xfer *xs)
1068 {
1069 	GDT_DPRINTF(GDT_D_INTR,
1070 	    ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
1071 
1072 	if (service == GDT_SCREENSERVICE) {
1073 		GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
1074 		/* XXX To be implemented */
1075 		return (0);
1076 	} else {
1077 		switch (sc->sc_status) {
1078 		case GDT_S_OK:
1079 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
1080 			/* XXX To be implemented */
1081 			break;
1082 		case GDT_S_BSY:
1083 			GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
1084 			/* XXX To be implemented */
1085 			return (2);
1086 		default:
1087 			GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
1088 			    sc->sc_status));
1089 			/* XXX To be implemented */
1090 			return (0);
1091 		}
1092 	}
1093 
1094 	return (1);
1095 }
1096 
1097 int
1098 gdt_intr(void *arg)
1099 {
1100 	struct gdt_softc *sc = arg;
1101 	struct gdt_intr_ctx ctx;
1102 	int chain = 1;
1103 	int sync_val = 0;
1104 	struct scsi_xfer *xs;
1105 	int prev_cmd;
1106 	struct gdt_ccb *ccb;
1107 
1108 	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));
1109 
1110 	/* If polling and we were not called from gdt_wait, just return */
1111 	if (gdt_polling && !gdt_from_wait)
1112 		return (0);
1113 
1114 	ctx.istatus = sc->sc_get_status(sc);
1115 	if (!ctx.istatus) {
1116 		sc->sc_status = GDT_S_NO_STATUS;
1117 		return (0);
1118 	}
1119 
1120 	gdt_wait_index = 0;
1121 	ctx.service = ctx.info2 = 0;
1122 
1123 	sc->sc_intr(sc, &ctx);
1124 
1125 	sc->sc_status = ctx.cmd_status;
1126 	sc->sc_info = ctx.info;
1127 	sc->sc_info2 = ctx.info2;
1128 
1129 	if (gdt_from_wait) {
1130 		gdt_wait_gdt = sc;
1131 		gdt_wait_index = ctx.istatus;
1132 	}
1133 
1134 	switch (ctx.istatus) {
1135 	case GDT_ASYNCINDEX:
1136 		gdt_async_event(sc, ctx.service);
1137 		goto finish;
1138 
1139 	case GDT_SPEZINDEX:
1140 		printf("%s: uninitialized or unknown service (%d %d)\n",
1141 		    DEVNAME(sc), ctx.info, ctx.info2);
1142 		chain = 0;
1143 		goto finish;
1144 	}
1145 
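	/*
	 * Any other status index identifies a completed command; it maps
	 * back to the ccb whose gc_cmd_index was set to i + 2 in
	 * gdt_attach().
	 */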
1146 	ccb = &sc->sc_ccbs[ctx.istatus - 2];
1147 	xs = ccb->gc_xs;
1148 	if (!gdt_polling)
1149 		timeout_del(&xs->stimeout);
1150 	ctx.service = ccb->gc_service;
1151 	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
1152 	if (xs && xs->cmd->opcode != PREVENT_ALLOW &&
1153 	    xs->cmd->opcode != SYNCHRONIZE_CACHE) {
1154 		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
1155 		    ccb->gc_dmamap_xfer->dm_mapsize,
1156 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1157 		    BUS_DMASYNC_POSTWRITE);
1158 		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
1159 	}
1160 	gdt_free_ccb(sc, ccb);
1161 	switch (prev_cmd) {
1162 	case GDT_GCF_UNUSED:
1163 		/* XXX Not yet implemented */
1164 		chain = 0;
1165 		goto finish;
1166 	case GDT_GCF_INTERNAL:
1167 		chain = 0;
1168 		goto finish;
1169 	}
1170 
1171 	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);
1172 
1173  finish:
1174 
1175 	switch (sync_val) {
1176 	case 1:
1177 		xs->flags |= ITSDONE;
1178 		scsi_done(xs);
1179 		break;
1180 
1181 	case 2:
1182 		gdt_enqueue(sc, xs, 0);
1183 	}
1184 
1185 	if (chain)
1186 		gdt_chain(sc);
1187 
1188 	return (1);
1189 }
1190 
1191 void
1192 gdtminphys(struct buf *bp, struct scsi_link *sl)
1193 {
1194 	GDT_DPRINTF(GDT_D_MISC, ("gdtminphys(0x%x) ", bp));
1195 
1196 	/* As this is way more than MAXPHYS it's really not necessary. */
1197 	if ((GDT_MAXOFFSETS - 1) * PAGE_SIZE < MAXPHYS &&
1198 	    bp->b_bcount > ((GDT_MAXOFFSETS - 1) * PAGE_SIZE))
1199 		bp->b_bcount = ((GDT_MAXOFFSETS - 1) * PAGE_SIZE);
1200 
1201 	minphys(bp);
1202 }
1203 
1204 int
1205 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1206 {
1207 	int s, rslt, rv = 0;
1208 
1209 	GDT_DPRINTF(GDT_D_MISC,
1210 	    ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1211 
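	/*
	 * Busy-wait for the given command by calling the interrupt handler
	 * directly; gdt_from_wait makes gdt_intr() record which command
	 * index completed.  Each iteration delays one millisecond, so
	 * `timeout' is effectively in milliseconds.
	 */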
1212 	gdt_from_wait = 1;
1213 	do {
1214 		s = splbio();
1215 		rslt = gdt_intr(sc);
1216 		splx(s);
1217 		if (rslt && sc == gdt_wait_gdt &&
1218 		    ccb->gc_cmd_index == gdt_wait_index) {
1219 			rv = 1;
1220 			break;
1221 		}
1222 		DELAY(1000); /* 1 millisecond */
1223 	} while (--timeout);
1224 	gdt_from_wait = 0;
1225 
1226 	while (sc->sc_test_busy(sc))
1227 		DELAY(0);		/* XXX correct? */
1228 
1229 	return (rv);
1230 }
1231 
1232 int
1233 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1234     u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1235 {
1236 	int retries;
1237 	struct gdt_ccb *ccb;
1238 
1239 	GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1240 	    sc, service, opcode, arg1, arg2, arg3));
1241 
1242 	bzero(sc->sc_cmd, GDT_CMD_SZ);
1243 
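	/*
	 * Internal commands are issued one at a time and polled for
	 * completion with gdt_wait(); if the controller answers GDT_S_BSY
	 * the command is retried up to GDT_RETRIES times.
	 */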
1244 	for (retries = GDT_RETRIES; ; ) {
1245 		ccb = gdt_get_ccb(sc, SCSI_NOSLEEP);
1246 		if (ccb == NULL) {
1247 			printf("%s: no free command index found\n",
1248 			    DEVNAME(sc));
1249 			return (0);
1250 		}
1251 		ccb->gc_service = service;
1252 		gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1253 
1254 		sc->sc_set_sema0(sc);
1255 		gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1256 		    ccb->gc_cmd_index);
1257 		gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1258 		gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1259 
1260 		switch (service) {
1261 		case GDT_CACHESERVICE:
1262 			if (opcode == GDT_IOCTL) {
1263 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1264 				    GDT_IOCTL_SUBFUNC, arg1);
1265 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1266 				    GDT_IOCTL_CHANNEL, arg2);
1267 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1268 				    GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1269 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1270 				    GDT_IOCTL_P_PARAM,
1271 				    sc->sc_scratch_seg.ds_addr);
1272 			} else {
1273 				gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1274 				    GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1275 				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1276 				    GDT_CACHE_BLOCKNO, arg2);
1277 			}
1278 			break;
1279 
1280 		case GDT_SCSIRAWSERVICE:
1281 			gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1282 			    GDT_RAW_DIRECTION, arg1);
1283 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1284 			    (u_int8_t)arg2;
1285 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1286 			    (u_int8_t)arg3;
1287 			sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1288 			    (u_int8_t)(arg3 >> 8);
1289 		}
1290 
1291 		sc->sc_cmd_len = GDT_CMD_SZ;
1292 		sc->sc_cmd_off = 0;
1293 		sc->sc_cmd_cnt = 0;
1294 		sc->sc_copy_cmd(sc, ccb);
1295 		sc->sc_release_event(sc, ccb);
1296 		DELAY(20);
1297 		if (!gdt_wait(sc, ccb, GDT_POLL_TIMEOUT))
1298 			return (0);
1299 		if (sc->sc_status != GDT_S_BSY || --retries == 0)
1300 			break;
1301 		DELAY(1);
1302 	}
1303 	return (sc->sc_status == GDT_S_OK);
1304 }
1305 
1306 struct gdt_ccb *
1307 gdt_get_ccb(struct gdt_softc *sc, int flags)
1308 {
1309 	struct gdt_ccb *ccb;
1310 	int s;
1311 
1312 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p, 0x%x) ", sc, flags));
1313 
1314 	s = splbio();
1315 
1316 	for (;;) {
1317 		ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1318 		if (ccb != NULL)
1319 			break;
1320 		if (flags & SCSI_NOSLEEP)
1321 			goto bail_out;
1322 		tsleep(&sc->sc_free_ccb, PRIBIO, "gdt_ccb", 0);
1323 	}
1324 
1325 	TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1326 
1327  bail_out:
1328 	splx(s);
1329 	return (ccb);
1330 }
1331 
1332 void
1333 gdt_free_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1334 {
1335 	int s;
1336 
1337 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p) ", sc, ccb));
1338 
1339 	s = splbio();
1340 
1341 	TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1342 
1343 	/* If the free list was empty, wake up potential waiters. */
1344 	if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1345 		wakeup(&sc->sc_free_ccb);
1346 
1347 	splx(s);
1348 }
1349 
1350 void
1351 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1352 {
1353 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1354 
1355 	timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1356 	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1357 	gdt_start_ccbs(sc);
1358 }
1359 
1360 void
1361 gdt_start_ccbs(struct gdt_softc *sc)
1362 {
1363 	struct gdt_ccb *ccb;
1364 	struct scsi_xfer *xs;
1365 
1366 	GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1367 
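	/*
	 * Feed queued ccbs to the controller.  When gdt_exec_ccb() reports
	 * the board busy, the ccb is left at the head of the queue and the
	 * watchdog timeout retries it after GDT_WATCH_TIMEOUT.
	 */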
1368 	while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1369 
1370 		xs = ccb->gc_xs;
1371 		if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1372 			timeout_del(&xs->stimeout);
1373 
1374 		if (gdt_exec_ccb(ccb) == 0) {
1375 			ccb->gc_flags |= GDT_GCF_WATCHDOG;
1376 			timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1377 			timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1378 			break;
1379 		}
1380 		TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1381 
1382 		if ((xs->flags & SCSI_POLL) == 0) {
1383 			timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1384 			timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1385 		}
1386 	}
1387 }
1388 
1389 void
1390 gdt_chain(struct gdt_softc *sc)
1391 {
1392 	GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1393 
1394 	if (LIST_FIRST(&sc->sc_queue))
1395 		gdt_scsi_cmd(LIST_FIRST(&sc->sc_queue));
1396 }
1397 
1398 void
1399 gdt_timeout(void *arg)
1400 {
1401 	struct gdt_ccb *ccb = arg;
1402 	struct scsi_link *link = ccb->gc_xs->sc_link;
1403 	struct gdt_softc *sc = link->adapter_softc;
1404 	int s;
1405 
1406 	sc_print_addr(link);
1407 	printf("timed out\n");
1408 
1409 	/* XXX Test for multiple timeouts */
1410 
1411 	ccb->gc_xs->error = XS_TIMEOUT;
1412 	s = splbio();
1413 	gdt_enqueue_ccb(sc, ccb);
1414 	splx(s);
1415 }
1416 
1417 void
1418 gdt_watchdog(void *arg)
1419 {
1420 	struct gdt_ccb *ccb = arg;
1421 	struct scsi_link *link = ccb->gc_xs->sc_link;
1422 	struct gdt_softc *sc = link->adapter_softc;
1423 	int s;
1424 
1425 	s = splbio();
1426 	ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1427 	gdt_start_ccbs(sc);
1428 	splx(s);
1429 }
1430 
1431 #if NBIO > 0
1432 int
1433 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1434 {
1435 	struct gdt_softc *sc = (struct gdt_softc *)dev;
1436 	int error = 0;
1437 
1438 	GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1439 
1440 	switch (cmd) {
1441 	case BIOCINQ:
1442 		GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1443 		error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1444 		break;
1445 
1446 	case BIOCVOL:
1447 		GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1448 		error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1449 		break;
1450 
1451 	case BIOCDISK:
1452 		GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1453 		error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1454 		break;
1455 
1456 	case BIOCALARM:
1457 		GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1458 		error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1459 		break;
1460 
1461 	case BIOCSETSTATE:
1462 		GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1463 		error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1464 		break;
1465 
1466 	default:
1467 		GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1468 		error = EINVAL;
1469 	}
1470 
1471 	return (error);
1472 }
1473 
1474 int
1475 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1476 {
1477 	bi->bi_novol = sc->sc_ndevs;
1478 	bi->bi_nodisk = sc->sc_total_disks;
1479 
1480 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1481 
1482 	return (0);
1483 }
1484 
1485 int
1486 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
1487 {
1488 	return (1); /* XXX not yet */
1489 }
1490 
1491 int
1492 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
1493 {
1494 	return (1); /* XXX not yet */
1495 }
1496 
1497 int
1498 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
1499 {
1500 	return (1); /* XXX not yet */
1501 }
1502 
1503 int
1504 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
1505 {
1506 	return (1); /* XXX not yet */
1507 }
1508 
1509 #if 0
1510 int
1511 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1512 {
1513 	int error = 0;
1514 	struct gdt_dummy *dummy;
1515 
1516 	switch (cmd) {
1517 	case GDT_IOCTL_DUMMY:
1518 		dummy = (struct gdt_dummy *)addr;
1519 		printf("%s: GDT_IOCTL_DUMMY %d\n", dev->dv_xname, dummy->x++);
1520 		break;
1521 
1522 	case GDT_IOCTL_GENERAL: {
1523 		gdt_ucmd_t *ucmd;
1524 		struct gdt_softc *sc = (struct gdt_softc *)dev;
1525 		int s;
1526 
1527 		ucmd = (gdt_ucmd_t *)addr;
1528 		s = splbio();
1529 		TAILQ_INSERT_TAIL(&sc->sc_ucmdq, ucmd, links);
1530 		ucmd->complete_flag = FALSE;
1531 		splx(s);
1532 		gdt_chain(sc);
1533 		if (!ucmd->complete_flag)
1534 			(void)tsleep((void *)ucmd, PCATCH | PRIBIO, "gdtucw",
1535 			    0);
1536 		break;
1537 	}
1538 
1539 	case GDT_IOCTL_DRVERS:
1540 		((gdt_drvers_t *)addr)->vers =
1541 		    (GDT_DRIVER_VERSION << 8) | GDT_DRIVER_SUBVERSION;
1542 		break;
1543 
1544 	case GDT_IOCTL_CTRCNT:
1545 		((gdt_ctrcnt_t *)addr)->cnt = gdt_cnt;
1546 		break;
1547 
1548 #ifdef notyet
1549 	case GDT_IOCTL_CTRTYPE: {
1550 		gdt_ctrt_t *p;
1551 		struct gdt_softc *sc = (struct gdt_softc *)dev;
1552 
1553 		p = (gdt_ctrt_t *)addr;
1554 		p->oem_id = 0x8000;
1555 		p->type = 0xfd;
1556 		p->info = (sc->sc_bus << 8) | (sc->sc_slot << 3);
1557 		p->ext_type = 0x6000 | sc->sc_subdevice;
1558 		p->device_id = sc->sc_device;
1559 		p->sub_device_id = sc->sc_subdevice;
1560 		break;
1561 	}
1562 #endif
1563 
1564 	case GDT_IOCTL_OSVERS: {
1565 		gdt_osv_t *p;
1566 
1567 		p = (gdt_osv_t *)addr;
1568 		p->oscode = 10;
1569 		p->version = osrelease[0] - '0';
1570 		if (osrelease[1] == '.')
1571 			p->subversion = osrelease[2] - '0';
1572 		else
1573 			p->subversion = 0;
1574 		if (osrelease[3] == '.')
1575 			p->revision = osrelease[4] - '0';
1576 		else
1577 			p->revision = 0;
1578 		strlcpy(p->name, ostype, sizeof p->name);
1579 		break;
1580 	}
1581 
1582 #ifdef notyet
1583 	case GDT_IOCTL_EVENT: {
1584 		gdt_event_t *p;
1585 		int s;
1586 
1587 		p = (gdt_event_t *)addr;
1588 		if (p->erase == 0xff) {
1589 			if (p->dvr.event_source == GDT_ES_TEST)
1590 				p->dvr.event_data.size =
1591 				    sizeof(p->dvr.event_data.eu.test);
1592 			else if (p->dvr.event_source == GDT_ES_DRIVER)
1593 				p->dvr.event_data.size =
1594 				    sizeof(p->dvr.event_data.eu.driver);
1595 			else if (p->dvr.event_source == GDT_ES_SYNC)
1596 				p->dvr.event_data.size =
1597 				    sizeof(p->dvr.event_data.eu.sync);
1598 			else
1599 				p->dvr.event_data.size =
1600 				    sizeof(p->dvr.event_data.eu.async);
1601 			s = splbio();
1602 			gdt_store_event(p->dvr.event_source, p->dvr.event_idx,
1603 			    &p->dvr.event_data);
1604 			splx(s);
1605 		} else if (p->erase == 0xfe) {
1606 			s = splbio();
1607 			gdt_clear_events();
1608 			splx(s);
1609 		} else if (p->erase == 0) {
1610 			p->handle = gdt_read_event(p->handle, &p->dvr);
1611 		} else {
1612 			gdt_readapp_event((u_int8_t)p->erase, &p->dvr);
1613 		}
1614 		break;
1615 	}
1616 #endif
1617 
1618 	case GDT_IOCTL_STATIST:
1619 #if 0
1620 		bcopy(&gdt_stat, (gdt_statist_t *)addr, sizeof gdt_stat);
1621 #else
1622 		error = EOPNOTSUPP;
1623 #endif
1624 		break;
1625 
1626 	default:
1627 		error = EINVAL;
1628 	}
1629 	return (error);
1630 }
1631 #endif /* 0 */
1632 #endif /* NBIO > 0 */
1633