/*	$OpenBSD: qlw.c,v 1.24 2014/07/13 23:10:23 deraadt Exp $ */

/*
 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
 * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/queue.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/qlwreg.h>
#include <dev/ic/qlwvar.h>

#define QLW_DEBUG

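/*
 * Debug output is gated by the qlwdebug bitmask below: DPRINTF(m, ...)
 * only prints when every bit in m is set in qlwdebug.
 */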
#ifdef QLW_DEBUG
#define DPRINTF(m, f...) do { if ((qlwdebug & (m)) == (m)) printf(f); } \
    while (0)
#define QLW_D_MBOX		0x01
#define QLW_D_INTR		0x02
#define QLW_D_PORT		0x04
#define QLW_D_IO		0x08
#define QLW_D_IOCB		0x10
int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX;
#else
#define DPRINTF(m, f...)
#endif

struct cfdriver qlw_cd = {
	NULL,
	"qlw",
	DV_DULL
};

void		qlw_scsi_cmd(struct scsi_xfer *);
int		qlw_scsi_probe(struct scsi_link *);

u_int16_t	qlw_read(struct qlw_softc *, bus_size_t);
void		qlw_write(struct qlw_softc *, bus_size_t, u_int16_t);
void		qlw_host_cmd(struct qlw_softc *, u_int16_t);

int		qlw_mbox(struct qlw_softc *, int, int);
void		qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *);
u_int16_t	qlw_read_mbox(struct qlw_softc *, int);
void		qlw_write_mbox(struct qlw_softc *, int, u_int16_t);

int		qlw_config_bus(struct qlw_softc *, int);
int		qlw_config_target(struct qlw_softc *, int, int);
void		qlw_update_bus(struct qlw_softc *, int);
void		qlw_update_target(struct qlw_softc *, int, int);
void		qlw_update_task(void *, void *);

void		qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t);
void		qlw_set_ints(struct qlw_softc *, int);
int		qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *);
void		qlw_clear_isr(struct qlw_softc *, u_int16_t);

void		qlw_update(struct qlw_softc *, int);
void		qlw_put_marker(struct qlw_softc *, int, void *);
void		qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *);
void		qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *,
		    struct qlw_ccb *, int);
struct qlw_ccb *qlw_handle_resp(struct qlw_softc *, u_int16_t);
void		qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int *, int *);
void		qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *,
		    int, int);
void		qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *,
		    bus_dmamap_t, int);

int		qlw_softreset(struct qlw_softc *);
void		qlw_dma_burst_enable(struct qlw_softc *);

int		qlw_async(struct qlw_softc *, u_int16_t);

int		qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *,
		    u_int16_t);
int		qlw_load_firmware(struct qlw_softc *);
int		qlw_read_nvram(struct qlw_softc *);
void		qlw_parse_nvram_1040(struct qlw_softc *, int);
void		qlw_parse_nvram_1080(struct qlw_softc *, int);
void		qlw_init_defaults(struct qlw_softc *, int);

struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t);
void		qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *);

int		qlw_alloc_ccbs(struct qlw_softc *);
void		qlw_free_ccbs(struct qlw_softc *);
void		*qlw_get_ccb(void *);
void		qlw_put_ccb(void *, void *);

void		qlw_dump_iocb(struct qlw_softc *, void *, int);
void		qlw_dump_iocb_segs(struct qlw_softc *, void *, int);

static inline int
qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs)
{
	return ((xs->sc_link->scsibus == sc->sc_link[0].scsibus) ? 0 : 1);
}

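/*
 * The ISP1000 is an SBus (big-endian) part; the ISP1040/1080/12160 are
 * PCI (little-endian).  These helpers emit queue entry fields in the
 * byte order of the host bus; see the longer note above qlw_get_header()
 * for how the IOCB header fields differ.
 */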
static inline u_int16_t
qlw_swap16(struct qlw_softc *sc, u_int16_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe16(value);
	else
		return htole16(value);
}

static inline u_int32_t
qlw_swap32(struct qlw_softc *sc, u_int32_t value)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000)
		return htobe32(value);
	else
		return htole32(value);
}

static inline u_int16_t
qlw_queue_read(struct qlw_softc *sc, bus_size_t offset)
{
	return qlw_read(sc, sc->sc_mbox_base + offset);
}

static inline void
qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + offset, value);
}

struct scsi_adapter qlw_switch = {
	qlw_scsi_cmd,
	scsi_minphys,
	qlw_scsi_probe,
	NULL,	/* scsi_free */
	NULL	/* ioctl */
};

int
qlw_attach(struct qlw_softc *sc)
{
	struct scsibus_attach_args saa;
	void (*parse_nvram)(struct qlw_softc *, int);
	int reset_delay;
	int bus;

	task_set(&sc->sc_update_task, qlw_update_task, sc, NULL);

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		sc->sc_nvram_size = 0;
		break;
	case QLW_GEN_ISP1040:
		sc->sc_nvram_size = 128;
		sc->sc_nvram_minversion = 2;
		parse_nvram = qlw_parse_nvram_1040;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_nvram_size = 256;
		sc->sc_nvram_minversion = 1;
		parse_nvram = qlw_parse_nvram_1080;
		break;

	default:
		printf("unknown isp type\n");
		return (ENXIO);
	}

	/* after reset, mbox registers 1-3 should contain the string "ISP   " */
	if (qlw_read_mbox(sc, 1) != 0x4953 ||
	    qlw_read_mbox(sc, 2) != 0x5020 ||
	    qlw_read_mbox(sc, 3) != 0x2020) {
		/* try releasing the risc processor */
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}

	qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
	if (qlw_softreset(sc) != 0) {
		printf("softreset failed\n");
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_init_defaults(sc, bus);

	if (qlw_read_nvram(sc) == 0) {
		for (bus = 0; bus < sc->sc_numbusses; bus++)
			parse_nvram(sc, bus);
	}

#ifndef ISP_NOFIRMWARE
	if (sc->sc_firmware && qlw_load_firmware(sc)) {
		printf("firmware load failed\n");
		return (ENXIO);
	}
#endif

	/* execute firmware */
	sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE;
	sc->sc_mbox[1] = QLW_CODE_ORG;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	delay(250000);		/* from isp(4) */

	sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE;
	if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN,
	    QLW_MBOX_ABOUT_FIRMWARE_OUT)) {
		printf("ISP not talking after firmware exec: %x\n",
		    sc->sc_mbox[0]);
		return (ENXIO);
	}
	/* The ISP1000 firmware we use doesn't return a version number. */
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) {
		sc->sc_mbox[1] = 1;
		sc->sc_mbox[2] = 37;
		sc->sc_mbox[3] = 0;
		sc->sc_mbox[6] = 0;
	}
	printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
	    sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

	/* work out how many ccbs to allocate */
	sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS;
	if (qlw_mbox(sc, 0x0001, 0x0007)) {
		printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}
	sc->sc_maxrequests = sc->sc_mbox[2];
	if (sc->sc_maxrequests > 512)
		sc->sc_maxrequests = 512;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests)
			sc->sc_max_queue_depth[bus] = sc->sc_maxrequests;
	}

	/*
	 * On some 1020/1040 variants the response queue is limited to
	 * 256 entries.  We don't really need all that many anyway.
	 */
	sc->sc_maxresponses = sc->sc_maxrequests / 2;
	if (sc->sc_maxresponses < 64)
		sc->sc_maxresponses = 64;

	/* We may need up to 3 request entries per SCSI command. */
	sc->sc_maxccbs = sc->sc_maxrequests / 3;

	/* Allegedly the FIFO is busted on the 1040A. */
	if (sc->sc_isp_type == QLW_ISP1040A)
		sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK;
	qlw_write(sc, QLW_CFG1, sc->sc_isp_config);

	if (sc->sc_isp_config & QLW_BURST_ENABLE)
		qlw_dma_burst_enable(sc);

	sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES;
	sc->sc_mbox[1] = 0;
	if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY)
		sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY;
	if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE;
	sc->sc_mbox[1] = sc->sc_clock;
	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT;
	sc->sc_mbox[1] = sc->sc_retry_count[0];
	sc->sc_mbox[2] = sc->sc_retry_delay[0];
	sc->sc_mbox[6] = sc->sc_retry_count[1];
	sc->sc_mbox[7] = sc->sc_retry_delay[1];
	if (qlw_mbox(sc, 0x00c7, 0x0001)) {
		printf("couldn't set retry count: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP;
	sc->sc_mbox[1] = sc->sc_async_data_setup[0];
	sc->sc_mbox[2] = sc->sc_async_data_setup[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION;
	sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5;
	sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4;
	sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5;
	sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4;
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set active negation: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT;
	sc->sc_mbox[1] = sc->sc_tag_age_limit[0];
	sc->sc_mbox[2] = sc->sc_tag_age_limit[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT;
	sc->sc_mbox[1] = sc->sc_selection_timeout[0];
	sc->sc_mbox[2] = sc->sc_selection_timeout[1];
	if (qlw_mbox(sc, 0x0007, 0x0001)) {
		printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		if (qlw_config_bus(sc, bus))
			return (ENXIO);
	}

	if (qlw_alloc_ccbs(sc)) {
		/* error already printed */
		return (ENOMEM);
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxrequests;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests);
	sc->sc_mbox[4] = 0;
	if (qlw_mbox(sc, 0x00df, 0x0001)) {
		printf("couldn't init request queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE;
	sc->sc_mbox[1] = sc->sc_maxresponses;
	qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses);
	sc->sc_mbox[5] = 0;
	if (qlw_mbox(sc, 0x00ef, 0x0001)) {
		printf("couldn't init response queue: %x\n", sc->sc_mbox[0]);
		goto free_ccbs;
	}

	reset_delay = 0;
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_mbox[0] = QLW_MBOX_BUS_RESET;
		sc->sc_mbox[1] = sc->sc_reset_delay[bus];
		sc->sc_mbox[2] = bus;
		if (qlw_mbox(sc, 0x0007, 0x0001)) {
			printf("couldn't reset bus: %x\n", sc->sc_mbox[0]);
			goto free_ccbs;
		}
		sc->sc_marker_required[bus] = 1;
		sc->sc_update_required[bus] = 0xffff;

		if (sc->sc_reset_delay[bus] > reset_delay)
			reset_delay = sc->sc_reset_delay[bus];
	}

	/* wait for the busses to settle */
	delay(reset_delay * 1000000);

	/* we should be good to go now, attach scsibus */
	for (bus = 0; bus < sc->sc_numbusses; bus++) {
		sc->sc_link[bus].adapter = &qlw_switch;
		sc->sc_link[bus].adapter_softc = sc;
		sc->sc_link[bus].adapter_target = sc->sc_initiator[bus];
		sc->sc_link[bus].adapter_buswidth = QLW_MAX_TARGETS;
		sc->sc_link[bus].openings = sc->sc_max_queue_depth[bus];
		sc->sc_link[bus].pool = &sc->sc_iopool;

		memset(&saa, 0, sizeof(saa));
		saa.saa_sc_link = &sc->sc_link[bus];

		/* config_found() returns the scsibus attached to us */
		sc->sc_scsibus[bus] = (struct scsibus_softc *)
		    config_found(&sc->sc_dev, &saa, scsiprint);

		qlw_update_bus(sc, bus);
	}

	sc->sc_running = 1;
	return (0);

free_ccbs:
	qlw_free_ccbs(sc);
	return (ENXIO);
}

int
qlw_detach(struct qlw_softc *sc, int flags)
{
	return (0);
}

int
qlw_config_bus(struct qlw_softc *sc, int bus)
{
	int target, err;

	sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID;
	sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus];

	if (qlw_mbox(sc, 0x0003, 0x0001)) {
		printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		err = qlw_config_target(sc, bus, target);
		if (err)
			return (err);
	}

	return (0);
}

int
qlw_config_target(struct qlw_softc *sc, int bus, int target)
{
	int lun;

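	/* the firmware addresses devices as (bus << 15) | (target << 8) | lun */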
	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] &= QLW_TARGET_SAFE;
	sc->sc_mbox[2] |= QLW_TARGET_NARROW | QLW_TARGET_ASYNC;
	sc->sc_mbox[3] = 0;

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return (ENXIO);
	}

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return (ENXIO);
		}
	}

	return (0);
}

void
qlw_update_bus(struct qlw_softc *sc, int bus)
{
	int target;

	for (target = 0; target < QLW_MAX_TARGETS; target++)
		qlw_update_target(sc, bus, target);
}

void
qlw_update_target(struct qlw_softc *sc, int bus, int target)
{
	struct scsi_link *link;
	int lun;

	if ((sc->sc_update_required[bus] & (1 << target)) == 0)
		return;
	atomic_clearbits_int(&sc->sc_update_required[bus], (1 << target));

	link = scsi_get_link(sc->sc_scsibus[bus], target, 0);
	if (link == NULL)
		return;

	sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS;
	sc->sc_mbox[1] = (((bus << 7) | target) << 8);
	sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params;
	sc->sc_mbox[2] |= QLW_TARGET_RENEG;
	sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ;
	if (link->quirks & SDEV_NOSYNC)
		sc->sc_mbox[2] &= ~QLW_TARGET_SYNC;
	if (link->quirks & SDEV_NOWIDE)
		sc->sc_mbox[2] &= ~QLW_TARGET_WIDE;
	if (link->quirks & SDEV_NOTAGS)
		sc->sc_mbox[2] &= ~QLW_TARGET_TAGS;

	sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period;
	sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8);

	if (qlw_mbox(sc, 0x000f, 0x0001)) {
		printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]);
		return;
	}

	/* XXX do PPR detection */

	for (lun = 0; lun < QLW_MAX_LUNS; lun++) {
		sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE;
		sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun;
		sc->sc_mbox[2] = sc->sc_max_queue_depth[bus];
		sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle;
		if (qlw_mbox(sc, 0x000f, 0x0001)) {
			printf("couldn't set lun parameters: %x\n",
			    sc->sc_mbox[0]);
			return;
		}
	}
}

void
qlw_update_task(void *arg1, void *arg2)
{
	struct qlw_softc *sc = arg1;
	int bus;

	for (bus = 0; bus < sc->sc_numbusses; bus++)
		qlw_update_bus(sc, bus);
}

struct qlw_ccb *
qlw_handle_resp(struct qlw_softc *sc, u_int16_t id)
{
	struct qlw_ccb *ccb;
	struct qlw_iocb_hdr *hdr;
	struct qlw_iocb_status *status;
	struct scsi_xfer *xs;
	u_int32_t handle;
	int entry_type;
	int flags;
	int bus;

	ccb = NULL;
	hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE);

	bus_dmamap_sync(sc->sc_dmat,
	    QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);

	qlw_get_header(sc, hdr, &entry_type, &flags);
	switch (entry_type) {
	case QLW_IOCB_STATUS:
		status = (struct qlw_iocb_status *)hdr;
		handle = qlw_swap32(sc, status->handle);
		if (handle >= sc->sc_maxccbs) {
			panic("bad completed command handle: %d (>= %d)",
			    handle, sc->sc_maxccbs);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		if (xs == NULL) {
			DPRINTF(QLW_D_INTR, "%s: got status for inactive"
			    " ccb %d\n", DEVNAME(sc), handle);
			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
			ccb = NULL;
			break;
		}
		if (xs->io != ccb) {
			panic("completed command handle doesn't match xs "
			    "(handle %d, ccb %p, xs->io %p)", handle, ccb,
			    xs->io);
		}

		if (xs->datalen > 0) {
			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		}

		bus = qlw_xs_bus(sc, xs);
		xs->status = qlw_swap16(sc, status->scsi_status);
		switch (qlw_swap16(sc, status->completion)) {
		case QLW_IOCB_STATUS_COMPLETE:
			if (qlw_swap16(sc, status->scsi_status) &
			    QLW_SCSI_STATUS_SENSE_VALID) {
				memcpy(&xs->sense, status->sense_data,
				    sizeof(xs->sense));
				xs->error = XS_SENSE;
			} else {
				xs->error = XS_NOERROR;
			}
			xs->resid = 0;
			break;

		case QLW_IOCB_STATUS_INCOMPLETE:
			if (flags & QLW_STATE_GOT_TARGET) {
				xs->error = XS_DRIVER_STUFFUP;
			} else {
				xs->error = XS_SELTIMEOUT;
			}
			break;

		case QLW_IOCB_STATUS_DMA_ERROR:
			DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc));
			/* set resid apparently? */
			break;

		case QLW_IOCB_STATUS_RESET:
			DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n",
			    DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_RESET;
			break;

		case QLW_IOCB_STATUS_ABORTED:
			DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc));
			sc->sc_marker_required[bus] = 1;
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QLW_IOCB_STATUS_TIMEOUT:
			DPRINTF(QLW_D_INTR, "%s: command timed out\n",
			    DEVNAME(sc));
			xs->error = XS_TIMEOUT;
			break;

		case QLW_IOCB_STATUS_DATA_OVERRUN:
		case QLW_IOCB_STATUS_DATA_UNDERRUN:
			xs->resid = qlw_swap32(sc, status->resid);
			xs->error = XS_NOERROR;
			break;

		case QLW_IOCB_STATUS_QUEUE_FULL:
			DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc));
			xs->error = XS_BUSY;
			break;

		case QLW_IOCB_STATUS_WIDE_FAILED:
			DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOWIDE;
663 			atomic_setbits_int(&sc->sc_update_required[bus],
664 			    1 << xs->sc_link->target);
665 			task_add(systq, &sc->sc_update_task);
666 			xs->resid = qlw_swap32(sc, status->resid);
667 			xs->error = XS_NOERROR;
668 			break;
669 
670 		case QLW_IOCB_STATUS_SYNCXFER_FAILED:
671 			DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc));
			xs->sc_link->quirks |= SDEV_NOSYNC;
673 			atomic_setbits_int(&sc->sc_update_required[bus],
674 			    1 << xs->sc_link->target);
675 			task_add(systq, &sc->sc_update_task);
676 			xs->resid = qlw_swap32(sc, status->resid);
677 			xs->error = XS_NOERROR;
678 			break;
679 
680 		default:
681 			DPRINTF(QLW_D_INTR, "%s: unexpected completion"
682 			    " status %x\n", DEVNAME(sc),
683 			    qlw_swap16(sc, status->completion));
684 			qlw_dump_iocb(sc, hdr, QLW_D_INTR);
685 			xs->error = XS_DRIVER_STUFFUP;
686 			break;
687 		}
688 		break;
689 
690 	default:
691 		DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n",
692 		    DEVNAME(sc), entry_type);
693 		qlw_dump_iocb(sc, hdr, QLW_D_INTR);
694 		break;
695 	}
696 
697 	return (ccb);
698 }
699 
700 void
701 qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info)
702 {
703 	int i;
704 	u_int16_t rspin;
705 	struct qlw_ccb *ccb;
706 
707 	switch (isr) {
708 	case QLW_INT_TYPE_ASYNC:
709 		qlw_async(sc, info);
710 		qlw_clear_isr(sc, isr);
711 		break;
712 
713 	case QLW_INT_TYPE_IO:
714 		qlw_clear_isr(sc, isr);
715 		rspin = qlw_queue_read(sc, QLW_RESP_IN);
716 		if (rspin == sc->sc_last_resp_id) {
717 			/* seems to happen a lot on 2200s when mbox commands
718 			 * complete but it doesn't want to give us the register
719 			 * semaphore, or something.
720 			 *
721 			 * if we're waiting on a mailbox command, don't ack
722 			 * the interrupt yet.
723 			 */
724 			if (sc->sc_mbox_pending) {
725 				DPRINTF(QLW_D_MBOX, "%s: ignoring premature"
726 				    " mbox int\n", DEVNAME(sc));
727 				return;
728 			}
729 
730 			break;
731 		}
732 
733 		if (sc->sc_responses == NULL)
734 			break;
735 
736 		DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n",
737 		    DEVNAME(sc), sc->sc_last_resp_id, rspin);
738 
739 		do {
740 			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);
741 			if (ccb)
742 				scsi_done(ccb->ccb_xs);
743 
744 			sc->sc_last_resp_id++;
745 			sc->sc_last_resp_id %= sc->sc_maxresponses;
746 		} while (sc->sc_last_resp_id != rspin);
747 
748 		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
749 		break;
750 
751 	case QLW_INT_TYPE_MBOX:
752 		if (sc->sc_mbox_pending) {
753 			if (info == QLW_MBOX_COMPLETE) {
754 				for (i = 1; i < nitems(sc->sc_mbox); i++) {
755 					sc->sc_mbox[i] = qlw_read_mbox(sc, i);
756 				}
757 			} else {
758 				sc->sc_mbox[0] = info;
759 			}
760 			wakeup(sc->sc_mbox);
761 		} else {
762 			DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:"
763 			    " %x\n", DEVNAME(sc), info);
764 		}
765 		qlw_clear_isr(sc, isr);
766 		break;
767 
768 	default:
769 		/* maybe log something? */
770 		break;
771 	}
772 }
773 
774 int
775 qlw_intr(void *xsc)
776 {
777 	struct qlw_softc *sc = xsc;
778 	u_int16_t isr;
779 	u_int16_t info;
780 
781 	if (qlw_read_isr(sc, &isr, &info) == 0)
782 		return (0);
783 
784 	qlw_handle_intr(sc, isr, info);
785 	return (1);
786 }
787 
788 int
789 qlw_scsi_probe(struct scsi_link *link)
790 {
791 	if (link->lun >= QLW_MAX_LUNS)
792 		return (EINVAL);
793 
794 	return (0);
795 }
796 
797 void
798 qlw_scsi_cmd(struct scsi_xfer *xs)
799 {
800 	struct scsi_link	*link = xs->sc_link;
801 	struct qlw_softc	*sc = link->adapter_softc;
802 	struct qlw_ccb		*ccb;
803 	struct qlw_iocb_req0	*iocb;
804 	struct qlw_ccb_list	list;
805 	u_int16_t		req, rspin;
806 	int			offset, error, done;
807 	bus_dmamap_t		dmap;
808 	int			bus;
809 	int			seg;
810 
811 	if (xs->cmdlen > sizeof(iocb->cdb)) {
812 		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
813 		    xs->cmdlen);
814 		memset(&xs->sense, 0, sizeof(xs->sense));
815 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
816 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
817 		xs->sense.add_sense_code = 0x20;
818 		xs->error = XS_SENSE;
819 		scsi_done(xs);
820 		return;
821 	}
822 
823 	ccb = xs->io;
824 	dmap = ccb->ccb_dmamap;
825 	if (xs->datalen > 0) {
826 		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
827 		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
828 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
829 		if (error) {
830 			xs->error = XS_DRIVER_STUFFUP;
831 			scsi_done(xs);
832 			return;
833 		}
834 
835 		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
836 		    dmap->dm_mapsize,
837 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
838 		    BUS_DMASYNC_PREWRITE);
839 	}
840 
841 	mtx_enter(&sc->sc_queue_mtx);
842 
843 	/* put in a sync marker if required */
844 	bus = qlw_xs_bus(sc, xs);
845 	if (sc->sc_marker_required[bus]) {
846 		req = sc->sc_next_req_id++;
847 		if (sc->sc_next_req_id == sc->sc_maxrequests)
848 			sc->sc_next_req_id = 0;
849 
850 		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
851 		    DEVNAME(sc), req);
852 		offset = (req * QLW_QUEUE_ENTRY_SIZE);
853 		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
854 		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
855 		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
856 		qlw_put_marker(sc, bus, iocb);
857 		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
858 		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
859 		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
860 		sc->sc_marker_required[bus] = 0;
861 	}
862 
863 	req = sc->sc_next_req_id++;
864 	if (sc->sc_next_req_id == sc->sc_maxrequests)
865 		sc->sc_next_req_id = 0;
866 
867 	offset = (req * QLW_QUEUE_ENTRY_SIZE);
868 	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
869 	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
870 	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
871 
872 	ccb->ccb_xs = xs;
873 
874 	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req);
875 	qlw_put_cmd(sc, iocb, xs, ccb);
876 	seg = QLW_IOCB_SEGS_PER_CMD;
877 
878 	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
879 	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
880 
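	/*
	 * The command IOCB only holds QLW_IOCB_SEGS_PER_CMD data segments;
	 * any remaining segments spill over into continuation IOCBs.
	 */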
	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n",
		    DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

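	/*
	 * Polling path: service the response queue here, collecting
	 * completed ccbs on a local list so scsi_done() can be called
	 * once sc_queue_mtx has been released.
	 */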
	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}

u_int16_t
qlw_read(struct qlw_softc *sc, bus_size_t offset)
{
	u_int16_t v;

	v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (v);
}

void
qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value)
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

u_int16_t
qlw_read_mbox(struct qlw_softc *sc, int mbox)
{
	/* could range-check mboxes according to chip type? */
	return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2)));
}

void
qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value)
{
	qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value);
}

void
qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd)
{
	qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT);
}

#define MBOX_COMMAND_TIMEOUT	4000

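/*
 * Issue a mailbox command.  maskin and maskout are bitmasks naming the
 * mailbox registers to load from sc_mbox[] before the command and to
 * read back into sc_mbox[] afterwards; bit n corresponds to mailbox n,
 * so e.g. maskin 0x0003 writes mailboxes 0 and 1.  Returns 0 on
 * success, non-zero (the completion status) on failure.
 */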
int
qlw_mbox(struct qlw_softc *sc, int maskin, int maskout)
{
	int i;
	int result = 0;
	int rv;

	sc->sc_mbox_pending = 1;
	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qlw_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT);

	if (sc->sc_running == 0) {
		for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
			u_int16_t isr, info;

			delay(100);

			if (qlw_read_isr(sc, &isr, &info) == 0)
				continue;

			switch (isr) {
			case QLW_INT_TYPE_MBOX:
				result = info;
				break;

			default:
				qlw_handle_intr(sc, isr, info);
				break;
			}
		}
	} else {
		tsleep(sc->sc_mbox, PRIBIO, "qlw_mbox", 0);
		result = sc->sc_mbox[0];
	}

	switch (result) {
	case QLW_MBOX_COMPLETE:
		for (i = 1; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = (maskout & (1 << i)) ?
			    qlw_read_mbox(sc, i) : 0;
		}
		rv = 0;
		break;

	case 0:
		/* timed out; do something? */
		DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
		break;

	default:
		sc->sc_mbox[0] = result;
		rv = result;
		break;
	}

	qlw_clear_isr(sc, QLW_INT_TYPE_MBOX);
	sc->sc_mbox_pending = 0;
	return (rv);
}

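/*
 * Load the 64-bit DMA address of "mem" into the mailbox array in the
 * layout the firmware expects: mailboxes 2/3 carry the low 32 bits
 * (high word first), mailboxes 6/7 the upper 32 bits.
 */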
void
qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem)
{
	mbox[2] = (QLW_DMA_DVA(mem) >> 16) & 0xffff;
	mbox[3] = (QLW_DMA_DVA(mem) >> 0) & 0xffff;
	mbox[6] = (QLW_DMA_DVA(mem) >> 48) & 0xffff;
	mbox[7] = (QLW_DMA_DVA(mem) >> 32) & 0xffff;
}

void
qlw_set_ints(struct qlw_softc *sc, int enabled)
{
	u_int16_t v = enabled ? (QLW_INT_REQ | QLW_RISC_INT_REQ) : 0;

	qlw_write(sc, QLW_INT_CTRL, v);
}

int
qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info)
{
	u_int16_t int_status;

	if (qlw_read(sc, QLW_SEMA) & QLW_SEMA_LOCK) {
		*info = qlw_read_mbox(sc, 0);
		if (*info & QLW_MBOX_HAS_STATUS)
			*isr = QLW_INT_TYPE_MBOX;
		else
			*isr = QLW_INT_TYPE_ASYNC;
	} else {
		int_status = qlw_read(sc, QLW_INT_STATUS);
		if ((int_status & (QLW_INT_REQ | QLW_RISC_INT_REQ)) == 0)
			return (0);

		*isr = QLW_INT_TYPE_IO;
	}

	return (1);
}

void
qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr)
{
	qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT);
	switch (isr) {
	case QLW_INT_TYPE_MBOX:
	case QLW_INT_TYPE_ASYNC:
		qlw_write(sc, QLW_SEMA, 0);
		break;
	default:
		break;
	}
}

int
qlw_softreset(struct qlw_softc *sc)
{
	int i;

	qlw_set_ints(sc, 0);

	/* reset */
	qlw_write(sc, QLW_INT_CTRL, QLW_RESET);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0)
			break;

		delay(100);
	}

	if (i == 1000) {
		DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc));
		qlw_set_ints(sc, 0);
		return (ENXIO);
	}

	qlw_write(sc, QLW_CFG1, 0);

	/* reset risc processor */
	qlw_host_cmd(sc, QLW_HOST_CMD_RESET);
	delay(100);
	qlw_write(sc, QLW_SEMA, 0);
	qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);

	/* reset queue pointers */
	qlw_queue_write(sc, QLW_REQ_IN, 0);
	qlw_queue_write(sc, QLW_REQ_OUT, 0);
	qlw_queue_write(sc, QLW_RESP_IN, 0);
	qlw_queue_write(sc, QLW_RESP_OUT, 0);

	qlw_set_ints(sc, 1);
	qlw_host_cmd(sc, QLW_HOST_CMD_BIOS);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLW_MBOX_NOP;
	if (qlw_mbox(sc, 0x0001, 0x0001)) {
		DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n",
		    DEVNAME(sc));
		return (ENXIO);
	}

	return (0);
}

void
qlw_dma_burst_enable(struct qlw_softc *sc)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000 ||
	    sc->sc_isp_gen == QLW_GEN_ISP1040) {
		qlw_write(sc, QLW_CDMA_CFG,
		    qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG,
		    qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE);
	} else {
		qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK);
		qlw_write(sc, QLW_CDMA_CFG_1080,
		    qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_DDMA_CFG_1080,
		    qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE);
		qlw_write(sc, QLW_CFG1,
		    qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK);
		qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE);
	}
}

void
qlw_update(struct qlw_softc *sc, int task)
{
	/* do things */
}

int
qlw_async(struct qlw_softc *sc, u_int16_t info)
{
	int bus;

	switch (info) {
	case QLW_ASYNC_BUS_RESET:
		DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc));
		bus = qlw_read_mbox(sc, 6);
		sc->sc_marker_required[bus] = 1;
		break;

#if 0
	case QLW_ASYNC_SYSTEM_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_REQ_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;

	case QLW_ASYNC_RSP_XFER_ERROR:
		qlw_update(sc, QLW_UPDATE_SOFTRESET);
		break;
#endif

	case QLW_ASYNC_SCSI_CMD_COMPLETE:
		/* shouldn't happen, we disable fast posting */
		break;

	case QLW_ASYNC_CTIO_COMPLETE:
		/* definitely shouldn't happen, we don't do target mode */
		break;

	default:
		DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc),
		    info);
		break;
	}
	return (1);
}

void
qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags)
{
#ifdef QLW_DEBUG
	u_int8_t *iocb = buf;
	int l;
	int b;

	if ((qlwdebug & flags) == 0)
		return;

	printf("%s: iocb:\n", DEVNAME(sc));
	for (l = 0; l < 4; l++) {
		for (b = 0; b < 16; b++) {
			printf(" %2.2x", iocb[(l*16)+b]);
		}
		printf("\n");
	}
#endif
}

void
qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n)
{
#ifdef QLW_DEBUG
	u_int8_t *buf = segs;
	int s, b;

	if ((qlwdebug & QLW_D_IOCB) == 0)
		return;

	printf("%s: iocb segs:\n", DEVNAME(sc));
	for (s = 0; s < n; s++) {
		for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) {
			printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg)))
			    + b]);
		}
		printf("\n");
	}
#endif
}

/*
 * The PCI bus is little-endian whereas SBus is big-endian.  This
 * leads to some differences in byte twisting of DMA transfers of
 * request and response queue entries.  Most fields can be treated as
 * 16-bit or 32-bit with the endianness of the bus, but the header
 * fields end up being swapped by the ISP1000's SBus interface.
 */

void
qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int *type, int *flags)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
		*type = hdr->entry_count;
		*flags = hdr->seqno;
	} else {
		*type = hdr->entry_type;
		*flags = hdr->flags;
	}
}

void
qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr,
    int type, int count)
{
	if (sc->sc_isp_gen == QLW_GEN_ISP1000) {
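		/*
		 * Deliberately swapped: the SBus interface byte-twists
		 * the 8-bit header fields (see the comment above
		 * qlw_get_header()).
		 */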
		hdr->entry_type = count;
		hdr->entry_count = type;
		hdr->seqno = 0;
		hdr->flags = 0;
	} else {
		hdr->entry_type = type;
		hdr->entry_count = count;
		hdr->seqno = 0;
		hdr->flags = 0;
	}
}

void
qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg,
    bus_dmamap_t dmap, int num)
{
	seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr);
	seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len);
}

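/*
 * A marker IOCB tells the firmware to resynchronize with a bus, target
 * or lun after a reset; sc_marker_required arranges for one to be
 * queued ahead of the next command on the affected bus.
 */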
void
qlw_put_marker(struct qlw_softc *sc, int bus, void *buf)
{
	struct qlw_iocb_marker *marker = buf;

	qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1);

	/* could be more specific here; isp(4) isn't */
	marker->device = qlw_swap16(sc, (bus << 7) << 8);
	marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL);
	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

void
qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb)
{
	struct qlw_iocb_req0 *req = buf;
	int entry_count = 1;
	u_int16_t dir;
	int seg, nsegs;
	int seg_count;
	int timeout = 0;
	int bus, target, lun;

	if (xs->datalen == 0) {
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds.  make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

void
qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb, int seg0)
{
	struct qlw_iocb_cont0 *cont = buf;
	int seg;

	qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_0, 1);

	for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
		if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT)
			break;
		qlw_put_data_seg(sc, &cont->segs[seg - seg0],
		    ccb->ccb_dmamap, seg);
	}
}

#ifndef ISP_NOFIRMWARE
int
qlw_load_firmware_words(struct qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}

int
qlw_load_firmware(struct qlw_softc *sc)
{
	return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
}

#endif	/* !ISP_NOFIRMWARE */

int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

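	/*
	 * The NVRAM is a microwire-style serial EEPROM: for each word we
	 * clock out a start bit, the read opcode and the word address
	 * MSB-first, then clock the 16 data bits back in MSB-first.
	 */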
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	bcopy(data, &sc->sc_nvram, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should be high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}

void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}

struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, 0);

	return (NULL);
}

void
qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
	free(m, M_DEVBUF, 0);
}

int
qlw_alloc_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	mtx_init(&sc->sc_queue_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_responses == NULL) {
		printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
		goto free_req;
	}

	cmd = QLW_DMA_KVA(sc->sc_requests);
	memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs);
	for (i = 0; i < sc->sc_maxccbs; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    QLW_MAX_SEGS, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;

		qlw_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
	return (0);

free_maps:
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	qlw_dmamem_free(sc, sc->sc_responses);
free_req:
	qlw_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}

void
qlw_free_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb		*ccb;

	scsi_iopool_destroy(&sc->sc_iopool);
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	qlw_dmamem_free(sc, sc->sc_responses);
	qlw_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);
}

void *
qlw_get_ccb(void *xsc)
{
	struct qlw_softc	*sc = xsc;
	struct qlw_ccb		*ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);
	return (ccb);
}

void
qlw_put_ccb(void *xsc, void *io)
{
	struct qlw_softc	*sc = xsc;
	struct qlw_ccb		*ccb = io;

	ccb->ccb_xs = NULL;
	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}