xref: /netbsd-src/sys/dev/ieee1394/sbp.c (revision bbde328be4e75ea9ad02e9715ea13ca54b797ada)
1 /*	$NetBSD: sbp.c,v 1.30 2010/04/29 06:51:26 kiyohara Exp $	*/
2 /*-
3  * Copyright (c) 2003 Hidetoshi Shimokawa
4  * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the acknowledgement as bellow:
17  *
18  *    This product includes software developed by K. Kobayashi and H. Shimokawa
19  *
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.30 2010/04/29 06:51:26 kiyohara Exp $");
41 
42 
43 #include <sys/param.h>
44 #include <sys/device.h>
45 #include <sys/errno.h>
46 #include <sys/buf.h>
47 #include <sys/callout.h>
48 #include <sys/condvar.h>
49 #include <sys/kernel.h>
50 #include <sys/kthread.h>
51 #include <sys/malloc.h>
52 #include <sys/mutex.h>
53 #include <sys/proc.h>
54 #include <sys/sysctl.h>
55 
56 #include <sys/bus.h>
57 
58 #include <dev/scsipi/scsi_spc.h>
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsiconf.h>
62 #include <dev/scsipi/scsipiconf.h>
63 
64 #include <dev/ieee1394/firewire.h>
65 #include <dev/ieee1394/firewirereg.h>
66 #include <dev/ieee1394/fwdma.h>
67 #include <dev/ieee1394/iec13213.h>
68 #include <dev/ieee1394/sbp.h>
69 
70 #include "locators.h"
71 
72 
73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \
74 	&& crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2))
75 
76 #define SBP_NUM_TARGETS	8 /* MAX 64 */
77 #define SBP_NUM_LUNS	64
78 #define SBP_MAXPHYS	MIN(MAXPHYS, (512*1024) /* 512KB */)
79 #define SBP_DMA_SIZE	PAGE_SIZE
80 #define SBP_LOGIN_SIZE	sizeof(struct sbp_login_res)
81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb))
82 #define SBP_NUM_OCB	(SBP_QUEUE_LEN * SBP_NUM_TARGETS)
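/*
 * Each LUN gets one SBP_DMA_SIZE (page-sized) DMA buffer, laid out as
 * SBP_QUEUE_LEN struct sbp_ocb entries followed by the login response
 * block (see sbp_alloc_lun()).
 */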
83 
84 /*
85  * STATUS FIFO addressing
86  *   bit
87  * -----------------------
88  *  0- 1( 2): 0 (alignment)
89  *  2- 9( 8): lun
90  * 10-23(14): unit
91  * 32-47(16): SBP_BIND_HI
92  * 48-63(16): bus_id, node_id
93  */
94 #define SBP_BIND_HI 0x1
95 #define SBP_DEV2ADDR(u, l)		 \
96 	(((uint64_t)SBP_BIND_HI << 32)	|\
97 	 (((u) & 0x3fff) << 10)		|\
98 	 (((l) & 0xff) << 2))
99 #define SBP_ADDR2UNIT(a)	(((a) >> 10) & 0x3fff)
100 #define SBP_ADDR2LUN(a)		(((a) >> 2) & 0xff)
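/*
 * Worked example: SBP_DEV2ADDR(2, 5) =
 * (0x1 << 32) | (2 << 10) | (5 << 2) = 0x100000814;
 * SBP_ADDR2UNIT() and SBP_ADDR2LUN() recover 2 and 5 from the low bits.
 */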
101 #define SBP_INITIATOR 7
102 
103 static const char *orb_fun_name[] = {
104 	ORB_FUN_NAMES
105 };
106 
107 static int debug = 0;
108 static int auto_login = 1;
109 static int max_speed = -1;
110 static int sbp_cold = 1;
111 static int ex_login = 1;
112 static int login_delay = 1000;	/* msec */
113 static int scan_delay = 500;	/* msec */
114 static int use_doorbell = 0;
115 static int sbp_tags = 0;
116 
117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper);
118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO);
119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO);
120 
121 /*
122  * Setup sysctl(3) MIB, hw.sbp.*
123  *
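 * All leaf nodes are CTLFLAG_READWRITE, e.g. "sysctl -w hw.sbp.sbp_debug=1".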
124  * TBD: make CTLFLAG_PERMANENT conditional on whether this is built as a module
125  */
126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup")
127 {
128 	int rc, sbp_node_num;
129 	const struct sysctlnode *node;
130 
131 	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
132 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
133 	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0)
134 		goto err;
135 
136 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
137 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp",
138 	    SYSCTL_DESCR("sbp controls"), NULL, 0, NULL,
139 	    0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
140 		goto err;
141 	sbp_node_num = node->sysctl_num;
142 
143 	/* sbp auto login flag */
144 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
145 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
146 	    "auto_login", SYSCTL_DESCR("SBP perform login automatically"),
147 	    NULL, 0, &auto_login,
148 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
149 		goto err;
150 
151 	/* sbp max speed */
152 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
153 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
154 	    "max_speed", SYSCTL_DESCR("SBP transfer max speed"),
155 	    sysctl_sbp_verify_max_speed, 0, &max_speed,
156 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
157 		goto err;
158 
159 	/* sbp exclusive login flag */
160 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
161 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
162 	    "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"),
163 	    NULL, 0, &ex_login,
164 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
165 		goto err;
166 
167 	/* sbp login delay */
168 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
169 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
170 	    "login_delay", SYSCTL_DESCR("SBP login delay in msec"),
171 	    NULL, 0, &login_delay,
172 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
173 		goto err;
174 
175 	/* sbp scan delay */
176 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
177 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
178 	    "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"),
179 	    NULL, 0, &scan_delay,
180 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
181 		goto err;
182 
183 	/* sbp use doorbell flag */
184 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
185 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
186 	    "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"),
187 	    NULL, 0, &use_doorbell,
188 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
189 		goto err;
190 
191 	/* sbp force tagged queuing */
192 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
193 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
194 	    "tags", SYSCTL_DESCR("SBP tagged queuing support"),
195 	    sysctl_sbp_verify_tags, 0, &sbp_tags,
196 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
197 		goto err;
198 
199 	/* sbp driver debug flag */
200 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
201 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
202 	    "sbp_debug", SYSCTL_DESCR("SBP debug flag"),
203 	    NULL, 0, &debug,
204 	    0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0)
205 		goto err;
206 
207 	return;
208 
209 err:
210 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
211 }
212 
213 static int
214 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper)
215 {
216 	int error, t;
217 	struct sysctlnode node;
218 
219 	node = *rnode;
220 	t = *(int*)rnode->sysctl_data;
221 	node.sysctl_data = &t;
222 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
223 	if (error || newp == NULL)
224 		return error;
225 
226 	if (t < lower || t > upper)
227 		return EINVAL;
228 
229 	*(int*)rnode->sysctl_data = t;
230 
231 	return 0;
232 }
233 
234 static int
235 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS)
236 {
237 
238 	return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400);
239 }
240 
241 static int
242 sysctl_sbp_verify_tags(SYSCTLFN_ARGS)
243 {
244 
245 	return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1);
246 }
247 
248 #define NEED_RESPONSE 0
249 
250 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE)
251 #ifdef __sparc64__ /* iommu */
252 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX)
253 #else
254 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE)
255 #endif
256 struct sbp_ocb {
257 	uint32_t	orb[8];
258 #define IND_PTR_OFFSET	(sizeof(uint32_t) * 8)
259 	struct ind_ptr	ind_ptr[SBP_IND_MAX];
260 	struct scsipi_xfer *xs;
261 	struct sbp_dev	*sdev;
262 	uint16_t	index;
263 	uint16_t	flags; /* XXX should be removed */
264 	bus_dmamap_t	dmamap;
265 	bus_addr_t	bus_addr;
266 	STAILQ_ENTRY(sbp_ocb)	ocb;
267 };
268 
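/*
 * Sync only the ORB and indirect-pointer (page table) portion of the
 * i-th sbp_ocb in the per-LUN DMA buffer; expects a local named "ocb".
 */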
269 #define SBP_ORB_DMA_SYNC(dma, i, op)			\
270 	bus_dmamap_sync((dma).dma_tag, (dma).dma_map,	\
271 	    sizeof(struct sbp_ocb) * (i),		\
272 	    sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op));
273 
274 #define OCB_ACT_MGM 0
275 #define OCB_ACT_CMD 1
276 #define OCB_MATCH(o,s)	((o)->bus_addr == ntohl((s)->orb_lo))
277 
278 struct sbp_dev{
279 #define SBP_DEV_RESET		0	/* accept login */
280 #define SBP_DEV_LOGIN		1	/* to login */
281 #if 0
282 #define SBP_DEV_RECONN		2	/* to reconnect */
283 #endif
284 #define SBP_DEV_TOATTACH	3	/* to attach */
285 #define SBP_DEV_PROBE		4	/* scan lun */
286 #define SBP_DEV_ATTACHED	5	/* in operation */
287 #define SBP_DEV_DEAD		6	/* unavailable unit */
288 #define SBP_DEV_RETRY		7	/* unavailable unit */
289 	uint8_t status:4,
290 		 timeout:4;
291 	uint8_t type;
292 	uint16_t lun_id;
293 	uint16_t freeze;
294 #define	ORB_LINK_DEAD		(1 << 0)
295 #define	VALID_LUN		(1 << 1)
296 #define	ORB_POINTER_ACTIVE	(1 << 2)
297 #define	ORB_POINTER_NEED	(1 << 3)
298 #define	ORB_DOORBELL_ACTIVE	(1 << 4)
299 #define	ORB_DOORBELL_NEED	(1 << 5)
300 #define	ORB_SHORTAGE		(1 << 6)
301 	uint16_t flags;
302 	struct scsipi_periph *periph;
303 	struct sbp_target *target;
304 	struct fwdma_alloc dma;
305 	struct sbp_login_res *login;
306 	struct callout login_callout;
307 	struct sbp_ocb *ocb;
308 	STAILQ_HEAD(, sbp_ocb) ocbs;
309 	STAILQ_HEAD(, sbp_ocb) free_ocbs;
310 	struct sbp_ocb *last_ocb;
311 	char vendor[32];
312 	char product[32];
313 	char revision[10];
314 	char bustgtlun[32];
315 };
316 
317 struct sbp_target {
318 	int target_id;
319 	int num_lun;
320 	struct sbp_dev	**luns;
321 	struct sbp_softc *sbp;
322 	struct fw_device *fwdev;
323 	uint32_t mgm_hi, mgm_lo;
324 	struct sbp_ocb *mgm_ocb_cur;
325 	STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue;
326 	struct callout mgm_ocb_timeout;
327 	STAILQ_HEAD(, fw_xfer) xferlist;
328 	int n_xfer;
329 };
330 
331 struct sbp_softc {
332 	struct firewire_dev_comm sc_fd;
333 	struct scsipi_adapter sc_adapter;
334 	struct scsipi_channel sc_channel;
335 	device_t sc_bus;
336 	struct lwp *sc_lwp;
337 	struct sbp_target sc_target;
338 	struct fw_bind sc_fwb;
339 	bus_dma_tag_t sc_dmat;
340 	struct timeval sc_last_busreset;
341 	int sc_flags;
342 	kmutex_t sc_mtx;
343 	kcondvar_t sc_cv;
344 };
345 
346 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394");
347 
348 
349 static int sbpmatch(device_t, cfdata_t, void *);
350 static void sbpattach(device_t, device_t, void *);
351 static int sbpdetach(device_t, int);
352 
353 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t,
354 			       void *);
355 static void sbp_minphys(struct buf *);
356 
357 static void sbp_show_sdev_info(struct sbp_dev *);
358 static void sbp_alloc_lun(struct sbp_target *);
359 static struct sbp_target *sbp_alloc_target(struct sbp_softc *,
360 					   struct fw_device *);
361 static void sbp_probe_lun(struct sbp_dev *);
362 static void sbp_login_callout(void *);
363 static void sbp_login(struct sbp_dev *);
364 static void sbp_probe_target(void *);
365 static void sbp_post_busreset(void *);
366 static void sbp_post_explore(void *);
367 #if NEED_RESPONSE
368 static void sbp_loginres_callback(struct fw_xfer *);
369 #endif
370 static inline void sbp_xfer_free(struct fw_xfer *);
371 static void sbp_reset_start_callback(struct fw_xfer *);
372 static void sbp_reset_start(struct sbp_dev *);
373 static void sbp_mgm_callback(struct fw_xfer *);
374 static void sbp_scsipi_scan_target(void *);
375 static inline void sbp_scan_dev(struct sbp_dev *);
376 static void sbp_do_attach(struct fw_xfer *);
377 static void sbp_agent_reset_callback(struct fw_xfer *);
378 static void sbp_agent_reset(struct sbp_dev *);
379 static void sbp_busy_timeout_callback(struct fw_xfer *);
380 static void sbp_busy_timeout(struct sbp_dev *);
381 static void sbp_orb_pointer_callback(struct fw_xfer *);
382 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *);
383 static void sbp_doorbell_callback(struct fw_xfer *);
384 static void sbp_doorbell(struct sbp_dev *);
385 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int);
386 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *);
387 static void sbp_print_scsi_cmd(struct sbp_ocb *);
388 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *);
389 static void sbp_fix_inq_data(struct sbp_ocb *);
390 static void sbp_recv(struct fw_xfer *);
391 static int sbp_logout_all(struct sbp_softc *);
392 static void sbp_free_sdev(struct sbp_dev *);
393 static void sbp_free_target(struct sbp_target *);
394 static void sbp_scsipi_detach_sdev(struct sbp_dev *);
395 static void sbp_scsipi_detach_target(struct sbp_target *);
396 static void sbp_target_reset(struct sbp_dev *, int);
397 static void sbp_mgm_timeout(void *);
398 static void sbp_timeout(void *);
399 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *);
400 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int);
401 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *);
402 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *);
403 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *);
404 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *);
405 static void sbp_abort_ocb(struct sbp_ocb *, int);
406 static void sbp_abort_all_ocbs(struct sbp_dev *, int);
407 
408 
409 static const char *orb_status0[] = {
410 	/* 0 */ "No additional information to report",
411 	/* 1 */ "Request type not supported",
412 	/* 2 */ "Speed not supported",
413 	/* 3 */ "Page size not supported",
414 	/* 4 */ "Access denied",
415 	/* 5 */ "Logical unit not supported",
416 	/* 6 */ "Maximum payload too small",
417 	/* 7 */ "Reserved for future standardization",
418 	/* 8 */ "Resources unavailable",
419 	/* 9 */ "Function rejected",
420 	/* A */ "Login ID not recognized",
421 	/* B */ "Dummy ORB completed",
422 	/* C */ "Request aborted",
423 	/* FF */ "Unspecified error"
424 #define MAX_ORB_STATUS0 0xd
425 };
426 
427 static const char *orb_status1_object[] = {
428 	/* 0 */ "Operation request block (ORB)",
429 	/* 1 */ "Data buffer",
430 	/* 2 */ "Page table",
431 	/* 3 */ "Unable to specify"
432 };
433 
434 static const char *orb_status1_serial_bus_error[] = {
435 	/* 0 */ "Missing acknowledge",
436 	/* 1 */ "Reserved; not to be used",
437 	/* 2 */ "Time-out error",
438 	/* 3 */ "Reserved; not to be used",
439 	/* 4 */ "Busy retry limit exceeded(X)",
440 	/* 5 */ "Busy retry limit exceeded(A)",
441 	/* 6 */ "Busy retry limit exceeded(B)",
442 	/* 7 */ "Reserved for future standardization",
443 	/* 8 */ "Reserved for future standardization",
444 	/* 9 */ "Reserved for future standardization",
445 	/* A */ "Reserved for future standardization",
446 	/* B */ "Tardy retry limit exceeded",
447 	/* C */ "Conflict error",
448 	/* D */ "Data error",
449 	/* E */ "Type error",
450 	/* F */ "Address error"
451 };
452 
453 
454 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc),
455     sbpmatch, sbpattach, sbpdetach, NULL);
456 
457 
458 int
459 sbpmatch(device_t parent, cfdata_t cf, void *aux)
460 {
461 	struct fw_attach_args *fwa = aux;
462 
463 	if (strcmp(fwa->name, "sbp") == 0)
464 		return 1;
465 	return 0;
466 }
467 
468 static void
469 sbpattach(device_t parent, device_t self, void *aux)
470 {
471 	struct sbp_softc *sc = device_private(self);
472 	struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
473 	struct firewire_comm *fc;
474 	struct scsipi_adapter *sc_adapter = &sc->sc_adapter;
475 	struct scsipi_channel *sc_channel = &sc->sc_channel;
476 	struct sbp_target *target = &sc->sc_target;
477 	int dv_unit;
478 
479 	aprint_naive("\n");
480 	aprint_normal(": SBP-2/SCSI over IEEE1394\n");
481 
482 	sc->sc_fd.dev = self;
483 
484 	if (cold)
485 		sbp_cold++;
486 	sc->sc_fd.fc = fc = fwa->fc;
487 	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);
488 	cv_init(&sc->sc_cv, "sbp");
489 
490 	if (max_speed < 0)
491 		max_speed = fc->speed;
492 
493 	sc->sc_dmat = fc->dmat;
494 
495 	sc->sc_target.fwdev = NULL;
496 	sc->sc_target.luns = NULL;
497 
498 	if (sbp_alloc_target(sc, fwa->fwdev) == NULL)
499 		return;
500 
501 	sc_adapter->adapt_dev = sc->sc_fd.dev;
502 	sc_adapter->adapt_nchannels = 1;
503 	sc_adapter->adapt_max_periph = 1;
504 	sc_adapter->adapt_request = sbp_scsipi_request;
505 	sc_adapter->adapt_minphys = sbp_minphys;
506 	sc_adapter->adapt_openings = 8;
507 
508 	sc_channel->chan_adapter = sc_adapter;
509 	sc_channel->chan_bustype = &scsi_bustype;
510 	sc_channel->chan_defquirks = PQUIRK_ONLYBIG;
511 	sc_channel->chan_channel = 0;
512 	sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE;
513 
514 	sc_channel->chan_ntargets = 1;
515 	sc_channel->chan_nluns = target->num_lun;	/* still 0 here; grown in sbp_scsipi_scan_target */
516 	sc_channel->chan_id = 1;
517 
518 	sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint);
519 	if (sc->sc_bus == NULL) {
520 		aprint_error_dev(self, "attach failed\n");
521 		return;
522 	}
523 
524 	/* We reserve 1KB of address space per unit (4 bytes X 256 luns) */
525 	dv_unit = device_unit(sc->sc_fd.dev);
526 	sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0);
527 	sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1);
528 	mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM);
529 	/* pre-allocate xfer */
530 	STAILQ_INIT(&sc->sc_fwb.xferlist);
531 	fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP,
532 	    /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2,
533 	    fc, (void *)sc, sbp_recv);
534 	fw_bindadd(fc, &sc->sc_fwb);
535 
536 	sc->sc_fd.post_busreset = sbp_post_busreset;
537 	sc->sc_fd.post_explore = sbp_post_explore;
538 
539 	if (fc->status != FWBUSNOTREADY) {
540 		sbp_post_busreset((void *)sc);
541 		sbp_post_explore((void *)sc);
542 	}
543 }
544 
545 static int
546 sbpdetach(device_t self, int flags)
547 {
548 	struct sbp_softc *sc = device_private(self);
549 	struct firewire_comm *fc = sc->sc_fd.fc;
550 
551 	sbp_scsipi_detach_target(&sc->sc_target);
552 
553 	if (SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) {
554 		sbp_logout_all(sc);
555 
556 		/* XXX wait for logout completion */
557 		mutex_enter(&sc->sc_mtx);
558 		cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2);
559 		mutex_exit(&sc->sc_mtx);
560 	}
561 
562 	sbp_free_target(&sc->sc_target);
563 
564 	fw_bindremove(fc, &sc->sc_fwb);
565 	fw_xferlist_remove(&sc->sc_fwb.xferlist);
566 	mutex_destroy(&sc->sc_fwb.fwb_mtx);
567 
568 	mutex_destroy(&sc->sc_mtx);
569 
570 	return 0;
571 }
572 
573 
574 static void
575 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req,
576 		   void *arg)
577 {
578 	struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev);
579 	struct scsipi_xfer *xs = arg;
580 	int i;
581 
582 SBP_DEBUG(1)
583 	printf("Called sbp_scsipi_request\n");
584 END_DEBUG
585 
586 	switch (req) {
587 	case ADAPTER_REQ_RUN_XFER:
588 SBP_DEBUG(1)
589 		printf("Got req_run_xfer\n");
590 		printf("xs control: 0x%08x, timeout: %d\n",
591 		    xs->xs_control, xs->timeout);
592 		printf("opcode: 0x%02x\n", (int)xs->cmd->opcode);
593 		for (i = 0; i < 15; i++)
594 			printf("0x%02x ",(int)xs->cmd->bytes[i]);
595 		printf("\n");
596 END_DEBUG
597 		if (xs->xs_control & XS_CTL_RESET) {
598 SBP_DEBUG(1)
599 				printf("XS_CTL_RESET not supported\n");
600 END_DEBUG
601 			break;
602 		}
603 #define SBPSCSI_SBP2_MAX_CDB 12
604 		if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) {
605 SBP_DEBUG(0)
606 			printf(
607 			    "sbp doesn't support CDBs larger than %d bytes\n",
608 			    SBPSCSI_SBP2_MAX_CDB);
609 END_DEBUG
610 			xs->error = XS_DRIVER_STUFFUP;
611 			scsipi_done(xs);
612 			return;
613 		}
614 		sbp_action1(sc, xs);
615 
616 		break;
617 	case ADAPTER_REQ_GROW_RESOURCES:
618 SBP_DEBUG(1)
619 		printf("Got req_grow_resources\n");
620 END_DEBUG
621 		break;
622 	case ADAPTER_REQ_SET_XFER_MODE:
623 SBP_DEBUG(1)
624 		printf("Got set xfer mode\n");
625 END_DEBUG
626 		break;
627 	default:
628 		panic("Unknown request: %d\n", (int)req);
629 	}
630 }
631 
632 static void
633 sbp_minphys(struct buf *bp)
634 {
635 
636 	minphys(bp);
637 }
638 
639 
640 /*
641  * Display device characteristics on the console
642  */
643 static void
644 sbp_show_sdev_info(struct sbp_dev *sdev)
645 {
646 	struct fw_device *fwdev = sdev->target->fwdev;
647 	struct sbp_softc *sc = sdev->target->sbp;
648 
649 	aprint_normal_dev(sc->sc_fd.dev,
650 	    "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n",
651 	    (sdev->type & 0x40) >> 6,
652 	    (sdev->type & 0x1f),
653 	    fwdev->eui.hi,
654 	    fwdev->eui.lo,
655 	    fwdev->dst,
656 	    fwdev->speed,
657 	    fwdev->maxrec);
658 	aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n",
659 	    sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision);
660 }
661 
662 static void
663 sbp_alloc_lun(struct sbp_target *target)
664 {
665 	struct crom_context cc;
666 	struct csrreg *reg;
667 	struct sbp_dev *sdev, **newluns;
668 	struct sbp_softc *sc;
669 	int maxlun, lun, i;
670 
671 	sc = target->sbp;
672 	crom_init_context(&cc, target->fwdev->csrrom);
673 	/* XXX should parse appropriate unit directories only */
674 	maxlun = -1;
675 	while (cc.depth >= 0) {
676 		reg = crom_search_key(&cc, CROM_LUN);
677 		if (reg == NULL)
678 			break;
679 		lun = reg->val & 0xffff;
680 SBP_DEBUG(0)
681 		printf("target %d lun %d found\n", target->target_id, lun);
682 END_DEBUG
683 		if (maxlun < lun)
684 			maxlun = lun;
685 		crom_next(&cc);
686 	}
687 	if (maxlun < 0)
688 		aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n",
689 		    target->target_id);
690 
691 	maxlun++;
692 	if (maxlun >= SBP_NUM_LUNS)
693 		maxlun = SBP_NUM_LUNS;
694 
695 	/* Invalidate stale devices */
696 	for (lun = 0; lun < target->num_lun; lun++) {
697 		sdev = target->luns[lun];
698 		if (sdev == NULL)
699 			continue;
700 		sdev->flags &= ~VALID_LUN;
701 		if (lun >= maxlun) {
702 			/* lost device */
703 			sbp_scsipi_detach_sdev(sdev);
704 			sbp_free_sdev(sdev);
705 			target->luns[lun] = NULL;
706 		}
707 	}
708 
709 	/* Reallocate */
710 	if (maxlun != target->num_lun) {
711 		newluns = (struct sbp_dev **) realloc(target->luns,
712 		    sizeof(struct sbp_dev *) * maxlun,
713 		    M_SBP, M_NOWAIT | M_ZERO);
714 
715 		if (newluns == NULL) {
716 			aprint_error_dev(sc->sc_fd.dev, "realloc failed\n");
717 			newluns = target->luns;
718 			maxlun = target->num_lun;
719 		}
720 
721 		/*
722 		 * We must zero the extended region in case
723 		 * realloc() did not allocate a new buffer.
724 		 */
725 		if (maxlun > target->num_lun) {
726 			const int sbp_dev_p_sz = sizeof(struct sbp_dev *);
727 
728 			memset(&newluns[target->num_lun], 0,
729 			    sbp_dev_p_sz * (maxlun - target->num_lun));
730 		}
731 
732 		target->luns = newluns;
733 		target->num_lun = maxlun;
734 	}
735 
736 	crom_init_context(&cc, target->fwdev->csrrom);
737 	while (cc.depth >= 0) {
738 		int new = 0;
739 
740 		reg = crom_search_key(&cc, CROM_LUN);
741 		if (reg == NULL)
742 			break;
743 		lun = reg->val & 0xffff;
744 		if (lun >= SBP_NUM_LUNS) {
745 			aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n",
746 			    lun);
747 			goto next;
748 		}
749 
750 		sdev = target->luns[lun];
751 		if (sdev == NULL) {
752 			sdev = malloc(sizeof(struct sbp_dev),
753 			    M_SBP, M_NOWAIT | M_ZERO);
754 			if (sdev == NULL) {
755 				aprint_error_dev(sc->sc_fd.dev,
756 				    "malloc failed\n");
757 				goto next;
758 			}
759 			target->luns[lun] = sdev;
760 			sdev->lun_id = lun;
761 			sdev->target = target;
762 			STAILQ_INIT(&sdev->ocbs);
763 			callout_init(&sdev->login_callout, CALLOUT_MPSAFE);
764 			callout_setfunc(&sdev->login_callout,
765 			    sbp_login_callout, sdev);
766 			sdev->status = SBP_DEV_RESET;
767 			new = 1;
768 			snprintf(sdev->bustgtlun, 32, "%s:%d:%d",
769 			    device_xname(sc->sc_fd.dev),
770 			    sdev->target->target_id,
771 			    sdev->lun_id);
772 			if (!sc->sc_lwp)
773 				if (kthread_create(
774 				    PRI_NONE, KTHREAD_MPSAFE, NULL,
775 				    sbp_scsipi_scan_target, &sc->sc_target,
776 				    &sc->sc_lwp,
777 				    "sbp%d_attach", device_unit(sc->sc_fd.dev)))
778 					aprint_error_dev(sc->sc_fd.dev,
779 					    "unable to create thread\n");
780 		}
781 		sdev->flags |= VALID_LUN;
782 		sdev->type = (reg->val & 0xff0000) >> 16;
783 
784 		if (new == 0)
785 			goto next;
786 
787 		fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE,
788 		    &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT);
789 		if (sdev->dma.v_addr == NULL) {
790 			free(sdev, M_SBP);
791 			target->luns[lun] = NULL;
792 			goto next;
793 		}
794 		sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr;
795 		sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN];
796 		memset((char *)sdev->ocb, 0,
797 		    sizeof(struct sbp_ocb) * SBP_QUEUE_LEN);
798 
799 		STAILQ_INIT(&sdev->free_ocbs);
800 		for (i = 0; i < SBP_QUEUE_LEN; i++) {
801 			struct sbp_ocb *ocb = &sdev->ocb[i];
802 
803 			ocb->index = i;
804 			ocb->bus_addr =
805 			    sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i;
806 			if (bus_dmamap_create(sc->sc_dmat, 0x100000,
807 			    SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) {
808 				aprint_error_dev(sc->sc_fd.dev,
809 				    "cannot create dmamap %d\n", i);
810 				/* XXX */
811 				goto next;
812 			}
813 			sbp_free_ocb(sdev, ocb);	/* into free queue */
814 		}
815 next:
816 		crom_next(&cc);
817 	}
818 
819 	for (lun = 0; lun < target->num_lun; lun++) {
820 		sdev = target->luns[lun];
821 		if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) {
822 			sbp_scsipi_detach_sdev(sdev);
823 			sbp_free_sdev(sdev);
824 			target->luns[lun] = NULL;
825 		}
826 	}
827 }
828 
829 static struct sbp_target *
830 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev)
831 {
832 	struct sbp_target *target;
833 	struct crom_context cc;
834 	struct csrreg *reg;
835 
836 SBP_DEBUG(1)
837 	printf("sbp_alloc_target\n");
838 END_DEBUG
839 	/* new target */
840 	target = &sc->sc_target;
841 	target->sbp = sc;
842 	target->fwdev = fwdev;
843 	target->target_id = 0;
844 	/* XXX we may want to reload mgm port after each bus reset */
845 	/* XXX there might be multiple management agents */
846 	crom_init_context(&cc, target->fwdev->csrrom);
847 	reg = crom_search_key(&cc, CROM_MGM);
848 	if (reg == NULL || reg->val == 0) {
849 		aprint_error_dev(sc->sc_fd.dev, "NULL management address\n");
850 		target->fwdev = NULL;
851 		return NULL;
852 	}
853 	target->mgm_hi = 0xffff;
854 	target->mgm_lo = 0xf0000000 | (reg->val << 2);
855 	target->mgm_ocb_cur = NULL;
856 SBP_DEBUG(1)
857 	printf("target: mgm_port: %x\n", target->mgm_lo);
858 END_DEBUG
859 	STAILQ_INIT(&target->xferlist);
860 	target->n_xfer = 0;
861 	STAILQ_INIT(&target->mgm_ocb_queue);
862 	callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE);
863 
864 	target->luns = NULL;
865 	target->num_lun = 0;
866 	return target;
867 }
868 
869 static void
870 sbp_probe_lun(struct sbp_dev *sdev)
871 {
872 	struct fw_device *fwdev;
873 	struct crom_context c, *cc = &c;
874 	struct csrreg *reg;
875 
876 	memset(sdev->vendor, 0, sizeof(sdev->vendor));
877 	memset(sdev->product, 0, sizeof(sdev->product));
878 
879 	fwdev = sdev->target->fwdev;
880 	crom_init_context(cc, fwdev->csrrom);
881 	/* get vendor string */
882 	crom_search_key(cc, CSRKEY_VENDOR);
883 	crom_next(cc);
884 	crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor));
885 	/* skip to the unit directory for SBP-2 */
886 	while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) {
887 		if (reg->val == CSRVAL_T10SBP2)
888 			break;
889 		crom_next(cc);
890 	}
891 	/* get firmware revision */
892 	reg = crom_search_key(cc, CSRKEY_FIRM_VER);
893 	if (reg != NULL)
894 		snprintf(sdev->revision, sizeof(sdev->revision), "%06x",
895 		    reg->val);
896 	/* get product string */
897 	crom_search_key(cc, CSRKEY_MODEL);
898 	crom_next(cc);
899 	crom_parse_text(cc, sdev->product, sizeof(sdev->product));
900 }
901 
902 static void
903 sbp_login_callout(void *arg)
904 {
905 	struct sbp_dev *sdev = (struct sbp_dev *)arg;
906 
907 	sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL);
908 }
909 
910 static void
911 sbp_login(struct sbp_dev *sdev)
912 {
913 	struct sbp_softc *sc = sdev->target->sbp;
914 	struct timeval delta;
915 	struct timeval t;
916 	int ticks = 0;
917 
918 	microtime(&delta);
919 	timersub(&delta, &sc->sc_last_busreset, &delta);
920 	t.tv_sec = login_delay / 1000;
921 	t.tv_usec = (login_delay % 1000) * 1000;
922 	timersub(&t, &delta, &t);
923 	if (t.tv_sec >= 0 && t.tv_usec > 0)
924 		ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000;
925 SBP_DEBUG(0)
926 	printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__,
927 	    (long long)t.tv_sec, (long)t.tv_usec, ticks);
928 END_DEBUG
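	/* defer the login ORB until login_delay ms have elapsed since the last bus reset */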
929 	callout_schedule(&sdev->login_callout, ticks);
930 }
931 
932 static void
933 sbp_probe_target(void *arg)
934 {
935 	struct sbp_target *target = (struct sbp_target *)arg;
936 	struct sbp_dev *sdev;
937 	int i;
938 
939 SBP_DEBUG(1)
940 	printf("%s %d\n", __func__, target->target_id);
941 END_DEBUG
942 
943 	sbp_alloc_lun(target);
944 
945 	/* XXX untimeout mgm_ocb and dequeue */
946 	for (i = 0; i < target->num_lun; i++) {
947 		sdev = target->luns[i];
948 		if (sdev == NULL || sdev->status == SBP_DEV_DEAD)
949 			continue;
950 
951 		if (sdev->periph != NULL) {
952 			scsipi_periph_freeze(sdev->periph, 1);
953 			sdev->freeze++;
954 		}
955 		sbp_probe_lun(sdev);
956 		sbp_show_sdev_info(sdev);
957 
958 		sbp_abort_all_ocbs(sdev, XS_RESET);
959 		switch (sdev->status) {
960 		case SBP_DEV_RESET:
961 			/* new or revived target */
962 			if (auto_login)
963 				sbp_login(sdev);
964 			break;
965 		case SBP_DEV_TOATTACH:
966 		case SBP_DEV_PROBE:
967 		case SBP_DEV_ATTACHED:
968 		case SBP_DEV_RETRY:
969 		default:
970 			sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL);
971 			break;
972 		}
973 	}
974 }
975 
976 static void
977 sbp_post_busreset(void *arg)
978 {
979 	struct sbp_softc *sc = (struct sbp_softc *)arg;
980 	struct sbp_target *target = &sc->sc_target;
981 	struct fw_device *fwdev = target->fwdev;
982 	int alive;
983 
984 	alive = SBP_FWDEV_ALIVE(fwdev);
985 SBP_DEBUG(0)
986 	printf("sbp_post_busreset\n");
987 	if (!alive)
988 		printf("not alive\n");
989 END_DEBUG
990 	microtime(&sc->sc_last_busreset);
991 
992 	if (!alive)
993 		return;
994 
995 	scsipi_channel_freeze(&sc->sc_channel, 1);
996 }
997 
998 static void
999 sbp_post_explore(void *arg)
1000 {
1001 	struct sbp_softc *sc = (struct sbp_softc *)arg;
1002 	struct sbp_target *target = &sc->sc_target;
1003 	struct fw_device *fwdev = target->fwdev;
1004 	int alive;
1005 
1006 	alive = SBP_FWDEV_ALIVE(fwdev);
1007 SBP_DEBUG(0)
1008 	printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold);
1009 	if (!alive)
1010 		printf("not alive\n");
1011 END_DEBUG
1012 	if (!alive)
1013 		return;
1014 
1015 	if (!firewire_phydma_enable)
1016 		return;
1017 
1018 	if (sbp_cold > 0)
1019 		sbp_cold--;
1020 
1021 SBP_DEBUG(0)
1022 	printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo);
1023 END_DEBUG
1024 	sbp_probe_target((void *)target);
1025 	if (target->num_lun == 0)
1026 		sbp_free_target(target);
1027 
1028 	scsipi_channel_thaw(&sc->sc_channel, 1);
1029 }
1030 
1031 #if NEED_RESPONSE
1032 static void
1033 sbp_loginres_callback(struct fw_xfer *xfer)
1034 {
1035 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1036 	struct sbp_softc *sc = sdev->target->sbp;
1037 
1038 SBP_DEBUG(1)
1039 	printf("sbp_loginres_callback\n");
1040 END_DEBUG
1041 	/* recycle */
1042 	mutex_enter(&sc->sc_fwb.fwb_mtx);
1043 	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
1044 	mutex_exit(&sc->sc_fwb.fwb_mtx);
1045 	return;
1046 }
1047 #endif
1048 
1049 static inline void
1050 sbp_xfer_free(struct fw_xfer *xfer)
1051 {
1052 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1053 	struct sbp_softc *sc = sdev->target->sbp;
1054 
1055 	fw_xfer_unload(xfer);
1056 	mutex_enter(&sc->sc_mtx);
1057 	STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link);
1058 	mutex_exit(&sc->sc_mtx);
1059 }
1060 
1061 static void
1062 sbp_reset_start_callback(struct fw_xfer *xfer)
1063 {
1064 	struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc;
1065 	struct sbp_target *target = sdev->target;
1066 	int i;
1067 
1068 	if (xfer->resp != 0)
1069 		aprint_error("%s: sbp_reset_start failed: resp=%d\n",
1070 		    sdev->bustgtlun, xfer->resp);
1071 
1072 	for (i = 0; i < target->num_lun; i++) {
1073 		tsdev = target->luns[i];
1074 		if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN)
1075 			sbp_login(tsdev);
1076 	}
1077 }
1078 
1079 static void
1080 sbp_reset_start(struct sbp_dev *sdev)
1081 {
1082 	struct fw_xfer *xfer;
1083 	struct fw_pkt *fp;
1084 
1085 SBP_DEBUG(0)
1086 	printf("%s: sbp_reset_start: %s\n",
1087 	    device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1088 END_DEBUG
1089 
1090 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1091 	if (xfer == NULL)
1092 		return;
1093 	xfer->hand = sbp_reset_start_callback;
1094 	fp = &xfer->send.hdr;
1095 	fp->mode.wreqq.dest_hi = 0xffff;
1096 	fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START;
1097 	fp->mode.wreqq.data = htonl(0xf);
1098 	if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1099 		sbp_xfer_free(xfer);
1100 }
1101 
1102 static void
1103 sbp_mgm_callback(struct fw_xfer *xfer)
1104 {
1105 	struct sbp_dev *sdev;
1106 	int resp;
1107 
1108 	sdev = (struct sbp_dev *)xfer->sc;
1109 
1110 SBP_DEBUG(1)
1111 	printf("%s: sbp_mgm_callback: %s\n",
1112 	    device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun);
1113 END_DEBUG
1114 	resp = xfer->resp;
1115 	sbp_xfer_free(xfer);
1116 	return;
1117 }
1118 
1119 static void
1120 sbp_scsipi_scan_target(void *arg)
1121 {
1122 	struct sbp_target *target = (struct sbp_target *)arg;
1123 	struct sbp_softc *sc = target->sbp;
1124 	struct sbp_dev *sdev;
1125 	struct scsipi_channel *chan = &sc->sc_channel;
1126 	struct scsibus_softc *sc_bus = device_private(sc->sc_bus);
1127 	int lun, yet;
1128 
1129 	do {
1130 		mutex_enter(&sc->sc_mtx);
1131 		cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
1132 		mutex_exit(&sc->sc_mtx);
1133 		yet = 0;
1134 
1135 		for (lun = 0; lun < target->num_lun; lun++) {
1136 			sdev = target->luns[lun];
1137 			if (sdev == NULL)
1138 				continue;
1139 			if (sdev->status != SBP_DEV_PROBE) {
1140 				yet++;
1141 				continue;
1142 			}
1143 
1144 			if (sdev->periph == NULL) {
1145 				if (chan->chan_nluns < target->num_lun)
1146 					chan->chan_nluns = target->num_lun;
1147 
1148 				scsi_probe_bus(sc_bus, target->target_id,
1149 				    sdev->lun_id);
1150 				sdev->periph = scsipi_lookup_periph(chan,
1151 				    target->target_id, lun);
1152 			}
1153 			sdev->status = SBP_DEV_ATTACHED;
1154 		}
1155 	} while (yet > 0);
1156 
1157 	sc->sc_lwp = NULL;
1158 	kthread_exit(0);
1159 
1160 	/* NOTREACHED */
1161 }
1162 
1163 static inline void
1164 sbp_scan_dev(struct sbp_dev *sdev)
1165 {
1166 	struct sbp_softc *sc = sdev->target->sbp;
1167 
1168 	sdev->status = SBP_DEV_PROBE;
1169 	mutex_enter(&sc->sc_mtx);
1170 	cv_signal(&sdev->target->sbp->sc_cv);
1171 	mutex_exit(&sc->sc_mtx);
1172 }
1173 
1174 
1175 static void
1176 sbp_do_attach(struct fw_xfer *xfer)
1177 {
1178 	struct sbp_dev *sdev;
1179 	struct sbp_target *target;
1180 	struct sbp_softc *sc;
1181 
1182 	sdev = (struct sbp_dev *)xfer->sc;
1183 	target = sdev->target;
1184 	sc = target->sbp;
1185 
1186 SBP_DEBUG(0)
1187 	printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1188 	    sdev->bustgtlun);
1189 END_DEBUG
1190 	sbp_xfer_free(xfer);
1191 
1192 	sbp_scan_dev(sdev);
1193 	return;
1194 }
1195 
1196 static void
1197 sbp_agent_reset_callback(struct fw_xfer *xfer)
1198 {
1199 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1200 	struct sbp_softc *sc = sdev->target->sbp;
1201 
1202 SBP_DEBUG(1)
1203 	printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1204 	    sdev->bustgtlun);
1205 END_DEBUG
1206 	if (xfer->resp != 0)
1207 		aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__,
1208 		    sdev->bustgtlun, xfer->resp);
1209 
1210 	sbp_xfer_free(xfer);
1211 	if (sdev->periph != NULL) {
1212 		scsipi_periph_thaw(sdev->periph, sdev->freeze);
1213 		scsipi_channel_thaw(&sc->sc_channel, 0);
1214 		sdev->freeze = 0;
1215 	}
1216 }
1217 
1218 static void
1219 sbp_agent_reset(struct sbp_dev *sdev)
1220 {
1221 	struct fw_xfer *xfer;
1222 	struct fw_pkt *fp;
1223 
1224 SBP_DEBUG(0)
1225 	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1226 	    __func__, sdev->bustgtlun);
1227 END_DEBUG
1228 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04);
1229 	if (xfer == NULL)
1230 		return;
1231 	if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE)
1232 		xfer->hand = sbp_agent_reset_callback;
1233 	else
1234 		xfer->hand = sbp_do_attach;
1235 	fp = &xfer->send.hdr;
1236 	fp->mode.wreqq.data = htonl(0xf);
1237 	if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1238 		sbp_xfer_free(xfer);
1239 	sbp_abort_all_ocbs(sdev, XS_RESET);
1240 }
1241 
1242 static void
1243 sbp_busy_timeout_callback(struct fw_xfer *xfer)
1244 {
1245 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1246 
1247 SBP_DEBUG(1)
1248 	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1249 	    __func__, sdev->bustgtlun);
1250 END_DEBUG
1251 	sbp_xfer_free(xfer);
1252 	sbp_agent_reset(sdev);
1253 }
1254 
1255 static void
1256 sbp_busy_timeout(struct sbp_dev *sdev)
1257 {
1258 	struct fw_pkt *fp;
1259 	struct fw_xfer *xfer;
1260 
1261 SBP_DEBUG(0)
1262 	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1263 	    __func__, sdev->bustgtlun);
1264 END_DEBUG
1265 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0);
1266 	if (xfer == NULL)
1267 		return;
1268 	xfer->hand = sbp_busy_timeout_callback;
1269 	fp = &xfer->send.hdr;
1270 	fp->mode.wreqq.dest_hi = 0xffff;
1271 	fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT;
1272 	fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf);
1273 	if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1274 		sbp_xfer_free(xfer);
1275 }
1276 
1277 static void
1278 sbp_orb_pointer_callback(struct fw_xfer *xfer)
1279 {
1280 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1281 	struct sbp_softc *sc = sdev->target->sbp;
1282 
1283 SBP_DEBUG(1)
1284 	printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1285 	    sdev->bustgtlun);
1286 END_DEBUG
1287 	if (xfer->resp != 0)
1288 		aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n",
1289 		    __func__, sdev->bustgtlun, xfer->resp);
1290 	sbp_xfer_free(xfer);
1291 	sdev->flags &= ~ORB_POINTER_ACTIVE;
1292 
1293 	if ((sdev->flags & ORB_POINTER_NEED) != 0) {
1294 		struct sbp_ocb *ocb;
1295 
1296 		sdev->flags &= ~ORB_POINTER_NEED;
1297 		ocb = STAILQ_FIRST(&sdev->ocbs);
1298 		if (ocb != NULL)
1299 			sbp_orb_pointer(sdev, ocb);
1300 	}
1301 	return;
1302 }
1303 
1304 static void
1305 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb)
1306 {
1307 	struct sbp_softc *sc = sdev->target->sbp;
1308 	struct fw_xfer *xfer;
1309 	struct fw_pkt *fp;
1310 
1311 SBP_DEBUG(1)
1312 	printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__,
1313 	    sdev->bustgtlun, (uint32_t)ocb->bus_addr);
1314 END_DEBUG
1315 
1316 	if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) {
1317 SBP_DEBUG(0)
1318 		printf("%s: orb pointer active\n", __func__);
1319 END_DEBUG
1320 		sdev->flags |= ORB_POINTER_NEED;
1321 		return;
1322 	}
1323 
1324 	sdev->flags |= ORB_POINTER_ACTIVE;
1325 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08);
1326 	if (xfer == NULL)
1327 		return;
1328 	xfer->hand = sbp_orb_pointer_callback;
1329 
1330 	fp = &xfer->send.hdr;
1331 	fp->mode.wreqb.len = 8;
1332 	fp->mode.wreqb.extcode = 0;
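	/* ORB_POINTER register: our node ID in the upper quadlet, the ORB's bus address in the lower quadlet */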
1333 	xfer->send.payload[0] =
1334 		htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
1335 	xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr);
1336 
1337 	if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1338 		sbp_xfer_free(xfer);
1339 		ocb->xs->error = XS_DRIVER_STUFFUP;
1340 		scsipi_done(ocb->xs);
1341 	}
1342 }
1343 
1344 static void
1345 sbp_doorbell_callback(struct fw_xfer *xfer)
1346 {
1347 	struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc;
1348 	struct sbp_softc *sc = sdev->target->sbp;
1349 
1350 SBP_DEBUG(1)
1351 	printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__,
1352 	    sdev->bustgtlun);
1353 END_DEBUG
1354 	if (xfer->resp != 0) {
1355 		aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n",
1356 		    __func__, xfer->resp);
1357 	}
1358 	sbp_xfer_free(xfer);
1359 	sdev->flags &= ~ORB_DOORBELL_ACTIVE;
1360 	if ((sdev->flags & ORB_DOORBELL_NEED) != 0) {
1361 		sdev->flags &= ~ORB_DOORBELL_NEED;
1362 		sbp_doorbell(sdev);
1363 	}
1364 	return;
1365 }
1366 
1367 static void
1368 sbp_doorbell(struct sbp_dev *sdev)
1369 {
1370 	struct fw_xfer *xfer;
1371 	struct fw_pkt *fp;
1372 
1373 SBP_DEBUG(1)
1374 	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1375 	    __func__, sdev->bustgtlun);
1376 END_DEBUG
1377 
1378 	if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) {
1379 		sdev->flags |= ORB_DOORBELL_NEED;
1380 		return;
1381 	}
1382 	sdev->flags |= ORB_DOORBELL_ACTIVE;
1383 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10);
1384 	if (xfer == NULL)
1385 		return;
1386 	xfer->hand = sbp_doorbell_callback;
1387 	fp = &xfer->send.hdr;
1388 	fp->mode.wreqq.data = htonl(0xf);
1389 	if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1390 		sbp_xfer_free(xfer);
1391 }
1392 
1393 static struct fw_xfer *
1394 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset)
1395 {
1396 	struct sbp_softc *sc;
1397 	struct fw_xfer *xfer;
1398 	struct fw_pkt *fp;
1399 	struct sbp_target *target;
1400 	int new = 0;
1401 
1402 	target = sdev->target;
1403 	sc = target->sbp;
1404 	mutex_enter(&sc->sc_mtx);
1405 	xfer = STAILQ_FIRST(&target->xferlist);
1406 	if (xfer == NULL) {
1407 		if (target->n_xfer > 5 /* XXX */) {
1408 			aprint_error_dev(sc->sc_fd.dev,
1409 			    "no more xfer for this target\n");
1410 			mutex_exit(&sc->sc_mtx);
1411 			return NULL;
1412 		}
1413 		xfer = fw_xfer_alloc_buf(M_SBP, 8, 0);
1414 		if (xfer == NULL) {
1415 			aprint_error_dev(sc->sc_fd.dev,
1416 			    "fw_xfer_alloc_buf failed\n");
1417 			mutex_exit(&sc->sc_mtx);
1418 			return NULL;
1419 		}
1420 		target->n_xfer++;
1421 SBP_DEBUG(0)
1422 			printf("sbp: alloc %d xfer\n", target->n_xfer);
1423 END_DEBUG
1424 		new = 1;
1425 	} else
1426 		STAILQ_REMOVE_HEAD(&target->xferlist, link);
1427 	mutex_exit(&sc->sc_mtx);
1428 
1429 	microtime(&xfer->tv);
1430 
1431 	if (new) {
1432 		xfer->recv.pay_len = 0;
1433 		xfer->send.spd = min(target->fwdev->speed, max_speed);
1434 		xfer->fc = target->sbp->sc_fd.fc;
1435 	}
1436 
1437 	if (tcode == FWTCODE_WREQB)
1438 		xfer->send.pay_len = 8;
1439 	else
1440 		xfer->send.pay_len = 0;
1441 
1442 	xfer->sc = (void *)sdev;
1443 	fp = &xfer->send.hdr;
1444 	fp->mode.wreqq.dest_hi = sdev->login->cmd_hi;
1445 	fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset;
1446 	fp->mode.wreqq.tlrt = 0;
1447 	fp->mode.wreqq.tcode = tcode;
1448 	fp->mode.wreqq.pri = 0;
1449 	fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst;
1450 
1451 	return xfer;
1452 }
1453 
1454 static void
1455 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb)
1456 {
1457 	struct fw_xfer *xfer;
1458 	struct fw_pkt *fp;
1459 	struct sbp_ocb *ocb;
1460 	struct sbp_target *target;
1461 	int nid, dv_unit;
1462 
1463 	target = sdev->target;
1464 	nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS;
1465 	dv_unit = device_unit(target->sbp->sc_fd.dev);
1466 
1467 	mutex_enter(&target->sbp->sc_mtx);
1468 	if (func == ORB_FUN_RUNQUEUE) {
1469 		ocb = STAILQ_FIRST(&target->mgm_ocb_queue);
1470 		if (target->mgm_ocb_cur != NULL || ocb == NULL) {
1471 			mutex_exit(&target->sbp->sc_mtx);
1472 			return;
1473 		}
1474 		STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb);
1475 		mutex_exit(&target->sbp->sc_mtx);
1476 		goto start;
1477 	}
1478 	if ((ocb = sbp_get_ocb(sdev)) == NULL) {
1479 		mutex_exit(&target->sbp->sc_mtx);
1480 		/* XXX */
1481 		return;
1482 	}
1483 	mutex_exit(&target->sbp->sc_mtx);
1484 	ocb->flags = OCB_ACT_MGM;
1485 	ocb->sdev = sdev;
1486 
1487 	memset(ocb->orb, 0, sizeof(ocb->orb));
1488 	ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI);
1489 	ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id));
1490 
1491 SBP_DEBUG(0)
1492 	printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1493 	    __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]);
1494 END_DEBUG
1495 	switch (func) {
1496 	case ORB_FUN_LGI:
1497 	{
1498 		const off_t sbp_login_off =
1499 		    sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1500 
1501 		ocb->orb[0] = ocb->orb[1] = 0; /* password */
1502 		ocb->orb[2] = htonl(nid << 16);
1503 		ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off);
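		/* orb[2-3]: 1394 address where the target writes the login response */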
1504 		ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id);
1505 		if (ex_login)
1506 			ocb->orb[4] |= htonl(ORB_EXV);
1507 		ocb->orb[5] = htonl(SBP_LOGIN_SIZE);
1508 		bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map,
1509 		    sbp_login_off, SBP_LOGIN_SIZE, BUS_DMASYNC_PREREAD);
1510 		break;
1511 	}
1512 
1513 	case ORB_FUN_ATA:
1514 		ocb->orb[0] = htonl((0 << 16) | 0);
1515 		ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff);
1516 		/* fall through */
1517 	case ORB_FUN_RCN:
1518 	case ORB_FUN_LGO:
1519 	case ORB_FUN_LUR:
1520 	case ORB_FUN_RST:
1521 	case ORB_FUN_ATS:
1522 		ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id);
1523 		break;
1524 	}
1525 
1526 	if (target->mgm_ocb_cur != NULL) {
1527 		/* there is a standing ORB */
1528 		mutex_enter(&target->sbp->sc_mtx);
1529 		STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb);
1530 		mutex_exit(&target->sbp->sc_mtx);
1531 		return;
1532 	}
1533 start:
1534 	target->mgm_ocb_cur = ocb;
1535 
1536 	callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb);
1537 	xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0);
1538 	if (xfer == NULL)
1539 		return;
1540 	xfer->hand = sbp_mgm_callback;
1541 
1542 	fp = &xfer->send.hdr;
1543 	fp->mode.wreqb.dest_hi = sdev->target->mgm_hi;
1544 	fp->mode.wreqb.dest_lo = sdev->target->mgm_lo;
1545 	fp->mode.wreqb.len = 8;
1546 	fp->mode.wreqb.extcode = 0;
1547 	xfer->send.payload[0] = htonl(nid << 16);
1548 	xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff);
1549 
1550 	/* cache writeback & invalidate (required for the ORB_FUN_LGI function) */
1551 	/* XXX when aborting an ocb, should we also do a POST sync? */
1552 	SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
1553 	if (fw_asyreq(xfer->fc, -1, xfer) != 0)
1554 		sbp_xfer_free(xfer);
1555 }
1556 
1557 static void
1558 sbp_print_scsi_cmd(struct sbp_ocb *ocb)
1559 {
1560 	struct scsipi_xfer *xs = ocb->xs;
1561 
1562 	printf("%s:%d:%d:"
1563 		" cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
1564 		" flags: 0x%02x, %db cmd/%db data\n",
1565 		device_xname(ocb->sdev->target->sbp->sc_fd.dev),
1566 		xs->xs_periph->periph_target,
1567 		xs->xs_periph->periph_lun,
1568 		xs->cmd->opcode,
1569 		xs->cmd->bytes[0], xs->cmd->bytes[1],
1570 		xs->cmd->bytes[2], xs->cmd->bytes[3],
1571 		xs->cmd->bytes[4], xs->cmd->bytes[5],
1572 		xs->cmd->bytes[6], xs->cmd->bytes[7],
1573 		xs->cmd->bytes[8],
1574 		xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
1575 		xs->cmdlen, xs->datalen);
1576 }
1577 
1578 static void
1579 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb)
1580 {
1581 	struct sbp_cmd_status *sbp_cmd_status;
1582 	struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense;
1583 
1584 	sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data;
1585 
1586 SBP_DEBUG(0)
1587 	sbp_print_scsi_cmd(ocb);
1588 	/* XXX need to decode status */
1589 	printf("%s:"
1590 	    " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n",
1591 	    ocb->sdev->bustgtlun,
1592 	    sbp_cmd_status->status,
1593 	    sbp_cmd_status->sfmt,
1594 	    sbp_cmd_status->valid,
1595 	    sbp_cmd_status->s_key,
1596 	    sbp_cmd_status->s_code,
1597 	    sbp_cmd_status->s_qlfr,
1598 	    sbp_status->len);
1599 END_DEBUG
1600 
1601 	switch (sbp_cmd_status->status) {
1602 	case SCSI_CHECK:
1603 	case SCSI_BUSY:
1604 	case SCSI_TERMINATED:
1605 		if (sbp_cmd_status->sfmt == SBP_SFMT_CURR)
1606 			sense->response_code = SSD_RCODE_CURRENT;
1607 		else
1608 			sense->response_code = SSD_RCODE_DEFERRED;
1609 		if (sbp_cmd_status->valid)
1610 			sense->response_code |= SSD_RCODE_VALID;
1611 		sense->flags = sbp_cmd_status->s_key;
1612 		if (sbp_cmd_status->mark)
1613 			sense->flags |= SSD_FILEMARK;
1614 		if (sbp_cmd_status->eom)
1615 			sense->flags |= SSD_EOM;
1616 		if (sbp_cmd_status->ill_len)
1617 			sense->flags |= SSD_ILI;
1618 
1619 		memcpy(sense->info, &sbp_cmd_status->info, 4);
1620 
1621 		if (sbp_status->len <= 1)
1622 			/* XXX not a SCSI status; shouldn't happen */
1623 			sense->extra_len = 0;
1624 		else if (sbp_status->len <= 4)
1625 			/* add_sense_code(_qual), info, cmd_spec_info */
1626 			sense->extra_len = 6;
1627 		else
1628 			/* fru, sense_key_spec */
1629 			sense->extra_len = 10;
1630 
1631 		memcpy(sense->csi, &sbp_cmd_status->cdb, 4);
1632 
1633 		sense->asc = sbp_cmd_status->s_code;
1634 		sense->ascq = sbp_cmd_status->s_qlfr;
1635 		sense->fru = sbp_cmd_status->fru;
1636 
1637 		memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3);
1638 		ocb->xs->error = XS_SENSE;
1639 		ocb->xs->xs_status = sbp_cmd_status->status;
1640 /*
1641 {
1642 		uint8_t j, *tmp;
1643 		tmp = sense;
1644 		for (j = 0; j < 32; j += 8)
1645 			aprint_normal(
1646 			    "sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
1647 			    tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
1648 			    tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);
1649 
1650 }
1651 */
1652 		break;
1653 	default:
1654 		aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev,
1655 		    "%s:%s: unknown scsi status 0x%x\n",
1656 		    __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status);
1657 	}
1658 }
1659 
1660 static void
1661 sbp_fix_inq_data(struct sbp_ocb *ocb)
1662 {
1663 	struct scsipi_xfer *xs = ocb->xs;
1664 	struct sbp_dev *sdev;
1665 	struct scsipi_inquiry_data *inq =
1666 	    (struct scsipi_inquiry_data *)xs->data;
1667 
1668 	sdev = ocb->sdev;
1669 
1670 #if 0
1671 /*
1672  * NetBSD is assuming always 0 for EVPD-bit and 'Page Code'.
1673  * NetBSD always assumes 0 for the EVPD bit and 'Page Code'.
1674 #define SI_EVPD		0x01
1675 	if (xs->cmd->bytes[0] & SI_EVPD)
1676 		return;
1677 #endif
1678 SBP_DEBUG(1)
1679 	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
1680 	    __func__, sdev->bustgtlun);
1681 END_DEBUG
1682 	switch (inq->device & SID_TYPE) {
1683 	case T_DIRECT:
1684 #if 0
1685 		/*
1686 		 * XXX Convert Direct Access device to RBC.
1687 		 * I've never seen FireWire DA devices which support READ_6.
1688 		 */
1689 		if ((inq->device & SID_TYPE) == T_DIRECT)
1690 			inq->device |= T_SIMPLE_DIRECT; /* T_DIRECT == 0 */
1691 #endif
1692 		/* FALLTHROUGH */
1693 
1694 	case T_SIMPLE_DIRECT:
1695 		/*
1696 		 * Override vendor/product/revision information.
1697 		 * Some devices sometimes return strange strings.
1698 		 */
1699 #if 1
1700 		memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor));
1701 		memcpy(inq->product, sdev->product, sizeof(inq->product));
1702 		memcpy(inq->revision + 2, sdev->revision,
1703 		    sizeof(inq->revision));
1704 #endif
1705 		break;
1706 	}
1707 	/*
1708 	 * Force tagged queuing on or off.
1709 	 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
1710 	 */
1711 	if (sbp_tags > 0)
1712 		inq->flags3 |= SID_CmdQue;
1713 	else if (sbp_tags < 0)
1714 		inq->flags3 &= ~SID_CmdQue;
1715 
1716 }
1717 
1718 static void
1719 sbp_recv(struct fw_xfer *xfer)
1720 {
1721 	struct fw_pkt *rfp;
1722 #if NEED_RESPONSE
1723 	struct fw_pkt *sfp;
1724 #endif
1725 	struct sbp_softc *sc;
1726 	struct sbp_dev *sdev;
1727 	struct sbp_ocb *ocb;
1728 	struct sbp_login_res *login_res = NULL;
1729 	struct sbp_status *sbp_status;
1730 	struct sbp_target *target;
1731 	int	orb_fun, status_valid0, status_valid, l, reset_agent = 0;
1732 	uint32_t addr;
1733 /*
1734 	uint32_t *ld;
1735 	ld = xfer->recv.buf;
1736 printf("sbp %x %d %d %08x %08x %08x %08x\n",
1737 			xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
1738 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
1739 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
1740 */
1741 
1742 	sc = (struct sbp_softc *)xfer->sc;
1743 	if (xfer->resp != 0) {
1744 		aprint_error_dev(sc->sc_fd.dev,
1745 		    "sbp_recv: xfer->resp = %d\n", xfer->resp);
1746 		goto done0;
1747 	}
1748 	if (xfer->recv.payload == NULL) {
1749 		aprint_error_dev(sc->sc_fd.dev,
1750 		    "sbp_recv: xfer->recv.payload == NULL\n");
1751 		goto done0;
1752 	}
1753 	rfp = &xfer->recv.hdr;
1754 	if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) {
1755 		aprint_error_dev(sc->sc_fd.dev,
1756 		    "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
1757 		goto done0;
1758 	}
1759 	sbp_status = (struct sbp_status *)xfer->recv.payload;
1760 	addr = rfp->mode.wreqb.dest_lo;
1761 SBP_DEBUG(2)
1762 	printf("received address 0x%x\n", addr);
1763 END_DEBUG
1764 	target = &sc->sc_target;
1765 	l = SBP_ADDR2LUN(addr);
1766 	if (l >= target->num_lun || target->luns[l] == NULL) {
1767 		aprint_error_dev(sc->sc_fd.dev,
1768 			"sbp_recv: invalid lun %d (target=%d)\n",
1769 			l, target->target_id);
1770 		goto done0;
1771 	}
1772 	sdev = target->luns[l];
1773 
1774 	ocb = NULL;
1775 	switch (sbp_status->src) {
1776 	case SRC_NEXT_EXISTS:
1777 	case SRC_NO_NEXT:
1778 		/* check mgm_ocb_cur first */
1779 		ocb = target->mgm_ocb_cur;
1780 		if (ocb != NULL)
1781 			if (OCB_MATCH(ocb, sbp_status)) {
1782 				callout_stop(&target->mgm_ocb_timeout);
1783 				target->mgm_ocb_cur = NULL;
1784 				break;
1785 			}
1786 		ocb = sbp_dequeue_ocb(sdev, sbp_status);
1787 		if (ocb == NULL)
1788 			aprint_error_dev(sc->sc_fd.dev,
1789 			    "%s:%s: No ocb(%x) on the queue\n", __func__,
1790 			    sdev->bustgtlun, ntohl(sbp_status->orb_lo));
1791 		break;
1792 	case SRC_UNSOL:
1793 		/* unsolicited status */
1794 		aprint_error_dev(sc->sc_fd.dev,
1795 		    "%s:%s: unsolicited status received\n",
1796 		    __func__, sdev->bustgtlun);
1797 		break;
1798 	default:
1799 		aprint_error_dev(sc->sc_fd.dev,
1800 		    "%s:%s: unknown sbp_status->src\n",
1801 		    __func__, sdev->bustgtlun);
1802 	}
1803 
1804 	status_valid0 = (sbp_status->src < 2
1805 			&& sbp_status->resp == SBP_REQ_CMP
1806 			&& sbp_status->dead == 0);
1807 	status_valid = (status_valid0 && sbp_status->status == 0);
1808 
1809 	if (!status_valid0 || debug > 2) {
1810 		int status;
1811 SBP_DEBUG(0)
1812 		printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x"
1813 		    " len:%x stat:%x orb:%x%08x\n",
1814 		    device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
1815 		    sbp_status->src, sbp_status->resp, sbp_status->dead,
1816 		    sbp_status->len, sbp_status->status,
1817 		    ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
1818 END_DEBUG
1819 		printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun);
1820 		status = sbp_status->status;
1821 		switch (sbp_status->resp) {
1822 		case SBP_REQ_CMP:
1823 			if (status > MAX_ORB_STATUS0)
1824 				printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
1825 			else
1826 				printf("%s\n", orb_status0[status]);
1827 			break;
1828 		case SBP_TRANS_FAIL:
1829 			printf("Obj: %s, Error: %s\n",
1830 			    orb_status1_object[(status>>6) & 3],
1831 			    orb_status1_serial_bus_error[status & 0xf]);
1832 			break;
1833 		case SBP_ILLE_REQ:
1834 			printf("Illegal request\n");
1835 			break;
1836 		case SBP_VEND_DEP:
1837 			printf("Vendor dependent\n");
1838 			break;
1839 		default:
1840 			printf("unknown response code %d\n", sbp_status->resp);
1841 		}
1842 	}
1843 
1844 	/* we have to reset the fetch agent if it's dead */
1845 	if (sbp_status->dead) {
1846 		if (sdev->periph != NULL) {
1847 			scsipi_periph_freeze(sdev->periph, 1);
1848 			sdev->freeze++;
1849 		}
1850 		reset_agent = 1;
1851 	}
1852 
1853 	if (ocb == NULL)
1854 		goto done;
1855 
1856 	switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) {
1857 	case ORB_FMT_NOP:
1858 		break;
1859 	case ORB_FMT_VED:
1860 		break;
1861 	case ORB_FMT_STD:
1862 		switch (ocb->flags) {
1863 		case OCB_ACT_MGM:
1864 			orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
1865 			reset_agent = 0;
1866 			switch (orb_fun) {
1867 			case ORB_FUN_LGI:
1868 			{
1869 				const struct fwdma_alloc *dma = &sdev->dma;
1870 				const off_t sbp_login_off =
1871 				    sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;
1872 
1873 				bus_dmamap_sync(dma->dma_tag, dma->dma_map,
1874 				    sbp_login_off, SBP_LOGIN_SIZE,
1875 				    BUS_DMASYNC_POSTREAD);
1876 				login_res = sdev->login;
1877 				login_res->len = ntohs(login_res->len);
1878 				login_res->id = ntohs(login_res->id);
1879 				login_res->cmd_hi = ntohs(login_res->cmd_hi);
1880 				login_res->cmd_lo = ntohl(login_res->cmd_lo);
1881 				if (status_valid) {
1882 SBP_DEBUG(0)
1883 					printf("%s:%s:%s: login:"
1884 					    " len %d, ID %d, cmd %08x%08x,"
1885 					    " recon_hold %d\n",
1886 					    device_xname(sc->sc_fd.dev),
1887 					    __func__, sdev->bustgtlun,
1888 					    login_res->len, login_res->id,
1889 					    login_res->cmd_hi,
1890 					    login_res->cmd_lo,
1891 					    ntohs(login_res->recon_hold));
1892 END_DEBUG
1893 					sbp_busy_timeout(sdev);
1894 				} else {
1895 					/* did we forget to log out? */
1896 					aprint_error_dev(sc->sc_fd.dev,
1897 					    "%s:%s: login failed\n",
1898 					    __func__, sdev->bustgtlun);
1899 					sdev->status = SBP_DEV_RESET;
1900 				}
1901 				break;
1902 			}
1903 			case ORB_FUN_RCN:
1904 				login_res = sdev->login;
1905 				if (status_valid) {
1906 SBP_DEBUG(0)
1907 					printf("%s:%s:%s: reconnect:"
1908 					    " len %d, ID %d, cmd %08x%08x\n",
1909 					    device_xname(sc->sc_fd.dev),
1910 					    __func__, sdev->bustgtlun,
1911 					    login_res->len, login_res->id,
1912 					    login_res->cmd_hi,
1913 					    login_res->cmd_lo);
1914 END_DEBUG
1915 					sbp_agent_reset(sdev);
1916 				} else {
1917 					/* reconnection hold time exceeded? */
1918 SBP_DEBUG(0)
1919 					aprint_error_dev(sc->sc_fd.dev,
1920 					    "%s:%s: reconnect failed\n",
1921 					    __func__, sdev->bustgtlun);
1922 END_DEBUG
1923 					sbp_login(sdev);
1924 				}
1925 				break;
1926 			case ORB_FUN_LGO:
1927 				sdev->status = SBP_DEV_RESET;
1928 				break;
1929 			case ORB_FUN_RST:
1930 				sbp_busy_timeout(sdev);
1931 				break;
1932 			case ORB_FUN_LUR:
1933 			case ORB_FUN_ATA:
1934 			case ORB_FUN_ATS:
1935 				sbp_agent_reset(sdev);
1936 				break;
1937 			default:
1938 				aprint_error_dev(sc->sc_fd.dev,
1939 				    "%s:%s: unknown function %d\n",
1940 				    __func__, sdev->bustgtlun, orb_fun);
1941 				break;
1942 			}
1943 			sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
1944 			break;
1945 		case OCB_ACT_CMD:
1946 			sdev->timeout = 0;
1947 			if (ocb->xs != NULL) {
1948 				struct scsipi_xfer *xs = ocb->xs;
1949 
1950 				if (sbp_status->len > 1)
1951 					sbp_scsi_status(sbp_status, ocb);
1952 				else
1953 					if (sbp_status->resp != SBP_REQ_CMP)
1954 						xs->error = XS_DRIVER_STUFFUP;
1955 					else {
1956 						xs->error = XS_NOERROR;
1957 						xs->resid = 0;
1958 					}
1959 				/* fix up inq data */
1960 				if (xs->cmd->opcode == INQUIRY)
1961 					sbp_fix_inq_data(ocb);
1962 				scsipi_done(xs);
1963 			}
1964 			break;
1965 		default:
1966 			break;
1967 		}
1968 	}
1969 
1970 	if (!use_doorbell)
1971 		sbp_free_ocb(sdev, ocb);
1972 done:
1973 	if (reset_agent)
1974 		sbp_agent_reset(sdev);
1975 
1976 done0:
1977 	xfer->recv.pay_len = SBP_RECV_LEN;
1978 /* The received packet is usually small enough to be stored within
1979  * the buffer.  In that case, the controller returns ack_complete and
1980  * no response is necessary.
1981  *
1982  * XXX fwohci.c and firewire.c should pass the event_code (such as
1983  * ack_complete or ack_pending) up to this driver.
1984  */
1985 #if NEED_RESPONSE
1986 	xfer->send.off = 0;
1987 	sfp = (struct fw_pkt *)xfer->send.buf;
1988 	sfp->mode.wres.dst = rfp->mode.wreqb.src;
1989 	xfer->dst = sfp->mode.wres.dst;
1990 	xfer->spd = min(sdev->target->fwdev->speed, max_speed);
1991 	xfer->hand = sbp_loginres_callback;
1992 
1993 	sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
1994 	sfp->mode.wres.tcode = FWTCODE_WRES;
1995 	sfp->mode.wres.rtcode = 0;
1996 	sfp->mode.wres.pri = 0;
1997 
1998 	if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
1999 		aprint_error_dev(sc->sc_fd.dev, "sending write response failed\n");
2000 		mutex_enter(&sc->sc_fwb.fwb_mtx);
2001 		STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
2002 		mutex_exit(&sc->sc_fwb.fwb_mtx);
2003 	}
2004 #else
2005 	/* recycle */
2006 	mutex_enter(&sc->sc_fwb.fwb_mtx);
2007 	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
2008 	mutex_exit(&sc->sc_fwb.fwb_mtx);
2009 #endif
2010 
2011 	return;
2012 
2013 }
2014 
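/*
 * Send a LOGOUT management ORB to every LUN that is attached or in the
 * process of attaching, cancelling any pending login callouts first.
 */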
2015 static int
2016 sbp_logout_all(struct sbp_softc *sbp)
2017 {
2018 	struct sbp_target *target;
2019 	struct sbp_dev *sdev;
2020 	int i;
2021 
2022 SBP_DEBUG(0)
2023 	printf("sbp_logout_all\n");
2024 END_DEBUG
2025 	target = &sbp->sc_target;
2026 	if (target->luns != NULL)
2027 		for (i = 0; i < target->num_lun; i++) {
2028 			sdev = target->luns[i];
2029 			if (sdev == NULL)
2030 				continue;
2031 			callout_stop(&sdev->login_callout);
2032 			if (sdev->status >= SBP_DEV_TOATTACH &&
2033 			    sdev->status <= SBP_DEV_ATTACHED)
2034 				sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
2035 		}
2036 
2037 	return 0;
2038 }
2039 
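/*
 * Release the resources held by one logical unit: the per-OCB DMA maps,
 * the ORB/login DMA area and the sbp_dev structure itself.
 */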
2040 static void
2041 sbp_free_sdev(struct sbp_dev *sdev)
2042 {
2043 	struct sbp_softc *sc;
2044 	int i;
2045 
2046 	if (sdev == NULL)
2047 		return;
	sc = sdev->target->sbp;
2048 	for (i = 0; i < SBP_QUEUE_LEN; i++)
2049 		bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap);
2050 	fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr);
2051 	free(sdev, M_SBP);
2052 	sdev = NULL;
2053 }
2054 
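/*
 * Tear down a target: stop the management ORB timeout, free every LUN,
 * release the queued xfers and drop the LUN array and fwdev reference.
 */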
2055 static void
2056 sbp_free_target(struct sbp_target *target)
2057 {
2058 	struct fw_xfer *xfer, *next;
2059 	int i;
2060 
2061 	if (target->luns == NULL)
2062 		return;
2063 	callout_stop(&target->mgm_ocb_timeout);
2064 	for (i = 0; i < target->num_lun; i++)
2065 		sbp_free_sdev(target->luns[i]);
2066 
2067 	for (xfer = STAILQ_FIRST(&target->xferlist);
2068 	    xfer != NULL; xfer = next) {
2069 		next = STAILQ_NEXT(xfer, link);
2070 		fw_xfer_free_buf(xfer);
2071 	}
2072 	STAILQ_INIT(&target->xferlist);
2073 	free(target->luns, M_SBP);
2074 	target->num_lun = 0;
2075 	target->luns = NULL;
2076 	target->fwdev = NULL;
2077 }
2078 
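/*
 * Detach the scsipi periph bound to a logical unit (unless the unit is
 * dead or being reset), thawing any freeze counts first, and abort all
 * of its outstanding OCBs.
 */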
2079 static void
2080 sbp_scsipi_detach_sdev(struct sbp_dev *sdev)
2081 {
2082 	struct sbp_target *target;
2083 	struct sbp_softc *sbp;
2084 
2085 	if (sdev == NULL)
2086 		return;
2087 
2088 	target = sdev->target;
2089 	if (target == NULL)
2090 		return;
2091 
2092 	sbp = target->sbp;
2093 
2094 	if (sdev->status == SBP_DEV_DEAD)
2095 		return;
2096 	if (sdev->status == SBP_DEV_RESET)
2097 		return;
2098 	if (sdev->periph != NULL) {
2099 		scsipi_periph_thaw(sdev->periph, sdev->freeze);
2100 		scsipi_channel_thaw(&sbp->sc_channel, 0);	/* XXXX */
2101 		sdev->freeze = 0;
2102 		if (scsipi_target_detach(&sbp->sc_channel,
2103 		    target->target_id, sdev->lun_id, DETACH_FORCE) != 0) {
2104 			aprint_error_dev(sbp->sc_fd.dev, "detach failed\n");
2105 		}
2106 		sdev->periph = NULL;
2107 	}
2108 	sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP);
2109 }
2110 
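/*
 * Detach every logical unit of the target and then detach the scsibus
 * instance attached below this controller.
 */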
2111 static void
2112 sbp_scsipi_detach_target(struct sbp_target *target)
2113 {
2114 	struct sbp_softc *sbp = target->sbp;
2115 	int i;
2116 
2117 	if (target->luns != NULL) {
2118 SBP_DEBUG(0)
2119 		printf("sbp_detach_target %d\n", target->target_id);
2120 END_DEBUG
2121 		for (i = 0; i < target->num_lun; i++)
2122 			sbp_scsipi_detach_sdev(target->luns[i]);
2123 		if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0)
2124 			aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n",
2125 			    target->target_id);
2126 		sbp->sc_bus = NULL;
2127 	}
2128 }
2129 
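/*
 * Reset the whole target: freeze and abort every live LUN, then either
 * send a TARGET RESET management ORB (method 1) or redo the login
 * sequence via sbp_reset_start() (method 2).
 */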
2130 static void
2131 sbp_target_reset(struct sbp_dev *sdev, int method)
2132 {
2133 	struct sbp_softc *sc;
2134 	struct sbp_target *target = sdev->target;
2135 	struct sbp_dev *tsdev;
2136 	int i;
2137 
2138 	sc = target->sbp;
2139 	for (i = 0; i < target->num_lun; i++) {
2140 		tsdev = target->luns[i];
2141 		if (tsdev == NULL)
2142 			continue;
2143 		if (tsdev->status == SBP_DEV_DEAD)
2144 			continue;
2145 		if (tsdev->status == SBP_DEV_RESET)
2146 			continue;
2147 		if (tsdev->periph != NULL) {
2148 			scsipi_periph_freeze(tsdev->periph, 1);
2149 			tsdev->freeze++;
2150 		}
2151 		sbp_abort_all_ocbs(tsdev, XS_TIMEOUT);
2152 		if (method == 2)
2153 			tsdev->status = SBP_DEV_LOGIN;
2154 	}
2155 	switch (method) {
2156 	case 1:
2157 		aprint_error("target reset\n");
2158 		sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2159 		break;
2160 	case 2:
2161 		aprint_error("reset start\n");
2162 		sbp_reset_start(sdev);
2163 		break;
2164 	}
2165 }
2166 
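/*
 * Timeout handler for management ORBs: drop the current management OCB
 * and restart the device through sbp_reset_start().
 */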
2167 static void
2168 sbp_mgm_timeout(void *arg)
2169 {
2170 	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2171 	struct sbp_dev *sdev = ocb->sdev;
2172 	struct sbp_target *target = sdev->target;
2173 
2174 	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2175 	    "%s:%s: request timeout(mgm orb:0x%08x) ... ",
2176 	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2177 	target->mgm_ocb_cur = NULL;
2178 	sbp_free_ocb(sdev, ocb);
2179 #if 0
2180 	/* XXX */
2181 	aprint_error("run next request\n");
2182 	sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2183 #endif
2184 	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2185 	    "%s:%s: reset start\n", __func__, sdev->bustgtlun);
2186 	sbp_reset_start(sdev);
2187 }
2188 
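/*
 * Timeout handler for command ORBs.  Escalates on repeated timeouts:
 * first an agent reset, then a target reset, then a full reset start;
 * after that it gives up.
 */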
2189 static void
2190 sbp_timeout(void *arg)
2191 {
2192 	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2193 	struct sbp_dev *sdev = ocb->sdev;
2194 
2195 	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2196 	    "%s:%s: request timeout(cmd orb:0x%08x) ... ",
2197 	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2198 
2199 	sdev->timeout++;
2200 	switch (sdev->timeout) {
2201 	case 1:
2202 		aprint_error("agent reset\n");
2203 		if (sdev->periph != NULL) {
2204 			scsipi_periph_freeze(sdev->periph, 1);
2205 			sdev->freeze++;
2206 		}
2207 		sbp_abort_all_ocbs(sdev, XS_TIMEOUT);
2208 		sbp_agent_reset(sdev);
2209 		break;
2210 	case 2:
2211 	case 3:
2212 		sbp_target_reset(sdev, sdev->timeout - 1);
2213 		break;
2214 	default:
2215 		aprint_error("\n");
2216 #if 0
2217 		/* XXX give up */
2218 		sbp_scsipi_detach_target(target);
2219 		if (target->luns != NULL)
2220 			free(target->luns, M_SBP);
2221 		target->num_lun = 0;
2222 		target->luns = NULL;
2223 		target->fwdev = NULL;
2224 #endif
2225 	}
2226 }
2227 
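/*
 * Build a command block ORB for a scsipi_xfer, map its data buffer for
 * DMA and hand the ORB to the fetch agent via sbp_execute_ocb().
 */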
2228 static void
2229 sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs)
2230 {
2231 	struct sbp_target *target = &sc->sc_target;
2232 	struct sbp_dev *sdev = NULL;
2233 	struct sbp_ocb *ocb;
2234 	int speed, flag, error;
2235 	void *cdb;
2236 
2237 	/* target:lun -> sdev mapping */
2238 	if (target->fwdev != NULL &&
2239 	    xs->xs_periph->periph_lun < target->num_lun) {
2240 		sdev = target->luns[xs->xs_periph->periph_lun];
2241 		if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2242 		    sdev->status != SBP_DEV_PROBE)
2243 			sdev = NULL;
2244 	}
2245 
2246 	if (sdev == NULL) {
2247 SBP_DEBUG(1)
2248 		printf("%s:%d:%d: Invalid target (target needed)\n",
2249 			sc ? device_xname(sc->sc_fd.dev) : "???",
2250 			xs->xs_periph->periph_target,
2251 			xs->xs_periph->periph_lun);
2252 END_DEBUG
2253 
2254 		xs->error = XS_DRIVER_STUFFUP;
2255 		scsipi_done(xs);
2256 		return;
2257 	}
2258 
2259 SBP_DEBUG(2)
2260 	printf("%s:%d:%d:"
2261 		" cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
2262 		" flags: 0x%02x, %db cmd/%db data\n",
2263 		device_xname(sc->sc_fd.dev),
2264 		xs->xs_periph->periph_target,
2265 		xs->xs_periph->periph_lun,
2266 		xs->cmd->opcode,
2267 		xs->cmd->bytes[0], xs->cmd->bytes[1],
2268 		xs->cmd->bytes[2], xs->cmd->bytes[3],
2269 		xs->cmd->bytes[4], xs->cmd->bytes[5],
2270 		xs->cmd->bytes[6], xs->cmd->bytes[7],
2271 		xs->cmd->bytes[8],
2272 		xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
2273 		xs->cmdlen, xs->datalen);
2274 END_DEBUG
2275 	mutex_enter(&sc->sc_mtx);
2276 	ocb = sbp_get_ocb(sdev);
2277 	mutex_exit(&sc->sc_mtx);
2278 	if (ocb == NULL) {
2279 		xs->error = XS_REQUEUE;
2280 		if (sdev->freeze == 0) {
2281 			scsipi_periph_freeze(sdev->periph, 1);
2282 			sdev->freeze++;
2283 		}
2284 		scsipi_done(xs);
2285 		return;
2286 	}
2287 
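	/*
	 * Command block ORB layout (cf. SBP-2): orb[0..1] hold the
	 * next_ORB pointer (null bit set for now), orb[2..3] the data
	 * descriptor (node ID and bus address), orb[4] the control word
	 * (notify, speed, max payload, direction, data size) and orb[5..]
	 * the SCSI CDB.  orb[3] initially points at the OCB's indirect
	 * page table; sbp_execute_ocb() may replace it with a direct
	 * pointer when there is a single DMA segment.
	 */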
2288 	ocb->flags = OCB_ACT_CMD;
2289 	ocb->sdev = sdev;
2290 	ocb->xs = xs;
2291 	ocb->orb[0] = htonl(1U << 31);
2292 	ocb->orb[1] = 0;
2293 	ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
2294 	ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2295 	speed = min(target->fwdev->speed, max_speed);
2296 	ocb->orb[4] =
2297 	    htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7));
2298 	if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
2299 	    XS_CTL_DATA_IN) {
2300 		ocb->orb[4] |= htonl(ORB_CMD_IN);
2301 		flag = BUS_DMA_READ;
2302 	} else
2303 		flag = BUS_DMA_WRITE;
2304 
2305 	cdb = xs->cmd;
2306 	memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen);
2307 /*
2308 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2309 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2310 */
2311 	if (xs->datalen > 0) {
2312 		error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap,
2313 		    xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag);
2314 		if (error) {
2315 			aprint_error_dev(sc->sc_fd.dev,
2316 			    "DMA map load error %d\n", error);
2317 			xs->error = XS_DRIVER_STUFFUP;
2318 			scsipi_done(xs);
2319 		} else
2320 			sbp_execute_ocb(ocb, ocb->dmamap->dm_segs,
2321 			    ocb->dmamap->dm_nsegs);
2322 	} else
2323 		sbp_execute_ocb(ocb, NULL, 0);
2324 
2325 	return;
2326 }
2327 
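/*
 * Attach the data segments to an OCB (a direct pointer for a single
 * segment, an indirect page table otherwise), sync the DMA maps, queue
 * the ORB and notify the fetch agent through the doorbell or the
 * ORB_POINTER register as appropriate.
 */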
2328 static void
2329 sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg)
2330 {
2331 	struct sbp_ocb *prev;
2332 	bus_dma_segment_t *s;
2333 	int i;
2334 
2335 SBP_DEBUG(2)
2336 	printf("sbp_execute_ocb: seg %d", seg);
2337 	for (i = 0; i < seg; i++)
2338 		printf(", %jx:%ju", (uintmax_t)segments[i].ds_addr,
2339 		    (uintmax_t)segments[i].ds_len);
2340 	printf("\n");
2341 END_DEBUG
2342 
2343 	if (seg == 1) {
2344 		/* direct pointer */
2345 		s = segments;
2346 		if (s->ds_len > SBP_SEG_MAX)
2347 			panic("ds_len > SBP_SEG_MAX, fix busdma code");
2348 		ocb->orb[3] = htonl(s->ds_addr);
2349 		ocb->orb[4] |= htonl(s->ds_len);
2350 	} else if (seg > 1) {
2351 		/* page table */
2352 		for (i = 0; i < seg; i++) {
2353 			s = &segments[i];
2354 SBP_DEBUG(0)
2355 			/* XXX LSI Logic "< 16 byte" bug might be hit */
2356 			if (s->ds_len < 16)
2357 				printf("sbp_execute_ocb: warning, "
2358 				    "segment length (%ju) is less than 16 "
2359 				    "(seg=%d/%d)\n",
2360 				    (uintmax_t)s->ds_len, i + 1, seg);
2361 END_DEBUG
2362 			if (s->ds_len > SBP_SEG_MAX)
2363 				panic("ds_len > SBP_SEG_MAX, fix busdma code");
2364 			ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2365 			ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2366 		}
2367 		ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
2368 	}
2369 
2370 	if (seg > 0) {
2371 		struct sbp_softc *sc = ocb->sdev->target->sbp;
2372 		const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2373 		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
2374 
2375 		bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2376 		    0, ocb->dmamap->dm_mapsize, flag);
2377 	}
2378 	prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2379 	SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
2380 	if (use_doorbell) {
2381 		if (prev == NULL) {
2382 			if (ocb->sdev->last_ocb != NULL)
2383 				sbp_doorbell(ocb->sdev);
2384 			else
2385 				sbp_orb_pointer(ocb->sdev, ocb);
2386 		}
2387 	} else
2388 		if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2389 			ocb->sdev->flags &= ~ORB_LINK_DEAD;
2390 			sbp_orb_pointer(ocb->sdev, ocb);
2391 		}
2392 }
2393 
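/*
 * Find the OCB matching a received status block, remove it from the
 * device queue, stop its timeout and unload its data DMA map; when
 * necessary, point the fetch agent at the next ORB or ring the
 * doorbell.
 */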
2394 static struct sbp_ocb *
2395 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2396 {
2397 	struct sbp_softc *sc = sdev->target->sbp;
2398 	struct sbp_ocb *ocb;
2399 	struct sbp_ocb *next;
2400 	int order = 0;
2401 	int flags;
2402 
2403 SBP_DEBUG(1)
2404 	printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev),
2405 	    __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo),
2406 	    sbp_status->src);
2407 END_DEBUG
2408 	mutex_enter(&sc->sc_mtx);
2409 	for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
2410 		next = STAILQ_NEXT(ocb, ocb);
2411 		flags = ocb->flags;
2412 		if (OCB_MATCH(ocb, sbp_status)) {
2413 			/* found */
2414 			SBP_ORB_DMA_SYNC(sdev->dma, ocb->index,
2415 			    BUS_DMASYNC_POSTWRITE);
2416 			STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2417 			if (ocb->xs != NULL)
2418 				callout_stop(&ocb->xs->xs_callout);
2419 			if (ntohl(ocb->orb[4]) & 0xffff) {
2420 				const int flag =
2421 				    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2422 							BUS_DMASYNC_POSTREAD :
2423 							BUS_DMASYNC_POSTWRITE;
2424 
2425 				bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2426 				    0, ocb->dmamap->dm_mapsize, flag);
2427 				bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2428 
2429 			}
2430 			if (!use_doorbell) {
2431 				if (sbp_status->src == SRC_NO_NEXT) {
2432 					if (next != NULL)
2433 						sbp_orb_pointer(sdev, next);
2434 					else if (order > 0)
2435 						/*
2436 						 * Unordered execution:
2437 						 * we need to send the pointer
2438 						 * for the next ORB.
2439 						 */
2440 						sdev->flags |= ORB_LINK_DEAD;
2441 				}
2442 			}
2443 			break;
2444 		} else
2445 			order++;
2446 	}
2447 	mutex_exit(&sc->sc_mtx);
2448 
2449 	if (ocb && use_doorbell) {
2450 		/*
2451 		 * XXX this is not correct for unordered
2452 		 * execution.
2453 		 */
2454 		if (sdev->last_ocb != NULL)
2455 			sbp_free_ocb(sdev, sdev->last_ocb);
2456 		sdev->last_ocb = ocb;
2457 		if (next != NULL &&
2458 		    sbp_status->src == SRC_NO_NEXT)
2459 			sbp_doorbell(sdev);
2460 	}
2461 
2462 SBP_DEBUG(0)
2463 	if (ocb && order > 0)
2464 		printf("%s:%s:%s: unordered execution order:%d\n",
2465 		    device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
2466 		    order);
2467 END_DEBUG
2468 	return ocb;
2469 }
2470 
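/*
 * Append an OCB to the device queue, arm its timeout and link it into
 * the previous ORB (or, in doorbell mode, the last completed ORB) by
 * rewriting that ORB's next_ORB pointer.  Returns the previous queue
 * tail, or NULL if the queue was empty.
 */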
2471 static struct sbp_ocb *
2472 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2473 {
2474 	struct sbp_softc *sc = sdev->target->sbp;
2475 	struct sbp_ocb *tocb, *prev, *prev2;
2476 
2477 SBP_DEBUG(1)
2478 	printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev),
2479 	    __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2480 END_DEBUG
2481 	mutex_enter(&sc->sc_mtx);
2482 	prev = NULL;
2483 	STAILQ_FOREACH(tocb, &sdev->ocbs, ocb)
2484 		prev = tocb;
2485 	prev2 = prev;
2486 	STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2487 	mutex_exit(&sc->sc_mtx);
2488 
2489 	callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout),
2490 	    sbp_timeout, ocb);
2491 
2492 	if (use_doorbell && prev == NULL)
2493 		prev2 = sdev->last_ocb;
2494 
2495 	if (prev2 != NULL) {
2496 SBP_DEBUG(2)
2497 		printf("linking chain 0x%jx -> 0x%jx\n",
2498 		    (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2499 END_DEBUG
2500 		/*
2501 		 * Suppress compiler optimization so that orb[1] is
2502 		 * written before orb[0].
2503 		 * XXX We may need an explicit memory barrier on
2504 		 * architectures other than i386/amd64.
2505 		 */
2506 		*(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2507 		*(volatile uint32_t *)&prev2->orb[0] = 0;
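		/*
		 * A possible (untested) way to make this ordering explicit
		 * on weakly ordered CPUs would be a store barrier between
		 * the two writes, e.g.:
		 *
		 *	*(volatile uint32_t *)&prev2->orb[1] =
		 *	    htonl(ocb->bus_addr);
		 *	membar_producer();	(from <sys/atomic.h>)
		 *	*(volatile uint32_t *)&prev2->orb[0] = 0;
		 */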
2508 	}
2509 
2510 	return prev;
2511 }
2512 
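/*
 * Take an OCB from the device's free list; the caller must hold sc_mtx.
 * Returns NULL and flags an ORB shortage when the list is empty.
 */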
2513 static struct sbp_ocb *
2514 sbp_get_ocb(struct sbp_dev *sdev)
2515 {
2516 	struct sbp_softc *sc = sdev->target->sbp;
2517 	struct sbp_ocb *ocb;
2518 
2519 	KASSERT(mutex_owned(&sc->sc_mtx));
2520 
2521 	ocb = STAILQ_FIRST(&sdev->free_ocbs);
2522 	if (ocb == NULL) {
2523 		sdev->flags |= ORB_SHORTAGE;
2524 		aprint_error_dev(sc->sc_fd.dev,
2525 		    "ocb shortage!!!\n");
2526 		return NULL;
2527 	}
2528 	STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
2529 	ocb->xs = NULL;
2530 	return ocb;
2531 }
2532 
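/*
 * Return an OCB to the free list and, if we previously ran short of
 * OCBs, thaw the periph and channel that were frozen because of it.
 */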
2533 static void
2534 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2535 {
2536 	struct sbp_softc *sc = sdev->target->sbp;
2537 	int count;
2538 
2539 	ocb->flags = 0;
2540 	ocb->xs = NULL;
2541 
2542 	mutex_enter(&sc->sc_mtx);
2543 	STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2544 	mutex_exit(&sc->sc_mtx);
2545 	if (sdev->flags & ORB_SHORTAGE) {
2546 		sdev->flags &= ~ORB_SHORTAGE;
2547 		count = sdev->freeze;
2548 		sdev->freeze = 0;
2549 		if (sdev->periph)
2550 			scsipi_periph_thaw(sdev->periph, count);
2551 		scsipi_channel_thaw(&sc->sc_channel, 0);
2552 	}
2553 }
2554 
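/*
 * Abort a single OCB: unload its data DMA map if it is loaded, complete
 * the scsipi_xfer with the given error status and put the OCB back on
 * the free list.
 */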
2555 static void
2556 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2557 {
2558 	struct sbp_softc *sc;
2559 	struct sbp_dev *sdev;
2560 
2561 	sdev = ocb->sdev;
2562 	sc = sdev->target->sbp;
2563 SBP_DEBUG(0)
2564 	printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev),
2565 	    __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2566 END_DEBUG
2567 SBP_DEBUG(1)
2568 	if (ocb->xs != NULL)
2569 		sbp_print_scsi_cmd(ocb);
2570 END_DEBUG
2571 	if (ntohl(ocb->orb[4]) & 0xffff) {
2572 		const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2573 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
2574 
2575 		bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2576 		    0, ocb->dmamap->dm_mapsize, flag);
2577 		bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2578 	}
2579 	if (ocb->xs != NULL) {
2580 		callout_stop(&ocb->xs->xs_callout);
2581 		ocb->xs->error = status;
2582 		scsipi_done(ocb->xs);
2583 	}
2584 	sbp_free_ocb(sdev, ocb);
2585 }
2586 
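/*
 * Abort every OCB queued on a logical unit and release the cached
 * last_ocb used for doorbell operation.
 */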
2587 static void
2588 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2589 {
2590 	struct sbp_softc *sc = sdev->target->sbp;
2591 	struct sbp_ocb *ocb, *next;
2592 	STAILQ_HEAD(, sbp_ocb) temp;
2593 
2594 	mutex_enter(&sc->sc_mtx);
2595 	STAILQ_INIT(&temp);
2596 	STAILQ_CONCAT(&temp, &sdev->ocbs);
2597 	STAILQ_INIT(&sdev->ocbs);
2598 	mutex_exit(&sc->sc_mtx);
2599 
2600 	for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
2601 		next = STAILQ_NEXT(ocb, ocb);
2602 		sbp_abort_ocb(ocb, status);
2603 	}
2604 	if (sdev->last_ocb != NULL) {
2605 		sbp_free_ocb(sdev, sdev->last_ocb);
2606 		sdev->last_ocb = NULL;
2607 	}
2608 }
2609