1 /* $NetBSD: sbp.c,v 1.33 2010/08/14 10:39:33 cegger Exp $ */ 2 /*- 3 * Copyright (c) 2003 Hidetoshi Shimokawa 4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the acknowledgement as bellow: 17 * 18 * This product includes software developed by K. Kobayashi and H. Shimokawa 19 * 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 34 * 35 * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $ 36 * 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.33 2010/08/14 10:39:33 cegger Exp $"); 41 42 43 #include <sys/param.h> 44 #include <sys/device.h> 45 #include <sys/errno.h> 46 #include <sys/buf.h> 47 #include <sys/callout.h> 48 #include <sys/condvar.h> 49 #include <sys/kernel.h> 50 #include <sys/kthread.h> 51 #include <sys/malloc.h> 52 #include <sys/mutex.h> 53 #include <sys/proc.h> 54 #include <sys/sysctl.h> 55 56 #include <sys/bus.h> 57 58 #include <dev/scsipi/scsi_spc.h> 59 #include <dev/scsipi/scsi_all.h> 60 #include <dev/scsipi/scsipi_all.h> 61 #include <dev/scsipi/scsiconf.h> 62 #include <dev/scsipi/scsipiconf.h> 63 64 #include <dev/ieee1394/firewire.h> 65 #include <dev/ieee1394/firewirereg.h> 66 #include <dev/ieee1394/fwdma.h> 67 #include <dev/ieee1394/iec13213.h> 68 #include <dev/ieee1394/sbp.h> 69 70 #include "locators.h" 71 72 73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ 74 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) 75 76 #define SBP_NUM_TARGETS 8 /* MAX 64 */ 77 #define SBP_NUM_LUNS 64 78 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */) 79 #define SBP_DMA_SIZE PAGE_SIZE 80 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) 81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) 82 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) 83 84 /* 85 * STATUS FIFO addressing 86 * bit 87 * ----------------------- 88 * 0- 1( 2): 0 (alignment) 89 * 2- 9( 8): lun 90 * 10-31(14): unit 91 * 32-47(16): SBP_BIND_HI 92 * 48-64(16): bus_id, node_id 93 */ 94 #define SBP_BIND_HI 0x1 95 #define SBP_DEV2ADDR(u, l) \ 96 (((uint64_t)SBP_BIND_HI << 32) |\ 97 (((u) & 0x3fff) << 10) |\ 98 (((l) & 0xff) << 2)) 99 #define SBP_ADDR2UNIT(a) (((a) >> 10) & 0x3fff) 100 #define SBP_ADDR2LUN(a) (((a) >> 2) & 0xff) 101 #define SBP_INITIATOR 7 102 103 static const char *orb_fun_name[] = { 104 ORB_FUN_NAMES 105 }; 106 107 static int debug = 0; 108 static int auto_login = 1; 109 static int max_speed = -1; 110 static int sbp_cold = 1; 111 static int ex_login = 1; 112 static int login_delay = 1000; /* msec */ 113 static int scan_delay = 500; /* msec */ 114 static int use_doorbell = 0; 115 static int sbp_tags = 0; 116 117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper); 118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO); 119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO); 120 121 /* 122 * Setup sysctl(3) MIB, hw.sbp.* 123 * 124 * TBD condition CTLFLAG_PERMANENT on being a module or not 125 */ 126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup") 127 { 128 int rc, sbp_node_num; 129 const struct sysctlnode *node; 130 131 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 132 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 133 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) 134 goto err; 135 136 if ((rc = sysctl_createv(clog, 0, NULL, &node, 137 CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp", 138 SYSCTL_DESCR("sbp controls"), NULL, 0, NULL, 139 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 140 goto err; 141 sbp_node_num = node->sysctl_num; 142 143 /* sbp auto login flag */ 144 if ((rc = sysctl_createv(clog, 0, NULL, &node, 145 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 146 "auto_login", SYSCTL_DESCR("SBP perform login automatically"), 147 NULL, 0, &auto_login, 148 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 149 goto err; 150 151 /* sbp max speed */ 152 if ((rc = sysctl_createv(clog, 0, NULL, &node, 153 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 154 "max_speed", SYSCTL_DESCR("SBP transfer max speed"), 155 sysctl_sbp_verify_max_speed, 0, &max_speed, 156 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 157 goto err; 158 159 /* sbp exclusive login flag */ 160 if ((rc = sysctl_createv(clog, 0, NULL, &node, 161 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 162 "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"), 163 NULL, 0, &ex_login, 164 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 165 goto err; 166 167 /* sbp login delay */ 168 if ((rc = sysctl_createv(clog, 0, NULL, &node, 169 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 170 "login_delay", SYSCTL_DESCR("SBP login delay in msec"), 171 NULL, 0, &login_delay, 172 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 173 goto err; 174 175 /* sbp scan delay */ 176 if ((rc = sysctl_createv(clog, 0, NULL, &node, 177 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 178 "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"), 179 NULL, 0, &scan_delay, 180 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 181 goto err; 182 183 /* sbp use doorbell flag */ 184 if ((rc = sysctl_createv(clog, 0, NULL, &node, 185 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 186 "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"), 187 NULL, 0, &use_doorbell, 188 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 189 goto err; 190 191 /* sbp force tagged queuing */ 192 if ((rc = sysctl_createv(clog, 0, NULL, &node, 193 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 194 "tags", SYSCTL_DESCR("SBP tagged queuing support"), 195 sysctl_sbp_verify_tags, 0, &sbp_tags, 196 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 197 goto err; 198 199 /* sbp driver debug flag */ 200 if ((rc = sysctl_createv(clog, 0, NULL, &node, 201 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 202 "sbp_debug", SYSCTL_DESCR("SBP debug flag"), 203 NULL, 0, &debug, 204 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 205 goto err; 206 207 return; 208 209 err: 210 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 211 } 212 213 static int 214 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper) 215 { 216 int error, t; 217 struct sysctlnode node; 218 219 node = *rnode; 220 t = *(int*)rnode->sysctl_data; 221 node.sysctl_data = &t; 222 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 223 if (error || newp == NULL) 224 return error; 225 226 if (t < lower || t > upper) 227 return EINVAL; 228 229 *(int*)rnode->sysctl_data = t; 230 231 return 0; 232 } 233 234 static int 235 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS) 236 { 237 238 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400); 239 } 240 241 static int 242 sysctl_sbp_verify_tags(SYSCTLFN_ARGS) 243 { 244 245 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1); 246 } 247 248 #define NEED_RESPONSE 0 249 250 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) 251 #ifdef __sparc64__ /* iommu */ 252 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX) 253 #else 254 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) 255 #endif 256 struct sbp_ocb { 257 uint32_t orb[8]; 258 #define IND_PTR_OFFSET (sizeof(uint32_t) * 8) 259 struct ind_ptr ind_ptr[SBP_IND_MAX]; 260 struct scsipi_xfer *xs; 261 struct sbp_dev *sdev; 262 uint16_t index; 263 uint16_t flags; /* XXX should be removed */ 264 bus_dmamap_t dmamap; 265 bus_addr_t bus_addr; 266 STAILQ_ENTRY(sbp_ocb) ocb; 267 }; 268 269 #define SBP_ORB_DMA_SYNC(dma, i, op) \ 270 bus_dmamap_sync((dma).dma_tag, (dma).dma_map, \ 271 sizeof(struct sbp_ocb) * (i), \ 272 sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op)); 273 274 #define OCB_ACT_MGM 0 275 #define OCB_ACT_CMD 1 276 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) 277 278 struct sbp_dev{ 279 #define SBP_DEV_RESET 0 /* accept login */ 280 #define SBP_DEV_LOGIN 1 /* to login */ 281 #if 0 282 #define SBP_DEV_RECONN 2 /* to reconnect */ 283 #endif 284 #define SBP_DEV_TOATTACH 3 /* to attach */ 285 #define SBP_DEV_PROBE 4 /* scan lun */ 286 #define SBP_DEV_ATTACHED 5 /* in operation */ 287 #define SBP_DEV_DEAD 6 /* unavailable unit */ 288 #define SBP_DEV_RETRY 7 /* unavailable unit */ 289 uint8_t status:4, 290 timeout:4; 291 uint8_t type; 292 uint16_t lun_id; 293 uint16_t freeze; 294 #define ORB_LINK_DEAD (1 << 0) 295 #define VALID_LUN (1 << 1) 296 #define ORB_POINTER_ACTIVE (1 << 2) 297 #define ORB_POINTER_NEED (1 << 3) 298 #define ORB_DOORBELL_ACTIVE (1 << 4) 299 #define ORB_DOORBELL_NEED (1 << 5) 300 #define ORB_SHORTAGE (1 << 6) 301 uint16_t flags; 302 struct scsipi_periph *periph; 303 struct sbp_target *target; 304 struct fwdma_alloc dma; 305 struct sbp_login_res *login; 306 struct callout login_callout; 307 struct sbp_ocb *ocb; 308 STAILQ_HEAD(, sbp_ocb) ocbs; 309 STAILQ_HEAD(, sbp_ocb) free_ocbs; 310 struct sbp_ocb *last_ocb; 311 char vendor[32]; 312 char product[32]; 313 char revision[10]; 314 char bustgtlun[32]; 315 }; 316 317 struct sbp_target { 318 int target_id; 319 int num_lun; 320 struct sbp_dev **luns; 321 struct sbp_softc *sbp; 322 struct fw_device *fwdev; 323 uint32_t mgm_hi, mgm_lo; 324 struct sbp_ocb *mgm_ocb_cur; 325 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; 326 struct callout mgm_ocb_timeout; 327 STAILQ_HEAD(, fw_xfer) xferlist; 328 int n_xfer; 329 }; 330 331 struct sbp_softc { 332 struct firewire_dev_comm sc_fd; 333 struct scsipi_adapter sc_adapter; 334 struct scsipi_channel sc_channel; 335 device_t sc_bus; 336 struct lwp *sc_lwp; 337 struct sbp_target sc_target; 338 struct fw_bind sc_fwb; 339 bus_dma_tag_t sc_dmat; 340 struct timeval sc_last_busreset; 341 int sc_flags; 342 kmutex_t sc_mtx; 343 kcondvar_t sc_cv; 344 }; 345 346 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394"); 347 348 349 static int sbpmatch(device_t, cfdata_t, void *); 350 static void sbpattach(device_t, device_t, void *); 351 static int sbpdetach(device_t, int); 352 353 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 354 void *); 355 static void sbp_minphys(struct buf *); 356 357 static void sbp_show_sdev_info(struct sbp_dev *); 358 static void sbp_alloc_lun(struct sbp_target *); 359 static struct sbp_target *sbp_alloc_target(struct sbp_softc *, 360 struct fw_device *); 361 static void sbp_probe_lun(struct sbp_dev *); 362 static void sbp_login_callout(void *); 363 static void sbp_login(struct sbp_dev *); 364 static void sbp_probe_target(void *); 365 static void sbp_post_busreset(void *); 366 static void sbp_post_explore(void *); 367 #if NEED_RESPONSE 368 static void sbp_loginres_callback(struct fw_xfer *); 369 #endif 370 static inline void sbp_xfer_free(struct fw_xfer *); 371 static void sbp_reset_start_callback(struct fw_xfer *); 372 static void sbp_reset_start(struct sbp_dev *); 373 static void sbp_mgm_callback(struct fw_xfer *); 374 static void sbp_scsipi_scan_target(void *); 375 static inline void sbp_scan_dev(struct sbp_dev *); 376 static void sbp_do_attach(struct fw_xfer *); 377 static void sbp_agent_reset_callback(struct fw_xfer *); 378 static void sbp_agent_reset(struct sbp_dev *); 379 static void sbp_busy_timeout_callback(struct fw_xfer *); 380 static void sbp_busy_timeout(struct sbp_dev *); 381 static void sbp_orb_pointer_callback(struct fw_xfer *); 382 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *); 383 static void sbp_doorbell_callback(struct fw_xfer *); 384 static void sbp_doorbell(struct sbp_dev *); 385 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int); 386 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *); 387 static void sbp_print_scsi_cmd(struct sbp_ocb *); 388 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *); 389 static void sbp_fix_inq_data(struct sbp_ocb *); 390 static void sbp_recv(struct fw_xfer *); 391 static int sbp_logout_all(struct sbp_softc *); 392 static void sbp_free_sdev(struct sbp_dev *); 393 static void sbp_free_target(struct sbp_target *); 394 static void sbp_scsipi_detach_sdev(struct sbp_dev *); 395 static void sbp_scsipi_detach_target(struct sbp_target *); 396 static void sbp_target_reset(struct sbp_dev *, int); 397 static void sbp_mgm_timeout(void *); 398 static void sbp_timeout(void *); 399 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *); 400 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int); 401 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *); 402 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *); 403 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *); 404 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *); 405 static void sbp_abort_ocb(struct sbp_ocb *, int); 406 static void sbp_abort_all_ocbs(struct sbp_dev *, int); 407 408 409 static const char *orb_status0[] = { 410 /* 0 */ "No additional information to report", 411 /* 1 */ "Request type not supported", 412 /* 2 */ "Speed not supported", 413 /* 3 */ "Page size not supported", 414 /* 4 */ "Access denied", 415 /* 5 */ "Logical unit not supported", 416 /* 6 */ "Maximum payload too small", 417 /* 7 */ "Reserved for future standardization", 418 /* 8 */ "Resources unavailable", 419 /* 9 */ "Function rejected", 420 /* A */ "Login ID not recognized", 421 /* B */ "Dummy ORB completed", 422 /* C */ "Request aborted", 423 /* FF */ "Unspecified error" 424 #define MAX_ORB_STATUS0 0xd 425 }; 426 427 static const char *orb_status1_object[] = { 428 /* 0 */ "Operation request block (ORB)", 429 /* 1 */ "Data buffer", 430 /* 2 */ "Page table", 431 /* 3 */ "Unable to specify" 432 }; 433 434 static const char *orb_status1_serial_bus_error[] = { 435 /* 0 */ "Missing acknowledge", 436 /* 1 */ "Reserved; not to be used", 437 /* 2 */ "Time-out error", 438 /* 3 */ "Reserved; not to be used", 439 /* 4 */ "Busy retry limit exceeded(X)", 440 /* 5 */ "Busy retry limit exceeded(A)", 441 /* 6 */ "Busy retry limit exceeded(B)", 442 /* 7 */ "Reserved for future standardization", 443 /* 8 */ "Reserved for future standardization", 444 /* 9 */ "Reserved for future standardization", 445 /* A */ "Reserved for future standardization", 446 /* B */ "Tardy retry limit exceeded", 447 /* C */ "Conflict error", 448 /* D */ "Data error", 449 /* E */ "Type error", 450 /* F */ "Address error" 451 }; 452 453 454 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc), 455 sbpmatch, sbpattach, sbpdetach, NULL); 456 457 458 int 459 sbpmatch(device_t parent, cfdata_t cf, void *aux) 460 { 461 struct fw_attach_args *fwa = aux; 462 463 if (strcmp(fwa->name, "sbp") == 0) 464 return 1; 465 return 0; 466 } 467 468 static void 469 sbpattach(device_t parent, device_t self, void *aux) 470 { 471 struct sbp_softc *sc = device_private(self); 472 struct fw_attach_args *fwa = (struct fw_attach_args *)aux; 473 struct firewire_comm *fc; 474 struct scsipi_adapter *sc_adapter = &sc->sc_adapter; 475 struct scsipi_channel *sc_channel = &sc->sc_channel; 476 struct sbp_target *target = &sc->sc_target; 477 int dv_unit; 478 479 aprint_naive("\n"); 480 aprint_normal(": SBP-2/SCSI over IEEE1394\n"); 481 482 sc->sc_fd.dev = self; 483 484 if (cold) 485 sbp_cold++; 486 sc->sc_fd.fc = fc = fwa->fc; 487 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM); 488 cv_init(&sc->sc_cv, "sbp"); 489 490 if (max_speed < 0) 491 max_speed = fc->speed; 492 493 sc->sc_dmat = fc->dmat; 494 495 sc->sc_target.fwdev = NULL; 496 sc->sc_target.luns = NULL; 497 498 /* Initialize mutexes and lists before we can error out 499 * to prevent crashes on detach 500 */ 501 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM); 502 STAILQ_INIT(&sc->sc_fwb.xferlist); 503 504 if (sbp_alloc_target(sc, fwa->fwdev) == NULL) 505 return; 506 507 sc_adapter->adapt_dev = sc->sc_fd.dev; 508 sc_adapter->adapt_nchannels = 1; 509 sc_adapter->adapt_max_periph = 1; 510 sc_adapter->adapt_request = sbp_scsipi_request; 511 sc_adapter->adapt_minphys = sbp_minphys; 512 sc_adapter->adapt_openings = 8; 513 514 sc_channel->chan_adapter = sc_adapter; 515 sc_channel->chan_bustype = &scsi_bustype; 516 sc_channel->chan_defquirks = PQUIRK_ONLYBIG; 517 sc_channel->chan_channel = 0; 518 sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE; 519 520 sc_channel->chan_ntargets = 1; 521 sc_channel->chan_nluns = target->num_lun; /* We set nluns 0 now */ 522 sc_channel->chan_id = 1; 523 524 sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint); 525 if (sc->sc_bus == NULL) { 526 aprint_error_dev(self, "attach failed\n"); 527 return; 528 } 529 530 /* We reserve 16 bit space (4 bytes X 64 unit X 256 luns) */ 531 dv_unit = device_unit(sc->sc_fd.dev); 532 sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0); 533 sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1); 534 /* pre-allocate xfer */ 535 fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP, 536 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2, 537 fc, (void *)sc, sbp_recv); 538 fw_bindadd(fc, &sc->sc_fwb); 539 540 sc->sc_fd.post_busreset = sbp_post_busreset; 541 sc->sc_fd.post_explore = sbp_post_explore; 542 543 if (fc->status != FWBUSNOTREADY) { 544 sbp_post_busreset((void *)sc); 545 sbp_post_explore((void *)sc); 546 } 547 } 548 549 static int 550 sbpdetach(device_t self, int flags) 551 { 552 struct sbp_softc *sc = device_private(self); 553 struct firewire_comm *fc = sc->sc_fd.fc; 554 555 sbp_scsipi_detach_target(&sc->sc_target); 556 557 if (sc->sc_target.fwdev && SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) { 558 sbp_logout_all(sc); 559 560 /* XXX wait for logout completion */ 561 mutex_enter(&sc->sc_mtx); 562 cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2); 563 mutex_exit(&sc->sc_mtx); 564 } 565 566 sbp_free_target(&sc->sc_target); 567 568 fw_bindremove(fc, &sc->sc_fwb); 569 fw_xferlist_remove(&sc->sc_fwb.xferlist); 570 mutex_destroy(&sc->sc_fwb.fwb_mtx); 571 572 mutex_destroy(&sc->sc_mtx); 573 cv_destroy(&sc->sc_cv); 574 575 return 0; 576 } 577 578 579 static void 580 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req, 581 void *arg) 582 { 583 struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev); 584 struct scsipi_xfer *xs = arg; 585 int i; 586 587 SBP_DEBUG(1) 588 printf("Called sbp_scsipi_request\n"); 589 END_DEBUG 590 591 switch (req) { 592 case ADAPTER_REQ_RUN_XFER: 593 SBP_DEBUG(1) 594 printf("Got req_run_xfer\n"); 595 printf("xs control: 0x%08x, timeout: %d\n", 596 xs->xs_control, xs->timeout); 597 printf("opcode: 0x%02x\n", (int)xs->cmd->opcode); 598 for (i = 0; i < 15; i++) 599 printf("0x%02x ",(int)xs->cmd->bytes[i]); 600 printf("\n"); 601 END_DEBUG 602 if (xs->xs_control & XS_CTL_RESET) { 603 SBP_DEBUG(1) 604 printf("XS_CTL_RESET not support\n"); 605 END_DEBUG 606 break; 607 } 608 #define SBPSCSI_SBP2_MAX_CDB 12 609 if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) { 610 SBP_DEBUG(0) 611 printf( 612 "sbp doesn't support cdb's larger than %d bytes\n", 613 SBPSCSI_SBP2_MAX_CDB); 614 END_DEBUG 615 xs->error = XS_DRIVER_STUFFUP; 616 scsipi_done(xs); 617 return; 618 } 619 sbp_action1(sc, xs); 620 621 break; 622 case ADAPTER_REQ_GROW_RESOURCES: 623 SBP_DEBUG(1) 624 printf("Got req_grow_resources\n"); 625 END_DEBUG 626 break; 627 case ADAPTER_REQ_SET_XFER_MODE: 628 SBP_DEBUG(1) 629 printf("Got set xfer mode\n"); 630 END_DEBUG 631 break; 632 default: 633 panic("Unknown request: %d\n", (int)req); 634 } 635 } 636 637 static void 638 sbp_minphys(struct buf *bp) 639 { 640 641 minphys(bp); 642 } 643 644 645 /* 646 * Display device characteristics on the console 647 */ 648 static void 649 sbp_show_sdev_info(struct sbp_dev *sdev) 650 { 651 struct fw_device *fwdev = sdev->target->fwdev; 652 struct sbp_softc *sc = sdev->target->sbp; 653 654 aprint_normal_dev(sc->sc_fd.dev, 655 "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n", 656 (sdev->type & 0x40) >> 6, 657 (sdev->type & 0x1f), 658 fwdev->eui.hi, 659 fwdev->eui.lo, 660 fwdev->dst, 661 fwdev->speed, 662 fwdev->maxrec); 663 aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n", 664 sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision); 665 } 666 667 static void 668 sbp_alloc_lun(struct sbp_target *target) 669 { 670 struct crom_context cc; 671 struct csrreg *reg; 672 struct sbp_dev *sdev, **newluns; 673 struct sbp_softc *sc; 674 int maxlun, lun, i; 675 676 sc = target->sbp; 677 crom_init_context(&cc, target->fwdev->csrrom); 678 /* XXX shoud parse appropriate unit directories only */ 679 maxlun = -1; 680 while (cc.depth >= 0) { 681 reg = crom_search_key(&cc, CROM_LUN); 682 if (reg == NULL) 683 break; 684 lun = reg->val & 0xffff; 685 SBP_DEBUG(0) 686 printf("target %d lun %d found\n", target->target_id, lun); 687 END_DEBUG 688 if (maxlun < lun) 689 maxlun = lun; 690 crom_next(&cc); 691 } 692 if (maxlun < 0) 693 aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n", 694 target->target_id); 695 696 maxlun++; 697 if (maxlun >= SBP_NUM_LUNS) 698 maxlun = SBP_NUM_LUNS; 699 700 /* Invalidiate stale devices */ 701 for (lun = 0; lun < target->num_lun; lun++) { 702 sdev = target->luns[lun]; 703 if (sdev == NULL) 704 continue; 705 sdev->flags &= ~VALID_LUN; 706 if (lun >= maxlun) { 707 /* lost device */ 708 sbp_scsipi_detach_sdev(sdev); 709 sbp_free_sdev(sdev); 710 target->luns[lun] = NULL; 711 } 712 } 713 714 /* Reallocate */ 715 if (maxlun != target->num_lun) { 716 newluns = (struct sbp_dev **) realloc(target->luns, 717 sizeof(struct sbp_dev *) * maxlun, 718 M_SBP, M_NOWAIT | M_ZERO); 719 720 if (newluns == NULL) { 721 aprint_error_dev(sc->sc_fd.dev, "realloc failed\n"); 722 newluns = target->luns; 723 maxlun = target->num_lun; 724 } 725 726 /* 727 * We must zero the extended region for the case 728 * realloc() doesn't allocate new buffer. 729 */ 730 if (maxlun > target->num_lun) { 731 const int sbp_dev_p_sz = sizeof(struct sbp_dev *); 732 733 memset(&newluns[target->num_lun], 0, 734 sbp_dev_p_sz * (maxlun - target->num_lun)); 735 } 736 737 target->luns = newluns; 738 target->num_lun = maxlun; 739 } 740 741 crom_init_context(&cc, target->fwdev->csrrom); 742 while (cc.depth >= 0) { 743 int new = 0; 744 745 reg = crom_search_key(&cc, CROM_LUN); 746 if (reg == NULL) 747 break; 748 lun = reg->val & 0xffff; 749 if (lun >= SBP_NUM_LUNS) { 750 aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n", 751 lun); 752 goto next; 753 } 754 755 sdev = target->luns[lun]; 756 if (sdev == NULL) { 757 sdev = malloc(sizeof(struct sbp_dev), 758 M_SBP, M_NOWAIT | M_ZERO); 759 if (sdev == NULL) { 760 aprint_error_dev(sc->sc_fd.dev, 761 "malloc failed\n"); 762 goto next; 763 } 764 target->luns[lun] = sdev; 765 sdev->lun_id = lun; 766 sdev->target = target; 767 STAILQ_INIT(&sdev->ocbs); 768 callout_init(&sdev->login_callout, CALLOUT_MPSAFE); 769 callout_setfunc(&sdev->login_callout, 770 sbp_login_callout, sdev); 771 sdev->status = SBP_DEV_RESET; 772 new = 1; 773 snprintf(sdev->bustgtlun, 32, "%s:%d:%d", 774 device_xname(sc->sc_fd.dev), 775 sdev->target->target_id, 776 sdev->lun_id); 777 if (!sc->sc_lwp) 778 if (kthread_create( 779 PRI_NONE, KTHREAD_MPSAFE, NULL, 780 sbp_scsipi_scan_target, &sc->sc_target, 781 &sc->sc_lwp, 782 "sbp%d_attach", device_unit(sc->sc_fd.dev))) 783 aprint_error_dev(sc->sc_fd.dev, 784 "unable to create thread"); 785 } 786 sdev->flags |= VALID_LUN; 787 sdev->type = (reg->val & 0xff0000) >> 16; 788 789 if (new == 0) 790 goto next; 791 792 fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE, 793 &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT); 794 if (sdev->dma.v_addr == NULL) { 795 free(sdev, M_SBP); 796 target->luns[lun] = NULL; 797 goto next; 798 } 799 sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr; 800 sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN]; 801 memset((char *)sdev->ocb, 0, 802 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN); 803 804 STAILQ_INIT(&sdev->free_ocbs); 805 for (i = 0; i < SBP_QUEUE_LEN; i++) { 806 struct sbp_ocb *ocb = &sdev->ocb[i]; 807 808 ocb->index = i; 809 ocb->bus_addr = 810 sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i; 811 if (bus_dmamap_create(sc->sc_dmat, 0x100000, 812 SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) { 813 aprint_error_dev(sc->sc_fd.dev, 814 "cannot create dmamap %d\n", i); 815 /* XXX */ 816 goto next; 817 } 818 sbp_free_ocb(sdev, ocb); /* into free queue */ 819 } 820 next: 821 crom_next(&cc); 822 } 823 824 for (lun = 0; lun < target->num_lun; lun++) { 825 sdev = target->luns[lun]; 826 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { 827 sbp_scsipi_detach_sdev(sdev); 828 sbp_free_sdev(sdev); 829 target->luns[lun] = NULL; 830 } 831 } 832 } 833 834 static struct sbp_target * 835 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev) 836 { 837 struct sbp_target *target; 838 struct crom_context cc; 839 struct csrreg *reg; 840 841 SBP_DEBUG(1) 842 printf("sbp_alloc_target\n"); 843 END_DEBUG 844 /* new target */ 845 target = &sc->sc_target; 846 target->sbp = sc; 847 target->fwdev = fwdev; 848 target->target_id = 0; 849 target->mgm_ocb_cur = NULL; 850 SBP_DEBUG(1) 851 printf("target: mgm_port: %x\n", target->mgm_lo); 852 END_DEBUG 853 STAILQ_INIT(&target->xferlist); 854 target->n_xfer = 0; 855 STAILQ_INIT(&target->mgm_ocb_queue); 856 callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE); 857 858 target->luns = NULL; 859 target->num_lun = 0; 860 861 /* XXX we may want to reload mgm port after each bus reset */ 862 /* XXX there might be multiple management agents */ 863 crom_init_context(&cc, target->fwdev->csrrom); 864 reg = crom_search_key(&cc, CROM_MGM); 865 if (reg == NULL || reg->val == 0) { 866 aprint_error_dev(sc->sc_fd.dev, "NULL management address\n"); 867 target->fwdev = NULL; 868 return NULL; 869 } 870 871 target->mgm_hi = 0xffff; 872 target->mgm_lo = 0xf0000000 | (reg->val << 2); 873 874 return target; 875 } 876 877 static void 878 sbp_probe_lun(struct sbp_dev *sdev) 879 { 880 struct fw_device *fwdev; 881 struct crom_context c, *cc = &c; 882 struct csrreg *reg; 883 884 memset(sdev->vendor, 0, sizeof(sdev->vendor)); 885 memset(sdev->product, 0, sizeof(sdev->product)); 886 887 fwdev = sdev->target->fwdev; 888 crom_init_context(cc, fwdev->csrrom); 889 /* get vendor string */ 890 crom_search_key(cc, CSRKEY_VENDOR); 891 crom_next(cc); 892 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); 893 /* skip to the unit directory for SBP-2 */ 894 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { 895 if (reg->val == CSRVAL_T10SBP2) 896 break; 897 crom_next(cc); 898 } 899 /* get firmware revision */ 900 reg = crom_search_key(cc, CSRKEY_FIRM_VER); 901 if (reg != NULL) 902 snprintf(sdev->revision, sizeof(sdev->revision), "%06x", 903 reg->val); 904 /* get product string */ 905 crom_search_key(cc, CSRKEY_MODEL); 906 crom_next(cc); 907 crom_parse_text(cc, sdev->product, sizeof(sdev->product)); 908 } 909 910 static void 911 sbp_login_callout(void *arg) 912 { 913 struct sbp_dev *sdev = (struct sbp_dev *)arg; 914 915 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); 916 } 917 918 static void 919 sbp_login(struct sbp_dev *sdev) 920 { 921 struct sbp_softc *sc = sdev->target->sbp; 922 struct timeval delta; 923 struct timeval t; 924 int ticks = 0; 925 926 microtime(&delta); 927 timersub(&delta, &sc->sc_last_busreset, &delta); 928 t.tv_sec = login_delay / 1000; 929 t.tv_usec = (login_delay % 1000) * 1000; 930 timersub(&t, &delta, &t); 931 if (t.tv_sec >= 0 && t.tv_usec > 0) 932 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; 933 SBP_DEBUG(0) 934 printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__, 935 (long long)t.tv_sec, (long)t.tv_usec, ticks); 936 END_DEBUG 937 callout_schedule(&sdev->login_callout, ticks); 938 } 939 940 static void 941 sbp_probe_target(void *arg) 942 { 943 struct sbp_target *target = (struct sbp_target *)arg; 944 struct sbp_dev *sdev; 945 int i; 946 947 SBP_DEBUG(1) 948 printf("%s %d\n", __func__, target->target_id); 949 END_DEBUG 950 951 sbp_alloc_lun(target); 952 953 /* XXX untimeout mgm_ocb and dequeue */ 954 for (i = 0; i < target->num_lun; i++) { 955 sdev = target->luns[i]; 956 if (sdev == NULL || sdev->status == SBP_DEV_DEAD) 957 continue; 958 959 if (sdev->periph != NULL) { 960 scsipi_periph_freeze(sdev->periph, 1); 961 sdev->freeze++; 962 } 963 sbp_probe_lun(sdev); 964 sbp_show_sdev_info(sdev); 965 966 sbp_abort_all_ocbs(sdev, XS_RESET); 967 switch (sdev->status) { 968 case SBP_DEV_RESET: 969 /* new or revived target */ 970 if (auto_login) 971 sbp_login(sdev); 972 break; 973 case SBP_DEV_TOATTACH: 974 case SBP_DEV_PROBE: 975 case SBP_DEV_ATTACHED: 976 case SBP_DEV_RETRY: 977 default: 978 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); 979 break; 980 } 981 } 982 } 983 984 static void 985 sbp_post_busreset(void *arg) 986 { 987 struct sbp_softc *sc = (struct sbp_softc *)arg; 988 struct sbp_target *target = &sc->sc_target; 989 struct fw_device *fwdev = target->fwdev; 990 int alive; 991 992 alive = SBP_FWDEV_ALIVE(fwdev); 993 SBP_DEBUG(0) 994 printf("sbp_post_busreset\n"); 995 if (!alive) 996 printf("not alive\n"); 997 END_DEBUG 998 microtime(&sc->sc_last_busreset); 999 1000 if (!alive) 1001 return; 1002 1003 scsipi_channel_freeze(&sc->sc_channel, 1); 1004 } 1005 1006 static void 1007 sbp_post_explore(void *arg) 1008 { 1009 struct sbp_softc *sc = (struct sbp_softc *)arg; 1010 struct sbp_target *target = &sc->sc_target; 1011 struct fw_device *fwdev = target->fwdev; 1012 int alive; 1013 1014 alive = SBP_FWDEV_ALIVE(fwdev); 1015 SBP_DEBUG(0) 1016 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); 1017 if (!alive) 1018 printf("not alive\n"); 1019 END_DEBUG 1020 if (!alive) 1021 return; 1022 1023 if (!firewire_phydma_enable) 1024 return; 1025 1026 if (sbp_cold > 0) 1027 sbp_cold--; 1028 1029 SBP_DEBUG(0) 1030 printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo); 1031 END_DEBUG 1032 sbp_probe_target((void *)target); 1033 if (target->num_lun == 0) 1034 sbp_free_target(target); 1035 1036 scsipi_channel_thaw(&sc->sc_channel, 1); 1037 } 1038 1039 #if NEED_RESPONSE 1040 static void 1041 sbp_loginres_callback(struct fw_xfer *xfer) 1042 { 1043 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1044 struct sbp_softc *sc = sdev->target->sbp; 1045 1046 SBP_DEBUG(1) 1047 printf("sbp_loginres_callback\n"); 1048 END_DEBUG 1049 /* recycle */ 1050 mutex_enter(&sc->sc_fwb.fwb_mtx); 1051 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 1052 mutex_exit(&sc->sc_fwb.fwb_mtx); 1053 return; 1054 } 1055 #endif 1056 1057 static inline void 1058 sbp_xfer_free(struct fw_xfer *xfer) 1059 { 1060 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1061 struct sbp_softc *sc = sdev->target->sbp; 1062 1063 fw_xfer_unload(xfer); 1064 mutex_enter(&sc->sc_mtx); 1065 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); 1066 mutex_exit(&sc->sc_mtx); 1067 } 1068 1069 static void 1070 sbp_reset_start_callback(struct fw_xfer *xfer) 1071 { 1072 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; 1073 struct sbp_target *target = sdev->target; 1074 int i; 1075 1076 if (xfer->resp != 0) 1077 aprint_error("%s: sbp_reset_start failed: resp=%d\n", 1078 sdev->bustgtlun, xfer->resp); 1079 1080 for (i = 0; i < target->num_lun; i++) { 1081 tsdev = target->luns[i]; 1082 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) 1083 sbp_login(tsdev); 1084 } 1085 } 1086 1087 static void 1088 sbp_reset_start(struct sbp_dev *sdev) 1089 { 1090 struct fw_xfer *xfer; 1091 struct fw_pkt *fp; 1092 1093 SBP_DEBUG(0) 1094 printf("%s: sbp_reset_start: %s\n", 1095 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1096 END_DEBUG 1097 1098 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1099 if (xfer == NULL) 1100 return; 1101 xfer->hand = sbp_reset_start_callback; 1102 fp = &xfer->send.hdr; 1103 fp->mode.wreqq.dest_hi = 0xffff; 1104 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; 1105 fp->mode.wreqq.data = htonl(0xf); 1106 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1107 sbp_xfer_free(xfer); 1108 } 1109 1110 static void 1111 sbp_mgm_callback(struct fw_xfer *xfer) 1112 { 1113 struct sbp_dev *sdev; 1114 int resp; 1115 1116 sdev = (struct sbp_dev *)xfer->sc; 1117 1118 SBP_DEBUG(1) 1119 printf("%s: sbp_mgm_callback: %s\n", 1120 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1121 END_DEBUG 1122 resp = xfer->resp; 1123 sbp_xfer_free(xfer); 1124 return; 1125 } 1126 1127 static void 1128 sbp_scsipi_scan_target(void *arg) 1129 { 1130 struct sbp_target *target = (struct sbp_target *)arg; 1131 struct sbp_softc *sc = target->sbp; 1132 struct sbp_dev *sdev; 1133 struct scsipi_channel *chan = &sc->sc_channel; 1134 struct scsibus_softc *sc_bus = device_private(sc->sc_bus); 1135 int lun, yet; 1136 1137 do { 1138 mutex_enter(&sc->sc_mtx); 1139 cv_wait_sig(&sc->sc_cv, &sc->sc_mtx); 1140 mutex_exit(&sc->sc_mtx); 1141 yet = 0; 1142 1143 for (lun = 0; lun < target->num_lun; lun++) { 1144 sdev = target->luns[lun]; 1145 if (sdev == NULL) 1146 continue; 1147 if (sdev->status != SBP_DEV_PROBE) { 1148 yet++; 1149 continue; 1150 } 1151 1152 if (sdev->periph == NULL) { 1153 if (chan->chan_nluns < target->num_lun) 1154 chan->chan_nluns = target->num_lun; 1155 1156 scsi_probe_bus(sc_bus, target->target_id, 1157 sdev->lun_id); 1158 sdev->periph = scsipi_lookup_periph(chan, 1159 target->target_id, lun); 1160 } 1161 sdev->status = SBP_DEV_ATTACHED; 1162 } 1163 } while (yet > 0); 1164 1165 sc->sc_lwp = NULL; 1166 kthread_exit(0); 1167 1168 /* NOTREACHED */ 1169 } 1170 1171 static inline void 1172 sbp_scan_dev(struct sbp_dev *sdev) 1173 { 1174 struct sbp_softc *sc = sdev->target->sbp; 1175 1176 sdev->status = SBP_DEV_PROBE; 1177 mutex_enter(&sc->sc_mtx); 1178 cv_signal(&sdev->target->sbp->sc_cv); 1179 mutex_exit(&sc->sc_mtx); 1180 } 1181 1182 1183 static void 1184 sbp_do_attach(struct fw_xfer *xfer) 1185 { 1186 struct sbp_dev *sdev; 1187 struct sbp_target *target; 1188 struct sbp_softc *sc; 1189 1190 sdev = (struct sbp_dev *)xfer->sc; 1191 target = sdev->target; 1192 sc = target->sbp; 1193 1194 SBP_DEBUG(0) 1195 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1196 sdev->bustgtlun); 1197 END_DEBUG 1198 sbp_xfer_free(xfer); 1199 1200 sbp_scan_dev(sdev); 1201 return; 1202 } 1203 1204 static void 1205 sbp_agent_reset_callback(struct fw_xfer *xfer) 1206 { 1207 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1208 struct sbp_softc *sc = sdev->target->sbp; 1209 1210 SBP_DEBUG(1) 1211 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1212 sdev->bustgtlun); 1213 END_DEBUG 1214 if (xfer->resp != 0) 1215 aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__, 1216 sdev->bustgtlun, xfer->resp); 1217 1218 sbp_xfer_free(xfer); 1219 if (sdev->periph != NULL) { 1220 scsipi_periph_thaw(sdev->periph, sdev->freeze); 1221 scsipi_channel_thaw(&sc->sc_channel, 0); 1222 sdev->freeze = 0; 1223 } 1224 } 1225 1226 static void 1227 sbp_agent_reset(struct sbp_dev *sdev) 1228 { 1229 struct fw_xfer *xfer; 1230 struct fw_pkt *fp; 1231 1232 SBP_DEBUG(0) 1233 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1234 __func__, sdev->bustgtlun); 1235 END_DEBUG 1236 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); 1237 if (xfer == NULL) 1238 return; 1239 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) 1240 xfer->hand = sbp_agent_reset_callback; 1241 else 1242 xfer->hand = sbp_do_attach; 1243 fp = &xfer->send.hdr; 1244 fp->mode.wreqq.data = htonl(0xf); 1245 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1246 sbp_xfer_free(xfer); 1247 sbp_abort_all_ocbs(sdev, XS_RESET); 1248 } 1249 1250 static void 1251 sbp_busy_timeout_callback(struct fw_xfer *xfer) 1252 { 1253 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1254 1255 SBP_DEBUG(1) 1256 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1257 __func__, sdev->bustgtlun); 1258 END_DEBUG 1259 sbp_xfer_free(xfer); 1260 sbp_agent_reset(sdev); 1261 } 1262 1263 static void 1264 sbp_busy_timeout(struct sbp_dev *sdev) 1265 { 1266 struct fw_pkt *fp; 1267 struct fw_xfer *xfer; 1268 1269 SBP_DEBUG(0) 1270 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1271 __func__, sdev->bustgtlun); 1272 END_DEBUG 1273 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1274 if (xfer == NULL) 1275 return; 1276 xfer->hand = sbp_busy_timeout_callback; 1277 fp = &xfer->send.hdr; 1278 fp->mode.wreqq.dest_hi = 0xffff; 1279 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; 1280 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf); 1281 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1282 sbp_xfer_free(xfer); 1283 } 1284 1285 static void 1286 sbp_orb_pointer_callback(struct fw_xfer *xfer) 1287 { 1288 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1289 struct sbp_softc *sc = sdev->target->sbp; 1290 1291 SBP_DEBUG(1) 1292 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1293 sdev->bustgtlun); 1294 END_DEBUG 1295 if (xfer->resp != 0) 1296 aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n", 1297 __func__, sdev->bustgtlun, xfer->resp); 1298 sbp_xfer_free(xfer); 1299 sdev->flags &= ~ORB_POINTER_ACTIVE; 1300 1301 if ((sdev->flags & ORB_POINTER_NEED) != 0) { 1302 struct sbp_ocb *ocb; 1303 1304 sdev->flags &= ~ORB_POINTER_NEED; 1305 ocb = STAILQ_FIRST(&sdev->ocbs); 1306 if (ocb != NULL) 1307 sbp_orb_pointer(sdev, ocb); 1308 } 1309 return; 1310 } 1311 1312 static void 1313 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) 1314 { 1315 struct sbp_softc *sc = sdev->target->sbp; 1316 struct fw_xfer *xfer; 1317 struct fw_pkt *fp; 1318 1319 SBP_DEBUG(1) 1320 printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__, 1321 sdev->bustgtlun, (uint32_t)ocb->bus_addr); 1322 END_DEBUG 1323 1324 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { 1325 SBP_DEBUG(0) 1326 printf("%s: orb pointer active\n", __func__); 1327 END_DEBUG 1328 sdev->flags |= ORB_POINTER_NEED; 1329 return; 1330 } 1331 1332 sdev->flags |= ORB_POINTER_ACTIVE; 1333 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08); 1334 if (xfer == NULL) 1335 return; 1336 xfer->hand = sbp_orb_pointer_callback; 1337 1338 fp = &xfer->send.hdr; 1339 fp->mode.wreqb.len = 8; 1340 fp->mode.wreqb.extcode = 0; 1341 xfer->send.payload[0] = 1342 htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16)); 1343 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); 1344 1345 if (fw_asyreq(xfer->fc, -1, xfer) != 0) { 1346 sbp_xfer_free(xfer); 1347 ocb->xs->error = XS_DRIVER_STUFFUP; 1348 scsipi_done(ocb->xs); 1349 } 1350 } 1351 1352 static void 1353 sbp_doorbell_callback(struct fw_xfer *xfer) 1354 { 1355 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1356 struct sbp_softc *sc = sdev->target->sbp; 1357 1358 SBP_DEBUG(1) 1359 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1360 sdev->bustgtlun); 1361 END_DEBUG 1362 if (xfer->resp != 0) { 1363 aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n", 1364 __func__, xfer->resp); 1365 } 1366 sbp_xfer_free(xfer); 1367 sdev->flags &= ~ORB_DOORBELL_ACTIVE; 1368 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) { 1369 sdev->flags &= ~ORB_DOORBELL_NEED; 1370 sbp_doorbell(sdev); 1371 } 1372 return; 1373 } 1374 1375 static void 1376 sbp_doorbell(struct sbp_dev *sdev) 1377 { 1378 struct fw_xfer *xfer; 1379 struct fw_pkt *fp; 1380 1381 SBP_DEBUG(1) 1382 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1383 __func__, sdev->bustgtlun); 1384 END_DEBUG 1385 1386 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) { 1387 sdev->flags |= ORB_DOORBELL_NEED; 1388 return; 1389 } 1390 sdev->flags |= ORB_DOORBELL_ACTIVE; 1391 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10); 1392 if (xfer == NULL) 1393 return; 1394 xfer->hand = sbp_doorbell_callback; 1395 fp = &xfer->send.hdr; 1396 fp->mode.wreqq.data = htonl(0xf); 1397 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1398 sbp_xfer_free(xfer); 1399 } 1400 1401 static struct fw_xfer * 1402 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset) 1403 { 1404 struct sbp_softc *sc; 1405 struct fw_xfer *xfer; 1406 struct fw_pkt *fp; 1407 struct sbp_target *target; 1408 int new = 0; 1409 1410 target = sdev->target; 1411 sc = target->sbp; 1412 mutex_enter(&sc->sc_mtx); 1413 xfer = STAILQ_FIRST(&target->xferlist); 1414 if (xfer == NULL) { 1415 if (target->n_xfer > 5 /* XXX */) { 1416 aprint_error_dev(sc->sc_fd.dev, 1417 "no more xfer for this target\n"); 1418 mutex_exit(&sc->sc_mtx); 1419 return NULL; 1420 } 1421 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0); 1422 if (xfer == NULL) { 1423 aprint_error_dev(sc->sc_fd.dev, 1424 "fw_xfer_alloc_buf failed\n"); 1425 mutex_exit(&sc->sc_mtx); 1426 return NULL; 1427 } 1428 target->n_xfer++; 1429 SBP_DEBUG(0) 1430 printf("sbp: alloc %d xfer\n", target->n_xfer); 1431 END_DEBUG 1432 new = 1; 1433 } else 1434 STAILQ_REMOVE_HEAD(&target->xferlist, link); 1435 mutex_exit(&sc->sc_mtx); 1436 1437 microtime(&xfer->tv); 1438 1439 if (new) { 1440 xfer->recv.pay_len = 0; 1441 xfer->send.spd = min(target->fwdev->speed, max_speed); 1442 xfer->fc = target->sbp->sc_fd.fc; 1443 } 1444 1445 if (tcode == FWTCODE_WREQB) 1446 xfer->send.pay_len = 8; 1447 else 1448 xfer->send.pay_len = 0; 1449 1450 xfer->sc = (void *)sdev; 1451 fp = &xfer->send.hdr; 1452 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi; 1453 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset; 1454 fp->mode.wreqq.tlrt = 0; 1455 fp->mode.wreqq.tcode = tcode; 1456 fp->mode.wreqq.pri = 0; 1457 fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst; 1458 1459 return xfer; 1460 } 1461 1462 static void 1463 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb) 1464 { 1465 struct fw_xfer *xfer; 1466 struct fw_pkt *fp; 1467 struct sbp_ocb *ocb; 1468 struct sbp_target *target; 1469 int nid, dv_unit; 1470 1471 target = sdev->target; 1472 nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS; 1473 dv_unit = device_unit(target->sbp->sc_fd.dev); 1474 1475 mutex_enter(&target->sbp->sc_mtx); 1476 if (func == ORB_FUN_RUNQUEUE) { 1477 ocb = STAILQ_FIRST(&target->mgm_ocb_queue); 1478 if (target->mgm_ocb_cur != NULL || ocb == NULL) { 1479 mutex_exit(&target->sbp->sc_mtx); 1480 return; 1481 } 1482 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); 1483 mutex_exit(&target->sbp->sc_mtx); 1484 goto start; 1485 } 1486 if ((ocb = sbp_get_ocb(sdev)) == NULL) { 1487 mutex_exit(&target->sbp->sc_mtx); 1488 /* XXX */ 1489 return; 1490 } 1491 mutex_exit(&target->sbp->sc_mtx); 1492 ocb->flags = OCB_ACT_MGM; 1493 ocb->sdev = sdev; 1494 1495 memset(ocb->orb, 0, sizeof(ocb->orb)); 1496 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); 1497 ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id)); 1498 1499 SBP_DEBUG(0) 1500 printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1501 __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]); 1502 END_DEBUG 1503 switch (func) { 1504 case ORB_FUN_LGI: 1505 { 1506 const off_t sbp_login_off = 1507 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN; 1508 1509 ocb->orb[0] = ocb->orb[1] = 0; /* password */ 1510 ocb->orb[2] = htonl(nid << 16); 1511 ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off); 1512 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); 1513 if (ex_login) 1514 ocb->orb[4] |= htonl(ORB_EXV); 1515 ocb->orb[5] = htonl(SBP_LOGIN_SIZE); 1516 bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map, 1517 sbp_login_off, SBP_LOGIN_SIZE, BUS_DMASYNC_PREREAD); 1518 break; 1519 } 1520 1521 case ORB_FUN_ATA: 1522 ocb->orb[0] = htonl((0 << 16) | 0); 1523 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); 1524 /* fall through */ 1525 case ORB_FUN_RCN: 1526 case ORB_FUN_LGO: 1527 case ORB_FUN_LUR: 1528 case ORB_FUN_RST: 1529 case ORB_FUN_ATS: 1530 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); 1531 break; 1532 } 1533 1534 if (target->mgm_ocb_cur != NULL) { 1535 /* there is a standing ORB */ 1536 mutex_enter(&target->sbp->sc_mtx); 1537 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); 1538 mutex_exit(&target->sbp->sc_mtx); 1539 return; 1540 } 1541 start: 1542 target->mgm_ocb_cur = ocb; 1543 1544 callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb); 1545 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); 1546 if (xfer == NULL) 1547 return; 1548 xfer->hand = sbp_mgm_callback; 1549 1550 fp = &xfer->send.hdr; 1551 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; 1552 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; 1553 fp->mode.wreqb.len = 8; 1554 fp->mode.wreqb.extcode = 0; 1555 xfer->send.payload[0] = htonl(nid << 16); 1556 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); 1557 1558 /* cache writeback & invalidate(required ORB_FUN_LGI func) */ 1559 /* when abort_ocb, should sync POST ope ? */ 1560 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE); 1561 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1562 sbp_xfer_free(xfer); 1563 } 1564 1565 static void 1566 sbp_print_scsi_cmd(struct sbp_ocb *ocb) 1567 { 1568 struct scsipi_xfer *xs = ocb->xs; 1569 1570 printf("%s:%d:%d:" 1571 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x," 1572 " flags: 0x%02x, %db cmd/%db data\n", 1573 device_xname(ocb->sdev->target->sbp->sc_fd.dev), 1574 xs->xs_periph->periph_target, 1575 xs->xs_periph->periph_lun, 1576 xs->cmd->opcode, 1577 xs->cmd->bytes[0], xs->cmd->bytes[1], 1578 xs->cmd->bytes[2], xs->cmd->bytes[3], 1579 xs->cmd->bytes[4], xs->cmd->bytes[5], 1580 xs->cmd->bytes[6], xs->cmd->bytes[7], 1581 xs->cmd->bytes[8], 1582 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT), 1583 xs->cmdlen, xs->datalen); 1584 } 1585 1586 static void 1587 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) 1588 { 1589 struct sbp_cmd_status *sbp_cmd_status; 1590 struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense; 1591 1592 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; 1593 1594 SBP_DEBUG(0) 1595 sbp_print_scsi_cmd(ocb); 1596 /* XXX need decode status */ 1597 printf("%s:" 1598 " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", 1599 ocb->sdev->bustgtlun, 1600 sbp_cmd_status->status, 1601 sbp_cmd_status->sfmt, 1602 sbp_cmd_status->valid, 1603 sbp_cmd_status->s_key, 1604 sbp_cmd_status->s_code, 1605 sbp_cmd_status->s_qlfr, 1606 sbp_status->len); 1607 END_DEBUG 1608 1609 switch (sbp_cmd_status->status) { 1610 case SCSI_CHECK: 1611 case SCSI_BUSY: 1612 case SCSI_TERMINATED: 1613 if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) 1614 sense->response_code = SSD_RCODE_CURRENT; 1615 else 1616 sense->response_code = SSD_RCODE_DEFERRED; 1617 if (sbp_cmd_status->valid) 1618 sense->response_code |= SSD_RCODE_VALID; 1619 sense->flags = sbp_cmd_status->s_key; 1620 if (sbp_cmd_status->mark) 1621 sense->flags |= SSD_FILEMARK; 1622 if (sbp_cmd_status->eom) 1623 sense->flags |= SSD_EOM; 1624 if (sbp_cmd_status->ill_len) 1625 sense->flags |= SSD_ILI; 1626 1627 memcpy(sense->info, &sbp_cmd_status->info, 4); 1628 1629 if (sbp_status->len <= 1) 1630 /* XXX not scsi status. shouldn't be happened */ 1631 sense->extra_len = 0; 1632 else if (sbp_status->len <= 4) 1633 /* add_sense_code(_qual), info, cmd_spec_info */ 1634 sense->extra_len = 6; 1635 else 1636 /* fru, sense_key_spec */ 1637 sense->extra_len = 10; 1638 1639 memcpy(sense->csi, &sbp_cmd_status->cdb, 4); 1640 1641 sense->asc = sbp_cmd_status->s_code; 1642 sense->ascq = sbp_cmd_status->s_qlfr; 1643 sense->fru = sbp_cmd_status->fru; 1644 1645 memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3); 1646 ocb->xs->error = XS_SENSE; 1647 ocb->xs->xs_status = sbp_cmd_status->status; 1648 /* 1649 { 1650 uint8_t j, *tmp; 1651 tmp = sense; 1652 for (j = 0; j < 32; j += 8) 1653 aprint_normal( 1654 "sense %02x%02x %02x%02x %02x%02x %02x%02x\n", 1655 tmp[j], tmp[j+1], tmp[j+2], tmp[j+3], 1656 tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]); 1657 1658 } 1659 */ 1660 break; 1661 default: 1662 aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev, 1663 "%s:%s: unknown scsi status 0x%x\n", 1664 __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status); 1665 } 1666 } 1667 1668 static void 1669 sbp_fix_inq_data(struct sbp_ocb *ocb) 1670 { 1671 struct scsipi_xfer *xs = ocb->xs; 1672 struct sbp_dev *sdev; 1673 struct scsipi_inquiry_data *inq = 1674 (struct scsipi_inquiry_data *)xs->data; 1675 1676 sdev = ocb->sdev; 1677 1678 #if 0 1679 /* 1680 * NetBSD is assuming always 0 for EVPD-bit and 'Page Code'. 1681 */ 1682 #define SI_EVPD 0x01 1683 if (xs->cmd->bytes[0] & SI_EVPD) 1684 return; 1685 #endif 1686 SBP_DEBUG(1) 1687 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1688 __func__, sdev->bustgtlun); 1689 END_DEBUG 1690 switch (inq->device & SID_TYPE) { 1691 case T_DIRECT: 1692 #if 0 1693 /* 1694 * XXX Convert Direct Access device to RBC. 1695 * I've never seen FireWire DA devices which support READ_6. 1696 */ 1697 if ((inq->device & SID_TYPE) == T_DIRECT) 1698 inq->device |= T_SIMPLE_DIRECT; /* T_DIRECT == 0 */ 1699 #endif 1700 /* FALLTHROUGH */ 1701 1702 case T_SIMPLE_DIRECT: 1703 /* 1704 * Override vendor/product/revision information. 1705 * Some devices sometimes return strange strings. 1706 */ 1707 #if 1 1708 memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor)); 1709 memcpy(inq->product, sdev->product, sizeof(inq->product)); 1710 memcpy(inq->revision + 2, sdev->revision, 1711 sizeof(inq->revision)); 1712 #endif 1713 break; 1714 } 1715 /* 1716 * Force to enable/disable tagged queuing. 1717 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page. 1718 */ 1719 if (sbp_tags > 0) 1720 inq->flags3 |= SID_CmdQue; 1721 else if (sbp_tags < 0) 1722 inq->flags3 &= ~SID_CmdQue; 1723 1724 } 1725 1726 static void 1727 sbp_recv(struct fw_xfer *xfer) 1728 { 1729 struct fw_pkt *rfp; 1730 #if NEED_RESPONSE 1731 struct fw_pkt *sfp; 1732 #endif 1733 struct sbp_softc *sc; 1734 struct sbp_dev *sdev; 1735 struct sbp_ocb *ocb; 1736 struct sbp_login_res *login_res = NULL; 1737 struct sbp_status *sbp_status; 1738 struct sbp_target *target; 1739 int orb_fun, status_valid0, status_valid, l, reset_agent = 0; 1740 uint32_t addr; 1741 /* 1742 uint32_t *ld; 1743 ld = xfer->recv.buf; 1744 printf("sbp %x %d %d %08x %08x %08x %08x\n", 1745 xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); 1746 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); 1747 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11])); 1748 */ 1749 1750 sc = (struct sbp_softc *)xfer->sc; 1751 if (xfer->resp != 0) { 1752 aprint_error_dev(sc->sc_fd.dev, 1753 "sbp_recv: xfer->resp = %d\n", xfer->resp); 1754 goto done0; 1755 } 1756 if (xfer->recv.payload == NULL) { 1757 aprint_error_dev(sc->sc_fd.dev, 1758 "sbp_recv: xfer->recv.payload == NULL\n"); 1759 goto done0; 1760 } 1761 rfp = &xfer->recv.hdr; 1762 if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) { 1763 aprint_error_dev(sc->sc_fd.dev, 1764 "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode); 1765 goto done0; 1766 } 1767 sbp_status = (struct sbp_status *)xfer->recv.payload; 1768 addr = rfp->mode.wreqb.dest_lo; 1769 SBP_DEBUG(2) 1770 printf("received address 0x%x\n", addr); 1771 END_DEBUG 1772 target = &sc->sc_target; 1773 l = SBP_ADDR2LUN(addr); 1774 if (l >= target->num_lun || target->luns[l] == NULL) { 1775 aprint_error_dev(sc->sc_fd.dev, 1776 "sbp_recv1: invalid lun %d (target=%d)\n", 1777 l, target->target_id); 1778 goto done0; 1779 } 1780 sdev = target->luns[l]; 1781 1782 ocb = NULL; 1783 switch (sbp_status->src) { 1784 case SRC_NEXT_EXISTS: 1785 case SRC_NO_NEXT: 1786 /* check mgm_ocb_cur first */ 1787 ocb = target->mgm_ocb_cur; 1788 if (ocb != NULL) 1789 if (OCB_MATCH(ocb, sbp_status)) { 1790 callout_stop(&target->mgm_ocb_timeout); 1791 target->mgm_ocb_cur = NULL; 1792 break; 1793 } 1794 ocb = sbp_dequeue_ocb(sdev, sbp_status); 1795 if (ocb == NULL) 1796 aprint_error_dev(sc->sc_fd.dev, 1797 "%s:%s: No ocb(%x) on the queue\n", __func__, 1798 sdev->bustgtlun, ntohl(sbp_status->orb_lo)); 1799 break; 1800 case SRC_UNSOL: 1801 /* unsolicit */ 1802 aprint_error_dev(sc->sc_fd.dev, 1803 "%s:%s: unsolicit status received\n", 1804 __func__, sdev->bustgtlun); 1805 break; 1806 default: 1807 aprint_error_dev(sc->sc_fd.dev, 1808 "%s:%s: unknown sbp_status->src\n", 1809 __func__, sdev->bustgtlun); 1810 } 1811 1812 status_valid0 = (sbp_status->src < 2 1813 && sbp_status->resp == SBP_REQ_CMP 1814 && sbp_status->dead == 0); 1815 status_valid = (status_valid0 && sbp_status->status == 0); 1816 1817 if (!status_valid0 || debug > 2) { 1818 int status; 1819 SBP_DEBUG(0) 1820 printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x" 1821 " len:%x stat:%x orb:%x%08x\n", 1822 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun, 1823 sbp_status->src, sbp_status->resp, sbp_status->dead, 1824 sbp_status->len, sbp_status->status, 1825 ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo)); 1826 END_DEBUG 1827 printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun); 1828 status = sbp_status->status; 1829 switch (sbp_status->resp) { 1830 case SBP_REQ_CMP: 1831 if (status > MAX_ORB_STATUS0) 1832 printf("%s\n", orb_status0[MAX_ORB_STATUS0]); 1833 else 1834 printf("%s\n", orb_status0[status]); 1835 break; 1836 case SBP_TRANS_FAIL: 1837 printf("Obj: %s, Error: %s\n", 1838 orb_status1_object[(status>>6) & 3], 1839 orb_status1_serial_bus_error[status & 0xf]); 1840 break; 1841 case SBP_ILLE_REQ: 1842 printf("Illegal request\n"); 1843 break; 1844 case SBP_VEND_DEP: 1845 printf("Vendor dependent\n"); 1846 break; 1847 default: 1848 printf("unknown respose code %d\n", sbp_status->resp); 1849 } 1850 } 1851 1852 /* we have to reset the fetch agent if it's dead */ 1853 if (sbp_status->dead) { 1854 if (sdev->periph != NULL) { 1855 scsipi_periph_freeze(sdev->periph, 1); 1856 sdev->freeze++; 1857 } 1858 reset_agent = 1; 1859 } 1860 1861 if (ocb == NULL) 1862 goto done; 1863 1864 switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) { 1865 case ORB_FMT_NOP: 1866 break; 1867 case ORB_FMT_VED: 1868 break; 1869 case ORB_FMT_STD: 1870 switch (ocb->flags) { 1871 case OCB_ACT_MGM: 1872 orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK; 1873 reset_agent = 0; 1874 switch (orb_fun) { 1875 case ORB_FUN_LGI: 1876 { 1877 const struct fwdma_alloc *dma = &sdev->dma; 1878 const off_t sbp_login_off = 1879 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN; 1880 1881 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 1882 sbp_login_off, SBP_LOGIN_SIZE, 1883 BUS_DMASYNC_POSTREAD); 1884 login_res = sdev->login; 1885 login_res->len = ntohs(login_res->len); 1886 login_res->id = ntohs(login_res->id); 1887 login_res->cmd_hi = ntohs(login_res->cmd_hi); 1888 login_res->cmd_lo = ntohl(login_res->cmd_lo); 1889 if (status_valid) { 1890 SBP_DEBUG(0) 1891 printf("%s:%s:%s: login:" 1892 " len %d, ID %d, cmd %08x%08x," 1893 " recon_hold %d\n", 1894 device_xname(sc->sc_fd.dev), 1895 __func__, sdev->bustgtlun, 1896 login_res->len, login_res->id, 1897 login_res->cmd_hi, 1898 login_res->cmd_lo, 1899 ntohs(login_res->recon_hold)); 1900 END_DEBUG 1901 sbp_busy_timeout(sdev); 1902 } else { 1903 /* forgot logout? */ 1904 aprint_error_dev(sc->sc_fd.dev, 1905 "%s:%s: login failed\n", 1906 __func__, sdev->bustgtlun); 1907 sdev->status = SBP_DEV_RESET; 1908 } 1909 break; 1910 } 1911 case ORB_FUN_RCN: 1912 login_res = sdev->login; 1913 if (status_valid) { 1914 SBP_DEBUG(0) 1915 printf("%s:%s:%s: reconnect:" 1916 " len %d, ID %d, cmd %08x%08x\n", 1917 device_xname(sc->sc_fd.dev), 1918 __func__, sdev->bustgtlun, 1919 login_res->len, login_res->id, 1920 login_res->cmd_hi, 1921 login_res->cmd_lo); 1922 END_DEBUG 1923 sbp_agent_reset(sdev); 1924 } else { 1925 /* reconnection hold time exceed? */ 1926 SBP_DEBUG(0) 1927 aprint_error_dev(sc->sc_fd.dev, 1928 "%s:%s: reconnect failed\n", 1929 __func__, sdev->bustgtlun); 1930 END_DEBUG 1931 sbp_login(sdev); 1932 } 1933 break; 1934 case ORB_FUN_LGO: 1935 sdev->status = SBP_DEV_RESET; 1936 break; 1937 case ORB_FUN_RST: 1938 sbp_busy_timeout(sdev); 1939 break; 1940 case ORB_FUN_LUR: 1941 case ORB_FUN_ATA: 1942 case ORB_FUN_ATS: 1943 sbp_agent_reset(sdev); 1944 break; 1945 default: 1946 aprint_error_dev(sc->sc_fd.dev, 1947 "%s:%s: unknown function %d\n", 1948 __func__, sdev->bustgtlun, orb_fun); 1949 break; 1950 } 1951 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); 1952 break; 1953 case OCB_ACT_CMD: 1954 sdev->timeout = 0; 1955 if (ocb->xs != NULL) { 1956 struct scsipi_xfer *xs = ocb->xs; 1957 1958 if (sbp_status->len > 1) 1959 sbp_scsi_status(sbp_status, ocb); 1960 else 1961 if (sbp_status->resp != SBP_REQ_CMP) 1962 xs->error = XS_DRIVER_STUFFUP; 1963 else { 1964 xs->error = XS_NOERROR; 1965 xs->resid = 0; 1966 } 1967 /* fix up inq data */ 1968 if (xs->cmd->opcode == INQUIRY) 1969 sbp_fix_inq_data(ocb); 1970 scsipi_done(xs); 1971 } 1972 break; 1973 default: 1974 break; 1975 } 1976 } 1977 1978 if (!use_doorbell) 1979 sbp_free_ocb(sdev, ocb); 1980 done: 1981 if (reset_agent) 1982 sbp_agent_reset(sdev); 1983 1984 done0: 1985 xfer->recv.pay_len = SBP_RECV_LEN; 1986 /* The received packet is usually small enough to be stored within 1987 * the buffer. In that case, the controller return ack_complete and 1988 * no respose is necessary. 1989 * 1990 * XXX fwohci.c and firewire.c should inform event_code such as 1991 * ack_complete or ack_pending to upper driver. 1992 */ 1993 #if NEED_RESPONSE 1994 xfer->send.off = 0; 1995 sfp = (struct fw_pkt *)xfer->send.buf; 1996 sfp->mode.wres.dst = rfp->mode.wreqb.src; 1997 xfer->dst = sfp->mode.wres.dst; 1998 xfer->spd = min(sdev->target->fwdev->speed, max_speed); 1999 xfer->hand = sbp_loginres_callback; 2000 2001 sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt; 2002 sfp->mode.wres.tcode = FWTCODE_WRES; 2003 sfp->mode.wres.rtcode = 0; 2004 sfp->mode.wres.pri = 0; 2005 2006 if (fw_asyreq(xfer->fc, -1, xfer) != 0) { 2007 aprint_error_dev(sc->sc_fd.dev, "mgm_orb failed\n"); 2008 mutex_enter(&sc->sc_fwb.fwb_mtx); 2009 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 2010 mutex_exit(&sc->sc_fwb.fwb_mtx); 2011 } 2012 #else 2013 /* recycle */ 2014 mutex_enter(&sc->sc_fwb.fwb_mtx); 2015 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 2016 mutex_exit(&sc->sc_fwb.fwb_mtx); 2017 #endif 2018 2019 return; 2020 2021 } 2022 2023 static int 2024 sbp_logout_all(struct sbp_softc *sbp) 2025 { 2026 struct sbp_target *target; 2027 struct sbp_dev *sdev; 2028 int i; 2029 2030 SBP_DEBUG(0) 2031 printf("sbp_logout_all\n"); 2032 END_DEBUG 2033 target = &sbp->sc_target; 2034 if (target->luns != NULL) { 2035 for (i = 0; i < target->num_lun; i++) { 2036 sdev = target->luns[i]; 2037 if (sdev == NULL) 2038 continue; 2039 callout_stop(&sdev->login_callout); 2040 if (sdev->status >= SBP_DEV_TOATTACH && 2041 sdev->status <= SBP_DEV_ATTACHED) 2042 sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL); 2043 } 2044 } 2045 2046 return 0; 2047 } 2048 2049 static void 2050 sbp_free_sdev(struct sbp_dev *sdev) 2051 { 2052 struct sbp_softc *sc = sdev->target->sbp; 2053 int i; 2054 2055 if (sdev == NULL) 2056 return; 2057 for (i = 0; i < SBP_QUEUE_LEN; i++) 2058 bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap); 2059 fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr); 2060 free(sdev, M_SBP); 2061 sdev = NULL; 2062 } 2063 2064 static void 2065 sbp_free_target(struct sbp_target *target) 2066 { 2067 struct fw_xfer *xfer, *next; 2068 int i; 2069 2070 if (target->luns == NULL) 2071 return; 2072 callout_stop(&target->mgm_ocb_timeout); 2073 for (i = 0; i < target->num_lun; i++) 2074 sbp_free_sdev(target->luns[i]); 2075 2076 for (xfer = STAILQ_FIRST(&target->xferlist); 2077 xfer != NULL; xfer = next) { 2078 next = STAILQ_NEXT(xfer, link); 2079 fw_xfer_free_buf(xfer); 2080 } 2081 STAILQ_INIT(&target->xferlist); 2082 free(target->luns, M_SBP); 2083 target->num_lun = 0; 2084 target->luns = NULL; 2085 target->fwdev = NULL; 2086 } 2087 2088 static void 2089 sbp_scsipi_detach_sdev(struct sbp_dev *sdev) 2090 { 2091 struct sbp_target *target; 2092 struct sbp_softc *sbp; 2093 2094 if (sdev == NULL) 2095 return; 2096 2097 target = sdev->target; 2098 if (target == NULL) 2099 return; 2100 2101 sbp = target->sbp; 2102 2103 if (sdev->status == SBP_DEV_DEAD) 2104 return; 2105 if (sdev->status == SBP_DEV_RESET) 2106 return; 2107 if (sdev->periph != NULL) { 2108 scsipi_periph_thaw(sdev->periph, sdev->freeze); 2109 scsipi_channel_thaw(&sbp->sc_channel, 0); /* XXXX */ 2110 sdev->freeze = 0; 2111 if (scsipi_target_detach(&sbp->sc_channel, 2112 target->target_id, sdev->lun_id, DETACH_FORCE) != 0) { 2113 aprint_error_dev(sbp->sc_fd.dev, "detach failed\n"); 2114 } 2115 sdev->periph = NULL; 2116 } 2117 sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP); 2118 } 2119 2120 static void 2121 sbp_scsipi_detach_target(struct sbp_target *target) 2122 { 2123 struct sbp_softc *sbp = target->sbp; 2124 int i; 2125 2126 if (target->luns != NULL) { 2127 SBP_DEBUG(0) 2128 printf("sbp_detach_target %d\n", target->target_id); 2129 END_DEBUG 2130 for (i = 0; i < target->num_lun; i++) 2131 sbp_scsipi_detach_sdev(target->luns[i]); 2132 if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0) 2133 aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n", 2134 target->target_id); 2135 sbp->sc_bus = NULL; 2136 } 2137 } 2138 2139 static void 2140 sbp_target_reset(struct sbp_dev *sdev, int method) 2141 { 2142 struct sbp_softc *sc; 2143 struct sbp_target *target = sdev->target; 2144 struct sbp_dev *tsdev; 2145 int i; 2146 2147 sc = target->sbp; 2148 for (i = 0; i < target->num_lun; i++) { 2149 tsdev = target->luns[i]; 2150 if (tsdev == NULL) 2151 continue; 2152 if (tsdev->status == SBP_DEV_DEAD) 2153 continue; 2154 if (tsdev->status == SBP_DEV_RESET) 2155 continue; 2156 if (sdev->periph != NULL) { 2157 scsipi_periph_freeze(tsdev->periph, 1); 2158 tsdev->freeze++; 2159 } 2160 sbp_abort_all_ocbs(tsdev, XS_TIMEOUT); 2161 if (method == 2) 2162 tsdev->status = SBP_DEV_LOGIN; 2163 } 2164 switch (method) { 2165 case 1: 2166 aprint_error("target reset\n"); 2167 sbp_mgm_orb(sdev, ORB_FUN_RST, NULL); 2168 break; 2169 case 2: 2170 aprint_error("reset start\n"); 2171 sbp_reset_start(sdev); 2172 break; 2173 } 2174 } 2175 2176 static void 2177 sbp_mgm_timeout(void *arg) 2178 { 2179 struct sbp_ocb *ocb = (struct sbp_ocb *)arg; 2180 struct sbp_dev *sdev = ocb->sdev; 2181 struct sbp_target *target = sdev->target; 2182 2183 aprint_error_dev(sdev->target->sbp->sc_fd.dev, 2184 "%s:%s: request timeout(mgm orb:0x%08x) ... ", 2185 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); 2186 target->mgm_ocb_cur = NULL; 2187 sbp_free_ocb(sdev, ocb); 2188 #if 0 2189 /* XXX */ 2190 aprint_error("run next request\n"); 2191 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); 2192 #endif 2193 aprint_error_dev(sdev->target->sbp->sc_fd.dev, 2194 "%s:%s: reset start\n", __func__, sdev->bustgtlun); 2195 sbp_reset_start(sdev); 2196 } 2197 2198 static void 2199 sbp_timeout(void *arg) 2200 { 2201 struct sbp_ocb *ocb = (struct sbp_ocb *)arg; 2202 struct sbp_dev *sdev = ocb->sdev; 2203 2204 aprint_error_dev(sdev->target->sbp->sc_fd.dev, 2205 "%s:%s: request timeout(cmd orb:0x%08x) ... ", 2206 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr); 2207 2208 sdev->timeout++; 2209 switch (sdev->timeout) { 2210 case 1: 2211 aprint_error("agent reset\n"); 2212 if (sdev->periph != NULL) { 2213 scsipi_periph_freeze(sdev->periph, 1); 2214 sdev->freeze++; 2215 } 2216 sbp_abort_all_ocbs(sdev, XS_TIMEOUT); 2217 sbp_agent_reset(sdev); 2218 break; 2219 case 2: 2220 case 3: 2221 sbp_target_reset(sdev, sdev->timeout - 1); 2222 break; 2223 default: 2224 aprint_error("\n"); 2225 #if 0 2226 /* XXX give up */ 2227 sbp_scsipi_detach_target(target); 2228 if (target->luns != NULL) 2229 free(target->luns, M_SBP); 2230 target->num_lun = 0; 2231 target->luns = NULL; 2232 target->fwdev = NULL; 2233 #endif 2234 } 2235 } 2236 2237 static void 2238 sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs) 2239 { 2240 struct sbp_target *target = &sc->sc_target; 2241 struct sbp_dev *sdev = NULL; 2242 struct sbp_ocb *ocb; 2243 int speed, flag, error; 2244 void *cdb; 2245 2246 /* target:lun -> sdev mapping */ 2247 if (target->fwdev != NULL && 2248 xs->xs_periph->periph_lun < target->num_lun) { 2249 sdev = target->luns[xs->xs_periph->periph_lun]; 2250 if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED && 2251 sdev->status != SBP_DEV_PROBE) 2252 sdev = NULL; 2253 } 2254 2255 if (sdev == NULL) { 2256 SBP_DEBUG(1) 2257 printf("%s:%d:%d: Invalid target (target needed)\n", 2258 sc ? device_xname(sc->sc_fd.dev) : "???", 2259 xs->xs_periph->periph_target, 2260 xs->xs_periph->periph_lun); 2261 END_DEBUG 2262 2263 xs->error = XS_DRIVER_STUFFUP; 2264 scsipi_done(xs); 2265 return; 2266 } 2267 2268 SBP_DEBUG(2) 2269 printf("%s:%d:%d:" 2270 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x," 2271 " flags: 0x%02x, %db cmd/%db data\n", 2272 device_xname(sc->sc_fd.dev), 2273 xs->xs_periph->periph_target, 2274 xs->xs_periph->periph_lun, 2275 xs->cmd->opcode, 2276 xs->cmd->bytes[0], xs->cmd->bytes[1], 2277 xs->cmd->bytes[2], xs->cmd->bytes[3], 2278 xs->cmd->bytes[4], xs->cmd->bytes[5], 2279 xs->cmd->bytes[6], xs->cmd->bytes[7], 2280 xs->cmd->bytes[8], 2281 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT), 2282 xs->cmdlen, xs->datalen); 2283 END_DEBUG 2284 mutex_enter(&sc->sc_mtx); 2285 ocb = sbp_get_ocb(sdev); 2286 mutex_exit(&sc->sc_mtx); 2287 if (ocb == NULL) { 2288 xs->error = XS_REQUEUE; 2289 if (sdev->freeze == 0) { 2290 scsipi_periph_freeze(sdev->periph, 1); 2291 sdev->freeze++; 2292 } 2293 scsipi_done(xs); 2294 return; 2295 } 2296 2297 ocb->flags = OCB_ACT_CMD; 2298 ocb->sdev = sdev; 2299 ocb->xs = xs; 2300 ocb->orb[0] = htonl(1 << 31); 2301 ocb->orb[1] = 0; 2302 ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16)); 2303 ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET); 2304 speed = min(target->fwdev->speed, max_speed); 2305 ocb->orb[4] = 2306 htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7)); 2307 if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) == 2308 XS_CTL_DATA_IN) { 2309 ocb->orb[4] |= htonl(ORB_CMD_IN); 2310 flag = BUS_DMA_READ; 2311 } else 2312 flag = BUS_DMA_WRITE; 2313 2314 cdb = xs->cmd; 2315 memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen); 2316 /* 2317 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3])); 2318 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7])); 2319 */ 2320 if (xs->datalen > 0) { 2321 error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap, 2322 xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag); 2323 if (error) { 2324 aprint_error_dev(sc->sc_fd.dev, 2325 "DMA map load error %d\n", error); 2326 xs->error = XS_DRIVER_STUFFUP; 2327 scsipi_done(xs); 2328 } else 2329 sbp_execute_ocb(ocb, ocb->dmamap->dm_segs, 2330 ocb->dmamap->dm_nsegs); 2331 } else 2332 sbp_execute_ocb(ocb, NULL, 0); 2333 2334 return; 2335 } 2336 2337 static void 2338 sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg) 2339 { 2340 struct sbp_ocb *prev; 2341 bus_dma_segment_t *s; 2342 int i; 2343 2344 SBP_DEBUG(2) 2345 printf("sbp_execute_ocb: seg %d", seg); 2346 for (i = 0; i < seg; i++) 2347 printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr, 2348 (uintmax_t)segments[i].ds_len); 2349 printf("\n"); 2350 END_DEBUG 2351 2352 if (seg == 1) { 2353 /* direct pointer */ 2354 s = segments; 2355 if (s->ds_len > SBP_SEG_MAX) 2356 panic("ds_len > SBP_SEG_MAX, fix busdma code"); 2357 ocb->orb[3] = htonl(s->ds_addr); 2358 ocb->orb[4] |= htonl(s->ds_len); 2359 } else if (seg > 1) { 2360 /* page table */ 2361 for (i = 0; i < seg; i++) { 2362 s = &segments[i]; 2363 SBP_DEBUG(0) 2364 /* XXX LSI Logic "< 16 byte" bug might be hit */ 2365 if (s->ds_len < 16) 2366 printf("sbp_execute_ocb: warning, " 2367 "segment length(%jd) is less than 16." 2368 "(seg=%d/%d)\n", 2369 (uintmax_t)s->ds_len, i + 1, seg); 2370 END_DEBUG 2371 if (s->ds_len > SBP_SEG_MAX) 2372 panic("ds_len > SBP_SEG_MAX, fix busdma code"); 2373 ocb->ind_ptr[i].hi = htonl(s->ds_len << 16); 2374 ocb->ind_ptr[i].lo = htonl(s->ds_addr); 2375 } 2376 ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg); 2377 } 2378 2379 if (seg > 0) { 2380 struct sbp_softc *sc = ocb->sdev->target->sbp; 2381 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 2382 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE; 2383 2384 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap, 2385 0, ocb->dmamap->dm_mapsize, flag); 2386 } 2387 prev = sbp_enqueue_ocb(ocb->sdev, ocb); 2388 SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE); 2389 if (use_doorbell) { 2390 if (prev == NULL) { 2391 if (ocb->sdev->last_ocb != NULL) 2392 sbp_doorbell(ocb->sdev); 2393 else 2394 sbp_orb_pointer(ocb->sdev, ocb); 2395 } 2396 } else 2397 if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) { 2398 ocb->sdev->flags &= ~ORB_LINK_DEAD; 2399 sbp_orb_pointer(ocb->sdev, ocb); 2400 } 2401 } 2402 2403 static struct sbp_ocb * 2404 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status) 2405 { 2406 struct sbp_softc *sc = sdev->target->sbp; 2407 struct sbp_ocb *ocb; 2408 struct sbp_ocb *next; 2409 int order = 0; 2410 int flags; 2411 2412 SBP_DEBUG(1) 2413 printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev), 2414 __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo), 2415 sbp_status->src); 2416 END_DEBUG 2417 mutex_enter(&sc->sc_mtx); 2418 for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) { 2419 next = STAILQ_NEXT(ocb, ocb); 2420 flags = ocb->flags; 2421 if (OCB_MATCH(ocb, sbp_status)) { 2422 /* found */ 2423 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, 2424 BUS_DMASYNC_POSTWRITE); 2425 STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb); 2426 if (ocb->xs != NULL) 2427 callout_stop(&ocb->xs->xs_callout); 2428 if (ntohl(ocb->orb[4]) & 0xffff) { 2429 const int flag = 2430 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 2431 BUS_DMASYNC_POSTREAD : 2432 BUS_DMASYNC_POSTWRITE; 2433 2434 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap, 2435 0, ocb->dmamap->dm_mapsize, flag); 2436 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap); 2437 2438 } 2439 if (!use_doorbell) { 2440 if (sbp_status->src == SRC_NO_NEXT) { 2441 if (next != NULL) 2442 sbp_orb_pointer(sdev, next); 2443 else if (order > 0) 2444 /* 2445 * Unordered execution 2446 * We need to send pointer for 2447 * next ORB 2448 */ 2449 sdev->flags |= ORB_LINK_DEAD; 2450 } 2451 } 2452 break; 2453 } else 2454 order++; 2455 } 2456 mutex_exit(&sc->sc_mtx); 2457 2458 if (ocb && use_doorbell) { 2459 /* 2460 * XXX this is not correct for unordered 2461 * execution. 2462 */ 2463 if (sdev->last_ocb != NULL) 2464 sbp_free_ocb(sdev, sdev->last_ocb); 2465 sdev->last_ocb = ocb; 2466 if (next != NULL && 2467 sbp_status->src == SRC_NO_NEXT) 2468 sbp_doorbell(sdev); 2469 } 2470 2471 SBP_DEBUG(0) 2472 if (ocb && order > 0) 2473 printf("%s:%s:%s: unordered execution order:%d\n", 2474 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun, 2475 order); 2476 END_DEBUG 2477 return ocb; 2478 } 2479 2480 static struct sbp_ocb * 2481 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) 2482 { 2483 struct sbp_softc *sc = sdev->target->sbp; 2484 struct sbp_ocb *tocb, *prev, *prev2; 2485 2486 SBP_DEBUG(1) 2487 printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev), 2488 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); 2489 END_DEBUG 2490 mutex_enter(&sc->sc_mtx); 2491 prev = NULL; 2492 STAILQ_FOREACH(tocb, &sdev->ocbs, ocb) 2493 prev = tocb; 2494 prev2 = prev; 2495 STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb); 2496 mutex_exit(&sc->sc_mtx); 2497 2498 callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout), 2499 sbp_timeout, ocb); 2500 2501 if (use_doorbell && prev == NULL) 2502 prev2 = sdev->last_ocb; 2503 2504 if (prev2 != NULL) { 2505 SBP_DEBUG(2) 2506 printf("linking chain 0x%jx -> 0x%jx\n", 2507 (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr); 2508 END_DEBUG 2509 /* 2510 * Suppress compiler optimization so that orb[1] must be 2511 * written first. 2512 * XXX We may need an explicit memory barrier for other 2513 * architectures other than i386/amd64. 2514 */ 2515 *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr); 2516 *(volatile uint32_t *)&prev2->orb[0] = 0; 2517 } 2518 2519 return prev; 2520 } 2521 2522 static struct sbp_ocb * 2523 sbp_get_ocb(struct sbp_dev *sdev) 2524 { 2525 struct sbp_softc *sc = sdev->target->sbp; 2526 struct sbp_ocb *ocb; 2527 2528 KASSERT(mutex_owned(&sc->sc_mtx)); 2529 2530 ocb = STAILQ_FIRST(&sdev->free_ocbs); 2531 if (ocb == NULL) { 2532 sdev->flags |= ORB_SHORTAGE; 2533 aprint_error_dev(sc->sc_fd.dev, 2534 "ocb shortage!!!\n"); 2535 return NULL; 2536 } 2537 STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb); 2538 ocb->xs = NULL; 2539 return ocb; 2540 } 2541 2542 static void 2543 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb) 2544 { 2545 struct sbp_softc *sc = sdev->target->sbp; 2546 int count; 2547 2548 ocb->flags = 0; 2549 ocb->xs = NULL; 2550 2551 mutex_enter(&sc->sc_mtx); 2552 STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb); 2553 mutex_exit(&sc->sc_mtx); 2554 if (sdev->flags & ORB_SHORTAGE) { 2555 sdev->flags &= ~ORB_SHORTAGE; 2556 count = sdev->freeze; 2557 sdev->freeze = 0; 2558 if (sdev->periph) 2559 scsipi_periph_thaw(sdev->periph, count); 2560 scsipi_channel_thaw(&sc->sc_channel, 0); 2561 } 2562 } 2563 2564 static void 2565 sbp_abort_ocb(struct sbp_ocb *ocb, int status) 2566 { 2567 struct sbp_softc *sc; 2568 struct sbp_dev *sdev; 2569 2570 sdev = ocb->sdev; 2571 sc = sdev->target->sbp; 2572 SBP_DEBUG(0) 2573 printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev), 2574 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr); 2575 END_DEBUG 2576 SBP_DEBUG(1) 2577 if (ocb->xs != NULL) 2578 sbp_print_scsi_cmd(ocb); 2579 END_DEBUG 2580 if (ntohl(ocb->orb[4]) & 0xffff) { 2581 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ? 2582 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE; 2583 2584 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap, 2585 0, ocb->dmamap->dm_mapsize, flag); 2586 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap); 2587 } 2588 if (ocb->xs != NULL) { 2589 callout_stop(&ocb->xs->xs_callout); 2590 ocb->xs->error = status; 2591 scsipi_done(ocb->xs); 2592 } 2593 sbp_free_ocb(sdev, ocb); 2594 } 2595 2596 static void 2597 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status) 2598 { 2599 struct sbp_softc *sc = sdev->target->sbp; 2600 struct sbp_ocb *ocb, *next; 2601 STAILQ_HEAD(, sbp_ocb) temp; 2602 2603 mutex_enter(&sc->sc_mtx); 2604 STAILQ_INIT(&temp); 2605 STAILQ_CONCAT(&temp, &sdev->ocbs); 2606 STAILQ_INIT(&sdev->ocbs); 2607 mutex_exit(&sc->sc_mtx); 2608 2609 for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) { 2610 next = STAILQ_NEXT(ocb, ocb); 2611 sbp_abort_ocb(ocb, status); 2612 } 2613 if (sdev->last_ocb != NULL) { 2614 sbp_free_ocb(sdev, sdev->last_ocb); 2615 sdev->last_ocb = NULL; 2616 } 2617 } 2618