1 /* $NetBSD: sbp.c,v 1.34 2012/04/29 20:27:31 dsl Exp $ */ 2 /*- 3 * Copyright (c) 2003 Hidetoshi Shimokawa 4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the acknowledgement as bellow: 17 * 18 * This product includes software developed by K. Kobayashi and H. Shimokawa 19 * 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 
34 * 35 * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $ 36 * 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.34 2012/04/29 20:27:31 dsl Exp $"); 41 42 43 #include <sys/param.h> 44 #include <sys/device.h> 45 #include <sys/errno.h> 46 #include <sys/buf.h> 47 #include <sys/callout.h> 48 #include <sys/condvar.h> 49 #include <sys/kernel.h> 50 #include <sys/kthread.h> 51 #include <sys/malloc.h> 52 #include <sys/mutex.h> 53 #include <sys/proc.h> 54 #include <sys/sysctl.h> 55 56 #include <sys/bus.h> 57 58 #include <dev/scsipi/scsi_spc.h> 59 #include <dev/scsipi/scsi_all.h> 60 #include <dev/scsipi/scsipi_all.h> 61 #include <dev/scsipi/scsiconf.h> 62 #include <dev/scsipi/scsipiconf.h> 63 64 #include <dev/ieee1394/firewire.h> 65 #include <dev/ieee1394/firewirereg.h> 66 #include <dev/ieee1394/fwdma.h> 67 #include <dev/ieee1394/iec13213.h> 68 #include <dev/ieee1394/sbp.h> 69 70 #include "locators.h" 71 72 73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ 74 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) 75 76 #define SBP_NUM_TARGETS 8 /* MAX 64 */ 77 #define SBP_NUM_LUNS 64 78 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */) 79 #define SBP_DMA_SIZE PAGE_SIZE 80 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) 81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) 82 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) 83 84 /* 85 * STATUS FIFO addressing 86 * bit 87 * ----------------------- 88 * 0- 1( 2): 0 (alignment) 89 * 2- 9( 8): lun 90 * 10-31(14): unit 91 * 32-47(16): SBP_BIND_HI 92 * 48-64(16): bus_id, node_id 93 */ 94 #define SBP_BIND_HI 0x1 95 #define SBP_DEV2ADDR(u, l) \ 96 (((uint64_t)SBP_BIND_HI << 32) |\ 97 (((u) & 0x3fff) << 10) |\ 98 (((l) & 0xff) << 2)) 99 #define SBP_ADDR2UNIT(a) (((a) >> 10) & 0x3fff) 100 #define SBP_ADDR2LUN(a) (((a) >> 2) & 0xff) 101 #define SBP_INITIATOR 7 102 103 static const char *orb_fun_name[] = { 104 ORB_FUN_NAMES 105 }; 106 107 static int debug = 0; 108 static int auto_login = 1; 109 static int max_speed = -1; 110 static int sbp_cold = 1; 111 static int ex_login = 1; 112 static int login_delay = 1000; /* msec */ 113 static int scan_delay = 500; /* msec */ 114 static int use_doorbell = 0; 115 static int sbp_tags = 0; 116 117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper); 118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO); 119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO); 120 121 /* 122 * Setup sysctl(3) MIB, hw.sbp.* 123 * 124 * TBD condition CTLFLAG_PERMANENT on being a module or not 125 */ 126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup") 127 { 128 int rc, sbp_node_num; 129 const struct sysctlnode *node; 130 131 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 132 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 133 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) 134 goto err; 135 136 if ((rc = sysctl_createv(clog, 0, NULL, &node, 137 CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp", 138 SYSCTL_DESCR("sbp controls"), NULL, 0, NULL, 139 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 140 goto err; 141 sbp_node_num = node->sysctl_num; 142 143 /* sbp auto login flag */ 144 if ((rc = sysctl_createv(clog, 0, NULL, &node, 145 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 146 "auto_login", SYSCTL_DESCR("SBP perform login automatically"), 147 NULL, 0, &auto_login, 148 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 149 goto err; 150 151 /* sbp max speed */ 152 if ((rc = sysctl_createv(clog, 0, NULL, 
&node, 153 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 154 "max_speed", SYSCTL_DESCR("SBP transfer max speed"), 155 sysctl_sbp_verify_max_speed, 0, &max_speed, 156 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 157 goto err; 158 159 /* sbp exclusive login flag */ 160 if ((rc = sysctl_createv(clog, 0, NULL, &node, 161 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 162 "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"), 163 NULL, 0, &ex_login, 164 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 165 goto err; 166 167 /* sbp login delay */ 168 if ((rc = sysctl_createv(clog, 0, NULL, &node, 169 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 170 "login_delay", SYSCTL_DESCR("SBP login delay in msec"), 171 NULL, 0, &login_delay, 172 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 173 goto err; 174 175 /* sbp scan delay */ 176 if ((rc = sysctl_createv(clog, 0, NULL, &node, 177 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 178 "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"), 179 NULL, 0, &scan_delay, 180 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 181 goto err; 182 183 /* sbp use doorbell flag */ 184 if ((rc = sysctl_createv(clog, 0, NULL, &node, 185 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 186 "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"), 187 NULL, 0, &use_doorbell, 188 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 189 goto err; 190 191 /* sbp force tagged queuing */ 192 if ((rc = sysctl_createv(clog, 0, NULL, &node, 193 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 194 "tags", SYSCTL_DESCR("SBP tagged queuing support"), 195 sysctl_sbp_verify_tags, 0, &sbp_tags, 196 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 197 goto err; 198 199 /* sbp driver debug flag */ 200 if ((rc = sysctl_createv(clog, 0, NULL, &node, 201 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 202 "sbp_debug", SYSCTL_DESCR("SBP debug flag"), 203 NULL, 0, &debug, 204 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 205 goto err; 206 207 return; 208 209 err: 210 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 211 } 212 213 static int 214 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper) 215 { 216 int error, t; 217 struct sysctlnode node; 218 219 node = *rnode; 220 t = *(int*)rnode->sysctl_data; 221 node.sysctl_data = &t; 222 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 223 if (error || newp == NULL) 224 return error; 225 226 if (t < lower || t > upper) 227 return EINVAL; 228 229 *(int*)rnode->sysctl_data = t; 230 231 return 0; 232 } 233 234 static int 235 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS) 236 { 237 238 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400); 239 } 240 241 static int 242 sysctl_sbp_verify_tags(SYSCTLFN_ARGS) 243 { 244 245 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1); 246 } 247 248 #define NEED_RESPONSE 0 249 250 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) 251 #ifdef __sparc64__ /* iommu */ 252 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX) 253 #else 254 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) 255 #endif 256 struct sbp_ocb { 257 uint32_t orb[8]; 258 #define IND_PTR_OFFSET (sizeof(uint32_t) * 8) 259 struct ind_ptr ind_ptr[SBP_IND_MAX]; 260 struct scsipi_xfer *xs; 261 struct sbp_dev *sdev; 262 uint16_t index; 263 uint16_t flags; /* XXX should be removed */ 264 bus_dmamap_t dmamap; 265 bus_addr_t bus_addr; 266 STAILQ_ENTRY(sbp_ocb) ocb; 267 }; 268 269 #define SBP_ORB_DMA_SYNC(dma, i, op) \ 270 bus_dmamap_sync((dma).dma_tag, 
(dma).dma_map, \ 271 sizeof(struct sbp_ocb) * (i), \ 272 sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op)); 273 274 #define OCB_ACT_MGM 0 275 #define OCB_ACT_CMD 1 276 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) 277 278 struct sbp_dev{ 279 #define SBP_DEV_RESET 0 /* accept login */ 280 #define SBP_DEV_LOGIN 1 /* to login */ 281 #if 0 282 #define SBP_DEV_RECONN 2 /* to reconnect */ 283 #endif 284 #define SBP_DEV_TOATTACH 3 /* to attach */ 285 #define SBP_DEV_PROBE 4 /* scan lun */ 286 #define SBP_DEV_ATTACHED 5 /* in operation */ 287 #define SBP_DEV_DEAD 6 /* unavailable unit */ 288 #define SBP_DEV_RETRY 7 /* unavailable unit */ 289 uint8_t status:4, 290 timeout:4; 291 uint8_t type; 292 uint16_t lun_id; 293 uint16_t freeze; 294 #define ORB_LINK_DEAD (1 << 0) 295 #define VALID_LUN (1 << 1) 296 #define ORB_POINTER_ACTIVE (1 << 2) 297 #define ORB_POINTER_NEED (1 << 3) 298 #define ORB_DOORBELL_ACTIVE (1 << 4) 299 #define ORB_DOORBELL_NEED (1 << 5) 300 #define ORB_SHORTAGE (1 << 6) 301 uint16_t flags; 302 struct scsipi_periph *periph; 303 struct sbp_target *target; 304 struct fwdma_alloc dma; 305 struct sbp_login_res *login; 306 struct callout login_callout; 307 struct sbp_ocb *ocb; 308 STAILQ_HEAD(, sbp_ocb) ocbs; 309 STAILQ_HEAD(, sbp_ocb) free_ocbs; 310 struct sbp_ocb *last_ocb; 311 char vendor[32]; 312 char product[32]; 313 char revision[10]; 314 char bustgtlun[32]; 315 }; 316 317 struct sbp_target { 318 int target_id; 319 int num_lun; 320 struct sbp_dev **luns; 321 struct sbp_softc *sbp; 322 struct fw_device *fwdev; 323 uint32_t mgm_hi, mgm_lo; 324 struct sbp_ocb *mgm_ocb_cur; 325 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; 326 struct callout mgm_ocb_timeout; 327 STAILQ_HEAD(, fw_xfer) xferlist; 328 int n_xfer; 329 }; 330 331 struct sbp_softc { 332 struct firewire_dev_comm sc_fd; 333 struct scsipi_adapter sc_adapter; 334 struct scsipi_channel sc_channel; 335 device_t sc_bus; 336 struct lwp *sc_lwp; 337 struct sbp_target sc_target; 338 struct fw_bind sc_fwb; 339 bus_dma_tag_t sc_dmat; 340 struct timeval sc_last_busreset; 341 int sc_flags; 342 kmutex_t sc_mtx; 343 kcondvar_t sc_cv; 344 }; 345 346 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394"); 347 MALLOC_DECLARE(M_SBP); 348 349 350 static int sbpmatch(device_t, cfdata_t, void *); 351 static void sbpattach(device_t, device_t, void *); 352 static int sbpdetach(device_t, int); 353 354 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 355 void *); 356 static void sbp_minphys(struct buf *); 357 358 static void sbp_show_sdev_info(struct sbp_dev *); 359 static void sbp_alloc_lun(struct sbp_target *); 360 static struct sbp_target *sbp_alloc_target(struct sbp_softc *, 361 struct fw_device *); 362 static void sbp_probe_lun(struct sbp_dev *); 363 static void sbp_login_callout(void *); 364 static void sbp_login(struct sbp_dev *); 365 static void sbp_probe_target(void *); 366 static void sbp_post_busreset(void *); 367 static void sbp_post_explore(void *); 368 #if NEED_RESPONSE 369 static void sbp_loginres_callback(struct fw_xfer *); 370 #endif 371 static inline void sbp_xfer_free(struct fw_xfer *); 372 static void sbp_reset_start_callback(struct fw_xfer *); 373 static void sbp_reset_start(struct sbp_dev *); 374 static void sbp_mgm_callback(struct fw_xfer *); 375 static void sbp_scsipi_scan_target(void *); 376 static inline void sbp_scan_dev(struct sbp_dev *); 377 static void sbp_do_attach(struct fw_xfer *); 378 static void sbp_agent_reset_callback(struct fw_xfer *); 379 static void sbp_agent_reset(struct sbp_dev *); 380 
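/*
 * Illustrative example (not used by the driver): how the STATUS FIFO
 * address macros defined above pack and unpack a unit/LUN pair.  Per the
 * masks, the unit field is 14 bits wide (0x3fff, bits 10-23 of the low
 * quadlet), the LUN field is 8 bits (bits 2-9), and SBP_BIND_HI occupies
 * the upper 32 bits.
 *
 *	SBP_DEV2ADDR(3, 1) == 0x0000000100000c04
 *		== ((uint64_t)SBP_BIND_HI << 32) | (3 << 10) | (1 << 2)
 *	SBP_ADDR2UNIT(0x0000000100000c04) == 3
 *	SBP_ADDR2LUN(0x0000000100000c04)  == 1
 */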
static void sbp_busy_timeout_callback(struct fw_xfer *); 381 static void sbp_busy_timeout(struct sbp_dev *); 382 static void sbp_orb_pointer_callback(struct fw_xfer *); 383 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *); 384 static void sbp_doorbell_callback(struct fw_xfer *); 385 static void sbp_doorbell(struct sbp_dev *); 386 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int); 387 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *); 388 static void sbp_print_scsi_cmd(struct sbp_ocb *); 389 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *); 390 static void sbp_fix_inq_data(struct sbp_ocb *); 391 static void sbp_recv(struct fw_xfer *); 392 static int sbp_logout_all(struct sbp_softc *); 393 static void sbp_free_sdev(struct sbp_dev *); 394 static void sbp_free_target(struct sbp_target *); 395 static void sbp_scsipi_detach_sdev(struct sbp_dev *); 396 static void sbp_scsipi_detach_target(struct sbp_target *); 397 static void sbp_target_reset(struct sbp_dev *, int); 398 static void sbp_mgm_timeout(void *); 399 static void sbp_timeout(void *); 400 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *); 401 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int); 402 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *); 403 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *); 404 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *); 405 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *); 406 static void sbp_abort_ocb(struct sbp_ocb *, int); 407 static void sbp_abort_all_ocbs(struct sbp_dev *, int); 408 409 410 static const char *orb_status0[] = { 411 /* 0 */ "No additional information to report", 412 /* 1 */ "Request type not supported", 413 /* 2 */ "Speed not supported", 414 /* 3 */ "Page size not supported", 415 /* 4 */ "Access denied", 416 /* 5 */ "Logical unit not supported", 417 /* 6 */ "Maximum payload too small", 418 /* 7 */ "Reserved for future standardization", 419 /* 8 */ "Resources unavailable", 420 /* 9 */ "Function rejected", 421 /* A */ "Login ID not recognized", 422 /* B */ "Dummy ORB completed", 423 /* C */ "Request aborted", 424 /* FF */ "Unspecified error" 425 #define MAX_ORB_STATUS0 0xd 426 }; 427 428 static const char *orb_status1_object[] = { 429 /* 0 */ "Operation request block (ORB)", 430 /* 1 */ "Data buffer", 431 /* 2 */ "Page table", 432 /* 3 */ "Unable to specify" 433 }; 434 435 static const char *orb_status1_serial_bus_error[] = { 436 /* 0 */ "Missing acknowledge", 437 /* 1 */ "Reserved; not to be used", 438 /* 2 */ "Time-out error", 439 /* 3 */ "Reserved; not to be used", 440 /* 4 */ "Busy retry limit exceeded(X)", 441 /* 5 */ "Busy retry limit exceeded(A)", 442 /* 6 */ "Busy retry limit exceeded(B)", 443 /* 7 */ "Reserved for future standardization", 444 /* 8 */ "Reserved for future standardization", 445 /* 9 */ "Reserved for future standardization", 446 /* A */ "Reserved for future standardization", 447 /* B */ "Tardy retry limit exceeded", 448 /* C */ "Conflict error", 449 /* D */ "Data error", 450 /* E */ "Type error", 451 /* F */ "Address error" 452 }; 453 454 455 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc), 456 sbpmatch, sbpattach, sbpdetach, NULL); 457 458 459 int 460 sbpmatch(device_t parent, cfdata_t cf, void *aux) 461 { 462 struct fw_attach_args *fwa = aux; 463 464 if (strcmp(fwa->name, "sbp") == 0) 465 return 1; 466 return 0; 467 } 468 469 static void 470 sbpattach(device_t parent, device_t self, void *aux) 
471 { 472 struct sbp_softc *sc = device_private(self); 473 struct fw_attach_args *fwa = (struct fw_attach_args *)aux; 474 struct firewire_comm *fc; 475 struct scsipi_adapter *sc_adapter = &sc->sc_adapter; 476 struct scsipi_channel *sc_channel = &sc->sc_channel; 477 struct sbp_target *target = &sc->sc_target; 478 int dv_unit; 479 480 aprint_naive("\n"); 481 aprint_normal(": SBP-2/SCSI over IEEE1394\n"); 482 483 sc->sc_fd.dev = self; 484 485 if (cold) 486 sbp_cold++; 487 sc->sc_fd.fc = fc = fwa->fc; 488 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM); 489 cv_init(&sc->sc_cv, "sbp"); 490 491 if (max_speed < 0) 492 max_speed = fc->speed; 493 494 sc->sc_dmat = fc->dmat; 495 496 sc->sc_target.fwdev = NULL; 497 sc->sc_target.luns = NULL; 498 499 /* Initialize mutexes and lists before we can error out 500 * to prevent crashes on detach 501 */ 502 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM); 503 STAILQ_INIT(&sc->sc_fwb.xferlist); 504 505 if (sbp_alloc_target(sc, fwa->fwdev) == NULL) 506 return; 507 508 sc_adapter->adapt_dev = sc->sc_fd.dev; 509 sc_adapter->adapt_nchannels = 1; 510 sc_adapter->adapt_max_periph = 1; 511 sc_adapter->adapt_request = sbp_scsipi_request; 512 sc_adapter->adapt_minphys = sbp_minphys; 513 sc_adapter->adapt_openings = 8; 514 515 sc_channel->chan_adapter = sc_adapter; 516 sc_channel->chan_bustype = &scsi_bustype; 517 sc_channel->chan_defquirks = PQUIRK_ONLYBIG; 518 sc_channel->chan_channel = 0; 519 sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE; 520 521 sc_channel->chan_ntargets = 1; 522 sc_channel->chan_nluns = target->num_lun; /* We set nluns 0 now */ 523 sc_channel->chan_id = 1; 524 525 sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint); 526 if (sc->sc_bus == NULL) { 527 aprint_error_dev(self, "attach failed\n"); 528 return; 529 } 530 531 /* We reserve 16 bit space (4 bytes X 64 unit X 256 luns) */ 532 dv_unit = device_unit(sc->sc_fd.dev); 533 sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0); 534 sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1); 535 /* pre-allocate xfer */ 536 fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP, 537 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2, 538 fc, (void *)sc, sbp_recv); 539 fw_bindadd(fc, &sc->sc_fwb); 540 541 sc->sc_fd.post_busreset = sbp_post_busreset; 542 sc->sc_fd.post_explore = sbp_post_explore; 543 544 if (fc->status != FWBUSNOTREADY) { 545 sbp_post_busreset((void *)sc); 546 sbp_post_explore((void *)sc); 547 } 548 } 549 550 static int 551 sbpdetach(device_t self, int flags) 552 { 553 struct sbp_softc *sc = device_private(self); 554 struct firewire_comm *fc = sc->sc_fd.fc; 555 556 sbp_scsipi_detach_target(&sc->sc_target); 557 558 if (sc->sc_target.fwdev && SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) { 559 sbp_logout_all(sc); 560 561 /* XXX wait for logout completion */ 562 mutex_enter(&sc->sc_mtx); 563 cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2); 564 mutex_exit(&sc->sc_mtx); 565 } 566 567 sbp_free_target(&sc->sc_target); 568 569 fw_bindremove(fc, &sc->sc_fwb); 570 fw_xferlist_remove(&sc->sc_fwb.xferlist); 571 mutex_destroy(&sc->sc_fwb.fwb_mtx); 572 573 mutex_destroy(&sc->sc_mtx); 574 cv_destroy(&sc->sc_cv); 575 576 return 0; 577 } 578 579 580 static void 581 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req, 582 void *arg) 583 { 584 struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev); 585 struct scsipi_xfer *xs = arg; 586 int i; 587 588 SBP_DEBUG(1) 589 printf("Called sbp_scsipi_request\n"); 590 END_DEBUG 591 592 switch (req) { 593 case 
ADAPTER_REQ_RUN_XFER: 594 SBP_DEBUG(1) 595 printf("Got req_run_xfer\n"); 596 printf("xs control: 0x%08x, timeout: %d\n", 597 xs->xs_control, xs->timeout); 598 printf("opcode: 0x%02x\n", (int)xs->cmd->opcode); 599 for (i = 0; i < 15; i++) 600 printf("0x%02x ",(int)xs->cmd->bytes[i]); 601 printf("\n"); 602 END_DEBUG 603 if (xs->xs_control & XS_CTL_RESET) { 604 SBP_DEBUG(1) 605 printf("XS_CTL_RESET not supported\n"); 606 END_DEBUG 607 break; 608 } 609 #define SBPSCSI_SBP2_MAX_CDB 12 610 if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) { 611 SBP_DEBUG(0) 612 printf( 613 "sbp doesn't support CDBs larger than %d bytes\n", 614 SBPSCSI_SBP2_MAX_CDB); 615 END_DEBUG 616 xs->error = XS_DRIVER_STUFFUP; 617 scsipi_done(xs); 618 return; 619 } 620 sbp_action1(sc, xs); 621 622 break; 623 case ADAPTER_REQ_GROW_RESOURCES: 624 SBP_DEBUG(1) 625 printf("Got req_grow_resources\n"); 626 END_DEBUG 627 break; 628 case ADAPTER_REQ_SET_XFER_MODE: 629 SBP_DEBUG(1) 630 printf("Got set xfer mode\n"); 631 END_DEBUG 632 break; 633 default: 634 panic("Unknown request: %d\n", (int)req); 635 } 636 } 637 638 static void 639 sbp_minphys(struct buf *bp) 640 { 641 642 minphys(bp); 643 } 644 645 646 /* 647 * Display device characteristics on the console 648 */ 649 static void 650 sbp_show_sdev_info(struct sbp_dev *sdev) 651 { 652 struct fw_device *fwdev = sdev->target->fwdev; 653 struct sbp_softc *sc = sdev->target->sbp; 654 655 aprint_normal_dev(sc->sc_fd.dev, 656 "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n", 657 (sdev->type & 0x40) >> 6, 658 (sdev->type & 0x1f), 659 fwdev->eui.hi, 660 fwdev->eui.lo, 661 fwdev->dst, 662 fwdev->speed, 663 fwdev->maxrec); 664 aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n", 665 sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision); 666 } 667 668 static void 669 sbp_alloc_lun(struct sbp_target *target) 670 { 671 struct crom_context cc; 672 struct csrreg *reg; 673 struct sbp_dev *sdev, **newluns; 674 struct sbp_softc *sc; 675 int maxlun, lun, i; 676 677 sc = target->sbp; 678 crom_init_context(&cc, target->fwdev->csrrom); 679 /* XXX should parse appropriate unit directories only */ 680 maxlun = -1; 681 while (cc.depth >= 0) { 682 reg = crom_search_key(&cc, CROM_LUN); 683 if (reg == NULL) 684 break; 685 lun = reg->val & 0xffff; 686 SBP_DEBUG(0) 687 printf("target %d lun %d found\n", target->target_id, lun); 688 END_DEBUG 689 if (maxlun < lun) 690 maxlun = lun; 691 crom_next(&cc); 692 } 693 if (maxlun < 0) 694 aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n", 695 target->target_id); 696 697 maxlun++; 698 if (maxlun >= SBP_NUM_LUNS) 699 maxlun = SBP_NUM_LUNS; 700 701 /* Invalidate stale devices */ 702 for (lun = 0; lun < target->num_lun; lun++) { 703 sdev = target->luns[lun]; 704 if (sdev == NULL) 705 continue; 706 sdev->flags &= ~VALID_LUN; 707 if (lun >= maxlun) { 708 /* lost device */ 709 sbp_scsipi_detach_sdev(sdev); 710 sbp_free_sdev(sdev); 711 target->luns[lun] = NULL; 712 } 713 } 714 715 /* Reallocate */ 716 if (maxlun != target->num_lun) { 717 newluns = (struct sbp_dev **) realloc(target->luns, 718 sizeof(struct sbp_dev *) * maxlun, 719 M_SBP, M_NOWAIT | M_ZERO); 720 721 if (newluns == NULL) { 722 aprint_error_dev(sc->sc_fd.dev, "realloc failed\n"); 723 newluns = target->luns; 724 maxlun = target->num_lun; 725 } 726 727 /* 728 * We must zero the extended region for the case where 729 * realloc() doesn't allocate a new buffer.
730 */ 731 if (maxlun > target->num_lun) { 732 const int sbp_dev_p_sz = sizeof(struct sbp_dev *); 733 734 memset(&newluns[target->num_lun], 0, 735 sbp_dev_p_sz * (maxlun - target->num_lun)); 736 } 737 738 target->luns = newluns; 739 target->num_lun = maxlun; 740 } 741 742 crom_init_context(&cc, target->fwdev->csrrom); 743 while (cc.depth >= 0) { 744 int new = 0; 745 746 reg = crom_search_key(&cc, CROM_LUN); 747 if (reg == NULL) 748 break; 749 lun = reg->val & 0xffff; 750 if (lun >= SBP_NUM_LUNS) { 751 aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n", 752 lun); 753 goto next; 754 } 755 756 sdev = target->luns[lun]; 757 if (sdev == NULL) { 758 sdev = malloc(sizeof(struct sbp_dev), 759 M_SBP, M_NOWAIT | M_ZERO); 760 if (sdev == NULL) { 761 aprint_error_dev(sc->sc_fd.dev, 762 "malloc failed\n"); 763 goto next; 764 } 765 target->luns[lun] = sdev; 766 sdev->lun_id = lun; 767 sdev->target = target; 768 STAILQ_INIT(&sdev->ocbs); 769 callout_init(&sdev->login_callout, CALLOUT_MPSAFE); 770 callout_setfunc(&sdev->login_callout, 771 sbp_login_callout, sdev); 772 sdev->status = SBP_DEV_RESET; 773 new = 1; 774 snprintf(sdev->bustgtlun, 32, "%s:%d:%d", 775 device_xname(sc->sc_fd.dev), 776 sdev->target->target_id, 777 sdev->lun_id); 778 if (!sc->sc_lwp) 779 if (kthread_create( 780 PRI_NONE, KTHREAD_MPSAFE, NULL, 781 sbp_scsipi_scan_target, &sc->sc_target, 782 &sc->sc_lwp, 783 "sbp%d_attach", device_unit(sc->sc_fd.dev))) 784 aprint_error_dev(sc->sc_fd.dev, 785 "unable to create thread"); 786 } 787 sdev->flags |= VALID_LUN; 788 sdev->type = (reg->val & 0xff0000) >> 16; 789 790 if (new == 0) 791 goto next; 792 793 fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE, 794 &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT); 795 if (sdev->dma.v_addr == NULL) { 796 free(sdev, M_SBP); 797 target->luns[lun] = NULL; 798 goto next; 799 } 800 sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr; 801 sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN]; 802 memset((char *)sdev->ocb, 0, 803 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN); 804 805 STAILQ_INIT(&sdev->free_ocbs); 806 for (i = 0; i < SBP_QUEUE_LEN; i++) { 807 struct sbp_ocb *ocb = &sdev->ocb[i]; 808 809 ocb->index = i; 810 ocb->bus_addr = 811 sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i; 812 if (bus_dmamap_create(sc->sc_dmat, 0x100000, 813 SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) { 814 aprint_error_dev(sc->sc_fd.dev, 815 "cannot create dmamap %d\n", i); 816 /* XXX */ 817 goto next; 818 } 819 sbp_free_ocb(sdev, ocb); /* into free queue */ 820 } 821 next: 822 crom_next(&cc); 823 } 824 825 for (lun = 0; lun < target->num_lun; lun++) { 826 sdev = target->luns[lun]; 827 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { 828 sbp_scsipi_detach_sdev(sdev); 829 sbp_free_sdev(sdev); 830 target->luns[lun] = NULL; 831 } 832 } 833 } 834 835 static struct sbp_target * 836 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev) 837 { 838 struct sbp_target *target; 839 struct crom_context cc; 840 struct csrreg *reg; 841 842 SBP_DEBUG(1) 843 printf("sbp_alloc_target\n"); 844 END_DEBUG 845 /* new target */ 846 target = &sc->sc_target; 847 target->sbp = sc; 848 target->fwdev = fwdev; 849 target->target_id = 0; 850 target->mgm_ocb_cur = NULL; 851 SBP_DEBUG(1) 852 printf("target: mgm_port: %x\n", target->mgm_lo); 853 END_DEBUG 854 STAILQ_INIT(&target->xferlist); 855 target->n_xfer = 0; 856 STAILQ_INIT(&target->mgm_ocb_queue); 857 callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE); 858 859 target->luns = NULL; 860 target->num_lun = 0; 861 862 /* XXX we 
may want to reload mgm port after each bus reset */ 863 /* XXX there might be multiple management agents */ 864 crom_init_context(&cc, target->fwdev->csrrom); 865 reg = crom_search_key(&cc, CROM_MGM); 866 if (reg == NULL || reg->val == 0) { 867 aprint_error_dev(sc->sc_fd.dev, "NULL management address\n"); 868 target->fwdev = NULL; 869 return NULL; 870 } 871 872 target->mgm_hi = 0xffff; 873 target->mgm_lo = 0xf0000000 | (reg->val << 2); 874 875 return target; 876 } 877 878 static void 879 sbp_probe_lun(struct sbp_dev *sdev) 880 { 881 struct fw_device *fwdev; 882 struct crom_context c, *cc = &c; 883 struct csrreg *reg; 884 885 memset(sdev->vendor, 0, sizeof(sdev->vendor)); 886 memset(sdev->product, 0, sizeof(sdev->product)); 887 888 fwdev = sdev->target->fwdev; 889 crom_init_context(cc, fwdev->csrrom); 890 /* get vendor string */ 891 crom_search_key(cc, CSRKEY_VENDOR); 892 crom_next(cc); 893 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); 894 /* skip to the unit directory for SBP-2 */ 895 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { 896 if (reg->val == CSRVAL_T10SBP2) 897 break; 898 crom_next(cc); 899 } 900 /* get firmware revision */ 901 reg = crom_search_key(cc, CSRKEY_FIRM_VER); 902 if (reg != NULL) 903 snprintf(sdev->revision, sizeof(sdev->revision), "%06x", 904 reg->val); 905 /* get product string */ 906 crom_search_key(cc, CSRKEY_MODEL); 907 crom_next(cc); 908 crom_parse_text(cc, sdev->product, sizeof(sdev->product)); 909 } 910 911 static void 912 sbp_login_callout(void *arg) 913 { 914 struct sbp_dev *sdev = (struct sbp_dev *)arg; 915 916 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); 917 } 918 919 static void 920 sbp_login(struct sbp_dev *sdev) 921 { 922 struct sbp_softc *sc = sdev->target->sbp; 923 struct timeval delta; 924 struct timeval t; 925 int ticks = 0; 926 927 microtime(&delta); 928 timersub(&delta, &sc->sc_last_busreset, &delta); 929 t.tv_sec = login_delay / 1000; 930 t.tv_usec = (login_delay % 1000) * 1000; 931 timersub(&t, &delta, &t); 932 if (t.tv_sec >= 0 && t.tv_usec > 0) 933 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; 934 SBP_DEBUG(0) 935 printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__, 936 (long long)t.tv_sec, (long)t.tv_usec, ticks); 937 END_DEBUG 938 callout_schedule(&sdev->login_callout, ticks); 939 } 940 941 static void 942 sbp_probe_target(void *arg) 943 { 944 struct sbp_target *target = (struct sbp_target *)arg; 945 struct sbp_dev *sdev; 946 int i; 947 948 SBP_DEBUG(1) 949 printf("%s %d\n", __func__, target->target_id); 950 END_DEBUG 951 952 sbp_alloc_lun(target); 953 954 /* XXX untimeout mgm_ocb and dequeue */ 955 for (i = 0; i < target->num_lun; i++) { 956 sdev = target->luns[i]; 957 if (sdev == NULL || sdev->status == SBP_DEV_DEAD) 958 continue; 959 960 if (sdev->periph != NULL) { 961 scsipi_periph_freeze(sdev->periph, 1); 962 sdev->freeze++; 963 } 964 sbp_probe_lun(sdev); 965 sbp_show_sdev_info(sdev); 966 967 sbp_abort_all_ocbs(sdev, XS_RESET); 968 switch (sdev->status) { 969 case SBP_DEV_RESET: 970 /* new or revived target */ 971 if (auto_login) 972 sbp_login(sdev); 973 break; 974 case SBP_DEV_TOATTACH: 975 case SBP_DEV_PROBE: 976 case SBP_DEV_ATTACHED: 977 case SBP_DEV_RETRY: 978 default: 979 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); 980 break; 981 } 982 } 983 } 984 985 static void 986 sbp_post_busreset(void *arg) 987 { 988 struct sbp_softc *sc = (struct sbp_softc *)arg; 989 struct sbp_target *target = &sc->sc_target; 990 struct fw_device *fwdev = target->fwdev; 991 int alive; 992 993 alive = SBP_FWDEV_ALIVE(fwdev); 994 
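	/*
	 * Bus reset bookkeeping: the reset time recorded below is consumed
	 * by sbp_login(), which defers the next login ORB so that at least
	 * login_delay ms pass after a bus reset (e.g. with the default
	 * login_delay of 1000 and 300 ms already elapsed, the login callout
	 * is scheduled roughly 700 ms out).  The scsipi channel is frozen
	 * here and thawed again in sbp_post_explore() once the bus has been
	 * re-explored and the LUNs re-probed.
	 */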
SBP_DEBUG(0) 995 printf("sbp_post_busreset\n"); 996 if (!alive) 997 printf("not alive\n"); 998 END_DEBUG 999 microtime(&sc->sc_last_busreset); 1000 1001 if (!alive) 1002 return; 1003 1004 scsipi_channel_freeze(&sc->sc_channel, 1); 1005 } 1006 1007 static void 1008 sbp_post_explore(void *arg) 1009 { 1010 struct sbp_softc *sc = (struct sbp_softc *)arg; 1011 struct sbp_target *target = &sc->sc_target; 1012 struct fw_device *fwdev = target->fwdev; 1013 int alive; 1014 1015 alive = SBP_FWDEV_ALIVE(fwdev); 1016 SBP_DEBUG(0) 1017 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); 1018 if (!alive) 1019 printf("not alive\n"); 1020 END_DEBUG 1021 if (!alive) 1022 return; 1023 1024 if (!firewire_phydma_enable) 1025 return; 1026 1027 if (sbp_cold > 0) 1028 sbp_cold--; 1029 1030 SBP_DEBUG(0) 1031 printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo); 1032 END_DEBUG 1033 sbp_probe_target((void *)target); 1034 if (target->num_lun == 0) 1035 sbp_free_target(target); 1036 1037 scsipi_channel_thaw(&sc->sc_channel, 1); 1038 } 1039 1040 #if NEED_RESPONSE 1041 static void 1042 sbp_loginres_callback(struct fw_xfer *xfer) 1043 { 1044 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1045 struct sbp_softc *sc = sdev->target->sbp; 1046 1047 SBP_DEBUG(1) 1048 printf("sbp_loginres_callback\n"); 1049 END_DEBUG 1050 /* recycle */ 1051 mutex_enter(&sc->sc_fwb.fwb_mtx); 1052 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 1053 mutex_exit(&sc->sc_fwb.fwb_mtx); 1054 return; 1055 } 1056 #endif 1057 1058 static inline void 1059 sbp_xfer_free(struct fw_xfer *xfer) 1060 { 1061 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1062 struct sbp_softc *sc = sdev->target->sbp; 1063 1064 fw_xfer_unload(xfer); 1065 mutex_enter(&sc->sc_mtx); 1066 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); 1067 mutex_exit(&sc->sc_mtx); 1068 } 1069 1070 static void 1071 sbp_reset_start_callback(struct fw_xfer *xfer) 1072 { 1073 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; 1074 struct sbp_target *target = sdev->target; 1075 int i; 1076 1077 if (xfer->resp != 0) 1078 aprint_error("%s: sbp_reset_start failed: resp=%d\n", 1079 sdev->bustgtlun, xfer->resp); 1080 1081 for (i = 0; i < target->num_lun; i++) { 1082 tsdev = target->luns[i]; 1083 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) 1084 sbp_login(tsdev); 1085 } 1086 } 1087 1088 static void 1089 sbp_reset_start(struct sbp_dev *sdev) 1090 { 1091 struct fw_xfer *xfer; 1092 struct fw_pkt *fp; 1093 1094 SBP_DEBUG(0) 1095 printf("%s: sbp_reset_start: %s\n", 1096 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1097 END_DEBUG 1098 1099 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1100 if (xfer == NULL) 1101 return; 1102 xfer->hand = sbp_reset_start_callback; 1103 fp = &xfer->send.hdr; 1104 fp->mode.wreqq.dest_hi = 0xffff; 1105 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; 1106 fp->mode.wreqq.data = htonl(0xf); 1107 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1108 sbp_xfer_free(xfer); 1109 } 1110 1111 static void 1112 sbp_mgm_callback(struct fw_xfer *xfer) 1113 { 1114 struct sbp_dev *sdev; 1115 int resp; 1116 1117 sdev = (struct sbp_dev *)xfer->sc; 1118 1119 SBP_DEBUG(1) 1120 printf("%s: sbp_mgm_callback: %s\n", 1121 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1122 END_DEBUG 1123 resp = xfer->resp; 1124 sbp_xfer_free(xfer); 1125 return; 1126 } 1127 1128 static void 1129 sbp_scsipi_scan_target(void *arg) 1130 { 1131 struct sbp_target *target = (struct sbp_target *)arg; 1132 struct sbp_softc *sc = target->sbp; 1133 
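	/*
	 * Attach thread: it sleeps on sc_cv until sbp_scan_dev() marks a
	 * LUN SBP_DEV_PROBE and signals it; the loop below then probes that
	 * LUN via scsi_probe_bus(), records the resulting periph and marks
	 * the device SBP_DEV_ATTACHED.
	 */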
struct sbp_dev *sdev; 1134 struct scsipi_channel *chan = &sc->sc_channel; 1135 struct scsibus_softc *sc_bus = device_private(sc->sc_bus); 1136 int lun, yet; 1137 1138 do { 1139 mutex_enter(&sc->sc_mtx); 1140 cv_wait_sig(&sc->sc_cv, &sc->sc_mtx); 1141 mutex_exit(&sc->sc_mtx); 1142 yet = 0; 1143 1144 for (lun = 0; lun < target->num_lun; lun++) { 1145 sdev = target->luns[lun]; 1146 if (sdev == NULL) 1147 continue; 1148 if (sdev->status != SBP_DEV_PROBE) { 1149 yet++; 1150 continue; 1151 } 1152 1153 if (sdev->periph == NULL) { 1154 if (chan->chan_nluns < target->num_lun) 1155 chan->chan_nluns = target->num_lun; 1156 1157 scsi_probe_bus(sc_bus, target->target_id, 1158 sdev->lun_id); 1159 sdev->periph = scsipi_lookup_periph(chan, 1160 target->target_id, lun); 1161 } 1162 sdev->status = SBP_DEV_ATTACHED; 1163 } 1164 } while (yet > 0); 1165 1166 sc->sc_lwp = NULL; 1167 kthread_exit(0); 1168 1169 /* NOTREACHED */ 1170 } 1171 1172 static inline void 1173 sbp_scan_dev(struct sbp_dev *sdev) 1174 { 1175 struct sbp_softc *sc = sdev->target->sbp; 1176 1177 sdev->status = SBP_DEV_PROBE; 1178 mutex_enter(&sc->sc_mtx); 1179 cv_signal(&sdev->target->sbp->sc_cv); 1180 mutex_exit(&sc->sc_mtx); 1181 } 1182 1183 1184 static void 1185 sbp_do_attach(struct fw_xfer *xfer) 1186 { 1187 struct sbp_dev *sdev; 1188 struct sbp_target *target; 1189 struct sbp_softc *sc; 1190 1191 sdev = (struct sbp_dev *)xfer->sc; 1192 target = sdev->target; 1193 sc = target->sbp; 1194 1195 SBP_DEBUG(0) 1196 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1197 sdev->bustgtlun); 1198 END_DEBUG 1199 sbp_xfer_free(xfer); 1200 1201 sbp_scan_dev(sdev); 1202 return; 1203 } 1204 1205 static void 1206 sbp_agent_reset_callback(struct fw_xfer *xfer) 1207 { 1208 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1209 struct sbp_softc *sc = sdev->target->sbp; 1210 1211 SBP_DEBUG(1) 1212 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1213 sdev->bustgtlun); 1214 END_DEBUG 1215 if (xfer->resp != 0) 1216 aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__, 1217 sdev->bustgtlun, xfer->resp); 1218 1219 sbp_xfer_free(xfer); 1220 if (sdev->periph != NULL) { 1221 scsipi_periph_thaw(sdev->periph, sdev->freeze); 1222 scsipi_channel_thaw(&sc->sc_channel, 0); 1223 sdev->freeze = 0; 1224 } 1225 } 1226 1227 static void 1228 sbp_agent_reset(struct sbp_dev *sdev) 1229 { 1230 struct fw_xfer *xfer; 1231 struct fw_pkt *fp; 1232 1233 SBP_DEBUG(0) 1234 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1235 __func__, sdev->bustgtlun); 1236 END_DEBUG 1237 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); 1238 if (xfer == NULL) 1239 return; 1240 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) 1241 xfer->hand = sbp_agent_reset_callback; 1242 else 1243 xfer->hand = sbp_do_attach; 1244 fp = &xfer->send.hdr; 1245 fp->mode.wreqq.data = htonl(0xf); 1246 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1247 sbp_xfer_free(xfer); 1248 sbp_abort_all_ocbs(sdev, XS_RESET); 1249 } 1250 1251 static void 1252 sbp_busy_timeout_callback(struct fw_xfer *xfer) 1253 { 1254 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1255 1256 SBP_DEBUG(1) 1257 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1258 __func__, sdev->bustgtlun); 1259 END_DEBUG 1260 sbp_xfer_free(xfer); 1261 sbp_agent_reset(sdev); 1262 } 1263 1264 static void 1265 sbp_busy_timeout(struct sbp_dev *sdev) 1266 { 1267 struct fw_pkt *fp; 1268 struct fw_xfer *xfer; 1269 1270 SBP_DEBUG(0) 1271 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 
1272 __func__, sdev->bustgtlun); 1273 END_DEBUG 1274 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1275 if (xfer == NULL) 1276 return; 1277 xfer->hand = sbp_busy_timeout_callback; 1278 fp = &xfer->send.hdr; 1279 fp->mode.wreqq.dest_hi = 0xffff; 1280 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; 1281 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf); 1282 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1283 sbp_xfer_free(xfer); 1284 } 1285 1286 static void 1287 sbp_orb_pointer_callback(struct fw_xfer *xfer) 1288 { 1289 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1290 struct sbp_softc *sc = sdev->target->sbp; 1291 1292 SBP_DEBUG(1) 1293 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1294 sdev->bustgtlun); 1295 END_DEBUG 1296 if (xfer->resp != 0) 1297 aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n", 1298 __func__, sdev->bustgtlun, xfer->resp); 1299 sbp_xfer_free(xfer); 1300 sdev->flags &= ~ORB_POINTER_ACTIVE; 1301 1302 if ((sdev->flags & ORB_POINTER_NEED) != 0) { 1303 struct sbp_ocb *ocb; 1304 1305 sdev->flags &= ~ORB_POINTER_NEED; 1306 ocb = STAILQ_FIRST(&sdev->ocbs); 1307 if (ocb != NULL) 1308 sbp_orb_pointer(sdev, ocb); 1309 } 1310 return; 1311 } 1312 1313 static void 1314 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) 1315 { 1316 struct sbp_softc *sc = sdev->target->sbp; 1317 struct fw_xfer *xfer; 1318 struct fw_pkt *fp; 1319 1320 SBP_DEBUG(1) 1321 printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__, 1322 sdev->bustgtlun, (uint32_t)ocb->bus_addr); 1323 END_DEBUG 1324 1325 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { 1326 SBP_DEBUG(0) 1327 printf("%s: orb pointer active\n", __func__); 1328 END_DEBUG 1329 sdev->flags |= ORB_POINTER_NEED; 1330 return; 1331 } 1332 1333 sdev->flags |= ORB_POINTER_ACTIVE; 1334 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08); 1335 if (xfer == NULL) 1336 return; 1337 xfer->hand = sbp_orb_pointer_callback; 1338 1339 fp = &xfer->send.hdr; 1340 fp->mode.wreqb.len = 8; 1341 fp->mode.wreqb.extcode = 0; 1342 xfer->send.payload[0] = 1343 htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16)); 1344 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); 1345 1346 if (fw_asyreq(xfer->fc, -1, xfer) != 0) { 1347 sbp_xfer_free(xfer); 1348 ocb->xs->error = XS_DRIVER_STUFFUP; 1349 scsipi_done(ocb->xs); 1350 } 1351 } 1352 1353 static void 1354 sbp_doorbell_callback(struct fw_xfer *xfer) 1355 { 1356 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1357 struct sbp_softc *sc = sdev->target->sbp; 1358 1359 SBP_DEBUG(1) 1360 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1361 sdev->bustgtlun); 1362 END_DEBUG 1363 if (xfer->resp != 0) { 1364 aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n", 1365 __func__, xfer->resp); 1366 } 1367 sbp_xfer_free(xfer); 1368 sdev->flags &= ~ORB_DOORBELL_ACTIVE; 1369 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) { 1370 sdev->flags &= ~ORB_DOORBELL_NEED; 1371 sbp_doorbell(sdev); 1372 } 1373 return; 1374 } 1375 1376 static void 1377 sbp_doorbell(struct sbp_dev *sdev) 1378 { 1379 struct fw_xfer *xfer; 1380 struct fw_pkt *fp; 1381 1382 SBP_DEBUG(1) 1383 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1384 __func__, sdev->bustgtlun); 1385 END_DEBUG 1386 1387 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) { 1388 sdev->flags |= ORB_DOORBELL_NEED; 1389 return; 1390 } 1391 sdev->flags |= ORB_DOORBELL_ACTIVE; 1392 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10); 1393 if (xfer == NULL) 1394 return; 1395 xfer->hand = sbp_doorbell_callback; 1396 fp = &xfer->send.hdr; 1397 
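	/*
	 * Quadlet write to the fetch agent's DOORBELL register (offset 0x10
	 * from the command block agent base); per SBP-2 the write itself,
	 * not the value written, prompts the agent to re-fetch the next ORB.
	 * The ORB_DOORBELL_ACTIVE/ORB_DOORBELL_NEED flags serialize these
	 * writes: only one doorbell request is in flight at a time, and
	 * sbp_doorbell_callback() issues a queued one when the current
	 * write completes.
	 */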
fp->mode.wreqq.data = htonl(0xf); 1398 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1399 sbp_xfer_free(xfer); 1400 } 1401 1402 static struct fw_xfer * 1403 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset) 1404 { 1405 struct sbp_softc *sc; 1406 struct fw_xfer *xfer; 1407 struct fw_pkt *fp; 1408 struct sbp_target *target; 1409 int new = 0; 1410 1411 target = sdev->target; 1412 sc = target->sbp; 1413 mutex_enter(&sc->sc_mtx); 1414 xfer = STAILQ_FIRST(&target->xferlist); 1415 if (xfer == NULL) { 1416 if (target->n_xfer > 5 /* XXX */) { 1417 aprint_error_dev(sc->sc_fd.dev, 1418 "no more xfer for this target\n"); 1419 mutex_exit(&sc->sc_mtx); 1420 return NULL; 1421 } 1422 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0); 1423 if (xfer == NULL) { 1424 aprint_error_dev(sc->sc_fd.dev, 1425 "fw_xfer_alloc_buf failed\n"); 1426 mutex_exit(&sc->sc_mtx); 1427 return NULL; 1428 } 1429 target->n_xfer++; 1430 SBP_DEBUG(0) 1431 printf("sbp: alloc %d xfer\n", target->n_xfer); 1432 END_DEBUG 1433 new = 1; 1434 } else 1435 STAILQ_REMOVE_HEAD(&target->xferlist, link); 1436 mutex_exit(&sc->sc_mtx); 1437 1438 microtime(&xfer->tv); 1439 1440 if (new) { 1441 xfer->recv.pay_len = 0; 1442 xfer->send.spd = min(target->fwdev->speed, max_speed); 1443 xfer->fc = target->sbp->sc_fd.fc; 1444 } 1445 1446 if (tcode == FWTCODE_WREQB) 1447 xfer->send.pay_len = 8; 1448 else 1449 xfer->send.pay_len = 0; 1450 1451 xfer->sc = (void *)sdev; 1452 fp = &xfer->send.hdr; 1453 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi; 1454 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset; 1455 fp->mode.wreqq.tlrt = 0; 1456 fp->mode.wreqq.tcode = tcode; 1457 fp->mode.wreqq.pri = 0; 1458 fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst; 1459 1460 return xfer; 1461 } 1462 1463 static void 1464 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb) 1465 { 1466 struct fw_xfer *xfer; 1467 struct fw_pkt *fp; 1468 struct sbp_ocb *ocb; 1469 struct sbp_target *target; 1470 int nid, dv_unit; 1471 1472 target = sdev->target; 1473 nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS; 1474 dv_unit = device_unit(target->sbp->sc_fd.dev); 1475 1476 mutex_enter(&target->sbp->sc_mtx); 1477 if (func == ORB_FUN_RUNQUEUE) { 1478 ocb = STAILQ_FIRST(&target->mgm_ocb_queue); 1479 if (target->mgm_ocb_cur != NULL || ocb == NULL) { 1480 mutex_exit(&target->sbp->sc_mtx); 1481 return; 1482 } 1483 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); 1484 mutex_exit(&target->sbp->sc_mtx); 1485 goto start; 1486 } 1487 if ((ocb = sbp_get_ocb(sdev)) == NULL) { 1488 mutex_exit(&target->sbp->sc_mtx); 1489 /* XXX */ 1490 return; 1491 } 1492 mutex_exit(&target->sbp->sc_mtx); 1493 ocb->flags = OCB_ACT_MGM; 1494 ocb->sdev = sdev; 1495 1496 memset(ocb->orb, 0, sizeof(ocb->orb)); 1497 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); 1498 ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id)); 1499 1500 SBP_DEBUG(0) 1501 printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1502 __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]); 1503 END_DEBUG 1504 switch (func) { 1505 case ORB_FUN_LGI: 1506 { 1507 const off_t sbp_login_off = 1508 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN; 1509 1510 ocb->orb[0] = ocb->orb[1] = 0; /* password */ 1511 ocb->orb[2] = htonl(nid << 16); 1512 ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off); 1513 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); 1514 if (ex_login) 1515 ocb->orb[4] |= htonl(ORB_EXV); 1516 ocb->orb[5] = htonl(SBP_LOGIN_SIZE); 1517 bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map, 1518 sbp_login_off, SBP_LOGIN_SIZE, 
BUS_DMASYNC_PREREAD); 1519 break; 1520 } 1521 1522 case ORB_FUN_ATA: 1523 ocb->orb[0] = htonl((0 << 16) | 0); 1524 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); 1525 /* fall through */ 1526 case ORB_FUN_RCN: 1527 case ORB_FUN_LGO: 1528 case ORB_FUN_LUR: 1529 case ORB_FUN_RST: 1530 case ORB_FUN_ATS: 1531 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); 1532 break; 1533 } 1534 1535 if (target->mgm_ocb_cur != NULL) { 1536 /* there is a standing ORB */ 1537 mutex_enter(&target->sbp->sc_mtx); 1538 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); 1539 mutex_exit(&target->sbp->sc_mtx); 1540 return; 1541 } 1542 start: 1543 target->mgm_ocb_cur = ocb; 1544 1545 callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb); 1546 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); 1547 if (xfer == NULL) 1548 return; 1549 xfer->hand = sbp_mgm_callback; 1550 1551 fp = &xfer->send.hdr; 1552 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; 1553 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; 1554 fp->mode.wreqb.len = 8; 1555 fp->mode.wreqb.extcode = 0; 1556 xfer->send.payload[0] = htonl(nid << 16); 1557 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); 1558 1559 /* cache writeback & invalidate(required ORB_FUN_LGI func) */ 1560 /* when abort_ocb, should sync POST ope ? */ 1561 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE); 1562 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1563 sbp_xfer_free(xfer); 1564 } 1565 1566 static void 1567 sbp_print_scsi_cmd(struct sbp_ocb *ocb) 1568 { 1569 struct scsipi_xfer *xs = ocb->xs; 1570 1571 printf("%s:%d:%d:" 1572 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x," 1573 " flags: 0x%02x, %db cmd/%db data\n", 1574 device_xname(ocb->sdev->target->sbp->sc_fd.dev), 1575 xs->xs_periph->periph_target, 1576 xs->xs_periph->periph_lun, 1577 xs->cmd->opcode, 1578 xs->cmd->bytes[0], xs->cmd->bytes[1], 1579 xs->cmd->bytes[2], xs->cmd->bytes[3], 1580 xs->cmd->bytes[4], xs->cmd->bytes[5], 1581 xs->cmd->bytes[6], xs->cmd->bytes[7], 1582 xs->cmd->bytes[8], 1583 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT), 1584 xs->cmdlen, xs->datalen); 1585 } 1586 1587 static void 1588 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) 1589 { 1590 struct sbp_cmd_status *sbp_cmd_status; 1591 struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense; 1592 1593 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; 1594 1595 SBP_DEBUG(0) 1596 sbp_print_scsi_cmd(ocb); 1597 /* XXX need decode status */ 1598 printf("%s:" 1599 " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", 1600 ocb->sdev->bustgtlun, 1601 sbp_cmd_status->status, 1602 sbp_cmd_status->sfmt, 1603 sbp_cmd_status->valid, 1604 sbp_cmd_status->s_key, 1605 sbp_cmd_status->s_code, 1606 sbp_cmd_status->s_qlfr, 1607 sbp_status->len); 1608 END_DEBUG 1609 1610 switch (sbp_cmd_status->status) { 1611 case SCSI_CHECK: 1612 case SCSI_BUSY: 1613 case SCSI_TERMINATED: 1614 if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) 1615 sense->response_code = SSD_RCODE_CURRENT; 1616 else 1617 sense->response_code = SSD_RCODE_DEFERRED; 1618 if (sbp_cmd_status->valid) 1619 sense->response_code |= SSD_RCODE_VALID; 1620 sense->flags = sbp_cmd_status->s_key; 1621 if (sbp_cmd_status->mark) 1622 sense->flags |= SSD_FILEMARK; 1623 if (sbp_cmd_status->eom) 1624 sense->flags |= SSD_EOM; 1625 if (sbp_cmd_status->ill_len) 1626 sense->flags |= SSD_ILI; 1627 1628 memcpy(sense->info, &sbp_cmd_status->info, 4); 1629 1630 if (sbp_status->len <= 1) 1631 /* XXX not scsi status. 
shouldn't be happened */ 1632 sense->extra_len = 0; 1633 else if (sbp_status->len <= 4) 1634 /* add_sense_code(_qual), info, cmd_spec_info */ 1635 sense->extra_len = 6; 1636 else 1637 /* fru, sense_key_spec */ 1638 sense->extra_len = 10; 1639 1640 memcpy(sense->csi, &sbp_cmd_status->cdb, 4); 1641 1642 sense->asc = sbp_cmd_status->s_code; 1643 sense->ascq = sbp_cmd_status->s_qlfr; 1644 sense->fru = sbp_cmd_status->fru; 1645 1646 memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3); 1647 ocb->xs->error = XS_SENSE; 1648 ocb->xs->xs_status = sbp_cmd_status->status; 1649 /* 1650 { 1651 uint8_t j, *tmp; 1652 tmp = sense; 1653 for (j = 0; j < 32; j += 8) 1654 aprint_normal( 1655 "sense %02x%02x %02x%02x %02x%02x %02x%02x\n", 1656 tmp[j], tmp[j+1], tmp[j+2], tmp[j+3], 1657 tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]); 1658 1659 } 1660 */ 1661 break; 1662 default: 1663 aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev, 1664 "%s:%s: unknown scsi status 0x%x\n", 1665 __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status); 1666 } 1667 } 1668 1669 static void 1670 sbp_fix_inq_data(struct sbp_ocb *ocb) 1671 { 1672 struct scsipi_xfer *xs = ocb->xs; 1673 struct sbp_dev *sdev; 1674 struct scsipi_inquiry_data *inq = 1675 (struct scsipi_inquiry_data *)xs->data; 1676 1677 sdev = ocb->sdev; 1678 1679 #if 0 1680 /* 1681 * NetBSD is assuming always 0 for EVPD-bit and 'Page Code'. 1682 */ 1683 #define SI_EVPD 0x01 1684 if (xs->cmd->bytes[0] & SI_EVPD) 1685 return; 1686 #endif 1687 SBP_DEBUG(1) 1688 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1689 __func__, sdev->bustgtlun); 1690 END_DEBUG 1691 switch (inq->device & SID_TYPE) { 1692 case T_DIRECT: 1693 #if 0 1694 /* 1695 * XXX Convert Direct Access device to RBC. 1696 * I've never seen FireWire DA devices which support READ_6. 1697 */ 1698 if ((inq->device & SID_TYPE) == T_DIRECT) 1699 inq->device |= T_SIMPLE_DIRECT; /* T_DIRECT == 0 */ 1700 #endif 1701 /* FALLTHROUGH */ 1702 1703 case T_SIMPLE_DIRECT: 1704 /* 1705 * Override vendor/product/revision information. 1706 * Some devices sometimes return strange strings. 1707 */ 1708 #if 1 1709 memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor)); 1710 memcpy(inq->product, sdev->product, sizeof(inq->product)); 1711 memcpy(inq->revision + 2, sdev->revision, 1712 sizeof(inq->revision)); 1713 #endif 1714 break; 1715 } 1716 /* 1717 * Force to enable/disable tagged queuing. 1718 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page. 
1719 */ 1720 if (sbp_tags > 0) 1721 inq->flags3 |= SID_CmdQue; 1722 else if (sbp_tags < 0) 1723 inq->flags3 &= ~SID_CmdQue; 1724 1725 } 1726 1727 static void 1728 sbp_recv(struct fw_xfer *xfer) 1729 { 1730 struct fw_pkt *rfp; 1731 #if NEED_RESPONSE 1732 struct fw_pkt *sfp; 1733 #endif 1734 struct sbp_softc *sc; 1735 struct sbp_dev *sdev; 1736 struct sbp_ocb *ocb; 1737 struct sbp_login_res *login_res = NULL; 1738 struct sbp_status *sbp_status; 1739 struct sbp_target *target; 1740 int orb_fun, status_valid0, status_valid, l, reset_agent = 0; 1741 uint32_t addr; 1742 /* 1743 uint32_t *ld; 1744 ld = xfer->recv.buf; 1745 printf("sbp %x %d %d %08x %08x %08x %08x\n", 1746 xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3])); 1747 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7])); 1748 printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11])); 1749 */ 1750 1751 sc = (struct sbp_softc *)xfer->sc; 1752 if (xfer->resp != 0) { 1753 aprint_error_dev(sc->sc_fd.dev, 1754 "sbp_recv: xfer->resp = %d\n", xfer->resp); 1755 goto done0; 1756 } 1757 if (xfer->recv.payload == NULL) { 1758 aprint_error_dev(sc->sc_fd.dev, 1759 "sbp_recv: xfer->recv.payload == NULL\n"); 1760 goto done0; 1761 } 1762 rfp = &xfer->recv.hdr; 1763 if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) { 1764 aprint_error_dev(sc->sc_fd.dev, 1765 "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode); 1766 goto done0; 1767 } 1768 sbp_status = (struct sbp_status *)xfer->recv.payload; 1769 addr = rfp->mode.wreqb.dest_lo; 1770 SBP_DEBUG(2) 1771 printf("received address 0x%x\n", addr); 1772 END_DEBUG 1773 target = &sc->sc_target; 1774 l = SBP_ADDR2LUN(addr); 1775 if (l >= target->num_lun || target->luns[l] == NULL) { 1776 aprint_error_dev(sc->sc_fd.dev, 1777 "sbp_recv1: invalid lun %d (target=%d)\n", 1778 l, target->target_id); 1779 goto done0; 1780 } 1781 sdev = target->luns[l]; 1782 1783 ocb = NULL; 1784 switch (sbp_status->src) { 1785 case SRC_NEXT_EXISTS: 1786 case SRC_NO_NEXT: 1787 /* check mgm_ocb_cur first */ 1788 ocb = target->mgm_ocb_cur; 1789 if (ocb != NULL) 1790 if (OCB_MATCH(ocb, sbp_status)) { 1791 callout_stop(&target->mgm_ocb_timeout); 1792 target->mgm_ocb_cur = NULL; 1793 break; 1794 } 1795 ocb = sbp_dequeue_ocb(sdev, sbp_status); 1796 if (ocb == NULL) 1797 aprint_error_dev(sc->sc_fd.dev, 1798 "%s:%s: No ocb(%x) on the queue\n", __func__, 1799 sdev->bustgtlun, ntohl(sbp_status->orb_lo)); 1800 break; 1801 case SRC_UNSOL: 1802 /* unsolicit */ 1803 aprint_error_dev(sc->sc_fd.dev, 1804 "%s:%s: unsolicit status received\n", 1805 __func__, sdev->bustgtlun); 1806 break; 1807 default: 1808 aprint_error_dev(sc->sc_fd.dev, 1809 "%s:%s: unknown sbp_status->src\n", 1810 __func__, sdev->bustgtlun); 1811 } 1812 1813 status_valid0 = (sbp_status->src < 2 1814 && sbp_status->resp == SBP_REQ_CMP 1815 && sbp_status->dead == 0); 1816 status_valid = (status_valid0 && sbp_status->status == 0); 1817 1818 if (!status_valid0 || debug > 2) { 1819 int status; 1820 SBP_DEBUG(0) 1821 printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x" 1822 " len:%x stat:%x orb:%x%08x\n", 1823 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun, 1824 sbp_status->src, sbp_status->resp, sbp_status->dead, 1825 sbp_status->len, sbp_status->status, 1826 ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo)); 1827 END_DEBUG 1828 printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun); 1829 status = sbp_status->status; 1830 switch (sbp_status->resp) { 1831 case 
SBP_REQ_CMP: 1832 if (status > MAX_ORB_STATUS0) 1833 printf("%s\n", orb_status0[MAX_ORB_STATUS0]); 1834 else 1835 printf("%s\n", orb_status0[status]); 1836 break; 1837 case SBP_TRANS_FAIL: 1838 printf("Obj: %s, Error: %s\n", 1839 orb_status1_object[(status>>6) & 3], 1840 orb_status1_serial_bus_error[status & 0xf]); 1841 break; 1842 case SBP_ILLE_REQ: 1843 printf("Illegal request\n"); 1844 break; 1845 case SBP_VEND_DEP: 1846 printf("Vendor dependent\n"); 1847 break; 1848 default: 1849 printf("unknown response code %d\n", sbp_status->resp); 1850 } 1851 } 1852 1853 /* we have to reset the fetch agent if it's dead */ 1854 if (sbp_status->dead) { 1855 if (sdev->periph != NULL) { 1856 scsipi_periph_freeze(sdev->periph, 1); 1857 sdev->freeze++; 1858 } 1859 reset_agent = 1; 1860 } 1861 1862 if (ocb == NULL) 1863 goto done; 1864 1865 switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) { 1866 case ORB_FMT_NOP: 1867 break; 1868 case ORB_FMT_VED: 1869 break; 1870 case ORB_FMT_STD: 1871 switch (ocb->flags) { 1872 case OCB_ACT_MGM: 1873 orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK; 1874 reset_agent = 0; 1875 switch (orb_fun) { 1876 case ORB_FUN_LGI: 1877 { 1878 const struct fwdma_alloc *dma = &sdev->dma; 1879 const off_t sbp_login_off = 1880 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN; 1881 1882 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 1883 sbp_login_off, SBP_LOGIN_SIZE, 1884 BUS_DMASYNC_POSTREAD); 1885 login_res = sdev->login; 1886 login_res->len = ntohs(login_res->len); 1887 login_res->id = ntohs(login_res->id); 1888 login_res->cmd_hi = ntohs(login_res->cmd_hi); 1889 login_res->cmd_lo = ntohl(login_res->cmd_lo); 1890 if (status_valid) { 1891 SBP_DEBUG(0) 1892 printf("%s:%s:%s: login:" 1893 " len %d, ID %d, cmd %08x%08x," 1894 " recon_hold %d\n", 1895 device_xname(sc->sc_fd.dev), 1896 __func__, sdev->bustgtlun, 1897 login_res->len, login_res->id, 1898 login_res->cmd_hi, 1899 login_res->cmd_lo, 1900 ntohs(login_res->recon_hold)); 1901 END_DEBUG 1902 sbp_busy_timeout(sdev); 1903 } else { 1904 /* forgot logout? */ 1905 aprint_error_dev(sc->sc_fd.dev, 1906 "%s:%s: login failed\n", 1907 __func__, sdev->bustgtlun); 1908 sdev->status = SBP_DEV_RESET; 1909 } 1910 break; 1911 } 1912 case ORB_FUN_RCN: 1913 login_res = sdev->login; 1914 if (status_valid) { 1915 SBP_DEBUG(0) 1916 printf("%s:%s:%s: reconnect:" 1917 " len %d, ID %d, cmd %08x%08x\n", 1918 device_xname(sc->sc_fd.dev), 1919 __func__, sdev->bustgtlun, 1920 login_res->len, login_res->id, 1921 login_res->cmd_hi, 1922 login_res->cmd_lo); 1923 END_DEBUG 1924 sbp_agent_reset(sdev); 1925 } else { 1926 /* reconnection hold time exceeded?
*/ 1927 SBP_DEBUG(0) 1928 aprint_error_dev(sc->sc_fd.dev, 1929 "%s:%s: reconnect failed\n", 1930 __func__, sdev->bustgtlun); 1931 END_DEBUG 1932 sbp_login(sdev); 1933 } 1934 break; 1935 case ORB_FUN_LGO: 1936 sdev->status = SBP_DEV_RESET; 1937 break; 1938 case ORB_FUN_RST: 1939 sbp_busy_timeout(sdev); 1940 break; 1941 case ORB_FUN_LUR: 1942 case ORB_FUN_ATA: 1943 case ORB_FUN_ATS: 1944 sbp_agent_reset(sdev); 1945 break; 1946 default: 1947 aprint_error_dev(sc->sc_fd.dev, 1948 "%s:%s: unknown function %d\n", 1949 __func__, sdev->bustgtlun, orb_fun); 1950 break; 1951 } 1952 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL); 1953 break; 1954 case OCB_ACT_CMD: 1955 sdev->timeout = 0; 1956 if (ocb->xs != NULL) { 1957 struct scsipi_xfer *xs = ocb->xs; 1958 1959 if (sbp_status->len > 1) 1960 sbp_scsi_status(sbp_status, ocb); 1961 else 1962 if (sbp_status->resp != SBP_REQ_CMP) 1963 xs->error = XS_DRIVER_STUFFUP; 1964 else { 1965 xs->error = XS_NOERROR; 1966 xs->resid = 0; 1967 } 1968 /* fix up inq data */ 1969 if (xs->cmd->opcode == INQUIRY) 1970 sbp_fix_inq_data(ocb); 1971 scsipi_done(xs); 1972 } 1973 break; 1974 default: 1975 break; 1976 } 1977 } 1978 1979 if (!use_doorbell) 1980 sbp_free_ocb(sdev, ocb); 1981 done: 1982 if (reset_agent) 1983 sbp_agent_reset(sdev); 1984 1985 done0: 1986 xfer->recv.pay_len = SBP_RECV_LEN; 1987 /* The received packet is usually small enough to be stored within 1988 * the buffer. In that case, the controller return ack_complete and 1989 * no respose is necessary. 1990 * 1991 * XXX fwohci.c and firewire.c should inform event_code such as 1992 * ack_complete or ack_pending to upper driver. 1993 */ 1994 #if NEED_RESPONSE 1995 xfer->send.off = 0; 1996 sfp = (struct fw_pkt *)xfer->send.buf; 1997 sfp->mode.wres.dst = rfp->mode.wreqb.src; 1998 xfer->dst = sfp->mode.wres.dst; 1999 xfer->spd = min(sdev->target->fwdev->speed, max_speed); 2000 xfer->hand = sbp_loginres_callback; 2001 2002 sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt; 2003 sfp->mode.wres.tcode = FWTCODE_WRES; 2004 sfp->mode.wres.rtcode = 0; 2005 sfp->mode.wres.pri = 0; 2006 2007 if (fw_asyreq(xfer->fc, -1, xfer) != 0) { 2008 aprint_error_dev(sc->sc_fd.dev, "mgm_orb failed\n"); 2009 mutex_enter(&sc->sc_fwb.fwb_mtx); 2010 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 2011 mutex_exit(&sc->sc_fwb.fwb_mtx); 2012 } 2013 #else 2014 /* recycle */ 2015 mutex_enter(&sc->sc_fwb.fwb_mtx); 2016 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 2017 mutex_exit(&sc->sc_fwb.fwb_mtx); 2018 #endif 2019 2020 return; 2021 2022 } 2023 2024 static int 2025 sbp_logout_all(struct sbp_softc *sbp) 2026 { 2027 struct sbp_target *target; 2028 struct sbp_dev *sdev; 2029 int i; 2030 2031 SBP_DEBUG(0) 2032 printf("sbp_logout_all\n"); 2033 END_DEBUG 2034 target = &sbp->sc_target; 2035 if (target->luns != NULL) { 2036 for (i = 0; i < target->num_lun; i++) { 2037 sdev = target->luns[i]; 2038 if (sdev == NULL) 2039 continue; 2040 callout_stop(&sdev->login_callout); 2041 if (sdev->status >= SBP_DEV_TOATTACH && 2042 sdev->status <= SBP_DEV_ATTACHED) 2043 sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL); 2044 } 2045 } 2046 2047 return 0; 2048 } 2049 2050 static void 2051 sbp_free_sdev(struct sbp_dev *sdev) 2052 { 2053 struct sbp_softc *sc = sdev->target->sbp; 2054 int i; 2055 2056 if (sdev == NULL) 2057 return; 2058 for (i = 0; i < SBP_QUEUE_LEN; i++) 2059 bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap); 2060 fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr); 2061 free(sdev, M_SBP); 2062 sdev = NULL; 2063 } 2064 2065 static void 2066 
static void
sbp_free_target(struct sbp_target *target)
{
	struct fw_xfer *xfer, *next;
	int i;

	if (target->luns == NULL)
		return;
	callout_stop(&target->mgm_ocb_timeout);
	for (i = 0; i < target->num_lun; i++)
		sbp_free_sdev(target->luns[i]);

	for (xfer = STAILQ_FIRST(&target->xferlist);
	    xfer != NULL; xfer = next) {
		next = STAILQ_NEXT(xfer, link);
		fw_xfer_free_buf(xfer);
	}
	STAILQ_INIT(&target->xferlist);
	free(target->luns, M_SBP);
	target->num_lun = 0;
	target->luns = NULL;
	target->fwdev = NULL;
}

static void
sbp_scsipi_detach_sdev(struct sbp_dev *sdev)
{
	struct sbp_target *target;
	struct sbp_softc *sbp;

	if (sdev == NULL)
		return;

	target = sdev->target;
	if (target == NULL)
		return;

	sbp = target->sbp;

	if (sdev->status == SBP_DEV_DEAD)
		return;
	if (sdev->status == SBP_DEV_RESET)
		return;
	if (sdev->periph != NULL) {
		scsipi_periph_thaw(sdev->periph, sdev->freeze);
		scsipi_channel_thaw(&sbp->sc_channel, 0);	/* XXXX */
		sdev->freeze = 0;
		if (scsipi_target_detach(&sbp->sc_channel,
		    target->target_id, sdev->lun_id, DETACH_FORCE) != 0) {
			aprint_error_dev(sbp->sc_fd.dev, "detach failed\n");
		}
		sdev->periph = NULL;
	}
	sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP);
}

static void
sbp_scsipi_detach_target(struct sbp_target *target)
{
	struct sbp_softc *sbp = target->sbp;
	int i;

	if (target->luns != NULL) {
SBP_DEBUG(0)
		printf("sbp_detach_target %d\n", target->target_id);
END_DEBUG
		for (i = 0; i < target->num_lun; i++)
			sbp_scsipi_detach_sdev(target->luns[i]);
		if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0)
			aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n",
			    target->target_id);
		sbp->sc_bus = NULL;
	}
}

static void
sbp_target_reset(struct sbp_dev *sdev, int method)
{
	struct sbp_softc *sc;
	struct sbp_target *target = sdev->target;
	struct sbp_dev *tsdev;
	int i;

	sc = target->sbp;
	for (i = 0; i < target->num_lun; i++) {
		tsdev = target->luns[i];
		if (tsdev == NULL)
			continue;
		if (tsdev->status == SBP_DEV_DEAD)
			continue;
		if (tsdev->status == SBP_DEV_RESET)
			continue;
		if (tsdev->periph != NULL) {
			scsipi_periph_freeze(tsdev->periph, 1);
			tsdev->freeze++;
		}
		sbp_abort_all_ocbs(tsdev, XS_TIMEOUT);
		if (method == 2)
			tsdev->status = SBP_DEV_LOGIN;
	}
	switch (method) {
	case 1:
		aprint_error("target reset\n");
		sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
		break;
	case 2:
		aprint_error("reset start\n");
		sbp_reset_start(sdev);
		break;
	}
}

static void
sbp_mgm_timeout(void *arg)
{
	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
	struct sbp_dev *sdev = ocb->sdev;
	struct sbp_target *target = sdev->target;

	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
	    "%s:%s: request timeout(mgm orb:0x%08x) ... ",
	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
	target->mgm_ocb_cur = NULL;
	sbp_free_ocb(sdev, ocb);
#if 0
	/* XXX */
	aprint_error("run next request\n");
	sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
#endif
	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
	    "%s:%s: reset start\n", __func__, sdev->bustgtlun);
	sbp_reset_start(sdev);
}

static void
sbp_timeout(void *arg)
{
	struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
	struct sbp_dev *sdev = ocb->sdev;

	aprint_error_dev(sdev->target->sbp->sc_fd.dev,
	    "%s:%s: request timeout(cmd orb:0x%08x) ... ",
	    __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);

	sdev->timeout++;
	switch (sdev->timeout) {
	case 1:
		aprint_error("agent reset\n");
		if (sdev->periph != NULL) {
			scsipi_periph_freeze(sdev->periph, 1);
			sdev->freeze++;
		}
		sbp_abort_all_ocbs(sdev, XS_TIMEOUT);
		sbp_agent_reset(sdev);
		break;
	case 2:
	case 3:
		sbp_target_reset(sdev, sdev->timeout - 1);
		break;
	default:
		aprint_error("\n");
#if 0
		/* XXX give up */
		sbp_scsipi_detach_target(target);
		if (target->luns != NULL)
			free(target->luns, M_SBP);
		target->num_lun = 0;
		target->luns = NULL;
		target->fwdev = NULL;
#endif
	}
}

/*
 * Translate one scsipi transfer into a command ORB: map the target/LUN
 * to an sbp_dev, grab a free OCB, fill in the ORB (direction, speed,
 * CDB), load the data DMA map if there is data, and pass the OCB to
 * sbp_execute_ocb().
 */
static void
sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs)
{
	struct sbp_target *target = &sc->sc_target;
	struct sbp_dev *sdev = NULL;
	struct sbp_ocb *ocb;
	int speed, flag, error;
	void *cdb;

	/* target:lun -> sdev mapping */
	if (target->fwdev != NULL &&
	    xs->xs_periph->periph_lun < target->num_lun) {
		sdev = target->luns[xs->xs_periph->periph_lun];
		if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
		    sdev->status != SBP_DEV_PROBE)
			sdev = NULL;
	}

	if (sdev == NULL) {
SBP_DEBUG(1)
		printf("%s:%d:%d: Invalid target (target needed)\n",
		    sc ? device_xname(sc->sc_fd.dev) : "???",
		    xs->xs_periph->periph_target,
		    xs->xs_periph->periph_lun);
END_DEBUG

		xs->error = XS_DRIVER_STUFFUP;
		scsipi_done(xs);
		return;
	}

SBP_DEBUG(2)
	printf("%s:%d:%d:"
	    " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
	    " flags: 0x%02x, %db cmd/%db data\n",
	    device_xname(sc->sc_fd.dev),
	    xs->xs_periph->periph_target,
	    xs->xs_periph->periph_lun,
	    xs->cmd->opcode,
	    xs->cmd->bytes[0], xs->cmd->bytes[1],
	    xs->cmd->bytes[2], xs->cmd->bytes[3],
	    xs->cmd->bytes[4], xs->cmd->bytes[5],
	    xs->cmd->bytes[6], xs->cmd->bytes[7],
	    xs->cmd->bytes[8],
	    xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
	    xs->cmdlen, xs->datalen);
END_DEBUG
	mutex_enter(&sc->sc_mtx);
	ocb = sbp_get_ocb(sdev);
	mutex_exit(&sc->sc_mtx);
	if (ocb == NULL) {
		xs->error = XS_REQUEUE;
		if (sdev->freeze == 0) {
			scsipi_periph_freeze(sdev->periph, 1);
			sdev->freeze++;
		}
		scsipi_done(xs);
		return;
	}

	ocb->flags = OCB_ACT_CMD;
	ocb->sdev = sdev;
	ocb->xs = xs;
	ocb->orb[0] = htonl(1 << 31);
	ocb->orb[1] = 0;
	ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
	ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
	speed = min(target->fwdev->speed, max_speed);
	ocb->orb[4] =
	    htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7));
	if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
	    XS_CTL_DATA_IN) {
		ocb->orb[4] |= htonl(ORB_CMD_IN);
		flag = BUS_DMA_READ;
	} else
		flag = BUS_DMA_WRITE;

	cdb = xs->cmd;
	memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen);
/*
printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
*/
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap,
		    xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag);
		if (error) {
			aprint_error_dev(sc->sc_fd.dev,
			    "DMA map load error %d\n", error);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		} else
			sbp_execute_ocb(ocb, ocb->dmamap->dm_segs,
			    ocb->dmamap->dm_nsegs);
	} else
		sbp_execute_ocb(ocb, NULL, 0);

	return;
}

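/*
 * Attach the data descriptor to a command ORB: a direct pointer for a
 * single DMA segment, or an indirect page table for multiple segments.
 * The OCB is then queued and, depending on the doorbell mode, the
 * doorbell is rung or the ORB pointer is written to start the fetch
 * agent.
 */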
static void
sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg)
{
	struct sbp_ocb *prev;
	bus_dma_segment_t *s;
	int i;

SBP_DEBUG(2)
	printf("sbp_execute_ocb: seg %d", seg);
	for (i = 0; i < seg; i++)
		printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
		    (uintmax_t)segments[i].ds_len);
	printf("\n");
END_DEBUG

	if (seg == 1) {
		/* direct pointer */
		s = segments;
		if (s->ds_len > SBP_SEG_MAX)
			panic("ds_len > SBP_SEG_MAX, fix busdma code");
		ocb->orb[3] = htonl(s->ds_addr);
		ocb->orb[4] |= htonl(s->ds_len);
	} else if (seg > 1) {
		/* page table */
		for (i = 0; i < seg; i++) {
			s = &segments[i];
SBP_DEBUG(0)
			/* XXX LSI Logic "< 16 byte" bug might be hit */
			if (s->ds_len < 16)
				printf("sbp_execute_ocb: warning, "
				    "segment length(%jd) is less than 16."
				    "(seg=%d/%d)\n",
				    (uintmax_t)s->ds_len, i + 1, seg);
END_DEBUG
			if (s->ds_len > SBP_SEG_MAX)
				panic("ds_len > SBP_SEG_MAX, fix busdma code");
			ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
			ocb->ind_ptr[i].lo = htonl(s->ds_addr);
		}
		ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
	}

	if (seg > 0) {
		struct sbp_softc *sc = ocb->sdev->target->sbp;
		const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
		    0, ocb->dmamap->dm_mapsize, flag);
	}
	prev = sbp_enqueue_ocb(ocb->sdev, ocb);
	SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
	if (use_doorbell) {
		if (prev == NULL) {
			if (ocb->sdev->last_ocb != NULL)
				sbp_doorbell(ocb->sdev);
			else
				sbp_orb_pointer(ocb->sdev, ocb);
		}
	} else
		if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
			ocb->sdev->flags &= ~ORB_LINK_DEAD;
			sbp_orb_pointer(ocb->sdev, ocb);
		}
}

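/*
 * Look up the OCB that matches an incoming status block, remove it
 * from the pending queue, stop its timeout and post-sync/unload its
 * data DMA map.  In doorbell mode the completed OCB is kept around as
 * last_ocb so that the next ORB can be linked to it.
 */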
static struct sbp_ocb *
sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
{
	struct sbp_softc *sc = sdev->target->sbp;
	struct sbp_ocb *ocb;
	struct sbp_ocb *next;
	int order = 0;
	int flags;

SBP_DEBUG(1)
	printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev),
	    __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo),
	    sbp_status->src);
END_DEBUG
	mutex_enter(&sc->sc_mtx);
	for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
		next = STAILQ_NEXT(ocb, ocb);
		flags = ocb->flags;
		if (OCB_MATCH(ocb, sbp_status)) {
			/* found */
			SBP_ORB_DMA_SYNC(sdev->dma, ocb->index,
			    BUS_DMASYNC_POSTWRITE);
			STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
			if (ocb->xs != NULL)
				callout_stop(&ocb->xs->xs_callout);
			if (ntohl(ocb->orb[4]) & 0xffff) {
				const int flag =
				    (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
				    BUS_DMASYNC_POSTREAD :
				    BUS_DMASYNC_POSTWRITE;

				bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
				    0, ocb->dmamap->dm_mapsize, flag);
				bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
			}
			if (!use_doorbell) {
				if (sbp_status->src == SRC_NO_NEXT) {
					if (next != NULL)
						sbp_orb_pointer(sdev, next);
					else if (order > 0)
						/*
						 * Unordered execution:
						 * we need to send the pointer
						 * for the next ORB.
						 */
						sdev->flags |= ORB_LINK_DEAD;
				}
			}
			break;
		} else
			order++;
	}
	mutex_exit(&sc->sc_mtx);

	if (ocb && use_doorbell) {
		/*
		 * XXX this is not correct for unordered
		 * execution.
		 */
		if (sdev->last_ocb != NULL)
			sbp_free_ocb(sdev, sdev->last_ocb);
		sdev->last_ocb = ocb;
		if (next != NULL &&
		    sbp_status->src == SRC_NO_NEXT)
			sbp_doorbell(sdev);
	}

SBP_DEBUG(0)
	if (ocb && order > 0)
		printf("%s:%s:%s: unordered execution order:%d\n",
		    device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
		    order);
END_DEBUG
	return ocb;
}

static struct sbp_ocb *
sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
{
	struct sbp_softc *sc = sdev->target->sbp;
	struct sbp_ocb *tocb, *prev, *prev2;

SBP_DEBUG(1)
	printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev),
	    __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
END_DEBUG
	mutex_enter(&sc->sc_mtx);
	prev = NULL;
	STAILQ_FOREACH(tocb, &sdev->ocbs, ocb)
		prev = tocb;
	prev2 = prev;
	STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
	mutex_exit(&sc->sc_mtx);

	callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout),
	    sbp_timeout, ocb);

	if (use_doorbell && prev == NULL)
		prev2 = sdev->last_ocb;

	if (prev2 != NULL) {
SBP_DEBUG(2)
		printf("linking chain 0x%jx -> 0x%jx\n",
		    (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
END_DEBUG
		/*
		 * Suppress compiler optimization so that orb[1] must be
		 * written first.
		 * XXX We may need an explicit memory barrier on
		 * architectures other than i386/amd64.
		 */
		*(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
		*(volatile uint32_t *)&prev2->orb[0] = 0;
	}

	return prev;
}

static struct sbp_ocb *
sbp_get_ocb(struct sbp_dev *sdev)
{
	struct sbp_softc *sc = sdev->target->sbp;
	struct sbp_ocb *ocb;

	KASSERT(mutex_owned(&sc->sc_mtx));

	ocb = STAILQ_FIRST(&sdev->free_ocbs);
	if (ocb == NULL) {
		sdev->flags |= ORB_SHORTAGE;
		aprint_error_dev(sc->sc_fd.dev, "ocb shortage!!!\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
	ocb->xs = NULL;
	return ocb;
}

static void
sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
{
	struct sbp_softc *sc = sdev->target->sbp;
	int count;

	ocb->flags = 0;
	ocb->xs = NULL;

	mutex_enter(&sc->sc_mtx);
	STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
	mutex_exit(&sc->sc_mtx);
	if (sdev->flags & ORB_SHORTAGE) {
		sdev->flags &= ~ORB_SHORTAGE;
		count = sdev->freeze;
		sdev->freeze = 0;
		if (sdev->periph)
			scsipi_periph_thaw(sdev->periph, count);
		scsipi_channel_thaw(&sc->sc_channel, 0);
	}
}

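/*
 * Abort a single outstanding OCB: unload its data DMA map, stop the
 * timeout callout, complete the scsipi transfer with the given error
 * status and put the OCB back on the free list.
 */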
static void
sbp_abort_ocb(struct sbp_ocb *ocb, int status)
{
	struct sbp_softc *sc;
	struct sbp_dev *sdev;

	sdev = ocb->sdev;
	sc = sdev->target->sbp;
SBP_DEBUG(0)
	printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev),
	    __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
END_DEBUG
SBP_DEBUG(1)
	if (ocb->xs != NULL)
		sbp_print_scsi_cmd(ocb);
END_DEBUG
	if (ntohl(ocb->orb[4]) & 0xffff) {
		const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;

		bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
		    0, ocb->dmamap->dm_mapsize, flag);
		bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
	}
	if (ocb->xs != NULL) {
		callout_stop(&ocb->xs->xs_callout);
		ocb->xs->error = status;
		scsipi_done(ocb->xs);
	}
	sbp_free_ocb(sdev, ocb);
}

static void
sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
{
	struct sbp_softc *sc = sdev->target->sbp;
	struct sbp_ocb *ocb, *next;
	STAILQ_HEAD(, sbp_ocb) temp;

	mutex_enter(&sc->sc_mtx);
	STAILQ_INIT(&temp);
	STAILQ_CONCAT(&temp, &sdev->ocbs);
	STAILQ_INIT(&sdev->ocbs);
	mutex_exit(&sc->sc_mtx);

	for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
		next = STAILQ_NEXT(ocb, ocb);
		sbp_abort_ocb(ocb, status);
	}
	if (sdev->last_ocb != NULL) {
		sbp_free_ocb(sdev, sdev->last_ocb);
		sdev->last_ocb = NULL;
	}
}