1 /* $NetBSD: sbp.c,v 1.35 2013/09/15 13:52:23 martin Exp $ */ 2 /*- 3 * Copyright (c) 2003 Hidetoshi Shimokawa 4 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the acknowledgement as bellow: 17 * 18 * This product includes software developed by K. Kobayashi and H. Shimokawa 19 * 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 26 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 27 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 29 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 32 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGE. 
34 * 35 * $FreeBSD: src/sys/dev/firewire/sbp.c,v 1.100 2009/02/18 18:41:34 sbruno Exp $ 36 * 37 */ 38 39 #include <sys/cdefs.h> 40 __KERNEL_RCSID(0, "$NetBSD: sbp.c,v 1.35 2013/09/15 13:52:23 martin Exp $"); 41 42 43 #include <sys/param.h> 44 #include <sys/device.h> 45 #include <sys/errno.h> 46 #include <sys/buf.h> 47 #include <sys/callout.h> 48 #include <sys/condvar.h> 49 #include <sys/kernel.h> 50 #include <sys/kthread.h> 51 #include <sys/malloc.h> 52 #include <sys/mutex.h> 53 #include <sys/proc.h> 54 #include <sys/sysctl.h> 55 56 #include <sys/bus.h> 57 58 #include <dev/scsipi/scsi_spc.h> 59 #include <dev/scsipi/scsi_all.h> 60 #include <dev/scsipi/scsipi_all.h> 61 #include <dev/scsipi/scsiconf.h> 62 #include <dev/scsipi/scsipiconf.h> 63 64 #include <dev/ieee1394/firewire.h> 65 #include <dev/ieee1394/firewirereg.h> 66 #include <dev/ieee1394/fwdma.h> 67 #include <dev/ieee1394/iec13213.h> 68 #include <dev/ieee1394/sbp.h> 69 70 #include "locators.h" 71 72 73 #define SBP_FWDEV_ALIVE(fwdev) (((fwdev)->status == FWDEVATTACHED) \ 74 && crom_has_specver((fwdev)->csrrom, CSRVAL_ANSIT10, CSRVAL_T10SBP2)) 75 76 #define SBP_NUM_TARGETS 8 /* MAX 64 */ 77 #define SBP_NUM_LUNS 64 78 #define SBP_MAXPHYS MIN(MAXPHYS, (512*1024) /* 512KB */) 79 #define SBP_DMA_SIZE PAGE_SIZE 80 #define SBP_LOGIN_SIZE sizeof(struct sbp_login_res) 81 #define SBP_QUEUE_LEN ((SBP_DMA_SIZE - SBP_LOGIN_SIZE) / sizeof(struct sbp_ocb)) 82 #define SBP_NUM_OCB (SBP_QUEUE_LEN * SBP_NUM_TARGETS) 83 84 /* 85 * STATUS FIFO addressing 86 * bit 87 * ----------------------- 88 * 0- 1( 2): 0 (alignment) 89 * 2- 9( 8): lun 90 * 10-31(14): unit 91 * 32-47(16): SBP_BIND_HI 92 * 48-64(16): bus_id, node_id 93 */ 94 #define SBP_BIND_HI 0x1 95 #define SBP_DEV2ADDR(u, l) \ 96 (((uint64_t)SBP_BIND_HI << 32) |\ 97 (((u) & 0x3fff) << 10) |\ 98 (((l) & 0xff) << 2)) 99 #define SBP_ADDR2UNIT(a) (((a) >> 10) & 0x3fff) 100 #define SBP_ADDR2LUN(a) (((a) >> 2) & 0xff) 101 #define SBP_INITIATOR 7 102 103 static const char *orb_fun_name[] = { 104 ORB_FUN_NAMES 105 }; 106 107 static int debug = 0; 108 static int auto_login = 1; 109 static int max_speed = -1; 110 static int sbp_cold = 1; 111 static int ex_login = 1; 112 static int login_delay = 1000; /* msec */ 113 static int scan_delay = 500; /* msec */ 114 static int use_doorbell = 0; 115 static int sbp_tags = 0; 116 117 static int sysctl_sbp_verify(SYSCTLFN_PROTO, int lower, int upper); 118 static int sysctl_sbp_verify_max_speed(SYSCTLFN_PROTO); 119 static int sysctl_sbp_verify_tags(SYSCTLFN_PROTO); 120 121 /* 122 * Setup sysctl(3) MIB, hw.sbp.* 123 * 124 * TBD condition CTLFLAG_PERMANENT on being a module or not 125 */ 126 SYSCTL_SETUP(sysctl_sbp, "sysctl sbp(4) subtree setup") 127 { 128 int rc, sbp_node_num; 129 const struct sysctlnode *node; 130 131 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 132 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 133 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) 134 goto err; 135 136 if ((rc = sysctl_createv(clog, 0, NULL, &node, 137 CTLFLAG_PERMANENT, CTLTYPE_NODE, "sbp", 138 SYSCTL_DESCR("sbp controls"), NULL, 0, NULL, 139 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) 140 goto err; 141 sbp_node_num = node->sysctl_num; 142 143 /* sbp auto login flag */ 144 if ((rc = sysctl_createv(clog, 0, NULL, &node, 145 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 146 "auto_login", SYSCTL_DESCR("SBP perform login automatically"), 147 NULL, 0, &auto_login, 148 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 149 goto err; 150 151 /* sbp max speed */ 152 if ((rc = sysctl_createv(clog, 0, 
NULL, &node, 153 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 154 "max_speed", SYSCTL_DESCR("SBP transfer max speed"), 155 sysctl_sbp_verify_max_speed, 0, &max_speed, 156 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 157 goto err; 158 159 /* sbp exclusive login flag */ 160 if ((rc = sysctl_createv(clog, 0, NULL, &node, 161 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 162 "exclusive_login", SYSCTL_DESCR("SBP enable exclusive login"), 163 NULL, 0, &ex_login, 164 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 165 goto err; 166 167 /* sbp login delay */ 168 if ((rc = sysctl_createv(clog, 0, NULL, &node, 169 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 170 "login_delay", SYSCTL_DESCR("SBP login delay in msec"), 171 NULL, 0, &login_delay, 172 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 173 goto err; 174 175 /* sbp scan delay */ 176 if ((rc = sysctl_createv(clog, 0, NULL, &node, 177 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 178 "scan_delay", SYSCTL_DESCR("SBP scan delay in msec"), 179 NULL, 0, &scan_delay, 180 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 181 goto err; 182 183 /* sbp use doorbell flag */ 184 if ((rc = sysctl_createv(clog, 0, NULL, &node, 185 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 186 "use_doorbell", SYSCTL_DESCR("SBP use doorbell request"), 187 NULL, 0, &use_doorbell, 188 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 189 goto err; 190 191 /* sbp force tagged queuing */ 192 if ((rc = sysctl_createv(clog, 0, NULL, &node, 193 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 194 "tags", SYSCTL_DESCR("SBP tagged queuing support"), 195 sysctl_sbp_verify_tags, 0, &sbp_tags, 196 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 197 goto err; 198 199 /* sbp driver debug flag */ 200 if ((rc = sysctl_createv(clog, 0, NULL, &node, 201 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 202 "sbp_debug", SYSCTL_DESCR("SBP debug flag"), 203 NULL, 0, &debug, 204 0, CTL_HW, sbp_node_num, CTL_CREATE, CTL_EOL)) != 0) 205 goto err; 206 207 return; 208 209 err: 210 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 211 } 212 213 static int 214 sysctl_sbp_verify(SYSCTLFN_ARGS, int lower, int upper) 215 { 216 int error, t; 217 struct sysctlnode node; 218 219 node = *rnode; 220 t = *(int*)rnode->sysctl_data; 221 node.sysctl_data = &t; 222 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 223 if (error || newp == NULL) 224 return error; 225 226 if (t < lower || t > upper) 227 return EINVAL; 228 229 *(int*)rnode->sysctl_data = t; 230 231 return 0; 232 } 233 234 static int 235 sysctl_sbp_verify_max_speed(SYSCTLFN_ARGS) 236 { 237 238 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), 0, FWSPD_S400); 239 } 240 241 static int 242 sysctl_sbp_verify_tags(SYSCTLFN_ARGS) 243 { 244 245 return sysctl_sbp_verify(SYSCTLFN_CALL(rnode), -1, 1); 246 } 247 248 #define NEED_RESPONSE 0 249 250 #define SBP_SEG_MAX rounddown(0xffff, PAGE_SIZE) 251 #ifdef __sparc64__ /* iommu */ 252 #define SBP_IND_MAX howmany(SBP_MAXPHYS, SBP_SEG_MAX) 253 #else 254 #define SBP_IND_MAX howmany(SBP_MAXPHYS, PAGE_SIZE) 255 #endif 256 struct sbp_ocb { 257 uint32_t orb[8]; 258 #define IND_PTR_OFFSET (sizeof(uint32_t) * 8) 259 struct ind_ptr ind_ptr[SBP_IND_MAX]; 260 struct scsipi_xfer *xs; 261 struct sbp_dev *sdev; 262 uint16_t index; 263 uint16_t flags; /* XXX should be removed */ 264 bus_dmamap_t dmamap; 265 bus_addr_t bus_addr; 266 STAILQ_ENTRY(sbp_ocb) ocb; 267 }; 268 269 #define SBP_ORB_DMA_SYNC(dma, i, op) \ 270 bus_dmamap_sync((dma).dma_tag, 
(dma).dma_map, \ 271 sizeof(struct sbp_ocb) * (i), \ 272 sizeof(ocb->orb) + sizeof(ocb->ind_ptr), (op)); 273 274 #define OCB_ACT_MGM 0 275 #define OCB_ACT_CMD 1 276 #define OCB_MATCH(o,s) ((o)->bus_addr == ntohl((s)->orb_lo)) 277 278 struct sbp_dev{ 279 #define SBP_DEV_RESET 0 /* accept login */ 280 #define SBP_DEV_LOGIN 1 /* to login */ 281 #if 0 282 #define SBP_DEV_RECONN 2 /* to reconnect */ 283 #endif 284 #define SBP_DEV_TOATTACH 3 /* to attach */ 285 #define SBP_DEV_PROBE 4 /* scan lun */ 286 #define SBP_DEV_ATTACHED 5 /* in operation */ 287 #define SBP_DEV_DEAD 6 /* unavailable unit */ 288 #define SBP_DEV_RETRY 7 /* unavailable unit */ 289 uint8_t status:4, 290 timeout:4; 291 uint8_t type; 292 uint16_t lun_id; 293 uint16_t freeze; 294 #define ORB_LINK_DEAD (1 << 0) 295 #define VALID_LUN (1 << 1) 296 #define ORB_POINTER_ACTIVE (1 << 2) 297 #define ORB_POINTER_NEED (1 << 3) 298 #define ORB_DOORBELL_ACTIVE (1 << 4) 299 #define ORB_DOORBELL_NEED (1 << 5) 300 #define ORB_SHORTAGE (1 << 6) 301 uint16_t flags; 302 struct scsipi_periph *periph; 303 struct sbp_target *target; 304 struct fwdma_alloc dma; 305 struct sbp_login_res *login; 306 struct callout login_callout; 307 struct sbp_ocb *ocb; 308 STAILQ_HEAD(, sbp_ocb) ocbs; 309 STAILQ_HEAD(, sbp_ocb) free_ocbs; 310 struct sbp_ocb *last_ocb; 311 char vendor[32]; 312 char product[32]; 313 char revision[10]; 314 char bustgtlun[32]; 315 }; 316 317 struct sbp_target { 318 int target_id; 319 int num_lun; 320 struct sbp_dev **luns; 321 struct sbp_softc *sbp; 322 struct fw_device *fwdev; 323 uint32_t mgm_hi, mgm_lo; 324 struct sbp_ocb *mgm_ocb_cur; 325 STAILQ_HEAD(, sbp_ocb) mgm_ocb_queue; 326 struct callout mgm_ocb_timeout; 327 STAILQ_HEAD(, fw_xfer) xferlist; 328 int n_xfer; 329 }; 330 331 struct sbp_softc { 332 struct firewire_dev_comm sc_fd; 333 struct scsipi_adapter sc_adapter; 334 struct scsipi_channel sc_channel; 335 device_t sc_bus; 336 struct lwp *sc_lwp; 337 struct sbp_target sc_target; 338 struct fw_bind sc_fwb; 339 bus_dma_tag_t sc_dmat; 340 struct timeval sc_last_busreset; 341 int sc_flags; 342 kmutex_t sc_mtx; 343 kcondvar_t sc_cv; 344 }; 345 346 MALLOC_DEFINE(M_SBP, "sbp", "SBP-II/IEEE1394"); 347 MALLOC_DECLARE(M_SBP); 348 349 350 static int sbpmatch(device_t, cfdata_t, void *); 351 static void sbpattach(device_t, device_t, void *); 352 static int sbpdetach(device_t, int); 353 354 static void sbp_scsipi_request(struct scsipi_channel *, scsipi_adapter_req_t, 355 void *); 356 static void sbp_minphys(struct buf *); 357 358 static void sbp_show_sdev_info(struct sbp_dev *); 359 static void sbp_alloc_lun(struct sbp_target *); 360 static struct sbp_target *sbp_alloc_target(struct sbp_softc *, 361 struct fw_device *); 362 static void sbp_probe_lun(struct sbp_dev *); 363 static void sbp_login_callout(void *); 364 static void sbp_login(struct sbp_dev *); 365 static void sbp_probe_target(void *); 366 static void sbp_post_busreset(void *); 367 static void sbp_post_explore(void *); 368 #if NEED_RESPONSE 369 static void sbp_loginres_callback(struct fw_xfer *); 370 #endif 371 static inline void sbp_xfer_free(struct fw_xfer *); 372 static void sbp_reset_start_callback(struct fw_xfer *); 373 static void sbp_reset_start(struct sbp_dev *); 374 static void sbp_mgm_callback(struct fw_xfer *); 375 static void sbp_scsipi_scan_target(void *); 376 static inline void sbp_scan_dev(struct sbp_dev *); 377 static void sbp_do_attach(struct fw_xfer *); 378 static void sbp_agent_reset_callback(struct fw_xfer *); 379 static void sbp_agent_reset(struct sbp_dev *); 380 
static void sbp_busy_timeout_callback(struct fw_xfer *); 381 static void sbp_busy_timeout(struct sbp_dev *); 382 static void sbp_orb_pointer_callback(struct fw_xfer *); 383 static void sbp_orb_pointer(struct sbp_dev *, struct sbp_ocb *); 384 static void sbp_doorbell_callback(struct fw_xfer *); 385 static void sbp_doorbell(struct sbp_dev *); 386 static struct fw_xfer *sbp_write_cmd(struct sbp_dev *, int, int); 387 static void sbp_mgm_orb(struct sbp_dev *, int, struct sbp_ocb *); 388 static void sbp_print_scsi_cmd(struct sbp_ocb *); 389 static void sbp_scsi_status(struct sbp_status *, struct sbp_ocb *); 390 static void sbp_fix_inq_data(struct sbp_ocb *); 391 static void sbp_recv(struct fw_xfer *); 392 static int sbp_logout_all(struct sbp_softc *); 393 static void sbp_free_sdev(struct sbp_dev *); 394 static void sbp_free_target(struct sbp_target *); 395 static void sbp_scsipi_detach_sdev(struct sbp_dev *); 396 static void sbp_scsipi_detach_target(struct sbp_target *); 397 static void sbp_target_reset(struct sbp_dev *, int); 398 static void sbp_mgm_timeout(void *); 399 static void sbp_timeout(void *); 400 static void sbp_action1(struct sbp_softc *, struct scsipi_xfer *); 401 static void sbp_execute_ocb(struct sbp_ocb *, bus_dma_segment_t *, int); 402 static struct sbp_ocb *sbp_dequeue_ocb(struct sbp_dev *, struct sbp_status *); 403 static struct sbp_ocb *sbp_enqueue_ocb(struct sbp_dev *, struct sbp_ocb *); 404 static struct sbp_ocb *sbp_get_ocb(struct sbp_dev *); 405 static void sbp_free_ocb(struct sbp_dev *, struct sbp_ocb *); 406 static void sbp_abort_ocb(struct sbp_ocb *, int); 407 static void sbp_abort_all_ocbs(struct sbp_dev *, int); 408 409 410 static const char *orb_status0[] = { 411 /* 0 */ "No additional information to report", 412 /* 1 */ "Request type not supported", 413 /* 2 */ "Speed not supported", 414 /* 3 */ "Page size not supported", 415 /* 4 */ "Access denied", 416 /* 5 */ "Logical unit not supported", 417 /* 6 */ "Maximum payload too small", 418 /* 7 */ "Reserved for future standardization", 419 /* 8 */ "Resources unavailable", 420 /* 9 */ "Function rejected", 421 /* A */ "Login ID not recognized", 422 /* B */ "Dummy ORB completed", 423 /* C */ "Request aborted", 424 /* FF */ "Unspecified error" 425 #define MAX_ORB_STATUS0 0xd 426 }; 427 428 static const char *orb_status1_object[] = { 429 /* 0 */ "Operation request block (ORB)", 430 /* 1 */ "Data buffer", 431 /* 2 */ "Page table", 432 /* 3 */ "Unable to specify" 433 }; 434 435 static const char *orb_status1_serial_bus_error[] = { 436 /* 0 */ "Missing acknowledge", 437 /* 1 */ "Reserved; not to be used", 438 /* 2 */ "Time-out error", 439 /* 3 */ "Reserved; not to be used", 440 /* 4 */ "Busy retry limit exceeded(X)", 441 /* 5 */ "Busy retry limit exceeded(A)", 442 /* 6 */ "Busy retry limit exceeded(B)", 443 /* 7 */ "Reserved for future standardization", 444 /* 8 */ "Reserved for future standardization", 445 /* 9 */ "Reserved for future standardization", 446 /* A */ "Reserved for future standardization", 447 /* B */ "Tardy retry limit exceeded", 448 /* C */ "Conflict error", 449 /* D */ "Data error", 450 /* E */ "Type error", 451 /* F */ "Address error" 452 }; 453 454 455 CFATTACH_DECL_NEW(sbp, sizeof(struct sbp_softc), 456 sbpmatch, sbpattach, sbpdetach, NULL); 457 458 459 int 460 sbpmatch(device_t parent, cfdata_t cf, void *aux) 461 { 462 struct fw_attach_args *fwa = aux; 463 464 if (strcmp(fwa->name, "sbp") == 0) 465 return 1; 466 return 0; 467 } 468 469 static void 470 sbpattach(device_t parent, device_t self, void *aux) 
471 { 472 struct sbp_softc *sc = device_private(self); 473 struct fw_attach_args *fwa = (struct fw_attach_args *)aux; 474 struct firewire_comm *fc; 475 struct scsipi_adapter *sc_adapter = &sc->sc_adapter; 476 struct scsipi_channel *sc_channel = &sc->sc_channel; 477 struct sbp_target *target = &sc->sc_target; 478 int dv_unit; 479 480 aprint_naive("\n"); 481 aprint_normal(": SBP-2/SCSI over IEEE1394\n"); 482 483 sc->sc_fd.dev = self; 484 485 if (cold) 486 sbp_cold++; 487 sc->sc_fd.fc = fc = fwa->fc; 488 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM); 489 cv_init(&sc->sc_cv, "sbp"); 490 491 if (max_speed < 0) 492 max_speed = fc->speed; 493 494 sc->sc_dmat = fc->dmat; 495 496 sc->sc_target.fwdev = NULL; 497 sc->sc_target.luns = NULL; 498 499 /* Initialize mutexes and lists before we can error out 500 * to prevent crashes on detach 501 */ 502 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_VM); 503 STAILQ_INIT(&sc->sc_fwb.xferlist); 504 505 if (sbp_alloc_target(sc, fwa->fwdev) == NULL) 506 return; 507 508 sc_adapter->adapt_dev = sc->sc_fd.dev; 509 sc_adapter->adapt_nchannels = 1; 510 sc_adapter->adapt_max_periph = 1; 511 sc_adapter->adapt_request = sbp_scsipi_request; 512 sc_adapter->adapt_minphys = sbp_minphys; 513 sc_adapter->adapt_openings = 8; 514 515 sc_channel->chan_adapter = sc_adapter; 516 sc_channel->chan_bustype = &scsi_bustype; 517 sc_channel->chan_defquirks = PQUIRK_ONLYBIG; 518 sc_channel->chan_channel = 0; 519 sc_channel->chan_flags = SCSIPI_CHAN_CANGROW | SCSIPI_CHAN_NOSETTLE; 520 521 sc_channel->chan_ntargets = 1; 522 sc_channel->chan_nluns = target->num_lun; /* We set nluns 0 now */ 523 sc_channel->chan_id = 1; 524 525 sc->sc_bus = config_found(sc->sc_fd.dev, sc_channel, scsiprint); 526 if (sc->sc_bus == NULL) { 527 aprint_error_dev(self, "attach failed\n"); 528 return; 529 } 530 531 /* We reserve 16 bit space (4 bytes X 64 unit X 256 luns) */ 532 dv_unit = device_unit(sc->sc_fd.dev); 533 sc->sc_fwb.start = SBP_DEV2ADDR(dv_unit, 0); 534 sc->sc_fwb.end = SBP_DEV2ADDR(dv_unit, -1); 535 /* pre-allocate xfer */ 536 fw_xferlist_add(&sc->sc_fwb.xferlist, M_SBP, 537 /*send*/ 0, /*recv*/ SBP_RECV_LEN, SBP_NUM_OCB / 2, 538 fc, (void *)sc, sbp_recv); 539 fw_bindadd(fc, &sc->sc_fwb); 540 541 sc->sc_fd.post_busreset = sbp_post_busreset; 542 sc->sc_fd.post_explore = sbp_post_explore; 543 544 if (fc->status != FWBUSNOTREADY) { 545 sbp_post_busreset((void *)sc); 546 sbp_post_explore((void *)sc); 547 } 548 } 549 550 static int 551 sbpdetach(device_t self, int flags) 552 { 553 struct sbp_softc *sc = device_private(self); 554 struct firewire_comm *fc = sc->sc_fd.fc; 555 556 sbp_scsipi_detach_target(&sc->sc_target); 557 558 if (sc->sc_target.fwdev && SBP_FWDEV_ALIVE(sc->sc_target.fwdev)) { 559 sbp_logout_all(sc); 560 561 /* XXX wait for logout completion */ 562 mutex_enter(&sc->sc_mtx); 563 cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz/2); 564 mutex_exit(&sc->sc_mtx); 565 } 566 567 sbp_free_target(&sc->sc_target); 568 569 fw_bindremove(fc, &sc->sc_fwb); 570 fw_xferlist_remove(&sc->sc_fwb.xferlist); 571 mutex_destroy(&sc->sc_fwb.fwb_mtx); 572 573 mutex_destroy(&sc->sc_mtx); 574 cv_destroy(&sc->sc_cv); 575 576 return 0; 577 } 578 579 580 static void 581 sbp_scsipi_request(struct scsipi_channel *channel, scsipi_adapter_req_t req, 582 void *arg) 583 { 584 struct sbp_softc *sc = device_private(channel->chan_adapter->adapt_dev); 585 struct scsipi_xfer *xs = arg; 586 int i; 587 588 SBP_DEBUG(1) 589 printf("Called sbp_scsipi_request\n"); 590 END_DEBUG 591 592 switch (req) { 593 case 
ADAPTER_REQ_RUN_XFER:
		SBP_DEBUG(1)
		printf("Got req_run_xfer\n");
		printf("xs control: 0x%08x, timeout: %d\n",
		    xs->xs_control, xs->timeout);
		printf("opcode: 0x%02x\n", (int)xs->cmd->opcode);
		for (i = 0; i < 15; i++)
			printf("0x%02x ", (int)xs->cmd->bytes[i]);
		printf("\n");
		END_DEBUG
		if (xs->xs_control & XS_CTL_RESET) {
			SBP_DEBUG(1)
			printf("XS_CTL_RESET not supported\n");
			END_DEBUG
			break;
		}
#define SBPSCSI_SBP2_MAX_CDB 12
		if (xs->cmdlen > SBPSCSI_SBP2_MAX_CDB) {
			SBP_DEBUG(0)
			printf(
			    "sbp doesn't support CDBs larger than %d bytes\n",
			    SBPSCSI_SBP2_MAX_CDB);
			END_DEBUG
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			return;
		}
		sbp_action1(sc, xs);

		break;
	case ADAPTER_REQ_GROW_RESOURCES:
		SBP_DEBUG(1)
		printf("Got req_grow_resources\n");
		END_DEBUG
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
		SBP_DEBUG(1)
		printf("Got set xfer mode\n");
		END_DEBUG
		break;
	default:
		panic("Unknown request: %d\n", (int)req);
	}
}

static void
sbp_minphys(struct buf *bp)
{

	minphys(bp);
}


/*
 * Display device characteristics on the console
 */
static void
sbp_show_sdev_info(struct sbp_dev *sdev)
{
	struct fw_device *fwdev = sdev->target->fwdev;
	struct sbp_softc *sc = sdev->target->sbp;

	aprint_normal_dev(sc->sc_fd.dev,
	    "ordered:%d type:%d EUI:%08x%08x node:%d speed:%d maxrec:%d\n",
	    (sdev->type & 0x40) >> 6,
	    (sdev->type & 0x1f),
	    fwdev->eui.hi,
	    fwdev->eui.lo,
	    fwdev->dst,
	    fwdev->speed,
	    fwdev->maxrec);
	aprint_normal_dev(sc->sc_fd.dev, "%s '%s' '%s' '%s'\n",
	    sdev->bustgtlun, sdev->vendor, sdev->product, sdev->revision);
}

static void
sbp_alloc_lun(struct sbp_target *target)
{
	struct crom_context cc;
	struct csrreg *reg;
	struct sbp_dev *sdev, **newluns;
	struct sbp_softc *sc;
	int maxlun, lun, i;

	sc = target->sbp;
	crom_init_context(&cc, target->fwdev->csrrom);
	/* XXX should parse appropriate unit directories only */
	maxlun = -1;
	while (cc.depth >= 0) {
		reg = crom_search_key(&cc, CROM_LUN);
		if (reg == NULL)
			break;
		lun = reg->val & 0xffff;
		SBP_DEBUG(0)
		printf("target %d lun %d found\n", target->target_id, lun);
		END_DEBUG
		if (maxlun < lun)
			maxlun = lun;
		crom_next(&cc);
	}
	if (maxlun < 0)
		aprint_normal_dev(sc->sc_fd.dev, "%d: no LUN found\n",
		    target->target_id);

	maxlun++;
	if (maxlun >= SBP_NUM_LUNS)
		maxlun = SBP_NUM_LUNS;

	/* Invalidate stale devices */
	for (lun = 0; lun < target->num_lun; lun++) {
		sdev = target->luns[lun];
		if (sdev == NULL)
			continue;
		sdev->flags &= ~VALID_LUN;
		if (lun >= maxlun) {
			/* lost device */
			sbp_scsipi_detach_sdev(sdev);
			sbp_free_sdev(sdev);
			target->luns[lun] = NULL;
		}
	}

	/* Reallocate */
	if (maxlun != target->num_lun) {
		newluns = (struct sbp_dev **) realloc(target->luns,
		    sizeof(struct sbp_dev *) * maxlun,
		    M_SBP, M_NOWAIT | M_ZERO);

		if (newluns == NULL) {
			aprint_error_dev(sc->sc_fd.dev, "realloc failed\n");
			newluns = target->luns;
			maxlun = target->num_lun;
		}

		/*
		 * We must zero the extended region in case
		 * realloc() doesn't allocate a new buffer.
730 */ 731 if (maxlun > target->num_lun) { 732 const int sbp_dev_p_sz = sizeof(struct sbp_dev *); 733 734 memset(&newluns[target->num_lun], 0, 735 sbp_dev_p_sz * (maxlun - target->num_lun)); 736 } 737 738 target->luns = newluns; 739 target->num_lun = maxlun; 740 } 741 742 crom_init_context(&cc, target->fwdev->csrrom); 743 while (cc.depth >= 0) { 744 int new = 0; 745 746 reg = crom_search_key(&cc, CROM_LUN); 747 if (reg == NULL) 748 break; 749 lun = reg->val & 0xffff; 750 if (lun >= SBP_NUM_LUNS) { 751 aprint_error_dev(sc->sc_fd.dev, "too large lun %d\n", 752 lun); 753 goto next; 754 } 755 756 sdev = target->luns[lun]; 757 if (sdev == NULL) { 758 sdev = malloc(sizeof(struct sbp_dev), 759 M_SBP, M_NOWAIT | M_ZERO); 760 if (sdev == NULL) { 761 aprint_error_dev(sc->sc_fd.dev, 762 "malloc failed\n"); 763 goto next; 764 } 765 target->luns[lun] = sdev; 766 sdev->lun_id = lun; 767 sdev->target = target; 768 STAILQ_INIT(&sdev->ocbs); 769 callout_init(&sdev->login_callout, CALLOUT_MPSAFE); 770 callout_setfunc(&sdev->login_callout, 771 sbp_login_callout, sdev); 772 sdev->status = SBP_DEV_RESET; 773 new = 1; 774 snprintf(sdev->bustgtlun, 32, "%s:%d:%d", 775 device_xname(sc->sc_fd.dev), 776 sdev->target->target_id, 777 sdev->lun_id); 778 if (!sc->sc_lwp) 779 if (kthread_create( 780 PRI_NONE, KTHREAD_MPSAFE, NULL, 781 sbp_scsipi_scan_target, &sc->sc_target, 782 &sc->sc_lwp, 783 "sbp%d_attach", device_unit(sc->sc_fd.dev))) 784 aprint_error_dev(sc->sc_fd.dev, 785 "unable to create thread"); 786 } 787 sdev->flags |= VALID_LUN; 788 sdev->type = (reg->val & 0xff0000) >> 16; 789 790 if (new == 0) 791 goto next; 792 793 fwdma_alloc_setup(sc->sc_fd.dev, sc->sc_dmat, SBP_DMA_SIZE, 794 &sdev->dma, sizeof(uint32_t), BUS_DMA_NOWAIT); 795 if (sdev->dma.v_addr == NULL) { 796 free(sdev, M_SBP); 797 target->luns[lun] = NULL; 798 goto next; 799 } 800 sdev->ocb = (struct sbp_ocb *)sdev->dma.v_addr; 801 sdev->login = (struct sbp_login_res *)&sdev->ocb[SBP_QUEUE_LEN]; 802 memset((char *)sdev->ocb, 0, 803 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN); 804 805 STAILQ_INIT(&sdev->free_ocbs); 806 for (i = 0; i < SBP_QUEUE_LEN; i++) { 807 struct sbp_ocb *ocb = &sdev->ocb[i]; 808 809 ocb->index = i; 810 ocb->bus_addr = 811 sdev->dma.bus_addr + sizeof(struct sbp_ocb) * i; 812 if (bus_dmamap_create(sc->sc_dmat, 0x100000, 813 SBP_IND_MAX, SBP_SEG_MAX, 0, 0, &ocb->dmamap)) { 814 aprint_error_dev(sc->sc_fd.dev, 815 "cannot create dmamap %d\n", i); 816 /* XXX */ 817 goto next; 818 } 819 sbp_free_ocb(sdev, ocb); /* into free queue */ 820 } 821 next: 822 crom_next(&cc); 823 } 824 825 for (lun = 0; lun < target->num_lun; lun++) { 826 sdev = target->luns[lun]; 827 if (sdev != NULL && (sdev->flags & VALID_LUN) == 0) { 828 sbp_scsipi_detach_sdev(sdev); 829 sbp_free_sdev(sdev); 830 target->luns[lun] = NULL; 831 } 832 } 833 } 834 835 static struct sbp_target * 836 sbp_alloc_target(struct sbp_softc *sc, struct fw_device *fwdev) 837 { 838 struct sbp_target *target; 839 struct crom_context cc; 840 struct csrreg *reg; 841 842 SBP_DEBUG(1) 843 printf("sbp_alloc_target\n"); 844 END_DEBUG 845 /* new target */ 846 target = &sc->sc_target; 847 target->sbp = sc; 848 target->fwdev = fwdev; 849 target->target_id = 0; 850 target->mgm_ocb_cur = NULL; 851 SBP_DEBUG(1) 852 printf("target: mgm_port: %x\n", target->mgm_lo); 853 END_DEBUG 854 STAILQ_INIT(&target->xferlist); 855 target->n_xfer = 0; 856 STAILQ_INIT(&target->mgm_ocb_queue); 857 callout_init(&target->mgm_ocb_timeout, CALLOUT_MPSAFE); 858 859 target->luns = NULL; 860 target->num_lun = 0; 861 862 /* XXX we 
may want to reload mgm port after each bus reset */ 863 /* XXX there might be multiple management agents */ 864 crom_init_context(&cc, target->fwdev->csrrom); 865 reg = crom_search_key(&cc, CROM_MGM); 866 if (reg == NULL || reg->val == 0) { 867 aprint_error_dev(sc->sc_fd.dev, "NULL management address\n"); 868 target->fwdev = NULL; 869 return NULL; 870 } 871 872 target->mgm_hi = 0xffff; 873 target->mgm_lo = 0xf0000000 | (reg->val << 2); 874 875 return target; 876 } 877 878 static void 879 sbp_probe_lun(struct sbp_dev *sdev) 880 { 881 struct fw_device *fwdev; 882 struct crom_context c, *cc = &c; 883 struct csrreg *reg; 884 885 memset(sdev->vendor, 0, sizeof(sdev->vendor)); 886 memset(sdev->product, 0, sizeof(sdev->product)); 887 888 fwdev = sdev->target->fwdev; 889 crom_init_context(cc, fwdev->csrrom); 890 /* get vendor string */ 891 crom_search_key(cc, CSRKEY_VENDOR); 892 crom_next(cc); 893 crom_parse_text(cc, sdev->vendor, sizeof(sdev->vendor)); 894 /* skip to the unit directory for SBP-2 */ 895 while ((reg = crom_search_key(cc, CSRKEY_VER)) != NULL) { 896 if (reg->val == CSRVAL_T10SBP2) 897 break; 898 crom_next(cc); 899 } 900 /* get firmware revision */ 901 reg = crom_search_key(cc, CSRKEY_FIRM_VER); 902 if (reg != NULL) 903 snprintf(sdev->revision, sizeof(sdev->revision), "%06x", 904 reg->val); 905 /* get product string */ 906 crom_search_key(cc, CSRKEY_MODEL); 907 crom_next(cc); 908 crom_parse_text(cc, sdev->product, sizeof(sdev->product)); 909 } 910 911 static void 912 sbp_login_callout(void *arg) 913 { 914 struct sbp_dev *sdev = (struct sbp_dev *)arg; 915 916 sbp_mgm_orb(sdev, ORB_FUN_LGI, NULL); 917 } 918 919 static void 920 sbp_login(struct sbp_dev *sdev) 921 { 922 struct sbp_softc *sc = sdev->target->sbp; 923 struct timeval delta; 924 struct timeval t; 925 int ticks = 0; 926 927 microtime(&delta); 928 timersub(&delta, &sc->sc_last_busreset, &delta); 929 t.tv_sec = login_delay / 1000; 930 t.tv_usec = (login_delay % 1000) * 1000; 931 timersub(&t, &delta, &t); 932 if (t.tv_sec >= 0 && t.tv_usec > 0) 933 ticks = (t.tv_sec * 1000 + t.tv_usec / 1000) * hz / 1000; 934 SBP_DEBUG(0) 935 printf("%s: sec = %lld usec = %ld ticks = %d\n", __func__, 936 (long long)t.tv_sec, (long)t.tv_usec, ticks); 937 END_DEBUG 938 callout_schedule(&sdev->login_callout, ticks); 939 } 940 941 static void 942 sbp_probe_target(void *arg) 943 { 944 struct sbp_target *target = (struct sbp_target *)arg; 945 struct sbp_dev *sdev; 946 int i; 947 948 SBP_DEBUG(1) 949 printf("%s %d\n", __func__, target->target_id); 950 END_DEBUG 951 952 sbp_alloc_lun(target); 953 954 /* XXX untimeout mgm_ocb and dequeue */ 955 for (i = 0; i < target->num_lun; i++) { 956 sdev = target->luns[i]; 957 if (sdev == NULL || sdev->status == SBP_DEV_DEAD) 958 continue; 959 960 if (sdev->periph != NULL) { 961 scsipi_periph_freeze(sdev->periph, 1); 962 sdev->freeze++; 963 } 964 sbp_probe_lun(sdev); 965 sbp_show_sdev_info(sdev); 966 967 sbp_abort_all_ocbs(sdev, XS_RESET); 968 switch (sdev->status) { 969 case SBP_DEV_RESET: 970 /* new or revived target */ 971 if (auto_login) 972 sbp_login(sdev); 973 break; 974 case SBP_DEV_TOATTACH: 975 case SBP_DEV_PROBE: 976 case SBP_DEV_ATTACHED: 977 case SBP_DEV_RETRY: 978 default: 979 sbp_mgm_orb(sdev, ORB_FUN_RCN, NULL); 980 break; 981 } 982 } 983 } 984 985 static void 986 sbp_post_busreset(void *arg) 987 { 988 struct sbp_softc *sc = (struct sbp_softc *)arg; 989 struct sbp_target *target = &sc->sc_target; 990 struct fw_device *fwdev = target->fwdev; 991 int alive; 992 993 alive = SBP_FWDEV_ALIVE(fwdev); 994 
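	/*
	 * SBP_FWDEV_ALIVE (see the macro near the top of this file) is true
	 * only when the node is still FWDEVATTACHED and its configuration
	 * ROM carries the ANSI T10 SBP-2 unit spec/version entries, i.e. the
	 * device survived the bus reset as an SBP-2 target.
	 */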
SBP_DEBUG(0) 995 printf("sbp_post_busreset\n"); 996 if (!alive) 997 printf("not alive\n"); 998 END_DEBUG 999 microtime(&sc->sc_last_busreset); 1000 1001 if (!alive) 1002 return; 1003 1004 scsipi_channel_freeze(&sc->sc_channel, 1); 1005 } 1006 1007 static void 1008 sbp_post_explore(void *arg) 1009 { 1010 struct sbp_softc *sc = (struct sbp_softc *)arg; 1011 struct sbp_target *target = &sc->sc_target; 1012 struct fw_device *fwdev = target->fwdev; 1013 int alive; 1014 1015 alive = SBP_FWDEV_ALIVE(fwdev); 1016 SBP_DEBUG(0) 1017 printf("sbp_post_explore (sbp_cold=%d)\n", sbp_cold); 1018 if (!alive) 1019 printf("not alive\n"); 1020 END_DEBUG 1021 if (!alive) 1022 return; 1023 1024 if (!firewire_phydma_enable) 1025 return; 1026 1027 if (sbp_cold > 0) 1028 sbp_cold--; 1029 1030 SBP_DEBUG(0) 1031 printf("sbp_post_explore: EUI:%08x%08x ", fwdev->eui.hi, fwdev->eui.lo); 1032 END_DEBUG 1033 sbp_probe_target((void *)target); 1034 if (target->num_lun == 0) 1035 sbp_free_target(target); 1036 1037 scsipi_channel_thaw(&sc->sc_channel, 1); 1038 } 1039 1040 #if NEED_RESPONSE 1041 static void 1042 sbp_loginres_callback(struct fw_xfer *xfer) 1043 { 1044 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1045 struct sbp_softc *sc = sdev->target->sbp; 1046 1047 SBP_DEBUG(1) 1048 printf("sbp_loginres_callback\n"); 1049 END_DEBUG 1050 /* recycle */ 1051 mutex_enter(&sc->sc_fwb.fwb_mtx); 1052 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 1053 mutex_exit(&sc->sc_fwb.fwb_mtx); 1054 return; 1055 } 1056 #endif 1057 1058 static inline void 1059 sbp_xfer_free(struct fw_xfer *xfer) 1060 { 1061 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1062 struct sbp_softc *sc = sdev->target->sbp; 1063 1064 fw_xfer_unload(xfer); 1065 mutex_enter(&sc->sc_mtx); 1066 STAILQ_INSERT_TAIL(&sdev->target->xferlist, xfer, link); 1067 mutex_exit(&sc->sc_mtx); 1068 } 1069 1070 static void 1071 sbp_reset_start_callback(struct fw_xfer *xfer) 1072 { 1073 struct sbp_dev *tsdev, *sdev = (struct sbp_dev *)xfer->sc; 1074 struct sbp_target *target = sdev->target; 1075 int i; 1076 1077 if (xfer->resp != 0) 1078 aprint_error("%s: sbp_reset_start failed: resp=%d\n", 1079 sdev->bustgtlun, xfer->resp); 1080 1081 for (i = 0; i < target->num_lun; i++) { 1082 tsdev = target->luns[i]; 1083 if (tsdev != NULL && tsdev->status == SBP_DEV_LOGIN) 1084 sbp_login(tsdev); 1085 } 1086 } 1087 1088 static void 1089 sbp_reset_start(struct sbp_dev *sdev) 1090 { 1091 struct fw_xfer *xfer; 1092 struct fw_pkt *fp; 1093 1094 SBP_DEBUG(0) 1095 printf("%s: sbp_reset_start: %s\n", 1096 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1097 END_DEBUG 1098 1099 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1100 if (xfer == NULL) 1101 return; 1102 xfer->hand = sbp_reset_start_callback; 1103 fp = &xfer->send.hdr; 1104 fp->mode.wreqq.dest_hi = 0xffff; 1105 fp->mode.wreqq.dest_lo = 0xf0000000 | RESET_START; 1106 fp->mode.wreqq.data = htonl(0xf); 1107 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1108 sbp_xfer_free(xfer); 1109 } 1110 1111 static void 1112 sbp_mgm_callback(struct fw_xfer *xfer) 1113 { 1114 struct sbp_dev *sdev; 1115 1116 sdev = (struct sbp_dev *)xfer->sc; 1117 1118 SBP_DEBUG(1) 1119 printf("%s: sbp_mgm_callback: %s\n", 1120 device_xname(sdev->target->sbp->sc_fd.dev), sdev->bustgtlun); 1121 END_DEBUG 1122 sbp_xfer_free(xfer); 1123 return; 1124 } 1125 1126 static void 1127 sbp_scsipi_scan_target(void *arg) 1128 { 1129 struct sbp_target *target = (struct sbp_target *)arg; 1130 struct sbp_softc *sc = target->sbp; 1131 struct sbp_dev *sdev; 1132 struct 
scsipi_channel *chan = &sc->sc_channel; 1133 struct scsibus_softc *sc_bus = device_private(sc->sc_bus); 1134 int lun, yet; 1135 1136 do { 1137 mutex_enter(&sc->sc_mtx); 1138 cv_wait_sig(&sc->sc_cv, &sc->sc_mtx); 1139 mutex_exit(&sc->sc_mtx); 1140 yet = 0; 1141 1142 for (lun = 0; lun < target->num_lun; lun++) { 1143 sdev = target->luns[lun]; 1144 if (sdev == NULL) 1145 continue; 1146 if (sdev->status != SBP_DEV_PROBE) { 1147 yet++; 1148 continue; 1149 } 1150 1151 if (sdev->periph == NULL) { 1152 if (chan->chan_nluns < target->num_lun) 1153 chan->chan_nluns = target->num_lun; 1154 1155 scsi_probe_bus(sc_bus, target->target_id, 1156 sdev->lun_id); 1157 sdev->periph = scsipi_lookup_periph(chan, 1158 target->target_id, lun); 1159 } 1160 sdev->status = SBP_DEV_ATTACHED; 1161 } 1162 } while (yet > 0); 1163 1164 sc->sc_lwp = NULL; 1165 kthread_exit(0); 1166 1167 /* NOTREACHED */ 1168 } 1169 1170 static inline void 1171 sbp_scan_dev(struct sbp_dev *sdev) 1172 { 1173 struct sbp_softc *sc = sdev->target->sbp; 1174 1175 sdev->status = SBP_DEV_PROBE; 1176 mutex_enter(&sc->sc_mtx); 1177 cv_signal(&sdev->target->sbp->sc_cv); 1178 mutex_exit(&sc->sc_mtx); 1179 } 1180 1181 1182 static void 1183 sbp_do_attach(struct fw_xfer *xfer) 1184 { 1185 struct sbp_dev *sdev; 1186 struct sbp_target *target; 1187 struct sbp_softc *sc; 1188 1189 sdev = (struct sbp_dev *)xfer->sc; 1190 target = sdev->target; 1191 sc = target->sbp; 1192 1193 SBP_DEBUG(0) 1194 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1195 sdev->bustgtlun); 1196 END_DEBUG 1197 sbp_xfer_free(xfer); 1198 1199 sbp_scan_dev(sdev); 1200 return; 1201 } 1202 1203 static void 1204 sbp_agent_reset_callback(struct fw_xfer *xfer) 1205 { 1206 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1207 struct sbp_softc *sc = sdev->target->sbp; 1208 1209 SBP_DEBUG(1) 1210 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1211 sdev->bustgtlun); 1212 END_DEBUG 1213 if (xfer->resp != 0) 1214 aprint_error_dev(sc->sc_fd.dev, "%s:%s: resp=%d\n", __func__, 1215 sdev->bustgtlun, xfer->resp); 1216 1217 sbp_xfer_free(xfer); 1218 if (sdev->periph != NULL) { 1219 scsipi_periph_thaw(sdev->periph, sdev->freeze); 1220 scsipi_channel_thaw(&sc->sc_channel, 0); 1221 sdev->freeze = 0; 1222 } 1223 } 1224 1225 static void 1226 sbp_agent_reset(struct sbp_dev *sdev) 1227 { 1228 struct fw_xfer *xfer; 1229 struct fw_pkt *fp; 1230 1231 SBP_DEBUG(0) 1232 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1233 __func__, sdev->bustgtlun); 1234 END_DEBUG 1235 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x04); 1236 if (xfer == NULL) 1237 return; 1238 if (sdev->status == SBP_DEV_ATTACHED || sdev->status == SBP_DEV_PROBE) 1239 xfer->hand = sbp_agent_reset_callback; 1240 else 1241 xfer->hand = sbp_do_attach; 1242 fp = &xfer->send.hdr; 1243 fp->mode.wreqq.data = htonl(0xf); 1244 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1245 sbp_xfer_free(xfer); 1246 sbp_abort_all_ocbs(sdev, XS_RESET); 1247 } 1248 1249 static void 1250 sbp_busy_timeout_callback(struct fw_xfer *xfer) 1251 { 1252 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1253 1254 SBP_DEBUG(1) 1255 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1256 __func__, sdev->bustgtlun); 1257 END_DEBUG 1258 sbp_xfer_free(xfer); 1259 sbp_agent_reset(sdev); 1260 } 1261 1262 static void 1263 sbp_busy_timeout(struct sbp_dev *sdev) 1264 { 1265 struct fw_pkt *fp; 1266 struct fw_xfer *xfer; 1267 1268 SBP_DEBUG(0) 1269 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1270 __func__, sdev->bustgtlun); 
1271 END_DEBUG 1272 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0); 1273 if (xfer == NULL) 1274 return; 1275 xfer->hand = sbp_busy_timeout_callback; 1276 fp = &xfer->send.hdr; 1277 fp->mode.wreqq.dest_hi = 0xffff; 1278 fp->mode.wreqq.dest_lo = 0xf0000000 | BUSY_TIMEOUT; 1279 fp->mode.wreqq.data = htonl((1 << (13+12)) | 0xf); 1280 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1281 sbp_xfer_free(xfer); 1282 } 1283 1284 static void 1285 sbp_orb_pointer_callback(struct fw_xfer *xfer) 1286 { 1287 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1288 struct sbp_softc *sc = sdev->target->sbp; 1289 1290 SBP_DEBUG(1) 1291 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1292 sdev->bustgtlun); 1293 END_DEBUG 1294 if (xfer->resp != 0) 1295 aprint_error_dev(sc->sc_fd.dev, "%s:%s: xfer->resp = %d\n", 1296 __func__, sdev->bustgtlun, xfer->resp); 1297 sbp_xfer_free(xfer); 1298 sdev->flags &= ~ORB_POINTER_ACTIVE; 1299 1300 if ((sdev->flags & ORB_POINTER_NEED) != 0) { 1301 struct sbp_ocb *ocb; 1302 1303 sdev->flags &= ~ORB_POINTER_NEED; 1304 ocb = STAILQ_FIRST(&sdev->ocbs); 1305 if (ocb != NULL) 1306 sbp_orb_pointer(sdev, ocb); 1307 } 1308 return; 1309 } 1310 1311 static void 1312 sbp_orb_pointer(struct sbp_dev *sdev, struct sbp_ocb *ocb) 1313 { 1314 struct sbp_softc *sc = sdev->target->sbp; 1315 struct fw_xfer *xfer; 1316 struct fw_pkt *fp; 1317 1318 SBP_DEBUG(1) 1319 printf("%s:%s:%s: 0x%08x\n", device_xname(sc->sc_fd.dev), __func__, 1320 sdev->bustgtlun, (uint32_t)ocb->bus_addr); 1321 END_DEBUG 1322 1323 if ((sdev->flags & ORB_POINTER_ACTIVE) != 0) { 1324 SBP_DEBUG(0) 1325 printf("%s: orb pointer active\n", __func__); 1326 END_DEBUG 1327 sdev->flags |= ORB_POINTER_NEED; 1328 return; 1329 } 1330 1331 sdev->flags |= ORB_POINTER_ACTIVE; 1332 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0x08); 1333 if (xfer == NULL) 1334 return; 1335 xfer->hand = sbp_orb_pointer_callback; 1336 1337 fp = &xfer->send.hdr; 1338 fp->mode.wreqb.len = 8; 1339 fp->mode.wreqb.extcode = 0; 1340 xfer->send.payload[0] = 1341 htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16)); 1342 xfer->send.payload[1] = htonl((uint32_t)ocb->bus_addr); 1343 1344 if (fw_asyreq(xfer->fc, -1, xfer) != 0) { 1345 sbp_xfer_free(xfer); 1346 ocb->xs->error = XS_DRIVER_STUFFUP; 1347 scsipi_done(ocb->xs); 1348 } 1349 } 1350 1351 static void 1352 sbp_doorbell_callback(struct fw_xfer *xfer) 1353 { 1354 struct sbp_dev *sdev = (struct sbp_dev *)xfer->sc; 1355 struct sbp_softc *sc = sdev->target->sbp; 1356 1357 SBP_DEBUG(1) 1358 printf("%s:%s:%s\n", device_xname(sc->sc_fd.dev), __func__, 1359 sdev->bustgtlun); 1360 END_DEBUG 1361 if (xfer->resp != 0) { 1362 aprint_error_dev(sc->sc_fd.dev, "%s: xfer->resp = %d\n", 1363 __func__, xfer->resp); 1364 } 1365 sbp_xfer_free(xfer); 1366 sdev->flags &= ~ORB_DOORBELL_ACTIVE; 1367 if ((sdev->flags & ORB_DOORBELL_NEED) != 0) { 1368 sdev->flags &= ~ORB_DOORBELL_NEED; 1369 sbp_doorbell(sdev); 1370 } 1371 return; 1372 } 1373 1374 static void 1375 sbp_doorbell(struct sbp_dev *sdev) 1376 { 1377 struct fw_xfer *xfer; 1378 struct fw_pkt *fp; 1379 1380 SBP_DEBUG(1) 1381 printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1382 __func__, sdev->bustgtlun); 1383 END_DEBUG 1384 1385 if ((sdev->flags & ORB_DOORBELL_ACTIVE) != 0) { 1386 sdev->flags |= ORB_DOORBELL_NEED; 1387 return; 1388 } 1389 sdev->flags |= ORB_DOORBELL_ACTIVE; 1390 xfer = sbp_write_cmd(sdev, FWTCODE_WREQQ, 0x10); 1391 if (xfer == NULL) 1392 return; 1393 xfer->hand = sbp_doorbell_callback; 1394 fp = &xfer->send.hdr; 1395 fp->mode.wreqq.data = htonl(0xf); 1396 
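	/*
	 * Ringing the doorbell is a quadlet write to the DOORBELL register
	 * (offset 0x10 from the command block agent base, see the
	 * sbp_write_cmd() call above); per SBP-2 the value written is
	 * ignored and the fetch agent simply re-reads the next_ORB pointer
	 * of the ORB it processed last, picking up commands appended to the
	 * queue while it was busy.
	 */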
if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1397 sbp_xfer_free(xfer); 1398 } 1399 1400 static struct fw_xfer * 1401 sbp_write_cmd(struct sbp_dev *sdev, int tcode, int offset) 1402 { 1403 struct sbp_softc *sc; 1404 struct fw_xfer *xfer; 1405 struct fw_pkt *fp; 1406 struct sbp_target *target; 1407 int new = 0; 1408 1409 target = sdev->target; 1410 sc = target->sbp; 1411 mutex_enter(&sc->sc_mtx); 1412 xfer = STAILQ_FIRST(&target->xferlist); 1413 if (xfer == NULL) { 1414 if (target->n_xfer > 5 /* XXX */) { 1415 aprint_error_dev(sc->sc_fd.dev, 1416 "no more xfer for this target\n"); 1417 mutex_exit(&sc->sc_mtx); 1418 return NULL; 1419 } 1420 xfer = fw_xfer_alloc_buf(M_SBP, 8, 0); 1421 if (xfer == NULL) { 1422 aprint_error_dev(sc->sc_fd.dev, 1423 "fw_xfer_alloc_buf failed\n"); 1424 mutex_exit(&sc->sc_mtx); 1425 return NULL; 1426 } 1427 target->n_xfer++; 1428 SBP_DEBUG(0) 1429 printf("sbp: alloc %d xfer\n", target->n_xfer); 1430 END_DEBUG 1431 new = 1; 1432 } else 1433 STAILQ_REMOVE_HEAD(&target->xferlist, link); 1434 mutex_exit(&sc->sc_mtx); 1435 1436 microtime(&xfer->tv); 1437 1438 if (new) { 1439 xfer->recv.pay_len = 0; 1440 xfer->send.spd = min(target->fwdev->speed, max_speed); 1441 xfer->fc = target->sbp->sc_fd.fc; 1442 } 1443 1444 if (tcode == FWTCODE_WREQB) 1445 xfer->send.pay_len = 8; 1446 else 1447 xfer->send.pay_len = 0; 1448 1449 xfer->sc = (void *)sdev; 1450 fp = &xfer->send.hdr; 1451 fp->mode.wreqq.dest_hi = sdev->login->cmd_hi; 1452 fp->mode.wreqq.dest_lo = sdev->login->cmd_lo + offset; 1453 fp->mode.wreqq.tlrt = 0; 1454 fp->mode.wreqq.tcode = tcode; 1455 fp->mode.wreqq.pri = 0; 1456 fp->mode.wreqq.dst = FWLOCALBUS | target->fwdev->dst; 1457 1458 return xfer; 1459 } 1460 1461 static void 1462 sbp_mgm_orb(struct sbp_dev *sdev, int func, struct sbp_ocb *aocb) 1463 { 1464 struct fw_xfer *xfer; 1465 struct fw_pkt *fp; 1466 struct sbp_ocb *ocb; 1467 struct sbp_target *target; 1468 int nid, dv_unit; 1469 1470 target = sdev->target; 1471 nid = target->sbp->sc_fd.fc->nodeid | FWLOCALBUS; 1472 dv_unit = device_unit(target->sbp->sc_fd.dev); 1473 1474 mutex_enter(&target->sbp->sc_mtx); 1475 if (func == ORB_FUN_RUNQUEUE) { 1476 ocb = STAILQ_FIRST(&target->mgm_ocb_queue); 1477 if (target->mgm_ocb_cur != NULL || ocb == NULL) { 1478 mutex_exit(&target->sbp->sc_mtx); 1479 return; 1480 } 1481 STAILQ_REMOVE_HEAD(&target->mgm_ocb_queue, ocb); 1482 mutex_exit(&target->sbp->sc_mtx); 1483 goto start; 1484 } 1485 if ((ocb = sbp_get_ocb(sdev)) == NULL) { 1486 mutex_exit(&target->sbp->sc_mtx); 1487 /* XXX */ 1488 return; 1489 } 1490 mutex_exit(&target->sbp->sc_mtx); 1491 ocb->flags = OCB_ACT_MGM; 1492 ocb->sdev = sdev; 1493 1494 memset(ocb->orb, 0, sizeof(ocb->orb)); 1495 ocb->orb[6] = htonl((nid << 16) | SBP_BIND_HI); 1496 ocb->orb[7] = htonl(SBP_DEV2ADDR(dv_unit, sdev->lun_id)); 1497 1498 SBP_DEBUG(0) 1499 printf("%s:%s:%s: %s\n", device_xname(sdev->target->sbp->sc_fd.dev), 1500 __func__, sdev->bustgtlun, orb_fun_name[(func>>16)&0xf]); 1501 END_DEBUG 1502 switch (func) { 1503 case ORB_FUN_LGI: 1504 { 1505 const off_t sbp_login_off = 1506 sizeof(struct sbp_ocb) * SBP_QUEUE_LEN; 1507 1508 ocb->orb[0] = ocb->orb[1] = 0; /* password */ 1509 ocb->orb[2] = htonl(nid << 16); 1510 ocb->orb[3] = htonl(sdev->dma.bus_addr + sbp_login_off); 1511 ocb->orb[4] = htonl(ORB_NOTIFY | sdev->lun_id); 1512 if (ex_login) 1513 ocb->orb[4] |= htonl(ORB_EXV); 1514 ocb->orb[5] = htonl(SBP_LOGIN_SIZE); 1515 bus_dmamap_sync(sdev->dma.dma_tag, sdev->dma.dma_map, 1516 sbp_login_off, SBP_LOGIN_SIZE, BUS_DMASYNC_PREREAD); 1517 break; 1518 } 
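	/*
	 * Layout of the login ORB assembled above (big endian on the wire):
	 *   orb[0..1]  password (unused here, zero)
	 *   orb[2..3]  address of the login response buffer
	 *              (this node's ID << 16, DMA area past the OCB array)
	 *   orb[4]     ORB_NOTIFY | optional ORB_EXV | lun
	 *   orb[5]     login response length (SBP_LOGIN_SIZE)
	 *   orb[6..7]  status FIFO address, set for every management ORB
	 * The target writes a struct sbp_login_res into the response buffer
	 * and posts completion status to the FIFO handled by sbp_recv().
	 */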
1519 1520 case ORB_FUN_ATA: 1521 ocb->orb[0] = htonl((0 << 16) | 0); 1522 ocb->orb[1] = htonl(aocb->bus_addr & 0xffffffff); 1523 /* fall through */ 1524 case ORB_FUN_RCN: 1525 case ORB_FUN_LGO: 1526 case ORB_FUN_LUR: 1527 case ORB_FUN_RST: 1528 case ORB_FUN_ATS: 1529 ocb->orb[4] = htonl(ORB_NOTIFY | func | sdev->login->id); 1530 break; 1531 } 1532 1533 if (target->mgm_ocb_cur != NULL) { 1534 /* there is a standing ORB */ 1535 mutex_enter(&target->sbp->sc_mtx); 1536 STAILQ_INSERT_TAIL(&sdev->target->mgm_ocb_queue, ocb, ocb); 1537 mutex_exit(&target->sbp->sc_mtx); 1538 return; 1539 } 1540 start: 1541 target->mgm_ocb_cur = ocb; 1542 1543 callout_reset(&target->mgm_ocb_timeout, 5 * hz, sbp_mgm_timeout, ocb); 1544 xfer = sbp_write_cmd(sdev, FWTCODE_WREQB, 0); 1545 if (xfer == NULL) 1546 return; 1547 xfer->hand = sbp_mgm_callback; 1548 1549 fp = &xfer->send.hdr; 1550 fp->mode.wreqb.dest_hi = sdev->target->mgm_hi; 1551 fp->mode.wreqb.dest_lo = sdev->target->mgm_lo; 1552 fp->mode.wreqb.len = 8; 1553 fp->mode.wreqb.extcode = 0; 1554 xfer->send.payload[0] = htonl(nid << 16); 1555 xfer->send.payload[1] = htonl(ocb->bus_addr & 0xffffffff); 1556 1557 /* cache writeback & invalidate(required ORB_FUN_LGI func) */ 1558 /* when abort_ocb, should sync POST ope ? */ 1559 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE); 1560 if (fw_asyreq(xfer->fc, -1, xfer) != 0) 1561 sbp_xfer_free(xfer); 1562 } 1563 1564 static void 1565 sbp_print_scsi_cmd(struct sbp_ocb *ocb) 1566 { 1567 struct scsipi_xfer *xs = ocb->xs; 1568 1569 printf("%s:%d:%d:" 1570 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x," 1571 " flags: 0x%02x, %db cmd/%db data\n", 1572 device_xname(ocb->sdev->target->sbp->sc_fd.dev), 1573 xs->xs_periph->periph_target, 1574 xs->xs_periph->periph_lun, 1575 xs->cmd->opcode, 1576 xs->cmd->bytes[0], xs->cmd->bytes[1], 1577 xs->cmd->bytes[2], xs->cmd->bytes[3], 1578 xs->cmd->bytes[4], xs->cmd->bytes[5], 1579 xs->cmd->bytes[6], xs->cmd->bytes[7], 1580 xs->cmd->bytes[8], 1581 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT), 1582 xs->cmdlen, xs->datalen); 1583 } 1584 1585 static void 1586 sbp_scsi_status(struct sbp_status *sbp_status, struct sbp_ocb *ocb) 1587 { 1588 struct sbp_cmd_status *sbp_cmd_status; 1589 struct scsi_sense_data *sense = &ocb->xs->sense.scsi_sense; 1590 1591 sbp_cmd_status = (struct sbp_cmd_status *)sbp_status->data; 1592 1593 SBP_DEBUG(0) 1594 sbp_print_scsi_cmd(ocb); 1595 /* XXX need decode status */ 1596 printf("%s:" 1597 " SCSI status %x sfmt %x valid %x key %x code %x qlfr %x len %d\n", 1598 ocb->sdev->bustgtlun, 1599 sbp_cmd_status->status, 1600 sbp_cmd_status->sfmt, 1601 sbp_cmd_status->valid, 1602 sbp_cmd_status->s_key, 1603 sbp_cmd_status->s_code, 1604 sbp_cmd_status->s_qlfr, 1605 sbp_status->len); 1606 END_DEBUG 1607 1608 switch (sbp_cmd_status->status) { 1609 case SCSI_CHECK: 1610 case SCSI_BUSY: 1611 case SCSI_TERMINATED: 1612 if (sbp_cmd_status->sfmt == SBP_SFMT_CURR) 1613 sense->response_code = SSD_RCODE_CURRENT; 1614 else 1615 sense->response_code = SSD_RCODE_DEFERRED; 1616 if (sbp_cmd_status->valid) 1617 sense->response_code |= SSD_RCODE_VALID; 1618 sense->flags = sbp_cmd_status->s_key; 1619 if (sbp_cmd_status->mark) 1620 sense->flags |= SSD_FILEMARK; 1621 if (sbp_cmd_status->eom) 1622 sense->flags |= SSD_EOM; 1623 if (sbp_cmd_status->ill_len) 1624 sense->flags |= SSD_ILI; 1625 1626 memcpy(sense->info, &sbp_cmd_status->info, 4); 1627 1628 if (sbp_status->len <= 1) 1629 /* XXX not scsi status. 
shouldn't happen */
			sense->extra_len = 0;
		else if (sbp_status->len <= 4)
			/* add_sense_code(_qual), info, cmd_spec_info */
			sense->extra_len = 6;
		else
			/* fru, sense_key_spec */
			sense->extra_len = 10;

		memcpy(sense->csi, &sbp_cmd_status->cdb, 4);

		sense->asc = sbp_cmd_status->s_code;
		sense->ascq = sbp_cmd_status->s_qlfr;
		sense->fru = sbp_cmd_status->fru;

		memcpy(sense->sks.sks_bytes, sbp_cmd_status->s_keydep, 3);
		ocb->xs->error = XS_SENSE;
		ocb->xs->xs_status = sbp_cmd_status->status;
/*
	{
		uint8_t j, *tmp;
		tmp = sense;
		for (j = 0; j < 32; j += 8)
			aprint_normal(
			    "sense %02x%02x %02x%02x %02x%02x %02x%02x\n",
			    tmp[j], tmp[j+1], tmp[j+2], tmp[j+3],
			    tmp[j+4], tmp[j+5], tmp[j+6], tmp[j+7]);

	}
*/
		break;
	default:
		aprint_error_dev(ocb->sdev->target->sbp->sc_fd.dev,
		    "%s:%s: unknown scsi status 0x%x\n",
		    __func__, ocb->sdev->bustgtlun, sbp_cmd_status->status);
	}
}

static void
sbp_fix_inq_data(struct sbp_ocb *ocb)
{
	struct scsipi_xfer *xs = ocb->xs;
	struct sbp_dev *sdev;
	struct scsipi_inquiry_data *inq =
	    (struct scsipi_inquiry_data *)xs->data;

	sdev = ocb->sdev;

#if 0
	/*
	 * NetBSD always assumes 0 for the EVPD bit and 'Page Code'.
	 */
#define SI_EVPD 0x01
	if (xs->cmd->bytes[0] & SI_EVPD)
		return;
#endif
	SBP_DEBUG(1)
	printf("%s:%s:%s\n", device_xname(sdev->target->sbp->sc_fd.dev),
	    __func__, sdev->bustgtlun);
	END_DEBUG
	switch (inq->device & SID_TYPE) {
	case T_DIRECT:
#if 0
		/*
		 * XXX Convert Direct Access device to RBC.
		 * I've never seen FireWire DA devices which support READ_6.
		 */
		if ((inq->device & SID_TYPE) == T_DIRECT)
			inq->device |= T_SIMPLE_DIRECT;	/* T_DIRECT == 0 */
#endif
		/* FALLTHROUGH */

	case T_SIMPLE_DIRECT:
		/*
		 * Override vendor/product/revision information.
		 * Some devices sometimes return strange strings.
		 */
#if 1
		memcpy(inq->vendor, sdev->vendor, sizeof(inq->vendor));
		memcpy(inq->product, sdev->product, sizeof(inq->product));
		memcpy(inq->revision + 2, sdev->revision,
		    sizeof(inq->revision));
#endif
		break;
	}
	/*
	 * Force tagged queuing to be enabled or disabled.
	 * XXX CAM also checks SCP_QUEUE_DQUE flag in the control mode page.
	 */
	if (sbp_tags > 0)
		inq->flags3 |= SID_CmdQue;
	else if (sbp_tags < 0)
		inq->flags3 &= ~SID_CmdQue;

}

static void
sbp_recv(struct fw_xfer *xfer)
{
	struct fw_pkt *rfp;
#if NEED_RESPONSE
	struct fw_pkt *sfp;
#endif
	struct sbp_softc *sc;
	struct sbp_dev *sdev;
	struct sbp_ocb *ocb;
	struct sbp_login_res *login_res = NULL;
	struct sbp_status *sbp_status;
	struct sbp_target *target;
	int orb_fun, status_valid0, status_valid, l, reset_agent = 0;
	uint32_t addr;
/*
	uint32_t *ld;
	ld = xfer->recv.buf;
	printf("sbp %x %d %d %08x %08x %08x %08x\n",
	    xfer->resp, xfer->recv.len, xfer->recv.off, ntohl(ld[0]), ntohl(ld[1]), ntohl(ld[2]), ntohl(ld[3]));
	printf("sbp %08x %08x %08x %08x\n", ntohl(ld[4]), ntohl(ld[5]), ntohl(ld[6]), ntohl(ld[7]));
	printf("sbp %08x %08x %08x %08x\n", ntohl(ld[8]), ntohl(ld[9]), ntohl(ld[10]), ntohl(ld[11]));
*/

	sc = (struct sbp_softc *)xfer->sc;
	if (xfer->resp != 0) {
		aprint_error_dev(sc->sc_fd.dev,
		    "sbp_recv: xfer->resp = %d\n", xfer->resp);
		goto done0;
	}
	if (xfer->recv.payload == NULL) {
		aprint_error_dev(sc->sc_fd.dev,
		    "sbp_recv: xfer->recv.payload == NULL\n");
		goto done0;
	}
	rfp = &xfer->recv.hdr;
	if (rfp->mode.wreqb.tcode != FWTCODE_WREQB) {
		aprint_error_dev(sc->sc_fd.dev,
		    "sbp_recv: tcode = %d\n", rfp->mode.wreqb.tcode);
		goto done0;
	}
	sbp_status = (struct sbp_status *)xfer->recv.payload;
	addr = rfp->mode.wreqb.dest_lo;
	SBP_DEBUG(2)
	printf("received address 0x%x\n", addr);
	END_DEBUG
	target = &sc->sc_target;
	l = SBP_ADDR2LUN(addr);
	if (l >= target->num_lun || target->luns[l] == NULL) {
		aprint_error_dev(sc->sc_fd.dev,
		    "sbp_recv: invalid lun %d (target=%d)\n",
		    l, target->target_id);
		goto done0;
	}
	sdev = target->luns[l];

	ocb = NULL;
	switch (sbp_status->src) {
	case SRC_NEXT_EXISTS:
	case SRC_NO_NEXT:
		/* check mgm_ocb_cur first */
		ocb = target->mgm_ocb_cur;
		if (ocb != NULL)
			if (OCB_MATCH(ocb, sbp_status)) {
				callout_stop(&target->mgm_ocb_timeout);
				target->mgm_ocb_cur = NULL;
				break;
			}
		ocb = sbp_dequeue_ocb(sdev, sbp_status);
		if (ocb == NULL)
			aprint_error_dev(sc->sc_fd.dev,
			    "%s:%s: No ocb(%x) on the queue\n", __func__,
			    sdev->bustgtlun, ntohl(sbp_status->orb_lo));
		break;
	case SRC_UNSOL:
		/* unsolicited */
		aprint_error_dev(sc->sc_fd.dev,
		    "%s:%s: unsolicited status received\n",
		    __func__, sdev->bustgtlun);
		break;
	default:
		aprint_error_dev(sc->sc_fd.dev,
		    "%s:%s: unknown sbp_status->src\n",
		    __func__, sdev->bustgtlun);
	}

	status_valid0 = (sbp_status->src < 2
	    && sbp_status->resp == SBP_REQ_CMP
	    && sbp_status->dead == 0);
	status_valid = (status_valid0 && sbp_status->status == 0);

	if (!status_valid0 || debug > 2) {
		int status;
		SBP_DEBUG(0)
		printf("%s:%s:%s: ORB status src:%x resp:%x dead:%x"
		    " len:%x stat:%x orb:%x%08x\n",
		    device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
		    sbp_status->src, sbp_status->resp, sbp_status->dead,
		    sbp_status->len, sbp_status->status,
		    ntohs(sbp_status->orb_hi), ntohl(sbp_status->orb_lo));
		END_DEBUG
		printf("%s:%s\n", device_xname(sc->sc_fd.dev), sdev->bustgtlun);
		status = sbp_status->status;
		switch (sbp_status->resp) {
		case SBP_REQ_CMP:
			if (status > MAX_ORB_STATUS0)
				printf("%s\n", orb_status0[MAX_ORB_STATUS0]);
			else
				printf("%s\n", orb_status0[status]);
			break;
		case SBP_TRANS_FAIL:
			printf("Obj: %s, Error: %s\n",
			    orb_status1_object[(status >> 6) & 3],
			    orb_status1_serial_bus_error[status & 0xf]);
			break;
		case SBP_ILLE_REQ:
			printf("Illegal request\n");
			break;
		case SBP_VEND_DEP:
			printf("Vendor dependent\n");
			break;
		default:
			printf("unknown response code %d\n", sbp_status->resp);
		}
	}

	/* we have to reset the fetch agent if it's dead */
	if (sbp_status->dead) {
		if (sdev->periph != NULL) {
			scsipi_periph_freeze(sdev->periph, 1);
			sdev->freeze++;
		}
		reset_agent = 1;
	}

	if (ocb == NULL)
		goto done;

	switch (ntohl(ocb->orb[4]) & ORB_FMT_MSK) {
	case ORB_FMT_NOP:
		break;
	case ORB_FMT_VED:
		break;
	case ORB_FMT_STD:
		switch (ocb->flags) {
		case OCB_ACT_MGM:
			orb_fun = ntohl(ocb->orb[4]) & ORB_FUN_MSK;
			reset_agent = 0;
			switch (orb_fun) {
			case ORB_FUN_LGI:
			{
				const struct fwdma_alloc *dma = &sdev->dma;
				const off_t sbp_login_off =
				    sizeof(struct sbp_ocb) * SBP_QUEUE_LEN;

				bus_dmamap_sync(dma->dma_tag, dma->dma_map,
				    sbp_login_off, SBP_LOGIN_SIZE,
				    BUS_DMASYNC_POSTREAD);
				login_res = sdev->login;
				login_res->len = ntohs(login_res->len);
				login_res->id = ntohs(login_res->id);
				login_res->cmd_hi = ntohs(login_res->cmd_hi);
				login_res->cmd_lo = ntohl(login_res->cmd_lo);
				if (status_valid) {
					SBP_DEBUG(0)
					printf("%s:%s:%s: login:"
					    " len %d, ID %d, cmd %08x%08x,"
					    " recon_hold %d\n",
					    device_xname(sc->sc_fd.dev),
					    __func__, sdev->bustgtlun,
					    login_res->len, login_res->id,
					    login_res->cmd_hi,
					    login_res->cmd_lo,
					    ntohs(login_res->recon_hold));
					END_DEBUG
					sbp_busy_timeout(sdev);
				} else {
					/* forgot logout? */
					aprint_error_dev(sc->sc_fd.dev,
					    "%s:%s: login failed\n",
					    __func__, sdev->bustgtlun);
					sdev->status = SBP_DEV_RESET;
				}
				break;
			}
			case ORB_FUN_RCN:
				login_res = sdev->login;
				if (status_valid) {
					SBP_DEBUG(0)
					printf("%s:%s:%s: reconnect:"
					    " len %d, ID %d, cmd %08x%08x\n",
					    device_xname(sc->sc_fd.dev),
					    __func__, sdev->bustgtlun,
					    login_res->len, login_res->id,
					    login_res->cmd_hi,
					    login_res->cmd_lo);
					END_DEBUG
					sbp_agent_reset(sdev);
				} else {
					/* reconnection hold time exceeded?
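					 * If so, our previous login is no
					 * longer valid; fall back to a fresh
					 * login below.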
					 */
					SBP_DEBUG(0)
					aprint_error_dev(sc->sc_fd.dev,
					    "%s:%s: reconnect failed\n",
					    __func__, sdev->bustgtlun);
					END_DEBUG
					sbp_login(sdev);
				}
				break;
			case ORB_FUN_LGO:
				sdev->status = SBP_DEV_RESET;
				break;
			case ORB_FUN_RST:
				sbp_busy_timeout(sdev);
				break;
			case ORB_FUN_LUR:
			case ORB_FUN_ATA:
			case ORB_FUN_ATS:
				sbp_agent_reset(sdev);
				break;
			default:
				aprint_error_dev(sc->sc_fd.dev,
				    "%s:%s: unknown function %d\n",
				    __func__, sdev->bustgtlun, orb_fun);
				break;
			}
			sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
			break;
		case OCB_ACT_CMD:
			sdev->timeout = 0;
			if (ocb->xs != NULL) {
				struct scsipi_xfer *xs = ocb->xs;

				if (sbp_status->len > 1)
					sbp_scsi_status(sbp_status, ocb);
				else
					if (sbp_status->resp != SBP_REQ_CMP)
						xs->error = XS_DRIVER_STUFFUP;
					else {
						xs->error = XS_NOERROR;
						xs->resid = 0;
					}
				/* fix up inq data */
				if (xs->cmd->opcode == INQUIRY)
					sbp_fix_inq_data(ocb);
				scsipi_done(xs);
			}
			break;
		default:
			break;
		}
	}

	if (!use_doorbell)
		sbp_free_ocb(sdev, ocb);
done:
	if (reset_agent)
		sbp_agent_reset(sdev);

done0:
	xfer->recv.pay_len = SBP_RECV_LEN;
	/*
	 * The received packet is usually small enough to be stored within
	 * the buffer.  In that case, the controller returns ack_complete and
	 * no response is necessary.
	 *
	 * XXX fwohci.c and firewire.c should inform the upper driver of the
	 * event_code, such as ack_complete or ack_pending.
	 */
#if NEED_RESPONSE
	xfer->send.off = 0;
	sfp = (struct fw_pkt *)xfer->send.buf;
	sfp->mode.wres.dst = rfp->mode.wreqb.src;
	xfer->dst = sfp->mode.wres.dst;
	xfer->spd = min(sdev->target->fwdev->speed, max_speed);
	xfer->hand = sbp_loginres_callback;

	sfp->mode.wres.tlrt = rfp->mode.wreqb.tlrt;
	sfp->mode.wres.tcode = FWTCODE_WRES;
	sfp->mode.wres.rtcode = 0;
	sfp->mode.wres.pri = 0;

	if (fw_asyreq(xfer->fc, -1, xfer) != 0) {
		aprint_error_dev(sc->sc_fd.dev, "mgm_orb failed\n");
		mutex_enter(&sc->sc_fwb.fwb_mtx);
		STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
		mutex_exit(&sc->sc_fwb.fwb_mtx);
	}
#else
	/* recycle */
	mutex_enter(&sc->sc_fwb.fwb_mtx);
	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
	mutex_exit(&sc->sc_fwb.fwb_mtx);
#endif

	return;

}

static int
sbp_logout_all(struct sbp_softc *sbp)
{
	struct sbp_target *target;
	struct sbp_dev *sdev;
	int i;

	SBP_DEBUG(0)
	printf("sbp_logout_all\n");
	END_DEBUG
	target = &sbp->sc_target;
	if (target->luns != NULL) {
		for (i = 0; i < target->num_lun; i++) {
			sdev = target->luns[i];
			if (sdev == NULL)
				continue;
			callout_stop(&sdev->login_callout);
			if (sdev->status >= SBP_DEV_TOATTACH &&
			    sdev->status <= SBP_DEV_ATTACHED)
				sbp_mgm_orb(sdev, ORB_FUN_LGO, NULL);
		}
	}

	return 0;
}

static void
sbp_free_sdev(struct sbp_dev *sdev)
{
	struct sbp_softc *sc;
	int i;

	if (sdev == NULL)
		return;
	sc = sdev->target->sbp;
	for (i = 0; i < SBP_QUEUE_LEN; i++)
		bus_dmamap_destroy(sc->sc_dmat, sdev->ocb[i].dmamap);
	fwdma_free(sdev->dma.dma_tag, sdev->dma.dma_map, sdev->dma.v_addr);
	free(sdev, M_SBP);
	sdev = NULL;
}

static void
2063 static void
2064 sbp_free_target(struct sbp_target *target)
2065 {
2066 struct fw_xfer *xfer, *next;
2067 int i;
2068
2069 if (target->luns == NULL)
2070 return;
2071 callout_stop(&target->mgm_ocb_timeout);
2072 for (i = 0; i < target->num_lun; i++)
2073 sbp_free_sdev(target->luns[i]);
2074
2075 for (xfer = STAILQ_FIRST(&target->xferlist);
2076 xfer != NULL; xfer = next) {
2077 next = STAILQ_NEXT(xfer, link);
2078 fw_xfer_free_buf(xfer);
2079 }
2080 STAILQ_INIT(&target->xferlist);
2081 free(target->luns, M_SBP);
2082 target->num_lun = 0;
2083 target->luns = NULL;
2084 target->fwdev = NULL;
2085 }
2086
2087 static void
2088 sbp_scsipi_detach_sdev(struct sbp_dev *sdev)
2089 {
2090 struct sbp_target *target;
2091 struct sbp_softc *sbp;
2092
2093 if (sdev == NULL)
2094 return;
2095
2096 target = sdev->target;
2097 if (target == NULL)
2098 return;
2099
2100 sbp = target->sbp;
2101
2102 if (sdev->status == SBP_DEV_DEAD)
2103 return;
2104 if (sdev->status == SBP_DEV_RESET)
2105 return;
2106 if (sdev->periph != NULL) {
2107 scsipi_periph_thaw(sdev->periph, sdev->freeze);
2108 scsipi_channel_thaw(&sbp->sc_channel, 0); /* XXXX */
2109 sdev->freeze = 0;
2110 if (scsipi_target_detach(&sbp->sc_channel,
2111 target->target_id, sdev->lun_id, DETACH_FORCE) != 0) {
2112 aprint_error_dev(sbp->sc_fd.dev, "detach failed\n");
2113 }
2114 sdev->periph = NULL;
2115 }
2116 sbp_abort_all_ocbs(sdev, XS_DRIVER_STUFFUP);
2117 }
2118
2119 static void
2120 sbp_scsipi_detach_target(struct sbp_target *target)
2121 {
2122 struct sbp_softc *sbp = target->sbp;
2123 int i;
2124
2125 if (target->luns != NULL) {
2126 SBP_DEBUG(0)
2127 printf("sbp_detach_target %d\n", target->target_id);
2128 END_DEBUG
2129 for (i = 0; i < target->num_lun; i++)
2130 sbp_scsipi_detach_sdev(target->luns[i]);
2131 if (config_detach(sbp->sc_bus, DETACH_FORCE) != 0)
2132 aprint_error_dev(sbp->sc_fd.dev, "%d detach failed\n",
2133 target->target_id);
2134 sbp->sc_bus = NULL;
2135 }
2136 }
2137
2138 static void
2139 sbp_target_reset(struct sbp_dev *sdev, int method)
2140 {
2141 struct sbp_target *target = sdev->target;
2142 struct sbp_dev *tsdev;
2143 int i;
2144
2145 for (i = 0; i < target->num_lun; i++) {
2146 tsdev = target->luns[i];
2147 if (tsdev == NULL)
2148 continue;
2149 if (tsdev->status == SBP_DEV_DEAD)
2150 continue;
2151 if (tsdev->status == SBP_DEV_RESET)
2152 continue;
2153 if (tsdev->periph != NULL) {
2154 scsipi_periph_freeze(tsdev->periph, 1);
2155 tsdev->freeze++;
2156 }
2157 sbp_abort_all_ocbs(tsdev, XS_TIMEOUT);
2158 if (method == 2)
2159 tsdev->status = SBP_DEV_LOGIN;
2160 }
2161 switch (method) {
2162 case 1:
2163 aprint_error("target reset\n");
2164 sbp_mgm_orb(sdev, ORB_FUN_RST, NULL);
2165 break;
2166 case 2:
2167 aprint_error("reset start\n");
2168 sbp_reset_start(sdev);
2169 break;
2170 }
2171 }
2172
2173 static void
2174 sbp_mgm_timeout(void *arg)
2175 {
2176 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2177 struct sbp_dev *sdev = ocb->sdev;
2178 struct sbp_target *target = sdev->target;
2179
2180 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2181 "%s:%s: request timeout(mgm orb:0x%08x) ... ",
2182 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2183 target->mgm_ocb_cur = NULL;
2184 sbp_free_ocb(sdev, ocb);
2185 #if 0
2186 /* XXX */
2187 aprint_error("run next request\n");
2188 sbp_mgm_orb(sdev, ORB_FUN_RUNQUEUE, NULL);
2189 #endif
2190 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2191 "%s:%s: reset start\n", __func__, sdev->bustgtlun);
2192 sbp_reset_start(sdev);
2193 }
2194
2195 static void
2196 sbp_timeout(void *arg)
2197 {
2198 struct sbp_ocb *ocb = (struct sbp_ocb *)arg;
2199 struct sbp_dev *sdev = ocb->sdev;
2200
2201 aprint_error_dev(sdev->target->sbp->sc_fd.dev,
2202 "%s:%s: request timeout(cmd orb:0x%08x) ... ",
2203 __func__, sdev->bustgtlun, (uint32_t)ocb->bus_addr);
2204
2205 sdev->timeout++;
2206 switch (sdev->timeout) {
2207 case 1:
2208 aprint_error("agent reset\n");
2209 if (sdev->periph != NULL) {
2210 scsipi_periph_freeze(sdev->periph, 1);
2211 sdev->freeze++;
2212 }
2213 sbp_abort_all_ocbs(sdev, XS_TIMEOUT);
2214 sbp_agent_reset(sdev);
2215 break;
2216 case 2:
2217 case 3:
2218 sbp_target_reset(sdev, sdev->timeout - 1);
2219 break;
2220 default:
2221 aprint_error("\n");
2222 #if 0
2223 /* XXX give up */
2224 sbp_scsipi_detach_target(target);
2225 if (target->luns != NULL)
2226 free(target->luns, M_SBP);
2227 target->num_lun = 0;
2228 target->luns = NULL;
2229 target->fwdev = NULL;
2230 #endif
2231 }
2232 }
2233
2234 static void
2235 sbp_action1(struct sbp_softc *sc, struct scsipi_xfer *xs)
2236 {
2237 struct sbp_target *target = &sc->sc_target;
2238 struct sbp_dev *sdev = NULL;
2239 struct sbp_ocb *ocb;
2240 int speed, flag, error;
2241 void *cdb;
2242
2243 /* target:lun -> sdev mapping */
2244 if (target->fwdev != NULL &&
2245 xs->xs_periph->periph_lun < target->num_lun) {
2246 sdev = target->luns[xs->xs_periph->periph_lun];
2247 if (sdev != NULL && sdev->status != SBP_DEV_ATTACHED &&
2248 sdev->status != SBP_DEV_PROBE)
2249 sdev = NULL;
2250 }
2251
2252 if (sdev == NULL) {
2253 SBP_DEBUG(1)
2254 printf("%s:%d:%d: Invalid target (target needed)\n",
2255 sc ? device_xname(sc->sc_fd.dev) : "???",
2256 xs->xs_periph->periph_target,
2257 xs->xs_periph->periph_lun);
2258 END_DEBUG
2259
2260 xs->error = XS_DRIVER_STUFFUP;
2261 scsipi_done(xs);
2262 return;
2263 }
2264
2265 SBP_DEBUG(2)
2266 printf("%s:%d:%d:"
2267 " cmd: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x,"
2268 " flags: 0x%02x, %db cmd/%db data\n",
2269 device_xname(sc->sc_fd.dev),
2270 xs->xs_periph->periph_target,
2271 xs->xs_periph->periph_lun,
2272 xs->cmd->opcode,
2273 xs->cmd->bytes[0], xs->cmd->bytes[1],
2274 xs->cmd->bytes[2], xs->cmd->bytes[3],
2275 xs->cmd->bytes[4], xs->cmd->bytes[5],
2276 xs->cmd->bytes[6], xs->cmd->bytes[7],
2277 xs->cmd->bytes[8],
2278 xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT),
2279 xs->cmdlen, xs->datalen);
2280 END_DEBUG
2281 mutex_enter(&sc->sc_mtx);
2282 ocb = sbp_get_ocb(sdev);
2283 mutex_exit(&sc->sc_mtx);
2284 if (ocb == NULL) {
2285 xs->error = XS_REQUEUE;
2286 if (sdev->freeze == 0) {
2287 scsipi_periph_freeze(sdev->periph, 1);
2288 sdev->freeze++;
2289 }
2290 scsipi_done(xs);
2291 return;
2292 }
2293
2294 ocb->flags = OCB_ACT_CMD;
2295 ocb->sdev = sdev;
2296 ocb->xs = xs;
2297 ocb->orb[0] = htonl(1 << 31);
2298 ocb->orb[1] = 0;
2299 ocb->orb[2] = htonl(((sc->sc_fd.fc->nodeid | FWLOCALBUS) << 16));
2300 ocb->orb[3] = htonl(ocb->bus_addr + IND_PTR_OFFSET);
2301 speed = min(target->fwdev->speed, max_speed);
2302 ocb->orb[4] =
2303 htonl(ORB_NOTIFY | ORB_CMD_SPD(speed) | ORB_CMD_MAXP(speed + 7));
2304 if ((xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT)) ==
2305 XS_CTL_DATA_IN) {
2306 ocb->orb[4] |= htonl(ORB_CMD_IN);
2307 flag = BUS_DMA_READ;
2308 } else
2309 flag = BUS_DMA_WRITE;
2310
2311 cdb = xs->cmd;
2312 memcpy((void *)&ocb->orb[5], cdb, xs->cmdlen);
2313 /*
2314 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[0]), ntohl(ocb->orb[1]), ntohl(ocb->orb[2]), ntohl(ocb->orb[3]));
2315 printf("ORB %08x %08x %08x %08x\n", ntohl(ocb->orb[4]), ntohl(ocb->orb[5]), ntohl(ocb->orb[6]), ntohl(ocb->orb[7]));
2316 */
2317 if (xs->datalen > 0) {
2318 error = bus_dmamap_load(sc->sc_dmat, ocb->dmamap,
2319 xs->data, xs->datalen, NULL, BUS_DMA_NOWAIT | flag);
2320 if (error) {
2321 aprint_error_dev(sc->sc_fd.dev,
2322 "DMA map load error %d\n", error);
2323 xs->error = XS_DRIVER_STUFFUP;
2324 scsipi_done(xs);
2325 } else
2326 sbp_execute_ocb(ocb, ocb->dmamap->dm_segs,
2327 ocb->dmamap->dm_nsegs);
2328 } else
2329 sbp_execute_ocb(ocb, NULL, 0);
2330
2331 return;
2332 }
2333
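/*
 * Attach the data segments to an ORB and pass it to the fetch agent.
 * A single DMA segment is described directly in orb[3]/orb[4]; more
 * than one segment is described through an unrestricted page table,
 * one 8-byte element per segment with the segment length in the upper
 * 16 bits of the first quadlet and the 32-bit bus address in the
 * second, in which case orb[4] carries ORB_CMD_PTBL and the element
 * count instead of a byte count.
 */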
2334 static void
2335 sbp_execute_ocb(struct sbp_ocb *ocb, bus_dma_segment_t *segments, int seg)
2336 {
2337 struct sbp_ocb *prev;
2338 bus_dma_segment_t *s;
2339 int i;
2340
2341 SBP_DEBUG(2)
2342 printf("sbp_execute_ocb: seg %d", seg);
2343 for (i = 0; i < seg; i++)
2344 printf(", %jx:%jd", (uintmax_t)segments[i].ds_addr,
2345 (uintmax_t)segments[i].ds_len);
2346 printf("\n");
2347 END_DEBUG
2348
2349 if (seg == 1) {
2350 /* direct pointer */
2351 s = segments;
2352 if (s->ds_len > SBP_SEG_MAX)
2353 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2354 ocb->orb[3] = htonl(s->ds_addr);
2355 ocb->orb[4] |= htonl(s->ds_len);
2356 } else if (seg > 1) {
2357 /* page table */
2358 for (i = 0; i < seg; i++) {
2359 s = &segments[i];
2360 SBP_DEBUG(0)
2361 /* XXX LSI Logic "< 16 byte" bug might be hit */
2362 if (s->ds_len < 16)
2363 printf("sbp_execute_ocb: warning, "
2364 "segment length(%jd) is less than 16."
2365 "(seg=%d/%d)\n",
2366 (uintmax_t)s->ds_len, i + 1, seg);
2367 END_DEBUG
2368 if (s->ds_len > SBP_SEG_MAX)
2369 panic("ds_len > SBP_SEG_MAX, fix busdma code");
2370 ocb->ind_ptr[i].hi = htonl(s->ds_len << 16);
2371 ocb->ind_ptr[i].lo = htonl(s->ds_addr);
2372 }
2373 ocb->orb[4] |= htonl(ORB_CMD_PTBL | seg);
2374 }
2375
2376 if (seg > 0) {
2377 struct sbp_softc *sc = ocb->sdev->target->sbp;
2378 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2379 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
2380
2381 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2382 0, ocb->dmamap->dm_mapsize, flag);
2383 }
2384 prev = sbp_enqueue_ocb(ocb->sdev, ocb);
2385 SBP_ORB_DMA_SYNC(ocb->sdev->dma, ocb->index, BUS_DMASYNC_PREWRITE);
2386 if (use_doorbell) {
2387 if (prev == NULL) {
2388 if (ocb->sdev->last_ocb != NULL)
2389 sbp_doorbell(ocb->sdev);
2390 else
2391 sbp_orb_pointer(ocb->sdev, ocb);
2392 }
2393 } else
2394 if (prev == NULL || (ocb->sdev->flags & ORB_LINK_DEAD) != 0) {
2395 ocb->sdev->flags &= ~ORB_LINK_DEAD;
2396 sbp_orb_pointer(ocb->sdev, ocb);
2397 }
2398 }
2399
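/*
 * Match a status block against the outstanding OCBs of this LUN.  The
 * OCB is identified by the ORB bus address echoed in the status FIFO
 * write; on a match its command callout is stopped and its data map is
 * synced and unloaded.  Without the doorbell the driver keeps the
 * fetch agent walking by handing the next queued ORB to
 * sbp_orb_pointer() whenever the status says there is no next ORB
 * (SRC_NO_NEXT).
 */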
2400 static struct sbp_ocb *
2401 sbp_dequeue_ocb(struct sbp_dev *sdev, struct sbp_status *sbp_status)
2402 {
2403 struct sbp_softc *sc = sdev->target->sbp;
2404 struct sbp_ocb *ocb;
2405 struct sbp_ocb *next;
2406 int order = 0;
2407
2408 SBP_DEBUG(1)
2409 printf("%s:%s:%s: 0x%08x src %d\n", device_xname(sc->sc_fd.dev),
2410 __func__, sdev->bustgtlun, ntohl(sbp_status->orb_lo),
2411 sbp_status->src);
2412 END_DEBUG
2413 mutex_enter(&sc->sc_mtx);
2414 for (ocb = STAILQ_FIRST(&sdev->ocbs); ocb != NULL; ocb = next) {
2415 next = STAILQ_NEXT(ocb, ocb);
2416 if (OCB_MATCH(ocb, sbp_status)) {
2417 /* found */
2418 SBP_ORB_DMA_SYNC(sdev->dma, ocb->index,
2419 BUS_DMASYNC_POSTWRITE);
2420 STAILQ_REMOVE(&sdev->ocbs, ocb, sbp_ocb, ocb);
2421 if (ocb->xs != NULL)
2422 callout_stop(&ocb->xs->xs_callout);
2423 if (ntohl(ocb->orb[4]) & 0xffff) {
2424 const int flag =
2425 (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2426 BUS_DMASYNC_POSTREAD :
2427 BUS_DMASYNC_POSTWRITE;
2428
2429 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2430 0, ocb->dmamap->dm_mapsize, flag);
2431 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2432
2433 }
2434 if (!use_doorbell) {
2435 if (sbp_status->src == SRC_NO_NEXT) {
2436 if (next != NULL)
2437 sbp_orb_pointer(sdev, next);
2438 else if (order > 0)
2439 /*
2440 * Unordered execution
2441 * We need to send pointer for
2442 * next ORB
2443 */
2444 sdev->flags |= ORB_LINK_DEAD;
2445 }
2446 }
2447 break;
2448 } else
2449 order++;
2450 }
2451 mutex_exit(&sc->sc_mtx);
2452
2453 if (ocb && use_doorbell) {
2454 /*
2455 * XXX this is not correct for unordered
2456 * execution.
2457 */
2458 if (sdev->last_ocb != NULL)
2459 sbp_free_ocb(sdev, sdev->last_ocb);
2460 sdev->last_ocb = ocb;
2461 if (next != NULL &&
2462 sbp_status->src == SRC_NO_NEXT)
2463 sbp_doorbell(sdev);
2464 }
2465
2466 SBP_DEBUG(0)
2467 if (ocb && order > 0)
2468 printf("%s:%s:%s: unordered execution order:%d\n",
2469 device_xname(sc->sc_fd.dev), __func__, sdev->bustgtlun,
2470 order);
2471 END_DEBUG
2472 return ocb;
2473 }
2474
2475 static struct sbp_ocb *
2476 sbp_enqueue_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2477 {
2478 struct sbp_softc *sc = sdev->target->sbp;
2479 struct sbp_ocb *tocb, *prev, *prev2;
2480
2481 SBP_DEBUG(1)
2482 printf("%s:%s:%s: 0x%08jx\n", device_xname(sc->sc_fd.dev),
2483 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2484 END_DEBUG
2485 mutex_enter(&sc->sc_mtx);
2486 prev = NULL;
2487 STAILQ_FOREACH(tocb, &sdev->ocbs, ocb)
2488 prev = tocb;
2489 prev2 = prev;
2490 STAILQ_INSERT_TAIL(&sdev->ocbs, ocb, ocb);
2491 mutex_exit(&sc->sc_mtx);
2492
2493 callout_reset(&ocb->xs->xs_callout, mstohz(ocb->xs->timeout),
2494 sbp_timeout, ocb);
2495
2496 if (use_doorbell && prev == NULL)
2497 prev2 = sdev->last_ocb;
2498
2499 if (prev2 != NULL) {
2500 SBP_DEBUG(2)
2501 printf("linking chain 0x%jx -> 0x%jx\n",
2502 (uintmax_t)prev2->bus_addr, (uintmax_t)ocb->bus_addr);
2503 END_DEBUG
2504 /*
2505 * Suppress compiler optimization so that orb[1] must be
2506 * written first.
2507 * XXX We may need an explicit memory barrier on
2508 * architectures other than i386/amd64.
2509 */
2510 *(volatile uint32_t *)&prev2->orb[1] = htonl(ocb->bus_addr);
2511 *(volatile uint32_t *)&prev2->orb[0] = 0;
2512 }
2513
2514 return prev;
2515 }
2516
2517 static struct sbp_ocb *
2518 sbp_get_ocb(struct sbp_dev *sdev)
2519 {
2520 struct sbp_softc *sc = sdev->target->sbp;
2521 struct sbp_ocb *ocb;
2522
2523 KASSERT(mutex_owned(&sc->sc_mtx));
2524
2525 ocb = STAILQ_FIRST(&sdev->free_ocbs);
2526 if (ocb == NULL) {
2527 sdev->flags |= ORB_SHORTAGE;
2528 aprint_error_dev(sc->sc_fd.dev,
2529 "ocb shortage!!!\n");
2530 return NULL;
2531 }
2532 STAILQ_REMOVE_HEAD(&sdev->free_ocbs, ocb);
2533 ocb->xs = NULL;
2534 return ocb;
2535 }
2536
2537 static void
2538 sbp_free_ocb(struct sbp_dev *sdev, struct sbp_ocb *ocb)
2539 {
2540 struct sbp_softc *sc = sdev->target->sbp;
2541 int count;
2542
2543 ocb->flags = 0;
2544 ocb->xs = NULL;
2545
2546 mutex_enter(&sc->sc_mtx);
2547 STAILQ_INSERT_TAIL(&sdev->free_ocbs, ocb, ocb);
2548 mutex_exit(&sc->sc_mtx);
2549 if (sdev->flags & ORB_SHORTAGE) {
2550 sdev->flags &= ~ORB_SHORTAGE;
2551 count = sdev->freeze;
2552 sdev->freeze = 0;
2553 if (sdev->periph)
2554 scsipi_periph_thaw(sdev->periph, count);
2555 scsipi_channel_thaw(&sc->sc_channel, 0);
2556 }
2557 }
2558
2559 static void
2560 sbp_abort_ocb(struct sbp_ocb *ocb, int status)
2561 {
2562 struct sbp_softc *sc;
2563 struct sbp_dev *sdev;
2564
2565 sdev = ocb->sdev;
2566 sc = sdev->target->sbp;
2567 SBP_DEBUG(0)
2568 printf("%s:%s:%s: sbp_abort_ocb 0x%jx\n", device_xname(sc->sc_fd.dev),
2569 __func__, sdev->bustgtlun, (uintmax_t)ocb->bus_addr);
2570 END_DEBUG
2571 SBP_DEBUG(1)
2572 if (ocb->xs != NULL)
2573 sbp_print_scsi_cmd(ocb);
2574 END_DEBUG
2575 if (ntohl(ocb->orb[4]) & 0xffff) {
2576 const int flag = (ntohl(ocb->orb[4]) & ORB_CMD_IN) ?
2577 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
2578
2579 bus_dmamap_sync(sc->sc_dmat, ocb->dmamap,
2580 0, ocb->dmamap->dm_mapsize, flag);
2581 bus_dmamap_unload(sc->sc_dmat, ocb->dmamap);
2582 }
2583 if (ocb->xs != NULL) {
2584 callout_stop(&ocb->xs->xs_callout);
2585 ocb->xs->error = status;
2586 scsipi_done(ocb->xs);
2587 }
2588 sbp_free_ocb(sdev, ocb);
2589 }
2590
2591 static void
2592 sbp_abort_all_ocbs(struct sbp_dev *sdev, int status)
2593 {
2594 struct sbp_softc *sc = sdev->target->sbp;
2595 struct sbp_ocb *ocb, *next;
2596 STAILQ_HEAD(, sbp_ocb) temp;
2597
2598 mutex_enter(&sc->sc_mtx);
2599 STAILQ_INIT(&temp);
2600 STAILQ_CONCAT(&temp, &sdev->ocbs);
2601 STAILQ_INIT(&sdev->ocbs);
2602 mutex_exit(&sc->sc_mtx);
2603
2604 for (ocb = STAILQ_FIRST(&temp); ocb != NULL; ocb = next) {
2605 next = STAILQ_NEXT(ocb, ocb);
2606 sbp_abort_ocb(ocb, status);
2607 }
2608 if (sdev->last_ocb != NULL) {
2609 sbp_free_ocb(sdev, sdev->last_ocb);
2610 sdev->last_ocb = NULL;
2611 }
2612 }
2613
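/*
 * A note on the ORB chaining done in sbp_enqueue_ocb() above: the two
 * volatile stores are intended to make the new next_ORB address
 * (orb[1]) reach memory before the null bit in orb[0] is cleared.  As
 * the XXX comment there says, relying on the compiler alone may not be
 * enough on weakly ordered CPUs.  One possible, untested variant
 * (illustrative only, not part of this driver) would add an explicit
 * store barrier from <sys/atomic.h>:
 *
 *	prev2->orb[1] = htonl(ocb->bus_addr);
 *	membar_producer();	(orders the two stores on any CPU)
 *	prev2->orb[0] = 0;
 *
 * still followed, as now, by the SBP_ORB_DMA_SYNC() done in
 * sbp_execute_ocb() before the fetch agent is notified.
 */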