/*	$OpenBSD: qle.c,v 1.66 2024/09/04 07:54:52 mglocker Exp $ */

/*
 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/task.h>
#include <sys/timeout.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/pci/qlereg.h>

#ifdef QLE_DEBUG
#define DPRINTF(m, f...) do { if ((qledebug & (m)) == (m)) printf(f); } \
    while (0)
#define QLE_D_MBOX		0x01
#define QLE_D_INTR		0x02
#define QLE_D_PORT		0x04
#define QLE_D_IO		0x08
#define QLE_D_IOCB		0x10
int qledebug = QLE_D_PORT;
#else
#define DPRINTF(m, f...)
#endif

#ifndef QLE_NOFIRMWARE
#include <dev/microcode/isp/asm_2400.h>
#include <dev/microcode/isp/asm_2500.h>
#endif

#define QLE_PCI_MEM_BAR		0x14
#define QLE_PCI_IO_BAR		0x10


#define QLE_DEFAULT_PORT_NAME	0x400000007F000003ULL /* from isp(4) */

#define QLE_WAIT_FOR_LOOP	10	/* seconds */
#define QLE_LOOP_SETTLE		200	/* ms */

/* rounded up range of assignable handles */
#define QLE_MAX_TARGETS		2048

/* maximum number of segments allowed for in a single io */
#define QLE_MAX_SEGS		32

enum qle_isp_gen {
	QLE_GEN_ISP24XX = 1,
	QLE_GEN_ISP25XX
};

enum qle_isp_type {
	QLE_ISP2422 = 1,
	QLE_ISP2432,
	QLE_ISP2512,
	QLE_ISP2522,
	QLE_ISP2532
};

/* port database things */
#define QLE_SCRATCH_SIZE	0x1000

enum qle_port_disp {
	QLE_PORT_DISP_NEW,
	QLE_PORT_DISP_GONE,
	QLE_PORT_DISP_SAME,
	QLE_PORT_DISP_CHANGED,
	QLE_PORT_DISP_MOVED,
	QLE_PORT_DISP_DUP
};

#define QLE_LOCATION_LOOP	(1 << 24)
#define QLE_LOCATION_FABRIC	(2 << 24)
#define QLE_LOCATION_LOOP_ID(l)	(l | QLE_LOCATION_LOOP)
#define QLE_LOCATION_PORT_ID(p)	(p | QLE_LOCATION_FABRIC)

struct qle_fc_port {
	TAILQ_ENTRY(qle_fc_port) ports;
	TAILQ_ENTRY(qle_fc_port) update;

	u_int64_t	node_name;
	u_int64_t	port_name;
	u_int32_t	location;	/* port id or loop id */

	int		flags;
#define QLE_PORT_FLAG_IS_TARGET		1
#define QLE_PORT_FLAG_NEEDS_LOGIN	2

	u_int32_t	portid;
	u_int16_t	loopid;
};


/* request/response queue stuff */
#define QLE_QUEUE_ENTRY_SIZE	64

struct qle_ccb {
	struct qle_softc	*ccb_sc;
	int			ccb_id;
	struct scsi_xfer	*ccb_xs;

	bus_dmamap_t		ccb_dmamap;

	struct qle_iocb_seg	*ccb_segs;
	u_int64_t		ccb_seg_offset;

	SIMPLEQ_ENTRY(qle_ccb)	ccb_link;
};

SIMPLEQ_HEAD(qle_ccb_list, qle_ccb);

struct qle_dmamem {
	bus_dmamap_t		qdm_map;
	bus_dma_segment_t	qdm_seg;
	size_t			qdm_size;
	caddr_t			qdm_kva;
};
#define QLE_DMA_MAP(_qdm)	((_qdm)->qdm_map)
#define QLE_DMA_LEN(_qdm)	((_qdm)->qdm_size)
#define QLE_DMA_DVA(_qdm)	((u_int64_t)(_qdm)->qdm_map->dm_segs[0].ds_addr)
#define QLE_DMA_KVA(_qdm)	((void *)(_qdm)->qdm_kva)

struct qle_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct scsibus_softc	*sc_scsibus;

	enum qle_isp_type	sc_isp_type;
	enum qle_isp_gen	sc_isp_gen;
	int			sc_port;

	bus_space_handle_t	sc_mbox_ioh;
	u_int16_t		sc_mbox[QLE_MBOX_COUNT];
	int			sc_mbox_pending;
	struct mutex		sc_mbox_mtx;

	int			sc_loop_up;
	int			sc_topology;
	int			sc_loop_id;
	int			sc_port_id;
	int			sc_loop_max_id;
	u_int64_t		sc_sns_port_name;

	struct mutex		sc_port_mtx;
	TAILQ_HEAD(, qle_fc_port) sc_ports;
	TAILQ_HEAD(, qle_fc_port) sc_ports_new;
	TAILQ_HEAD(, qle_fc_port) sc_ports_gone;
	TAILQ_HEAD(, qle_fc_port) sc_ports_found;
	struct qle_fc_port	*sc_targets[QLE_MAX_TARGETS];

	struct taskq		*sc_update_taskq;
	struct task		sc_update_task;
	struct timeout		sc_update_timeout;
	int			sc_update;
	int			sc_update_tasks;
#define	QLE_UPDATE_TASK_CLEAR_ALL	0x00000001
#define	QLE_UPDATE_TASK_SOFTRESET	0x00000002
#define	QLE_UPDATE_TASK_UPDATE_TOPO	0x00000004
#define	QLE_UPDATE_TASK_GET_PORT_LIST	0x00000008
#define	QLE_UPDATE_TASK_PORT_LIST	0x00000010
#define	QLE_UPDATE_TASK_SCAN_FABRIC	0x00000020
#define	QLE_UPDATE_TASK_SCANNING_FABRIC	0x00000040
#define	QLE_UPDATE_TASK_FABRIC_LOGIN	0x00000080
#define	QLE_UPDATE_TASK_FABRIC_RELOGIN	0x00000100
#define	QLE_UPDATE_TASK_DETACH_TARGET	0x00000200
#define	QLE_UPDATE_TASK_ATTACH_TARGET	0x00000400

	int			sc_maxcmds;
	struct qle_dmamem	*sc_requests;
	struct qle_dmamem	*sc_responses;
	struct qle_dmamem	*sc_segments;
	struct qle_dmamem	*sc_pri_requests;
	struct qle_dmamem	*sc_scratch;
	struct qle_dmamem	*sc_fcp_cmnds;
	struct qle_ccb		*sc_ccbs;
	struct qle_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_mtx;
	struct mutex		sc_queue_mtx;
	struct scsi_iopool	sc_iopool;
	u_int32_t		sc_next_req_id;
	u_int32_t		sc_last_resp_id;
	int			sc_marker_required;
	int			sc_fabric_pending;
	u_int8_t		sc_fabric_response[QLE_QUEUE_ENTRY_SIZE];

	struct qle_nvram	sc_nvram;
	int			sc_nvram_valid;
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

int	qle_intr(void *);

int	qle_match(struct device *, void *, void *);
void	qle_attach(struct device *, struct device *, void *);
int	qle_detach(struct device *, int);

const struct cfattach qle_ca = {
	sizeof(struct qle_softc),
	qle_match,
	qle_attach,
	qle_detach
};

struct cfdriver qle_cd = {
	NULL,
	"qle",
	DV_DULL
};

void		qle_scsi_cmd(struct scsi_xfer *);
int		qle_scsi_probe(struct scsi_link *);


const struct scsi_adapter qle_switch = {
	qle_scsi_cmd, NULL, qle_scsi_probe, NULL, NULL
};

u_int32_t	qle_read(struct qle_softc *, int);
void		qle_write(struct qle_softc *, int, u_int32_t);
void		qle_host_cmd(struct qle_softc *sc, u_int32_t);

int		qle_mbox(struct qle_softc *, int);
int		qle_ct_pass_through(struct qle_softc *sc,
		    u_int32_t port_handle, struct qle_dmamem *mem,
		    size_t req_size, size_t resp_size);
void		qle_mbox_putaddr(u_int16_t *, struct qle_dmamem *);
u_int16_t	qle_read_mbox(struct qle_softc *, int);
void		qle_write_mbox(struct qle_softc *, int, u_int16_t);

void		qle_handle_intr(struct qle_softc *, u_int16_t, u_int16_t);
void		qle_set_ints(struct qle_softc *, int);
int		qle_read_isr(struct qle_softc *, u_int16_t *, u_int16_t *);
void		qle_clear_isr(struct qle_softc *, u_int16_t);

void		qle_put_marker(struct qle_softc *, void *);
void		qle_put_cmd(struct qle_softc *, void *, struct scsi_xfer *,
		    struct qle_ccb *, u_int32_t);
struct qle_ccb *qle_handle_resp(struct qle_softc *, u_int32_t);
void		qle_sge(struct qle_iocb_seg *, u_int64_t, u_int32_t);

struct qle_fc_port *qle_next_fabric_port(struct qle_softc *, u_int32_t *,
		    u_int32_t *);
int		qle_get_port_db(struct qle_softc *, u_int16_t,
		    struct qle_dmamem *);
int		qle_get_port_name_list(struct qle_softc *sc, u_int32_t);
int		qle_add_loop_port(struct qle_softc *, struct qle_fc_port *);
int		qle_add_fabric_port(struct qle_softc *, struct qle_fc_port *);
int		qle_add_logged_in_port(struct qle_softc *, u_int16_t,
		    u_int32_t);
int		qle_classify_port(struct qle_softc *, u_int32_t, u_int64_t,
		    u_int64_t, struct qle_fc_port **);
int		qle_get_loop_id(struct qle_softc *sc, int);
void		qle_clear_port_lists(struct qle_softc *);
int		qle_softreset(struct qle_softc *);
void		qle_update_topology(struct qle_softc *);
int		qle_update_fabric(struct qle_softc *);
int		qle_fabric_plogx(struct qle_softc *, struct qle_fc_port *, int,
		    u_int32_t *);
int		qle_fabric_plogi(struct qle_softc *, struct qle_fc_port *);
void		qle_fabric_plogo(struct qle_softc *, struct qle_fc_port *);

void		qle_update_start(struct qle_softc *, int);
void		qle_update_defer(struct qle_softc *, int);
void		qle_update_cancel(struct qle_softc *);
void		qle_update_done(struct qle_softc *, int);
void		qle_do_update(void *);
void		qle_deferred_update(void *);
int		qle_async(struct qle_softc *, u_int16_t);

int		qle_load_fwchunk(struct qle_softc *,
		    struct qle_dmamem *, const u_int32_t *);
u_int32_t	qle_read_ram_word(struct qle_softc *, u_int32_t);
int		qle_verify_firmware(struct qle_softc *, u_int32_t);
int		qle_load_firmware_chunks(struct qle_softc *, const u_int32_t *);
int		qle_read_nvram(struct qle_softc *);

struct qle_dmamem *qle_dmamem_alloc(struct qle_softc *, size_t);
void		qle_dmamem_free(struct qle_softc *, struct qle_dmamem *);

int		qle_alloc_ccbs(struct qle_softc *);
void		qle_free_ccbs(struct qle_softc *);
void		*qle_get_ccb(void *);
void		qle_put_ccb(void *, void *);

void		qle_dump_stuff(struct qle_softc *, void *, int);
void		qle_dump_iocb(struct qle_softc *, void *);
void		qle_dump_iocb_segs(struct qle_softc *, void *, int);

static const struct pci_matchid qle_devices[] = {
	{ PCI_VENDOR_QLOGIC,	PCI_PRODUCT_QLOGIC_ISP2422 },
	{ PCI_VENDOR_QLOGIC,	PCI_PRODUCT_QLOGIC_ISP2432 },
	{ PCI_VENDOR_QLOGIC,	PCI_PRODUCT_QLOGIC_ISP2512 },
	{ PCI_VENDOR_QLOGIC,	PCI_PRODUCT_QLOGIC_ISP2522 },
	{ PCI_VENDOR_QLOGIC,	PCI_PRODUCT_QLOGIC_ISP2532 },
};

int
qle_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, qle_devices, nitems(qle_devices)));
}

void
qle_attach(struct device *parent, struct device *self, void *aux)
{
	struct qle_softc *sc = (void *)self;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int32_t pcictl;
	struct scsibus_attach_args saa;
	struct qle_init_cb *icb;
	bus_size_t mbox_base;
	u_int32_t firmware_addr;
#ifndef QLE_NOFIRMWARE
	const u_int32_t *firmware = NULL;
#endif

	pcireg_t bars[] = { QLE_PCI_MEM_BAR, QLE_PCI_IO_BAR };
	pcireg_t memtype;
	int r, i, rv, loop_up = 0;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_ih = NULL;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_ios = 0;

	for (r = 0; r < nitems(bars); r++) {
		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, bars[r]);
		if (pci_mapreg_map(pa, bars[r], memtype, 0,
		    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, 0) == 0)
			break;

		sc->sc_ios = 0;
	}
	if (sc->sc_ios == 0) {
		printf(": unable to map registers\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}
	intrstr = pci_intr_string(sc->sc_pc, ih);
	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
	    qle_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto deintr;
	}

	printf(": %s\n", intrstr);

	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pcictl |= PCI_COMMAND_INVALIDATE_ENABLE |
	    PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, pcictl);

	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	pcictl &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
	pcictl &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
	pcictl |= (0x80 << PCI_LATTIMER_SHIFT);
	pcictl |= (0x10 << PCI_CACHELINE_SHIFT);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, pcictl);

	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	pcictl &= ~1;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, pcictl);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_QLOGIC_ISP2422:
		sc->sc_isp_type = QLE_ISP2422;
		sc->sc_isp_gen = QLE_GEN_ISP24XX;
		break;
	case PCI_PRODUCT_QLOGIC_ISP2432:
		sc->sc_isp_type = QLE_ISP2432;
		sc->sc_isp_gen = QLE_GEN_ISP24XX;
		break;
	case PCI_PRODUCT_QLOGIC_ISP2512:
		sc->sc_isp_type = QLE_ISP2512;
		sc->sc_isp_gen = QLE_GEN_ISP25XX;
		break;
	case PCI_PRODUCT_QLOGIC_ISP2522:
		sc->sc_isp_type = QLE_ISP2522;
		sc->sc_isp_gen = QLE_GEN_ISP25XX;
		break;
	case PCI_PRODUCT_QLOGIC_ISP2532:
		sc->sc_isp_type = QLE_ISP2532;
		sc->sc_isp_gen = QLE_GEN_ISP25XX;
		break;

	default:
		printf("unknown pci id %x", pa->pa_id);
		goto deintr;
	}

	/* these are the same for 24xx and 25xx but may vary later */
	mbox_base = QLE_MBOX_BASE_24XX;
	firmware_addr = QLE_2400_CODE_ORG;

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, mbox_base,
	    sizeof(sc->sc_mbox), &sc->sc_mbox_ioh) != 0) {
		printf("%s: unable to map mbox registers\n", DEVNAME(sc));
		goto deintr;
	}

	sc->sc_port = pa->pa_function;

	TAILQ_INIT(&sc->sc_ports);
	TAILQ_INIT(&sc->sc_ports_new);
	TAILQ_INIT(&sc->sc_ports_gone);
	TAILQ_INIT(&sc->sc_ports_found);

	/* after reset, mbox regs 1 and 2 contain the string "ISP " */
	if (qle_read_mbox(sc, 1) != 0x4953 ||
	    qle_read_mbox(sc, 2) != 0x5020) {
		/* try releasing the risc processor */
		qle_host_cmd(sc, QLE_HOST_CMD_RELEASE);
	}

	qle_host_cmd(sc, QLE_HOST_CMD_PAUSE);
	if (qle_softreset(sc) != 0) {
		printf("softreset failed\n");
		goto deintr;
	}

	if (qle_read_nvram(sc) == 0)
		sc->sc_nvram_valid = 1;

#ifdef QLE_NOFIRMWARE
	if (qle_verify_firmware(sc, firmware_addr)) {
		printf("%s: no firmware loaded\n", DEVNAME(sc));
		goto deintr;
	}
#else
	switch (sc->sc_isp_gen) {
	case QLE_GEN_ISP24XX:
		firmware = isp_2400_risc_code;
		break;
	case QLE_GEN_ISP25XX:
		firmware = isp_2500_risc_code;
		break;
	default:
		printf("%s: no firmware to load?\n", DEVNAME(sc));
		goto deintr;
	}
	if (qle_load_firmware_chunks(sc, firmware)) {
		printf("%s: firmware load failed\n", DEVNAME(sc));
		goto deintr;
	}
#endif

	/* execute firmware */
	sc->sc_mbox[0] = QLE_MBOX_EXEC_FIRMWARE;
	sc->sc_mbox[1] = firmware_addr >> 16;
	sc->sc_mbox[2] = firmware_addr & 0xffff;
#ifdef QLE_NOFIRMWARE
	sc->sc_mbox[3] = 1;
#else
	sc->sc_mbox[3] = 0;
#endif
	sc->sc_mbox[4] = 0;
	if (qle_mbox(sc, 0x001f)) {
		printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
		goto deintr;
	}

	delay(250000);		/* from isp(4) */

	sc->sc_mbox[0] = QLE_MBOX_ABOUT_FIRMWARE;
	if (qle_mbox(sc, 0x0001)) {
		printf("ISP not talking after firmware exec: %x\n",
		    sc->sc_mbox[0]);
		goto deintr;
	}
	printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc),
	    sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);

	sc->sc_maxcmds = 4096;

	/* reserve queue slots for markers and fabric ops */
	sc->sc_maxcmds -= 2;

	if (qle_alloc_ccbs(sc)) {
		/* error already printed */
		goto deintr;
	}
	sc->sc_scratch = qle_dmamem_alloc(sc, QLE_SCRATCH_SIZE);
	if (sc->sc_scratch == NULL) {
		printf("%s: unable to allocate scratch\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* build init buffer thing */
	icb = (struct qle_init_cb *)QLE_DMA_KVA(sc->sc_scratch);
	memset(icb, 0, sizeof(*icb));
	icb->icb_version = QLE_ICB_VERSION;
	if (sc->sc_nvram_valid) {
		icb->icb_max_frame_len = sc->sc_nvram.frame_payload_size;
		icb->icb_exec_throttle = sc->sc_nvram.execution_throttle;
		icb->icb_hardaddr = sc->sc_nvram.hard_address;
		icb->icb_portname = sc->sc_nvram.port_name;
		icb->icb_nodename = sc->sc_nvram.node_name;
		icb->icb_login_retry = sc->sc_nvram.login_retry;
		icb->icb_login_timeout = sc->sc_nvram.login_timeout;
		icb->icb_fwoptions1 = sc->sc_nvram.fwoptions1;
		icb->icb_fwoptions2 = sc->sc_nvram.fwoptions2;
		icb->icb_fwoptions3 = sc->sc_nvram.fwoptions3;
	} else {
		/* defaults copied from isp(4) */
		htolem16(&icb->icb_max_frame_len, 1024);
		htolem16(&icb->icb_exec_throttle, 16);
		icb->icb_portname = htobe64(QLE_DEFAULT_PORT_NAME);
		icb->icb_nodename = 0;
		icb->icb_login_retry = 3;

		htolem32(&icb->icb_fwoptions1, QLE_ICB_FW1_FAIRNESS |
		    QLE_ICB_FW1_HARD_ADDR | QLE_ICB_FW1_FULL_DUPLEX);
		htolem32(&icb->icb_fwoptions2, QLE_ICB_FW2_LOOP_PTP);
		htolem32(&icb->icb_fwoptions3, QLE_ICB_FW3_FCP_RSP_24_0 |
		    QLE_ICB_FW3_AUTONEG);
	}

	icb->icb_exchange_count = 0;

	icb->icb_req_out = 0;
	icb->icb_resp_in = 0;
	icb->icb_pri_req_out = 0;
	htolem16(&icb->icb_req_queue_len, sc->sc_maxcmds);
	htolem16(&icb->icb_resp_queue_len, sc->sc_maxcmds);
	htolem16(&icb->icb_pri_req_queue_len, 8); /* apparently the minimum */
	htolem32(&icb->icb_req_queue_addr_lo,
	    QLE_DMA_DVA(sc->sc_requests));
	htolem32(&icb->icb_req_queue_addr_hi,
	    QLE_DMA_DVA(sc->sc_requests) >> 32);
	htolem32(&icb->icb_resp_queue_addr_lo,
	    QLE_DMA_DVA(sc->sc_responses));
	htolem32(&icb->icb_resp_queue_addr_hi,
	    QLE_DMA_DVA(sc->sc_responses) >> 32);
	htolem32(&icb->icb_pri_req_queue_addr_lo,
	    QLE_DMA_DVA(sc->sc_pri_requests));
	htolem32(&icb->icb_pri_req_queue_addr_hi,
	    QLE_DMA_DVA(sc->sc_pri_requests) >> 32);

	htolem16(&icb->icb_link_down_nos, 200);
	icb->icb_int_delay = 0;
	icb->icb_login_timeout = 0;

	sc->sc_mbox[0] = QLE_MBOX_INIT_FIRMWARE;
	sc->sc_mbox[4] = 0;
	sc->sc_mbox[5] = 0;
	qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
	    sizeof(*icb), BUS_DMASYNC_PREWRITE);
	rv = qle_mbox(sc, 0x00fd);
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
	    sizeof(*icb), BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		printf("%s: ISP firmware init failed: %x\n", DEVNAME(sc),
		    sc->sc_mbox[0]);
		goto free_scratch;
	}

	/* enable some more notifications */
	sc->sc_mbox[0] = QLE_MBOX_SET_FIRMWARE_OPTIONS;
	sc->sc_mbox[1] = QLE_FW_OPTION1_ASYNC_LIP_F8 |
	    QLE_FW_OPTION1_ASYNC_LIP_RESET |
	    QLE_FW_OPTION1_ASYNC_LIP_ERROR |
	    QLE_FW_OPTION1_ASYNC_LOGIN_RJT;
	sc->sc_mbox[2] = 0;
	sc->sc_mbox[3] = 0;
	if (qle_mbox(sc, 0x000f)) {
		printf("%s: setting firmware options failed: %x\n",
		    DEVNAME(sc), sc->sc_mbox[0]);
		goto free_scratch;
	}

	sc->sc_update_taskq = taskq_create(DEVNAME(sc), 1, IPL_BIO, 0);
	task_set(&sc->sc_update_task, qle_do_update, sc);
	timeout_set(&sc->sc_update_timeout, qle_deferred_update, sc);

	/* wait a bit for link to come up so we can scan and attach devices */
	for (i = 0; i < QLE_WAIT_FOR_LOOP * 1000; i++) {
		u_int16_t isr, info;

		if (sc->sc_loop_up) {
			if (++loop_up == QLE_LOOP_SETTLE)
				break;
		} else
			loop_up = 0;

		delay(1000);

		if (qle_read_isr(sc, &isr, &info) == 0)
			continue;

		qle_handle_intr(sc, isr, info);

	}

	if (sc->sc_loop_up) {
		qle_do_update(sc);
	} else {
		DPRINTF(QLE_D_PORT, "%s: loop still down, giving up\n",
		    DEVNAME(sc));
	}

	saa.saa_adapter = &qle_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = QLE_MAX_TARGETS;
	saa.saa_luns = 8;
	saa.saa_openings = sc->sc_maxcmds;
	saa.saa_pool = &sc->sc_iopool;
	if (sc->sc_nvram_valid) {
		saa.saa_wwpn = betoh64(sc->sc_nvram.port_name);
		saa.saa_wwnn = betoh64(sc->sc_nvram.node_name);
	} else {
		saa.saa_wwpn = QLE_DEFAULT_PORT_NAME;
		saa.saa_wwnn = 0;
	}
	if (saa.saa_wwnn == 0) {
		/*
		 * mask out the port number from the port name to get
		 * the node name.
		 */
		saa.saa_wwnn = saa.saa_wwpn;
		saa.saa_wwnn &= ~(0xfULL << 56);
	}
	saa.saa_quirks = saa.saa_flags = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);

	return;

free_scratch:
	qle_dmamem_free(sc, sc->sc_scratch);
free_ccbs:
	qle_free_ccbs(sc);
deintr:
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = NULL;
unmap:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;
}

int
qle_detach(struct device *self, int flags)
{
	struct qle_softc *sc = (struct qle_softc *)self;

	if (sc->sc_ih == NULL) {
		/* we didn't attach properly, so nothing to detach */
		return (0);
	}

	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = NULL;

	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
	sc->sc_ios = 0;

	return (0);
}

int
qle_classify_port(struct qle_softc *sc, u_int32_t location,
    u_int64_t port_name, u_int64_t node_name, struct qle_fc_port **prev)
{
	struct qle_fc_port *port, *locmatch, *wwnmatch;
	locmatch = NULL;
	wwnmatch = NULL;

	/* make sure we don't try to add a port or location twice */
	TAILQ_FOREACH(port, &sc->sc_ports_new, update) {
		if ((port->port_name == port_name &&
		    port->node_name == node_name) ||
		    port->location == location) {
			*prev = port;
			return (QLE_PORT_DISP_DUP);
		}
	}

	/* if we're attaching, everything is new */
	if (sc->sc_scsibus == NULL) {
		*prev = NULL;
		return (QLE_PORT_DISP_NEW);
	}

	TAILQ_FOREACH(port, &sc->sc_ports, ports) {
		if (port->location == location)
			locmatch = port;

		if (port->port_name == port_name &&
		    port->node_name == node_name)
			wwnmatch = port;
	}

	if (locmatch == NULL && wwnmatch == NULL) {
		*prev = NULL;
		return (QLE_PORT_DISP_NEW);
	} else if (locmatch == wwnmatch) {
		*prev = locmatch;
		return (QLE_PORT_DISP_SAME);
	} else if (wwnmatch != NULL) {
		*prev = wwnmatch;
		return (QLE_PORT_DISP_MOVED);
	} else {
		*prev = locmatch;
		return (QLE_PORT_DISP_CHANGED);
	}
}

int
qle_get_loop_id(struct qle_softc *sc, int start)
{
	int i, last;

	i = QLE_MIN_HANDLE;
	last = QLE_MAX_HANDLE;
	if (i < start)
		i = start;

	for (; i <= last; i++) {
		if (sc->sc_targets[i] == NULL)
			return (i);
	}

	return (-1);
}

int
qle_get_port_db(struct qle_softc *sc, u_int16_t loopid, struct qle_dmamem *mem)
{
	sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB;
	sc->sc_mbox[1] = loopid;
	qle_mbox_putaddr(sc->sc_mbox, mem);
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
	    sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD);
	if (qle_mbox(sc, 0x00cf)) {
		DPRINTF(QLE_D_PORT, "%s: get port db for %d failed: %x\n",
		    DEVNAME(sc), loopid, sc->sc_mbox[0]);
		return (1);
	}

	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
	    sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD);
	return (0);
}

int
qle_get_port_name_list(struct qle_softc *sc, u_int32_t match)
{
	struct qle_port_name_list *l;
	struct qle_fc_port *port;
	int i;

	sc->sc_mbox[0] = QLE_MBOX_GET_PORT_NAME_LIST;
	sc->sc_mbox[1] = 0;
	sc->sc_mbox[8] = QLE_DMA_LEN(sc->sc_scratch);
	sc->sc_mbox[9] = 0;
	qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
	    QLE_DMA_LEN(sc->sc_scratch), BUS_DMASYNC_PREREAD);
	if (qle_mbox(sc, 0x03cf)) {
		DPRINTF(QLE_D_PORT, "%s: get port name list failed: %x\n",
		    DEVNAME(sc), sc->sc_mbox[0]);
		return (1);
	}
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
	    sc->sc_mbox[1], BUS_DMASYNC_POSTREAD);

	i = 0;
	l = QLE_DMA_KVA(sc->sc_scratch);
	mtx_enter(&sc->sc_port_mtx);
	while (i * sizeof(*l) < sc->sc_mbox[1]) {
		u_int16_t loopid;
		u_int32_t loc;

		loopid = lemtoh16(&l[i].loopid) & 0xfff;
		/* skip special ports */
		switch (loopid) {
		case QLE_F_PORT_HANDLE:
		case QLE_SNS_HANDLE:
		case QLE_FABRIC_CTRL_HANDLE:
		case QLE_IP_BCAST_HANDLE:
			loc = 0;
			break;
		default:
			if (loopid <= sc->sc_loop_max_id) {
				loc = QLE_LOCATION_LOOP_ID(loopid);
			} else {
				/*
				 * we don't have the port id here, so just
				 * indicate it's a fabric port.
				 */
				loc = QLE_LOCATION_FABRIC;
			}
			break;
		}

		if (match & loc) {
			port = malloc(sizeof(*port), M_DEVBUF, M_ZERO |
			    M_NOWAIT);
			if (port == NULL) {
				printf("%s: failed to allocate port struct\n",
				    DEVNAME(sc));
				break;
			}
			port->location = loc;
			port->loopid = loopid;
			port->port_name = letoh64(l[i].port_name);
			DPRINTF(QLE_D_PORT, "%s: loop id %d, port name %llx\n",
			    DEVNAME(sc), port->loopid, port->port_name);
			TAILQ_INSERT_TAIL(&sc->sc_ports_found, port, update);
		}
		i++;
	}
	mtx_leave(&sc->sc_port_mtx);

	return (0);
}

int
qle_add_loop_port(struct qle_softc *sc, struct qle_fc_port *port)
{
	struct qle_get_port_db *pdb;
	struct qle_fc_port *pport;
	int disp;

	if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) {
		return (1);
	}
	pdb = QLE_DMA_KVA(sc->sc_scratch);

	if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
		port->flags |= QLE_PORT_FLAG_IS_TARGET;

	port->port_name = betoh64(pdb->port_name);
	port->node_name = betoh64(pdb->node_name);
	port->portid = (pdb->port_id[0] << 16) | (pdb->port_id[1] << 8) |
	    pdb->port_id[2];

	mtx_enter(&sc->sc_port_mtx);
	disp = qle_classify_port(sc, port->location, port->port_name,
	    port->node_name, &pport);
	switch (disp) {
	case QLE_PORT_DISP_CHANGED:
	case QLE_PORT_DISP_MOVED:
	case QLE_PORT_DISP_NEW:
		TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
		sc->sc_targets[port->loopid] = port;
		break;
	case QLE_PORT_DISP_DUP:
		free(port, M_DEVBUF, sizeof *port);
		break;
	case QLE_PORT_DISP_SAME:
		TAILQ_REMOVE(&sc->sc_ports_gone, pport, update);
		free(port, M_DEVBUF, sizeof *port);
		break;
	}
	mtx_leave(&sc->sc_port_mtx);

	switch (disp) {
	case QLE_PORT_DISP_CHANGED:
	case QLE_PORT_DISP_MOVED:
	case QLE_PORT_DISP_NEW:
		DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n",
		    DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ?
		    "target" : "non-target", port->loopid,
		    betoh64(pdb->port_name));
		break;
	default:
		break;
	}
	return (0);
}

int
qle_add_fabric_port(struct qle_softc *sc, struct qle_fc_port *port)
{
	struct qle_get_port_db *pdb;

	if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) {
		free(port, M_DEVBUF, sizeof *port);
		return (1);
	}
	pdb = QLE_DMA_KVA(sc->sc_scratch);

	if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
		port->flags |= QLE_PORT_FLAG_IS_TARGET;

	/*
	 * if we only know about this port because qle_get_port_name_list
	 * returned it, we don't have its port id or node name, so fill
	 * those in and update its location.
	 */
	if (port->location == QLE_LOCATION_FABRIC) {
		port->node_name = betoh64(pdb->node_name);
		port->port_name = betoh64(pdb->port_name);
		port->portid = (pdb->port_id[0] << 16) |
		    (pdb->port_id[1] << 8) | pdb->port_id[2];
		port->location = QLE_LOCATION_PORT_ID(port->portid);
	}

	mtx_enter(&sc->sc_port_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
	sc->sc_targets[port->loopid] = port;
	mtx_leave(&sc->sc_port_mtx);

	DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n",
	    DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ?
	    "target" : "non-target", port->loopid, port->port_name);
	return (0);
}

int
qle_add_logged_in_port(struct qle_softc *sc, u_int16_t loopid,
    u_int32_t portid)
{
	struct qle_fc_port *port;
	struct qle_get_port_db *pdb;
	u_int64_t node_name, port_name;
	int flags, ret;

	ret = qle_get_port_db(sc, loopid, sc->sc_scratch);
	mtx_enter(&sc->sc_port_mtx);
	if (ret != 0) {
		/* put in a fake port to prevent use of this loop id */
		printf("%s: loop id %d used, but can't see what's using it\n",
		    DEVNAME(sc), loopid);
		node_name = 0;
		port_name = 0;
		flags = 0;
	} else {
		pdb = QLE_DMA_KVA(sc->sc_scratch);
		node_name = betoh64(pdb->node_name);
		port_name = betoh64(pdb->port_name);
		flags = 0;
		if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
			flags |= QLE_PORT_FLAG_IS_TARGET;

		/* see if we've already found this port */
		TAILQ_FOREACH(port, &sc->sc_ports_found, update) {
			if ((port->node_name == node_name) &&
			    (port->port_name == port_name) &&
			    (port->portid == portid)) {
				mtx_leave(&sc->sc_port_mtx);
				DPRINTF(QLE_D_PORT, "%s: already found port "
				    "%06x\n", DEVNAME(sc), portid);
				return (0);
			}
		}
	}

	port = malloc(sizeof(*port), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (port == NULL) {
		mtx_leave(&sc->sc_port_mtx);
		printf("%s: failed to allocate a port structure\n",
		    DEVNAME(sc));
		return (1);
	}
	port->location = QLE_LOCATION_PORT_ID(portid);
	port->port_name = port_name;
	port->node_name = node_name;
	port->loopid = loopid;
	port->portid = portid;
	port->flags = flags;

	TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports);
	sc->sc_targets[port->loopid] = port;
	mtx_leave(&sc->sc_port_mtx);

	DPRINTF(QLE_D_PORT, "%s: added logged in port %06x at %d\n",
	    DEVNAME(sc), portid, loopid);
	return (0);
}

struct qle_ccb *
qle_handle_resp(struct qle_softc *sc, u_int32_t id)
{
	struct qle_ccb *ccb;
	struct qle_iocb_status *status;
	struct qle_iocb_req6 *req;
	struct scsi_xfer *xs;
	u_int32_t handle;
	u_int16_t completion;
	u_int8_t *entry;
	u_int8_t *data;

	ccb = NULL;
	entry = QLE_DMA_KVA(sc->sc_responses) + (id * QLE_QUEUE_ENTRY_SIZE);

	bus_dmamap_sync(sc->sc_dmat,
	    QLE_DMA_MAP(sc->sc_responses), id * QLE_QUEUE_ENTRY_SIZE,
	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);

	qle_dump_iocb(sc, entry);
	switch(entry[0]) {
	case QLE_IOCB_STATUS:
		status = (struct qle_iocb_status *)entry;
		handle = status->handle;
		if (handle > sc->sc_maxcmds) {
			panic("bad completed command handle: %d (> %d)",
			    handle, sc->sc_maxcmds);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		if (xs == NULL) {
			DPRINTF(QLE_D_IO, "%s: got status for inactive ccb %d\n",
			    DEVNAME(sc), handle);
			ccb = NULL;
			break;
		}
		if (xs->io != ccb) {
			panic("completed command handle doesn't match xs "
			    "(handle %d, ccb %p, xs->io %p)", handle, ccb,
			    xs->io);
		}

		if (xs->datalen > 0) {
			if (ccb->ccb_dmamap->dm_nsegs >
			    QLE_IOCB_SEGS_PER_CMD) {
				bus_dmamap_sync(sc->sc_dmat,
				    QLE_DMA_MAP(sc->sc_segments),
				    ccb->ccb_seg_offset,
				    sizeof(*ccb->ccb_segs) *
				    ccb->ccb_dmamap->dm_nsegs + 1,
				    BUS_DMASYNC_POSTWRITE);
			}

			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
			    ccb->ccb_dmamap->dm_mapsize,
			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		}

		xs->status = lemtoh16(&status->scsi_status) & 0xff;
		xs->resid = 0;
		completion = lemtoh16(&status->completion);
		switch (completion) {
		case QLE_IOCB_STATUS_DATA_UNDERRUN:
			xs->resid = lemtoh32(&status->resid);
			/* FALLTHROUGH */
		case QLE_IOCB_STATUS_DATA_OVERRUN:
		case QLE_IOCB_STATUS_COMPLETE:
			if (lemtoh16(&status->scsi_status) &
			    QLE_SCSI_STATUS_SENSE_VALID) {
				u_int32_t *pp;
				int sr;
				data = status->data +
				    lemtoh32(&status->fcp_rsp_len);
				sr = MIN(lemtoh32(&status->fcp_sense_len),
				    sizeof(xs->sense));
				memcpy(&xs->sense, data, sr);
				xs->error = XS_SENSE;
				pp = (u_int32_t *)&xs->sense;
				for (sr = 0; sr < sizeof(xs->sense)/4; sr++) {
					pp[sr] = swap32(pp[sr]);
				}
			} else {
				xs->error = XS_NOERROR;
			}
			break;

		case QLE_IOCB_STATUS_DMA_ERROR:
			DPRINTF(QLE_D_IO, "%s: dma error\n", DEVNAME(sc));
			/* set resid apparently? */
			break;

		case QLE_IOCB_STATUS_RESET:
			DPRINTF(QLE_D_IO, "%s: reset destroyed command\n",
			    DEVNAME(sc));
			sc->sc_marker_required = 1;
			xs->error = XS_RESET;
			break;

		case QLE_IOCB_STATUS_ABORTED:
			DPRINTF(QLE_D_IO, "%s: aborted\n", DEVNAME(sc));
			sc->sc_marker_required = 1;
			xs->error = XS_DRIVER_STUFFUP;
			break;

		case QLE_IOCB_STATUS_TIMEOUT:
			DPRINTF(QLE_D_IO, "%s: command timed out\n",
			    DEVNAME(sc));
			xs->error = XS_TIMEOUT;
			break;

		case QLE_IOCB_STATUS_QUEUE_FULL:
			DPRINTF(QLE_D_IO, "%s: queue full\n", DEVNAME(sc));
			xs->error = XS_BUSY;
			break;

		case QLE_IOCB_STATUS_PORT_UNAVAIL:
		case QLE_IOCB_STATUS_PORT_LOGGED_OUT:
		case QLE_IOCB_STATUS_PORT_CHANGED:
			DPRINTF(QLE_D_IO, "%s: dev gone\n", DEVNAME(sc));
			xs->error = XS_SELTIMEOUT;
			/* mark port as needing relogin? */
			break;

		default:
			DPRINTF(QLE_D_IO, "%s: unexpected completion status "
			    "%x\n", DEVNAME(sc), status->completion);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case QLE_IOCB_STATUS_CONT:
		DPRINTF(QLE_D_IO, "%s: ignoring status continuation iocb\n",
		    DEVNAME(sc));
		break;

	case QLE_IOCB_PLOGX:
	case QLE_IOCB_CT_PASSTHROUGH:
		if (sc->sc_fabric_pending) {
			qle_dump_iocb(sc, entry);
			memcpy(sc->sc_fabric_response, entry,
			    QLE_QUEUE_ENTRY_SIZE);
			sc->sc_fabric_pending = 2;
			wakeup(sc->sc_scratch);
		} else {
			DPRINTF(QLE_D_IO, "%s: unexpected fabric response %x\n",
			    DEVNAME(sc), entry[0]);
		}
		break;

	case QLE_IOCB_MARKER:
		break;

	case QLE_IOCB_CMD_TYPE_6:
	case QLE_IOCB_CMD_TYPE_7:
		DPRINTF(QLE_D_IO, "%s: request bounced back\n", DEVNAME(sc));
		req = (struct qle_iocb_req6 *)entry;
		handle = req->req_handle;
		if (handle > sc->sc_maxcmds) {
			panic("bad bounced command handle: %d (> %d)",
			    handle, sc->sc_maxcmds);
		}

		ccb = &sc->sc_ccbs[handle];
		xs = ccb->ccb_xs;
		xs->error = XS_DRIVER_STUFFUP;
		break;
	default:
		DPRINTF(QLE_D_IO, "%s: unexpected response entry type %x\n",
		    DEVNAME(sc), entry[0]);
		break;
	}

	return (ccb);
}

void
qle_handle_intr(struct qle_softc *sc, u_int16_t isr, u_int16_t info)
{
	int i;
	u_int32_t rspin;
	struct qle_ccb *ccb;

	switch (isr) {
	case QLE_INT_TYPE_ASYNC:
		qle_async(sc, info);
		break;

	case QLE_INT_TYPE_IO:
		rspin = qle_read(sc, QLE_RESP_IN);
		if (rspin == sc->sc_last_resp_id)
			break;

		do {
			ccb = qle_handle_resp(sc, sc->sc_last_resp_id);
			if (ccb)
				scsi_done(ccb->ccb_xs);

			sc->sc_last_resp_id++;
			sc->sc_last_resp_id %= sc->sc_maxcmds;
		} while (sc->sc_last_resp_id != rspin);

		qle_write(sc, QLE_RESP_OUT, sc->sc_last_resp_id);
		break;

	case QLE_INT_TYPE_MBOX:
		mtx_enter(&sc->sc_mbox_mtx);
		if (sc->sc_mbox_pending) {
			for (i = 0; i < nitems(sc->sc_mbox); i++) {
				sc->sc_mbox[i] = qle_read_mbox(sc, i);
			}
			sc->sc_mbox_pending = 2;
			wakeup(sc->sc_mbox);
			mtx_leave(&sc->sc_mbox_mtx);
		} else {
			mtx_leave(&sc->sc_mbox_mtx);
			DPRINTF(QLE_D_INTR, "%s: unexpected mbox interrupt: "
			    "%x\n", DEVNAME(sc), info);
		}
		break;

	default:
		break;
	}

	qle_clear_isr(sc, isr);
}

int
qle_intr(void *xsc)
{
	struct qle_softc *sc = xsc;
	u_int16_t isr;
	u_int16_t info;

	if (qle_read_isr(sc, &isr, &info) == 0)
		return (0);

	qle_handle_intr(sc, isr, info);
	return (1);
}

int
qle_scsi_probe(struct scsi_link *link)
{
	struct qle_softc *sc = link->bus->sb_adapter_softc;
	int rv = 0;

	mtx_enter(&sc->sc_port_mtx);
	if (sc->sc_targets[link->target] == NULL)
		rv = ENXIO;
	else if (!ISSET(sc->sc_targets[link->target]->flags,
	    QLE_PORT_FLAG_IS_TARGET))
		rv = ENXIO;
	mtx_leave(&sc->sc_port_mtx);

	return (rv);
}

void
qle_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct qle_softc *sc = link->bus->sb_adapter_softc;
	struct qle_ccb *ccb;
	void *iocb;
	struct qle_ccb_list list;
	u_int16_t req;
	u_int32_t portid;
	int offset, error, done;
	bus_dmamap_t dmap;

	if (xs->cmdlen > 16) {
		DPRINTF(QLE_D_IO, "%s: cmd too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	portid = 0xffffffff;
	mtx_enter(&sc->sc_port_mtx);
	if (sc->sc_targets[xs->sc_link->target] != NULL) {
		portid = sc->sc_targets[xs->sc_link->target]->portid;
	}
	mtx_leave(&sc->sc_port_mtx);
	if (portid == 0xffffffff) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	if (sc->sc_marker_required) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxcmds)
			sc->sc_next_req_id = 0;

		DPRINTF(QLE_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLE_QUEUE_ENTRY_SIZE);
		iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests),
		    offset, QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qle_put_marker(sc, iocb);
		qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxcmds)
		sc->sc_next_req_id = 0;

	offset = (req * QLE_QUEUE_ENTRY_SIZE);
	iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	qle_put_cmd(sc, iocb, xs, ccb, portid);

	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREREAD);
	qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;
		u_int32_t rspin;
		delay(100);

		if (qle_read_isr(sc, &isr, &info) == 0) {
			continue;
		}

		if (isr != QLE_INT_TYPE_IO) {
			qle_handle_intr(sc, isr, info);
			continue;
		}

		rspin = qle_read(sc, QLE_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qle_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxcmds)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}
		qle_write(sc, QLE_RESP_OUT, sc->sc_last_resp_id);
		qle_clear_isr(sc, isr);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}

u_int32_t
qle_read(struct qle_softc *sc, int offset)
{
	u_int32_t v;
	v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (v);
}

void
qle_write(struct qle_softc *sc, int offset, u_int32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

u_int16_t
qle_read_mbox(struct qle_softc *sc, int mbox)
{
	u_int16_t v;
	bus_size_t offset = mbox * 2;
	v = bus_space_read_2(sc->sc_iot, sc->sc_mbox_ioh, offset);
	bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (v);
}

void
qle_write_mbox(struct qle_softc *sc, int mbox, u_int16_t value)
{
	bus_size_t offset = (mbox * 2);
	bus_space_write_2(sc->sc_iot, sc->sc_mbox_ioh, offset, value);
	bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

void
qle_host_cmd(struct qle_softc *sc, u_int32_t cmd)
{
	qle_write(sc, QLE_HOST_CMD_CTRL, cmd << QLE_HOST_CMD_SHIFT);
}

#define MBOX_COMMAND_TIMEOUT	400000

int
qle_mbox(struct qle_softc *sc, int maskin)
{
	int i;
	int result = 0;
	int rv;

	for (i = 0; i < nitems(sc->sc_mbox); i++) {
		if (maskin & (1 << i)) {
			qle_write_mbox(sc, i, sc->sc_mbox[i]);
		}
	}
	qle_host_cmd(sc, QLE_HOST_CMD_SET_HOST_INT);

	if (sc->sc_scsibus != NULL) {
		mtx_enter(&sc->sc_mbox_mtx);
		sc->sc_mbox_pending = 1;
		while (sc->sc_mbox_pending == 1) {
			msleep_nsec(sc->sc_mbox, &sc->sc_mbox_mtx, PRIBIO,
			    "qlembox", INFSLP);
		}
		result = sc->sc_mbox[0];
		sc->sc_mbox_pending = 0;
		mtx_leave(&sc->sc_mbox_mtx);
		return (result == QLE_MBOX_COMPLETE ? 0 : result);
	}

	for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) {
		u_int16_t isr, info;

		delay(100);

		if (qle_read_isr(sc, &isr, &info) == 0)
			continue;

		switch (isr) {
		case QLE_INT_TYPE_MBOX:
			result = info;
			break;

		default:
			qle_handle_intr(sc, isr, info);
			break;
		}
	}

	if (result == 0) {
		/* timed out; do something? */
		DPRINTF(QLE_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
		rv = 1;
	} else {
		for (i = 0; i < nitems(sc->sc_mbox); i++) {
			sc->sc_mbox[i] = qle_read_mbox(sc, i);
		}
		rv = (result == QLE_MBOX_COMPLETE ? 0 : result);
	}

	qle_clear_isr(sc, QLE_INT_TYPE_MBOX);
	return (rv);
}

void
qle_mbox_putaddr(u_int16_t *mbox, struct qle_dmamem *mem)
{
	mbox[2] = (QLE_DMA_DVA(mem) >> 16) & 0xffff;
	mbox[3] = (QLE_DMA_DVA(mem) >> 0) & 0xffff;
	mbox[6] = (QLE_DMA_DVA(mem) >> 48) & 0xffff;
	mbox[7] = (QLE_DMA_DVA(mem) >> 32) & 0xffff;
}

void
qle_set_ints(struct qle_softc *sc, int enabled)
{
	u_int32_t v = enabled ? QLE_INT_CTRL_ENABLE : 0;
	qle_write(sc, QLE_INT_CTRL, v);
}

int
qle_read_isr(struct qle_softc *sc, u_int16_t *isr, u_int16_t *info)
{
	u_int32_t v;

	switch (sc->sc_isp_gen) {
	case QLE_GEN_ISP24XX:
	case QLE_GEN_ISP25XX:
		if ((qle_read(sc, QLE_INT_STATUS) & QLE_RISC_INT_REQ) == 0)
			return (0);

		v = qle_read(sc, QLE_RISC_STATUS);

		switch (v & QLE_INT_STATUS_MASK) {
		case QLE_24XX_INT_ROM_MBOX:
		case QLE_24XX_INT_ROM_MBOX_FAIL:
		case QLE_24XX_INT_MBOX:
		case QLE_24XX_INT_MBOX_FAIL:
			*isr = QLE_INT_TYPE_MBOX;
			break;

		case QLE_24XX_INT_ASYNC:
			*isr = QLE_INT_TYPE_ASYNC;
			break;

		case QLE_24XX_INT_RSPQ:
			*isr = QLE_INT_TYPE_IO;
			break;

		default:
			*isr = QLE_INT_TYPE_OTHER;
			break;
		}

		*info = (v >> QLE_INT_INFO_SHIFT);
		return (1);

	default:
		return (0);
	}
}

void
qle_clear_isr(struct qle_softc *sc, u_int16_t isr)
{
	qle_host_cmd(sc, QLE_HOST_CMD_CLR_RISC_INT);
}

void
qle_update_done(struct qle_softc *sc, int task)
{
	atomic_clearbits_int(&sc->sc_update_tasks, task);
}

void
qle_update_cancel(struct qle_softc *sc)
{
	atomic_swap_uint(&sc->sc_update_tasks, 0);
	timeout_del(&sc->sc_update_timeout);
	task_del(sc->sc_update_taskq, &sc->sc_update_task);
}

void
qle_update_start(struct qle_softc *sc, int task)
{
	atomic_setbits_int(&sc->sc_update_tasks, task);
	if (!timeout_pending(&sc->sc_update_timeout))
		task_add(sc->sc_update_taskq, &sc->sc_update_task);
}

void
qle_update_defer(struct qle_softc *sc, int task)
{
	atomic_setbits_int(&sc->sc_update_tasks, task);
	timeout_del(&sc->sc_update_timeout);
	task_del(sc->sc_update_taskq, &sc->sc_update_task);
	timeout_add_msec(&sc->sc_update_timeout, QLE_LOOP_SETTLE);
}

void
qle_clear_port_lists(struct qle_softc *sc)
{
	struct qle_fc_port *port;
	while (!TAILQ_EMPTY(&sc->sc_ports_found)) {
		port = TAILQ_FIRST(&sc->sc_ports_found);
		TAILQ_REMOVE(&sc->sc_ports_found, port, update);
		free(port, M_DEVBUF, sizeof *port);
	}

	while (!TAILQ_EMPTY(&sc->sc_ports_new)) {
		port = TAILQ_FIRST(&sc->sc_ports_new);
		TAILQ_REMOVE(&sc->sc_ports_new, port, update);
		free(port, M_DEVBUF, sizeof *port);
	}

	while (!TAILQ_EMPTY(&sc->sc_ports_gone)) {
		port = TAILQ_FIRST(&sc->sc_ports_gone);
		TAILQ_REMOVE(&sc->sc_ports_gone, port, update);
	}
}

int
qle_softreset(struct qle_softc *sc)
{
	int i;
	qle_set_ints(sc, 0);

	/* set led control bits, stop dma */
	qle_write(sc, QLE_GPIO_DATA, 0);
	qle_write(sc, QLE_CTRL_STATUS, QLE_CTRL_DMA_SHUTDOWN);
	while (qle_read(sc, QLE_CTRL_STATUS) & QLE_CTRL_DMA_ACTIVE) {
		DPRINTF(QLE_D_IO, "%s: dma still active\n", DEVNAME(sc));
		delay(100);
	}

	/* reset */
	qle_write(sc, QLE_CTRL_STATUS, QLE_CTRL_RESET | QLE_CTRL_DMA_SHUTDOWN);
	delay(100);
	/* clear data and control dma engines? */

	/* wait for soft reset to clear */
	for (i = 0; i < 1000; i++) {
		if (qle_read_mbox(sc, 0) == 0x0000)
			break;

		delay(100);
	}

	if (i == 1000) {
		printf("%s: reset mbox didn't clear\n", DEVNAME(sc));
		qle_set_ints(sc, 0);
		return (ENXIO);
	}

	for (i = 0; i < 500000; i++) {
		if ((qle_read(sc, QLE_CTRL_STATUS) & QLE_CTRL_RESET) == 0)
			break;
		delay(5);
	}
	if (i == 500000) {
		printf("%s: reset status didn't clear\n", DEVNAME(sc));
		return (ENXIO);
	}

	/* reset risc processor */
	qle_host_cmd(sc, QLE_HOST_CMD_RESET);
	qle_host_cmd(sc, QLE_HOST_CMD_RELEASE);
	qle_host_cmd(sc, QLE_HOST_CMD_CLEAR_RESET);

	/* wait for reset to clear */
	for (i = 0; i < 1000; i++) {
		if (qle_read_mbox(sc, 0) == 0x0000)
			break;
		delay(100);
	}
	if (i == 1000) {
		printf("%s: risc not ready after reset\n", DEVNAME(sc));
		return (ENXIO);
	}

	/* reset queue pointers */
	qle_write(sc, QLE_REQ_IN, 0);
	qle_write(sc, QLE_REQ_OUT, 0);
	qle_write(sc, QLE_RESP_IN, 0);
	qle_write(sc, QLE_RESP_OUT, 0);

	qle_set_ints(sc, 1);

	/* do a basic mailbox operation to check we're alive */
	sc->sc_mbox[0] = QLE_MBOX_NOP;
	if (qle_mbox(sc, 0x0001)) {
		printf("ISP not responding after reset\n");
		return (ENXIO);
	}

	return (0);
}

void
qle_update_topology(struct qle_softc *sc)
{
	sc->sc_mbox[0] = QLE_MBOX_GET_ID;
	if (qle_mbox(sc, 0x0001)) {
		DPRINTF(QLE_D_PORT, "%s: unable to get loop id\n", DEVNAME(sc));
		sc->sc_topology = QLE_TOPO_N_PORT_NO_TARGET;
	} else {
		sc->sc_topology = sc->sc_mbox[6];
		sc->sc_loop_id = sc->sc_mbox[1];

		switch (sc->sc_topology) {
		case QLE_TOPO_NL_PORT:
		case QLE_TOPO_N_PORT:
			DPRINTF(QLE_D_PORT, "%s: loop id %d\n", DEVNAME(sc),
			    sc->sc_loop_id);
			break;

		case QLE_TOPO_FL_PORT:
		case QLE_TOPO_F_PORT:
			sc->sc_port_id = sc->sc_mbox[2] |
			    (sc->sc_mbox[3] << 16);
			DPRINTF(QLE_D_PORT, "%s: fabric port id %06x\n",
			    DEVNAME(sc), sc->sc_port_id);
			break;

		case QLE_TOPO_N_PORT_NO_TARGET:
		default:
			DPRINTF(QLE_D_PORT, "%s: not useful\n", DEVNAME(sc));
			break;
		}

		switch (sc->sc_topology) {
		case QLE_TOPO_NL_PORT:
		case QLE_TOPO_FL_PORT:
			sc->sc_loop_max_id = 126;
			break;

		case QLE_TOPO_N_PORT:
			sc->sc_loop_max_id = 2;
			break;

		default:
			sc->sc_loop_max_id = 0;
			break;
		}
	}
}

int
qle_update_fabric(struct qle_softc *sc)
{
	/*struct qle_sns_rft_id *rft;*/

	switch (sc->sc_topology) {
	case QLE_TOPO_F_PORT:
	case QLE_TOPO_FL_PORT:
		break;

	default:
		return (0);
	}

	/* get the name server's port db entry */
	sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB;
	sc->sc_mbox[1] = QLE_F_PORT_HANDLE;
	qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
	    sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD);
	if (qle_mbox(sc, 0x00cf)) {
		DPRINTF(QLE_D_PORT, "%s: get port db for SNS failed: %x\n",
		    DEVNAME(sc), sc->sc_mbox[0]);
		sc->sc_sns_port_name = 0;
	} else {
		struct qle_get_port_db *pdb;
		bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
		    sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD);
		pdb = QLE_DMA_KVA(sc->sc_scratch);
		DPRINTF(QLE_D_PORT, "%s: SNS port name %llx\n", DEVNAME(sc),
		    betoh64(pdb->port_name));
		sc->sc_sns_port_name = betoh64(pdb->port_name);
	}

	/*
	 * register fc4 types with the fabric
	 * some switches do this automatically, but apparently
	 * some don't.
	 */
	/*
	rft = QLE_DMA_KVA(sc->sc_scratch);
	memset(rft, 0, sizeof(*rft) + sizeof(struct qle_sns_req_hdr));
	htolem16(&rft->subcmd, QLE_SNS_RFT_ID);
	htolem16(&rft->max_word, sizeof(struct qle_sns_req_hdr) / 4);
	htolem32(&rft->port_id, sc->sc_port_id);
	rft->fc4_types[0] = (1 << QLE_FC4_SCSI);
	if (qle_sns_req(sc, sc->sc_scratch, sizeof(*rft))) {
		printf("%s: RFT_ID failed\n", DEVNAME(sc));
		/ * we might be able to continue after this fails * /
	}
	*/

	return (1);
}

int
qle_ct_pass_through(struct qle_softc *sc, u_int32_t port_handle,
    struct qle_dmamem *mem, size_t req_size, size_t resp_size)
{
	struct qle_iocb_ct_passthrough *iocb;
	u_int16_t req;
	u_int64_t offset;
	int rv;

	mtx_enter(&sc->sc_queue_mtx);

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxcmds)
		sc->sc_next_req_id = 0;

	offset = (req * QLE_QUEUE_ENTRY_SIZE);
	iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE);
	iocb->entry_type = QLE_IOCB_CT_PASSTHROUGH;
	iocb->entry_count = 1;

	iocb->req_handle = 9;
	htolem16(&iocb->req_nport_handle, port_handle);
	htolem16(&iocb->req_dsd_count, 1);
	htolem16(&iocb->req_resp_dsd_count, 1);
	htolem32(&iocb->req_cmd_byte_count, req_size);
	htolem32(&iocb->req_resp_byte_count, resp_size);
	qle_sge(&iocb->req_cmd_seg, QLE_DMA_DVA(mem), req_size);
	qle_sge(&iocb->req_resp_seg, QLE_DMA_DVA(mem) + req_size, resp_size);

	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, QLE_DMA_LEN(mem),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
	sc->sc_fabric_pending = 1;
	mtx_leave(&sc->sc_queue_mtx);

	/* maybe put a proper timeout on this */
	rv = 0;
	while (sc->sc_fabric_pending == 1) {
		if (sc->sc_scsibus == NULL) {
			u_int16_t isr, info;

			delay(100);
			if (qle_read_isr(sc, &isr, &info) != 0)
				qle_handle_intr(sc, isr, info);
		} else {
			tsleep_nsec(sc->sc_scratch, PRIBIO, "qle_fabric",
			    SEC_TO_NSEC(1));
		}
	}
	if (rv == 0)
		bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
		    QLE_DMA_LEN(mem), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

	sc->sc_fabric_pending = 0;

	return (rv);
}

struct qle_fc_port *
qle_next_fabric_port(struct qle_softc *sc, u_int32_t *firstport,
    u_int32_t *lastport)
{
	struct qle_ct_ga_nxt_req *ga;
	struct qle_ct_ga_nxt_resp *gar;
	struct qle_fc_port *fport;
	int result;

	/* get the next port from the fabric nameserver */
	ga = QLE_DMA_KVA(sc->sc_scratch);
	memset(ga, 0, sizeof(*ga) + sizeof(*gar));
	ga->header.ct_revision = 0x01;
	ga->header.ct_gs_type = 0xfc;
	ga->header.ct_gs_subtype = 0x02;
	ga->subcmd = htobe16(QLE_SNS_GA_NXT);
	ga->max_word = htobe16((sizeof(*gar) - 16) / 4);
	ga->port_id = htobe32(*lastport);
	result = qle_ct_pass_through(sc, QLE_SNS_HANDLE, sc->sc_scratch,
	    sizeof(*ga), sizeof(*gar));
	if (result) {
		DPRINTF(QLE_D_PORT, "%s: GA_NXT %06x failed: %x\n", DEVNAME(sc),
		    *lastport, result);
		*lastport = 0xffffffff;
		return (NULL);
	}

	gar = (struct qle_ct_ga_nxt_resp *)(ga + 1);
	/* if the response is all zeroes, try again */
	if (gar->port_type_id == 0 && gar->port_name == 0 &&
	    gar->node_name == 0) {
		DPRINTF(QLE_D_PORT, "%s: GA_NXT returned junk\n", DEVNAME(sc));
		return (NULL);
	}

	/* are we back at the start? */
	*lastport = betoh32(gar->port_type_id) & 0xffffff;
	if (*lastport == *firstport) {
		*lastport = 0xffffffff;
		return (NULL);
	}
	if (*firstport == 0xffffffff)
		*firstport = *lastport;

	DPRINTF(QLE_D_PORT, "%s: GA_NXT: port id: %06x, wwpn %llx, wwnn %llx\n",
	    DEVNAME(sc), *lastport, betoh64(gar->port_name),
	    betoh64(gar->node_name));

	/* don't try to log in to ourselves */
	if (*lastport == sc->sc_port_id) {
		return (NULL);
	}

	fport = malloc(sizeof(*fport), M_DEVBUF, M_ZERO | M_NOWAIT);
	if (fport == NULL) {
		printf("%s: failed to allocate a port struct\n",
		    DEVNAME(sc));
		*lastport = 0xffffffff;
		return (NULL);
	}
	fport->port_name = betoh64(gar->port_name);
	fport->node_name = betoh64(gar->node_name);
	fport->location = QLE_LOCATION_PORT_ID(*lastport);
	fport->portid = *lastport;
	return (fport);
}

int
qle_fabric_plogx(struct qle_softc *sc, struct qle_fc_port *port, int flags,
    u_int32_t *info)
{
	struct qle_iocb_plogx *iocb;
	u_int16_t req;
	u_int64_t offset;
	int rv;

	mtx_enter(&sc->sc_queue_mtx);

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxcmds)
		sc->sc_next_req_id = 0;

	offset = (req * QLE_QUEUE_ENTRY_SIZE);
	iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE);
	iocb->entry_type = QLE_IOCB_PLOGX;
	iocb->entry_count = 1;

	iocb->req_handle = 7;
	htolem16(&iocb->req_nport_handle, port->loopid);
	htolem16(&iocb->req_port_id_lo, port->portid);
	iocb->req_port_id_hi = port->portid >> 16;
	htolem16(&iocb->req_flags, flags);

	DPRINTF(QLE_D_PORT, "%s: plogx loop id %d port %06x, flags %x\n",
	    DEVNAME(sc), port->loopid, port->portid, flags);
	qle_dump_iocb(sc, iocb);

	qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
	sc->sc_fabric_pending = 1;
	mtx_leave(&sc->sc_queue_mtx);

	/* maybe put a proper timeout on this */
	rv = 0;
	while (sc->sc_fabric_pending == 1) {
		if (sc->sc_scsibus == NULL) {
			u_int16_t isr, info;

			delay(100);
			if (qle_read_isr(sc, &isr, &info) != 0)
				qle_handle_intr(sc, isr, info);
		} else {
			tsleep_nsec(sc->sc_scratch, PRIBIO, "qle_fabric",
			    SEC_TO_NSEC(1));
		}
	}
	sc->sc_fabric_pending = 0;

	iocb = (struct qle_iocb_plogx *)&sc->sc_fabric_response;
	rv = lemtoh16(&iocb->req_status);
	if (rv == QLE_PLOGX_ERROR) {
		rv = lemtoh32(&iocb->req_ioparms[0]);
		*info = lemtoh32(&iocb->req_ioparms[1]);
	}

	return (rv);
}

int
qle_fabric_plogi(struct qle_softc *sc, struct qle_fc_port *port)
{
	u_int32_t info;
	int err, loopid;

	loopid = 0;
retry:
retry: 2026 if (port->loopid == 0) { 2027 2028 mtx_enter(&sc->sc_port_mtx); 2029 loopid = qle_get_loop_id(sc, loopid); 2030 mtx_leave(&sc->sc_port_mtx); 2031 if (loopid == -1) { 2032 printf("%s: ran out of loop ids\n", DEVNAME(sc)); 2033 return (1); 2034 } 2035 2036 port->loopid = loopid; 2037 } 2038 2039 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGIN, &info); 2040 switch (err) { 2041 case 0: 2042 DPRINTF(QLE_D_PORT, "%s: logged in to %06x as %d\n", 2043 DEVNAME(sc), port->portid, port->loopid); 2044 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2045 return (0); 2046 2047 case QLE_PLOGX_ERROR_PORT_ID_USED: 2048 DPRINTF(QLE_D_PORT, "%s: already logged in to %06x as %d\n", 2049 DEVNAME(sc), port->portid, info); 2050 port->loopid = info; 2051 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2052 return (0); 2053 2054 case QLE_PLOGX_ERROR_HANDLE_USED: 2055 if (qle_add_logged_in_port(sc, loopid, info)) { 2056 return (1); 2057 } 2058 port->loopid = 0; 2059 loopid++; 2060 goto retry; 2061 2062 default: 2063 DPRINTF(QLE_D_PORT, "%s: error %x logging in to port %06x\n", 2064 DEVNAME(sc), err, port->portid); 2065 port->loopid = 0; 2066 return (1); 2067 } 2068 } 2069 2070 void 2071 qle_fabric_plogo(struct qle_softc *sc, struct qle_fc_port *port) 2072 { 2073 int err; 2074 u_int32_t info; 2075 2076 /* 2077 * we only log out if we can't see the port any more, so we always 2078 * want to do an explicit logout and free the n-port handle. 2079 */ 2080 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGOUT | 2081 QLE_PLOGX_LOGOUT_EXPLICIT | QLE_PLOGX_LOGOUT_FREE_HANDLE, &info); 2082 if (err == 0) { 2083 DPRINTF(QLE_D_PORT, "%s: logged out of port %06x\n", 2084 DEVNAME(sc), port->portid); 2085 } else { 2086 DPRINTF(QLE_D_PORT, "%s: failed to log out of port %06x: " 2087 "%x %x\n", DEVNAME(sc), port->portid, err, info); 2088 } 2089 } 2090 2091 void 2092 qle_deferred_update(void *xsc) 2093 { 2094 struct qle_softc *sc = xsc; 2095 task_add(sc->sc_update_taskq, &sc->sc_update_task); 2096 } 2097 2098 void 2099 qle_do_update(void *xsc) 2100 { 2101 struct qle_softc *sc = xsc; 2102 int firstport, lastport; 2103 struct qle_fc_port *port, *fport; 2104 2105 DPRINTF(QLE_D_PORT, "%s: updating\n", DEVNAME(sc)); 2106 while (sc->sc_update_tasks != 0) { 2107 if (sc->sc_update_tasks & QLE_UPDATE_TASK_CLEAR_ALL) { 2108 TAILQ_HEAD(, qle_fc_port) detach; 2109 DPRINTF(QLE_D_PORT, "%s: detaching everything\n", 2110 DEVNAME(sc)); 2111 2112 mtx_enter(&sc->sc_port_mtx); 2113 qle_clear_port_lists(sc); 2114 TAILQ_INIT(&detach); 2115 TAILQ_CONCAT(&detach, &sc->sc_ports, ports); 2116 mtx_leave(&sc->sc_port_mtx); 2117 2118 while (!TAILQ_EMPTY(&detach)) { 2119 port = TAILQ_FIRST(&detach); 2120 TAILQ_REMOVE(&detach, port, ports); 2121 if (port->flags & QLE_PORT_FLAG_IS_TARGET) { 2122 scsi_detach_target(sc->sc_scsibus, 2123 port->loopid, DETACH_FORCE | 2124 DETACH_QUIET); 2125 sc->sc_targets[port->loopid] = NULL; 2126 } 2127 if (port->location & QLE_LOCATION_FABRIC) 2128 qle_fabric_plogo(sc, port); 2129 2130 free(port, M_DEVBUF, sizeof *port); 2131 } 2132 2133 qle_update_done(sc, QLE_UPDATE_TASK_CLEAR_ALL); 2134 continue; 2135 } 2136 2137 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SOFTRESET) { 2138 DPRINTF(QLE_D_IO, "%s: attempting softreset\n", 2139 DEVNAME(sc)); 2140 if (qle_softreset(sc) != 0) { 2141 DPRINTF(QLE_D_IO, "%s: couldn't softreset\n", 2142 DEVNAME(sc)); 2143 } 2144 qle_update_done(sc, QLE_UPDATE_TASK_SOFTRESET); 2145 continue; 2146 } 2147 2148 if (sc->sc_update_tasks & QLE_UPDATE_TASK_UPDATE_TOPO) { 2149 DPRINTF(QLE_D_PORT, "%s: updating 
topology\n", 2150 DEVNAME(sc)); 2151 qle_update_topology(sc); 2152 qle_update_done(sc, QLE_UPDATE_TASK_UPDATE_TOPO); 2153 continue; 2154 } 2155 2156 if (sc->sc_update_tasks & QLE_UPDATE_TASK_GET_PORT_LIST) { 2157 DPRINTF(QLE_D_PORT, "%s: getting port name list\n", 2158 DEVNAME(sc)); 2159 mtx_enter(&sc->sc_port_mtx); 2160 qle_clear_port_lists(sc); 2161 mtx_leave(&sc->sc_port_mtx); 2162 2163 qle_get_port_name_list(sc, QLE_LOCATION_LOOP | 2164 QLE_LOCATION_FABRIC); 2165 mtx_enter(&sc->sc_port_mtx); 2166 TAILQ_FOREACH(port, &sc->sc_ports, ports) { 2167 TAILQ_INSERT_TAIL(&sc->sc_ports_gone, port, 2168 update); 2169 if (port->location & QLE_LOCATION_FABRIC) { 2170 port->flags |= 2171 QLE_PORT_FLAG_NEEDS_LOGIN; 2172 } 2173 } 2174 2175 /* take care of ports that haven't changed first */ 2176 TAILQ_FOREACH(fport, &sc->sc_ports_found, update) { 2177 port = sc->sc_targets[fport->loopid]; 2178 if (port == NULL || fport->port_name != 2179 port->port_name) { 2180 /* new or changed port, handled later */ 2181 continue; 2182 } 2183 2184 /* 2185 * the port hasn't been logged out, which 2186 * means we don't need to log in again, and, 2187 * for loop ports, that the port still exists 2188 */ 2189 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN; 2190 if (port->location & QLE_LOCATION_LOOP) 2191 TAILQ_REMOVE(&sc->sc_ports_gone, 2192 port, update); 2193 2194 fport->location = 0; 2195 } 2196 mtx_leave(&sc->sc_port_mtx); 2197 qle_update_start(sc, QLE_UPDATE_TASK_PORT_LIST); 2198 qle_update_done(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2199 continue; 2200 } 2201 2202 if (sc->sc_update_tasks & QLE_UPDATE_TASK_PORT_LIST) { 2203 mtx_enter(&sc->sc_port_mtx); 2204 fport = TAILQ_FIRST(&sc->sc_ports_found); 2205 if (fport != NULL) { 2206 TAILQ_REMOVE(&sc->sc_ports_found, fport, 2207 update); 2208 } 2209 mtx_leave(&sc->sc_port_mtx); 2210 2211 if (fport == NULL) { 2212 DPRINTF(QLE_D_PORT, "%s: done with ports\n", 2213 DEVNAME(sc)); 2214 qle_update_done(sc, 2215 QLE_UPDATE_TASK_PORT_LIST); 2216 qle_update_start(sc, 2217 QLE_UPDATE_TASK_SCAN_FABRIC); 2218 } else if (fport->location & QLE_LOCATION_LOOP) { 2219 DPRINTF(QLE_D_PORT, "%s: loop port %04x\n", 2220 DEVNAME(sc), fport->loopid); 2221 if (qle_add_loop_port(sc, fport) != 0) 2222 free(fport, M_DEVBUF, sizeof *port); 2223 } else if (fport->location & QLE_LOCATION_FABRIC) { 2224 qle_add_fabric_port(sc, fport); 2225 } else { 2226 /* already processed */ 2227 free(fport, M_DEVBUF, sizeof *port); 2228 } 2229 continue; 2230 } 2231 2232 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCAN_FABRIC) { 2233 DPRINTF(QLE_D_PORT, "%s: starting fabric scan\n", 2234 DEVNAME(sc)); 2235 lastport = sc->sc_port_id; 2236 firstport = 0xffffffff; 2237 if (qle_update_fabric(sc)) 2238 qle_update_start(sc, 2239 QLE_UPDATE_TASK_SCANNING_FABRIC); 2240 else 2241 qle_update_start(sc, 2242 QLE_UPDATE_TASK_ATTACH_TARGET | 2243 QLE_UPDATE_TASK_DETACH_TARGET); 2244 2245 qle_update_done(sc, QLE_UPDATE_TASK_SCAN_FABRIC); 2246 continue; 2247 } 2248 2249 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCANNING_FABRIC) { 2250 fport = qle_next_fabric_port(sc, &firstport, &lastport); 2251 if (fport != NULL) { 2252 int disp; 2253 2254 mtx_enter(&sc->sc_port_mtx); 2255 disp = qle_classify_port(sc, fport->location, 2256 fport->port_name, fport->node_name, &port); 2257 switch (disp) { 2258 case QLE_PORT_DISP_CHANGED: 2259 case QLE_PORT_DISP_MOVED: 2260 /* we'll log out the old port later */ 2261 case QLE_PORT_DISP_NEW: 2262 DPRINTF(QLE_D_PORT, "%s: new port " 2263 "%06x\n", DEVNAME(sc), 2264 fport->portid); 2265 
TAILQ_INSERT_TAIL(&sc->sc_ports_found, 2266 fport, update); 2267 break; 2268 case QLE_PORT_DISP_DUP: 2269 free(fport, M_DEVBUF, sizeof *port); 2270 break; 2271 case QLE_PORT_DISP_SAME: 2272 DPRINTF(QLE_D_PORT, "%s: existing port " 2273 " %06x\n", DEVNAME(sc), 2274 fport->portid); 2275 TAILQ_REMOVE(&sc->sc_ports_gone, port, 2276 update); 2277 free(fport, M_DEVBUF, sizeof *port); 2278 break; 2279 } 2280 mtx_leave(&sc->sc_port_mtx); 2281 } 2282 if (lastport == 0xffffffff) { 2283 DPRINTF(QLE_D_PORT, "%s: finished\n", 2284 DEVNAME(sc)); 2285 qle_update_done(sc, 2286 QLE_UPDATE_TASK_SCANNING_FABRIC); 2287 qle_update_start(sc, 2288 QLE_UPDATE_TASK_FABRIC_LOGIN); 2289 } 2290 continue; 2291 } 2292 2293 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_LOGIN) { 2294 mtx_enter(&sc->sc_port_mtx); 2295 port = TAILQ_FIRST(&sc->sc_ports_found); 2296 if (port != NULL) { 2297 TAILQ_REMOVE(&sc->sc_ports_found, port, update); 2298 } 2299 mtx_leave(&sc->sc_port_mtx); 2300 2301 if (port != NULL) { 2302 DPRINTF(QLE_D_PORT, "%s: found port %06x\n", 2303 DEVNAME(sc), port->portid); 2304 if (qle_fabric_plogi(sc, port) == 0) { 2305 qle_add_fabric_port(sc, port); 2306 } else { 2307 DPRINTF(QLE_D_PORT, "%s: plogi %06x " 2308 "failed\n", DEVNAME(sc), 2309 port->portid); 2310 free(port, M_DEVBUF, sizeof *port); 2311 } 2312 } else { 2313 DPRINTF(QLE_D_PORT, "%s: done with logins\n", 2314 DEVNAME(sc)); 2315 qle_update_done(sc, 2316 QLE_UPDATE_TASK_FABRIC_LOGIN); 2317 qle_update_start(sc, 2318 QLE_UPDATE_TASK_ATTACH_TARGET | 2319 QLE_UPDATE_TASK_DETACH_TARGET); 2320 } 2321 continue; 2322 } 2323 2324 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_RELOGIN) { 2325 TAILQ_FOREACH(port, &sc->sc_ports, ports) { 2326 if (port->flags & QLE_PORT_FLAG_NEEDS_LOGIN) { 2327 qle_fabric_plogi(sc, port); 2328 break; 2329 } 2330 } 2331 2332 if (port == NULL) 2333 qle_update_done(sc, 2334 QLE_UPDATE_TASK_FABRIC_RELOGIN); 2335 continue; 2336 } 2337 2338 if (sc->sc_update_tasks & QLE_UPDATE_TASK_DETACH_TARGET) { 2339 mtx_enter(&sc->sc_port_mtx); 2340 port = TAILQ_FIRST(&sc->sc_ports_gone); 2341 if (port != NULL) { 2342 sc->sc_targets[port->loopid] = NULL; 2343 TAILQ_REMOVE(&sc->sc_ports_gone, port, update); 2344 TAILQ_REMOVE(&sc->sc_ports, port, ports); 2345 } 2346 mtx_leave(&sc->sc_port_mtx); 2347 2348 if (port != NULL) { 2349 DPRINTF(QLE_D_PORT, "%s: detaching port %06x\n", 2350 DEVNAME(sc), port->portid); 2351 if (sc->sc_scsibus != NULL) 2352 scsi_detach_target(sc->sc_scsibus, 2353 port->loopid, DETACH_FORCE | 2354 DETACH_QUIET); 2355 2356 if (port->location & QLE_LOCATION_FABRIC) 2357 qle_fabric_plogo(sc, port); 2358 2359 free(port, M_DEVBUF, sizeof *port); 2360 } else { 2361 DPRINTF(QLE_D_PORT, "%s: nothing to detach\n", 2362 DEVNAME(sc)); 2363 qle_update_done(sc, 2364 QLE_UPDATE_TASK_DETACH_TARGET); 2365 } 2366 continue; 2367 } 2368 2369 if (sc->sc_update_tasks & QLE_UPDATE_TASK_ATTACH_TARGET) { 2370 mtx_enter(&sc->sc_port_mtx); 2371 port = TAILQ_FIRST(&sc->sc_ports_new); 2372 if (port != NULL) { 2373 TAILQ_REMOVE(&sc->sc_ports_new, port, update); 2374 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports); 2375 } 2376 mtx_leave(&sc->sc_port_mtx); 2377 2378 if (port != NULL) { 2379 if (sc->sc_scsibus != NULL) 2380 scsi_probe_target(sc->sc_scsibus, 2381 port->loopid); 2382 } else { 2383 qle_update_done(sc, 2384 QLE_UPDATE_TASK_ATTACH_TARGET); 2385 } 2386 continue; 2387 } 2388 2389 } 2390 2391 DPRINTF(QLE_D_PORT, "%s: done updating\n", DEVNAME(sc)); 2392 } 2393 2394 int 2395 qle_async(struct qle_softc *sc, u_int16_t info) 2396 { 2397 switch 
(info) { 2398 case QLE_ASYNC_SYSTEM_ERROR: 2399 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2400 break; 2401 2402 case QLE_ASYNC_REQ_XFER_ERROR: 2403 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2404 break; 2405 2406 case QLE_ASYNC_RSP_XFER_ERROR: 2407 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET); 2408 break; 2409 2410 case QLE_ASYNC_LIP_OCCURRED: 2411 DPRINTF(QLE_D_INTR, "%s: lip occurred\n", DEVNAME(sc)); 2412 break; 2413 2414 case QLE_ASYNC_LOOP_UP: 2415 DPRINTF(QLE_D_PORT, "%s: loop up\n", DEVNAME(sc)); 2416 sc->sc_loop_up = 1; 2417 sc->sc_marker_required = 1; 2418 qle_update_defer(sc, QLE_UPDATE_TASK_UPDATE_TOPO | 2419 QLE_UPDATE_TASK_GET_PORT_LIST); 2420 break; 2421 2422 case QLE_ASYNC_LOOP_DOWN: 2423 DPRINTF(QLE_D_PORT, "%s: loop down\n", DEVNAME(sc)); 2424 sc->sc_loop_up = 0; 2425 qle_update_cancel(sc); 2426 qle_update_start(sc, QLE_UPDATE_TASK_CLEAR_ALL); 2427 break; 2428 2429 case QLE_ASYNC_LIP_RESET: 2430 DPRINTF(QLE_D_PORT, "%s: lip reset\n", DEVNAME(sc)); 2431 sc->sc_marker_required = 1; 2432 qle_update_defer(sc, QLE_UPDATE_TASK_FABRIC_RELOGIN); 2433 break; 2434 2435 case QLE_ASYNC_PORT_DB_CHANGE: 2436 DPRINTF(QLE_D_PORT, "%s: port db changed %x\n", DEVNAME(sc), 2437 qle_read_mbox(sc, 1)); 2438 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2439 break; 2440 2441 case QLE_ASYNC_CHANGE_NOTIFY: 2442 DPRINTF(QLE_D_PORT, "%s: name server change (%02x:%02x)\n", 2443 DEVNAME(sc), qle_read_mbox(sc, 1), qle_read_mbox(sc, 2)); 2444 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST); 2445 break; 2446 2447 case QLE_ASYNC_LIP_F8: 2448 DPRINTF(QLE_D_INTR, "%s: lip f8\n", DEVNAME(sc)); 2449 break; 2450 2451 case QLE_ASYNC_LOOP_INIT_ERROR: 2452 DPRINTF(QLE_D_PORT, "%s: loop initialization error: %x\n", 2453 DEVNAME(sc), qle_read_mbox(sc, 1)); 2454 break; 2455 2456 case QLE_ASYNC_POINT_TO_POINT: 2457 DPRINTF(QLE_D_PORT, "%s: connected in point-to-point mode\n", 2458 DEVNAME(sc)); 2459 break; 2460 2461 case QLE_ASYNC_ZIO_RESP_UPDATE: 2462 /* shouldn't happen, we don't do zio */ 2463 break; 2464 2465 default: 2466 DPRINTF(QLE_D_INTR, "%s: unknown async %x\n", DEVNAME(sc), info); 2467 break; 2468 } 2469 return (1); 2470 } 2471 2472 void 2473 qle_dump_stuff(struct qle_softc *sc, void *buf, int n) 2474 { 2475 #ifdef QLE_DEBUG 2476 u_int8_t *d = buf; 2477 int l; 2478 2479 if ((qledebug & QLE_D_IOCB) == 0) 2480 return; 2481 2482 printf("%s: stuff\n", DEVNAME(sc)); 2483 for (l = 0; l < n; l++) { 2484 printf(" %2.2x", d[l]); 2485 if (l % 16 == 15) 2486 printf("\n"); 2487 } 2488 if (n % 16 != 0) 2489 printf("\n"); 2490 #endif 2491 } 2492 2493 void 2494 qle_dump_iocb(struct qle_softc *sc, void *buf) 2495 { 2496 #ifdef QLE_DEBUG 2497 u_int8_t *iocb = buf; 2498 int l; 2499 int b; 2500 2501 if ((qledebug & QLE_D_IOCB) == 0) 2502 return; 2503 2504 printf("%s: iocb:\n", DEVNAME(sc)); 2505 for (l = 0; l < 4; l++) { 2506 for (b = 0; b < 16; b++) { 2507 printf(" %2.2x", iocb[(l*16)+b]); 2508 } 2509 printf("\n"); 2510 } 2511 #endif 2512 } 2513 2514 void 2515 qle_dump_iocb_segs(struct qle_softc *sc, void *segs, int n) 2516 { 2517 #ifdef QLE_DEBUG 2518 u_int8_t *buf = segs; 2519 int s, b; 2520 2521 if ((qledebug & QLE_D_IOCB) == 0) 2522 return; 2523 2524 printf("%s: iocb segs:\n", DEVNAME(sc)); 2525 for (s = 0; s < n; s++) { 2526 for (b = 0; b < sizeof(struct qle_iocb_seg); b++) { 2527 printf(" %2.2x", buf[(s*(sizeof(struct qle_iocb_seg))) 2528 + b]); 2529 } 2530 printf("\n"); 2531 } 2532 #endif 2533 } 2534 2535 void 2536 qle_put_marker(struct qle_softc *sc, void *buf) 2537 { 2538 struct 
qle_iocb_marker *marker = buf; 2539 2540 marker->entry_type = QLE_IOCB_MARKER; 2541 marker->entry_count = 1; 2542 marker->seqno = 0; 2543 marker->flags = 0; 2544 2545 /* could be more specific here; isp(4) isn't */ 2546 marker->target = 0; 2547 marker->modifier = QLE_IOCB_MARKER_SYNC_ALL; 2548 } 2549 2550 void 2551 qle_sge(struct qle_iocb_seg *seg, u_int64_t addr, u_int32_t len) 2552 { 2553 htolem32(&seg->seg_addr_lo, addr); 2554 htolem32(&seg->seg_addr_hi, addr >> 32); 2555 htolem32(&seg->seg_len, len); 2556 } 2557 2558 void 2559 qle_put_cmd(struct qle_softc *sc, void *buf, struct scsi_xfer *xs, 2560 struct qle_ccb *ccb, u_int32_t target_port) 2561 { 2562 bus_dmamap_t dmap = ccb->ccb_dmamap; 2563 struct qle_iocb_req6 *req = buf; 2564 struct qle_fcp_cmnd *cmnd; 2565 u_int64_t fcp_cmnd_offset; 2566 u_int32_t fcp_dl; 2567 int seg; 2568 int target = xs->sc_link->target; 2569 int lun = xs->sc_link->lun; 2570 u_int16_t flags; 2571 2572 memset(req, 0, sizeof(*req)); 2573 req->entry_type = QLE_IOCB_CMD_TYPE_6; 2574 req->entry_count = 1; 2575 2576 req->req_handle = ccb->ccb_id; 2577 htolem16(&req->req_nport_handle, target); 2578 2579 /* 2580 * timeout is in seconds. make sure it's at least 1 if a timeout 2581 * was specified in xs 2582 */ 2583 if (xs->timeout != 0) 2584 htolem16(&req->req_timeout, MAX(1, xs->timeout/1000)); 2585 2586 if (xs->datalen > 0) { 2587 flags = (xs->flags & SCSI_DATA_IN) ? 2588 QLE_IOCB_CTRL_FLAG_READ : QLE_IOCB_CTRL_FLAG_WRITE; 2589 if (dmap->dm_nsegs == 1) { 2590 qle_sge(&req->req_data_seg, dmap->dm_segs[0].ds_addr, 2591 dmap->dm_segs[0].ds_len); 2592 } else { 2593 flags |= QLE_IOCB_CTRL_FLAG_EXT_SEG; 2594 for (seg = 0; seg < dmap->dm_nsegs; seg++) { 2595 qle_sge(&ccb->ccb_segs[seg], 2596 dmap->dm_segs[seg].ds_addr, 2597 dmap->dm_segs[seg].ds_len); 2598 } 2599 qle_sge(&ccb->ccb_segs[seg++], 0, 0); 2600 2601 bus_dmamap_sync(sc->sc_dmat, 2602 QLE_DMA_MAP(sc->sc_segments), ccb->ccb_seg_offset, 2603 seg * sizeof(*ccb->ccb_segs), 2604 BUS_DMASYNC_PREWRITE); 2605 2606 qle_sge(&req->req_data_seg, 2607 QLE_DMA_DVA(sc->sc_segments) + ccb->ccb_seg_offset, 2608 seg * sizeof(struct qle_iocb_seg)); 2609 } 2610 2611 htolem16(&req->req_data_seg_count, dmap->dm_nsegs); 2612 htolem32(&req->req_data_len, xs->datalen); 2613 htolem16(&req->req_ctrl_flags, flags); 2614 } 2615 2616 htobem16(&req->req_fcp_lun[0], lun); 2617 htobem16(&req->req_fcp_lun[1], lun >> 16); 2618 htolem32(&req->req_target_id, target_port & 0xffffff); 2619 2620 fcp_cmnd_offset = ccb->ccb_id * sizeof(*cmnd); 2621 /* set up FCP_CMND */ 2622 cmnd = (struct qle_fcp_cmnd *)QLE_DMA_KVA(sc->sc_fcp_cmnds) + 2623 ccb->ccb_id; 2624 2625 memset(cmnd, 0, sizeof(*cmnd)); 2626 htobem16(&cmnd->fcp_lun[0], lun); 2627 htobem16(&cmnd->fcp_lun[1], lun >> 16); 2628 /* cmnd->fcp_task_attr = TSK_SIMPLE; */ 2629 /* cmnd->fcp_task_mgmt = 0; */ 2630 memcpy(cmnd->fcp_cdb, &xs->cmd, xs->cmdlen); 2631 2632 /* FCP_DL goes after the cdb */ 2633 fcp_dl = htobe32(xs->datalen); 2634 if (xs->cmdlen > 16) { 2635 htolem16(&req->req_fcp_cmnd_len, 12 + xs->cmdlen + 4); 2636 cmnd->fcp_add_cdb_len = xs->cmdlen - 16; 2637 memcpy(cmnd->fcp_cdb + xs->cmdlen, &fcp_dl, sizeof(fcp_dl)); 2638 } else { 2639 htolem16(&req->req_fcp_cmnd_len, 12 + 16 + 4); 2640 cmnd->fcp_add_cdb_len = 0; 2641 memcpy(cmnd->fcp_cdb + 16, &fcp_dl, sizeof(fcp_dl)); 2642 } 2643 if (xs->datalen > 0) 2644 cmnd->fcp_add_cdb_len |= (xs->flags & SCSI_DATA_IN) ? 
2 : 1; 2645 2646 bus_dmamap_sync(sc->sc_dmat, 2647 QLE_DMA_MAP(sc->sc_fcp_cmnds), fcp_cmnd_offset, 2648 sizeof(*cmnd), BUS_DMASYNC_PREWRITE); 2649 2650 /* link req to cmnd */ 2651 fcp_cmnd_offset += QLE_DMA_DVA(sc->sc_fcp_cmnds); 2652 htolem32(&req->req_fcp_cmnd_addr_lo, fcp_cmnd_offset); 2653 htolem32(&req->req_fcp_cmnd_addr_hi, fcp_cmnd_offset >> 32); 2654 } 2655 2656 int 2657 qle_load_fwchunk(struct qle_softc *sc, struct qle_dmamem *mem, 2658 const u_int32_t *src) 2659 { 2660 u_int32_t dest, done, total; 2661 int i; 2662 2663 dest = src[2]; 2664 done = 0; 2665 total = src[3]; 2666 2667 while (done < total) { 2668 u_int32_t *copy; 2669 u_int32_t words; 2670 2671 /* limit transfer size otherwise it just doesn't work */ 2672 words = MIN(total - done, 1 << 10); 2673 copy = QLE_DMA_KVA(mem); 2674 for (i = 0; i < words; i++) { 2675 htolem32(©[i], src[done++]); 2676 } 2677 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4, 2678 BUS_DMASYNC_PREWRITE); 2679 2680 sc->sc_mbox[0] = QLE_MBOX_LOAD_RISC_RAM; 2681 sc->sc_mbox[1] = dest; 2682 sc->sc_mbox[4] = words >> 16; 2683 sc->sc_mbox[5] = words & 0xffff; 2684 sc->sc_mbox[8] = dest >> 16; 2685 qle_mbox_putaddr(sc->sc_mbox, mem); 2686 if (qle_mbox(sc, 0x01ff)) { 2687 printf("firmware load failed\n"); 2688 return (1); 2689 } 2690 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4, 2691 BUS_DMASYNC_POSTWRITE); 2692 2693 dest += words; 2694 } 2695 2696 return (qle_verify_firmware(sc, src[2])); 2697 } 2698 2699 int 2700 qle_load_firmware_chunks(struct qle_softc *sc, const u_int32_t *fw) 2701 { 2702 struct qle_dmamem *mem; 2703 int res = 0; 2704 2705 mem = qle_dmamem_alloc(sc, 65536); 2706 for (;;) { 2707 if (qle_load_fwchunk(sc, mem, fw)) { 2708 res = 1; 2709 break; 2710 } 2711 if (fw[1] == 0) 2712 break; 2713 fw += fw[3]; 2714 } 2715 2716 qle_dmamem_free(sc, mem); 2717 return (res); 2718 } 2719 2720 u_int32_t 2721 qle_read_ram_word(struct qle_softc *sc, u_int32_t addr) 2722 { 2723 sc->sc_mbox[0] = QLE_MBOX_READ_RISC_RAM; 2724 sc->sc_mbox[1] = addr & 0xffff; 2725 sc->sc_mbox[8] = addr >> 16; 2726 if (qle_mbox(sc, 0x0103)) { 2727 return (0); 2728 } 2729 return ((sc->sc_mbox[3] << 16) | sc->sc_mbox[2]); 2730 } 2731 2732 int 2733 qle_verify_firmware(struct qle_softc *sc, u_int32_t addr) 2734 { 2735 /* 2736 * QLE_MBOX_VERIFY_CSUM requires at least the firmware header 2737 * to be correct, otherwise it wanders all over ISP memory and 2738 * gets lost. Check that chunk address (addr+2) is right and 2739 * size (addr+3) is plausible first. 
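 *
 * qle_load_fwchunk() uses the same header words when loading: src[2]
 * is the RISC load address and src[3] the chunk length in 32-bit
 * words, so once a chunk has been loaded correctly, reading addr+2
 * back from RISC RAM should return addr itself.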
2740 */ 2741 if ((qle_read_ram_word(sc, addr+2) != addr) || 2742 (qle_read_ram_word(sc, addr+3) > 0xffff)) { 2743 return (1); 2744 } 2745 2746 sc->sc_mbox[0] = QLE_MBOX_VERIFY_CSUM; 2747 sc->sc_mbox[1] = addr >> 16; 2748 sc->sc_mbox[2] = addr; 2749 if (qle_mbox(sc, 0x0007)) { 2750 return (1); 2751 } 2752 return (0); 2753 } 2754 2755 int 2756 qle_read_nvram(struct qle_softc *sc) 2757 { 2758 u_int32_t data[sizeof(sc->sc_nvram) / 4]; 2759 u_int32_t csum, tmp, v; 2760 int i, base, l; 2761 2762 switch (sc->sc_isp_gen) { 2763 case QLE_GEN_ISP24XX: 2764 base = 0x7ffe0080; 2765 break; 2766 case QLE_GEN_ISP25XX: 2767 base = 0x7ff48080; 2768 break; 2769 } 2770 base += sc->sc_port * 0x100; 2771 2772 csum = 0; 2773 for (i = 0; i < nitems(data); i++) { 2774 data[i] = 0xffffffff; 2775 qle_write(sc, QLE_FLASH_NVRAM_ADDR, base + i); 2776 for (l = 0; l < 5000; l++) { 2777 delay(10); 2778 tmp = qle_read(sc, QLE_FLASH_NVRAM_ADDR); 2779 if (tmp & (1U << 31)) { 2780 v = qle_read(sc, QLE_FLASH_NVRAM_DATA); 2781 csum += v; 2782 data[i] = letoh32(v); 2783 break; 2784 } 2785 } 2786 } 2787 2788 bcopy(data, &sc->sc_nvram, sizeof(sc->sc_nvram)); 2789 /* id field should be 'ISP' */ 2790 if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' || 2791 sc->sc_nvram.id[2] != 'P' || csum != 0) { 2792 printf("%s: nvram corrupt\n", DEVNAME(sc)); 2793 return (1); 2794 } 2795 return (0); 2796 } 2797 2798 struct qle_dmamem * 2799 qle_dmamem_alloc(struct qle_softc *sc, size_t size) 2800 { 2801 struct qle_dmamem *m; 2802 int nsegs; 2803 2804 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO); 2805 if (m == NULL) 2806 return (NULL); 2807 2808 m->qdm_size = size; 2809 2810 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 2811 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0) 2812 goto qdmfree; 2813 2814 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1, 2815 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 2816 goto destroy; 2817 2818 if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva, 2819 BUS_DMA_NOWAIT) != 0) 2820 goto free; 2821 2822 if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL, 2823 BUS_DMA_NOWAIT) != 0) 2824 goto unmap; 2825 2826 return (m); 2827 2828 unmap: 2829 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size); 2830 free: 2831 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1); 2832 destroy: 2833 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map); 2834 qdmfree: 2835 free(m, M_DEVBUF, sizeof *m); 2836 2837 return (NULL); 2838 } 2839 2840 void 2841 qle_dmamem_free(struct qle_softc *sc, struct qle_dmamem *m) 2842 { 2843 bus_dmamap_unload(sc->sc_dmat, m->qdm_map); 2844 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size); 2845 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1); 2846 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map); 2847 free(m, M_DEVBUF, sizeof *m); 2848 } 2849 2850 int 2851 qle_alloc_ccbs(struct qle_softc *sc) 2852 { 2853 struct qle_ccb *ccb; 2854 u_int8_t *cmd; 2855 int i; 2856 2857 SIMPLEQ_INIT(&sc->sc_ccb_free); 2858 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 2859 mtx_init(&sc->sc_queue_mtx, IPL_BIO); 2860 mtx_init(&sc->sc_port_mtx, IPL_BIO); 2861 mtx_init(&sc->sc_mbox_mtx, IPL_BIO); 2862 2863 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct qle_ccb), 2864 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO); 2865 if (sc->sc_ccbs == NULL) { 2866 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 2867 return (1); 2868 } 2869 2870 sc->sc_requests = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2871 QLE_QUEUE_ENTRY_SIZE); 2872 if (sc->sc_requests == NULL) { 2873 printf("%s: 
unable to allocate ccb dmamem\n", DEVNAME(sc)); 2874 goto free_ccbs; 2875 } 2876 sc->sc_responses = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2877 QLE_QUEUE_ENTRY_SIZE); 2878 if (sc->sc_responses == NULL) { 2879 printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc)); 2880 goto free_req; 2881 } 2882 sc->sc_pri_requests = qle_dmamem_alloc(sc, 8 * QLE_QUEUE_ENTRY_SIZE); 2883 if (sc->sc_pri_requests == NULL) { 2884 printf("%s: unable to allocate pri ccb dmamem\n", DEVNAME(sc)); 2885 goto free_res; 2886 } 2887 sc->sc_segments = qle_dmamem_alloc(sc, sc->sc_maxcmds * QLE_MAX_SEGS * 2888 sizeof(struct qle_iocb_seg)); 2889 if (sc->sc_segments == NULL) { 2890 printf("%s: unable to allocate iocb segments\n", DEVNAME(sc)); 2891 goto free_pri; 2892 } 2893 2894 sc->sc_fcp_cmnds = qle_dmamem_alloc(sc, sc->sc_maxcmds * 2895 sizeof(struct qle_fcp_cmnd)); 2896 if (sc->sc_fcp_cmnds == NULL) { 2897 printf("%s: unable to allocate FCP_CMNDs\n", DEVNAME(sc)); 2898 goto free_seg; 2899 } 2900 2901 cmd = QLE_DMA_KVA(sc->sc_requests); 2902 memset(cmd, 0, QLE_QUEUE_ENTRY_SIZE * sc->sc_maxcmds); 2903 for (i = 0; i < sc->sc_maxcmds; i++) { 2904 ccb = &sc->sc_ccbs[i]; 2905 2906 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 2907 QLE_MAX_SEGS-1, MAXPHYS, 0, 2908 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 2909 &ccb->ccb_dmamap) != 0) { 2910 printf("%s: unable to create dma map\n", DEVNAME(sc)); 2911 goto free_maps; 2912 } 2913 2914 ccb->ccb_sc = sc; 2915 ccb->ccb_id = i; 2916 2917 ccb->ccb_seg_offset = i * QLE_MAX_SEGS * 2918 sizeof(struct qle_iocb_seg); 2919 ccb->ccb_segs = QLE_DMA_KVA(sc->sc_segments) + 2920 ccb->ccb_seg_offset; 2921 2922 qle_put_ccb(sc, ccb); 2923 } 2924 2925 scsi_iopool_init(&sc->sc_iopool, sc, qle_get_ccb, qle_put_ccb); 2926 return (0); 2927 2928 free_maps: 2929 while ((ccb = qle_get_ccb(sc)) != NULL) 2930 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2931 2932 qle_dmamem_free(sc, sc->sc_fcp_cmnds); 2933 free_seg: 2934 qle_dmamem_free(sc, sc->sc_segments); 2935 free_pri: 2936 qle_dmamem_free(sc, sc->sc_pri_requests); 2937 free_res: 2938 qle_dmamem_free(sc, sc->sc_responses); 2939 free_req: 2940 qle_dmamem_free(sc, sc->sc_requests); 2941 free_ccbs: 2942 free(sc->sc_ccbs, M_DEVBUF, 0); 2943 2944 return (1); 2945 } 2946 2947 void 2948 qle_free_ccbs(struct qle_softc *sc) 2949 { 2950 struct qle_ccb *ccb; 2951 2952 scsi_iopool_destroy(&sc->sc_iopool); 2953 while ((ccb = qle_get_ccb(sc)) != NULL) 2954 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 2955 qle_dmamem_free(sc, sc->sc_segments); 2956 qle_dmamem_free(sc, sc->sc_responses); 2957 qle_dmamem_free(sc, sc->sc_requests); 2958 free(sc->sc_ccbs, M_DEVBUF, 0); 2959 } 2960 2961 void * 2962 qle_get_ccb(void *xsc) 2963 { 2964 struct qle_softc *sc = xsc; 2965 struct qle_ccb *ccb; 2966 2967 mtx_enter(&sc->sc_ccb_mtx); 2968 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free); 2969 if (ccb != NULL) { 2970 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 2971 } 2972 mtx_leave(&sc->sc_ccb_mtx); 2973 return (ccb); 2974 } 2975 2976 void 2977 qle_put_ccb(void *xsc, void *io) 2978 { 2979 struct qle_softc *sc = xsc; 2980 struct qle_ccb *ccb = io; 2981 2982 ccb->ccb_xs = NULL; 2983 mtx_enter(&sc->sc_ccb_mtx); 2984 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 2985 mtx_leave(&sc->sc_ccb_mtx); 2986 } 2987
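
/*
 * The ccb free list above backs sc_iopool (initialized in
 * qle_alloc_ccbs()), so the scsi midlayer obtains a ccb through
 * qle_get_ccb() for each scsi_xfer it issues and returns it through
 * qle_put_ccb() once the transfer completes.
 */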