1 /* $OpenBSD: qlw.c,v 1.34 2020/02/05 16:29:30 krw Exp $ */ 2 3 /* 4 * Copyright (c) 2011 David Gwynne <dlg@openbsd.org> 5 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org> 6 * Copyright (c) 2014 Mark Kettenis <kettenis@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 19 */ 20 21 #include <sys/param.h> 22 #include <sys/systm.h> 23 #include <sys/atomic.h> 24 #include <sys/device.h> 25 #include <sys/ioctl.h> 26 #include <sys/malloc.h> 27 #include <sys/kernel.h> 28 #include <sys/mutex.h> 29 #include <sys/rwlock.h> 30 #include <sys/sensors.h> 31 #include <sys/queue.h> 32 33 #include <machine/bus.h> 34 35 #include <scsi/scsi_all.h> 36 #include <scsi/scsiconf.h> 37 38 #include <dev/ic/qlwreg.h> 39 #include <dev/ic/qlwvar.h> 40 41 #ifndef SMALL_KERNEL 42 #define QLW_DEBUG 43 #endif 44 45 #ifdef QLW_DEBUG 46 #define DPRINTF(m, f...) do { if ((qlwdebug & (m)) == (m)) printf(f); } \ 47 while (0) 48 #define QLW_D_MBOX 0x01 49 #define QLW_D_INTR 0x02 50 #define QLW_D_PORT 0x04 51 #define QLW_D_IO 0x08 52 #define QLW_D_IOCB 0x10 53 int qlwdebug = QLW_D_PORT | QLW_D_INTR | QLW_D_MBOX; 54 #else 55 #define DPRINTF(m, f...) 
56 #endif 57 58 struct cfdriver qlw_cd = { 59 NULL, 60 "qlw", 61 DV_DULL 62 }; 63 64 void qlw_scsi_cmd(struct scsi_xfer *); 65 int qlw_scsi_probe(struct scsi_link *); 66 67 u_int16_t qlw_read(struct qlw_softc *, bus_size_t); 68 void qlw_write(struct qlw_softc *, bus_size_t, u_int16_t); 69 void qlw_host_cmd(struct qlw_softc *sc, u_int16_t); 70 71 int qlw_mbox(struct qlw_softc *, int, int); 72 void qlw_mbox_putaddr(u_int16_t *, struct qlw_dmamem *); 73 u_int16_t qlw_read_mbox(struct qlw_softc *, int); 74 void qlw_write_mbox(struct qlw_softc *, int, u_int16_t); 75 76 int qlw_config_bus(struct qlw_softc *, int); 77 int qlw_config_target(struct qlw_softc *, int, int); 78 void qlw_update_bus(struct qlw_softc *, int); 79 void qlw_update_target(struct qlw_softc *, int, int); 80 void qlw_update_task(void *); 81 82 void qlw_handle_intr(struct qlw_softc *, u_int16_t, u_int16_t); 83 void qlw_set_ints(struct qlw_softc *, int); 84 int qlw_read_isr(struct qlw_softc *, u_int16_t *, u_int16_t *); 85 void qlw_clear_isr(struct qlw_softc *, u_int16_t); 86 87 void qlw_update(struct qlw_softc *, int); 88 void qlw_put_marker(struct qlw_softc *, int, void *); 89 void qlw_put_cmd(struct qlw_softc *, void *, struct scsi_xfer *, 90 struct qlw_ccb *); 91 void qlw_put_cont(struct qlw_softc *, void *, struct scsi_xfer *, 92 struct qlw_ccb *, int); 93 struct qlw_ccb *qlw_handle_resp(struct qlw_softc *, u_int16_t); 94 void qlw_get_header(struct qlw_softc *, struct qlw_iocb_hdr *, 95 int *, int *); 96 void qlw_put_header(struct qlw_softc *, struct qlw_iocb_hdr *, 97 int, int); 98 void qlw_put_data_seg(struct qlw_softc *, struct qlw_iocb_seg *, 99 bus_dmamap_t, int); 100 101 int qlw_softreset(struct qlw_softc *); 102 void qlw_dma_burst_enable(struct qlw_softc *); 103 104 int qlw_async(struct qlw_softc *, u_int16_t); 105 106 int qlw_load_firmware_words(struct qlw_softc *, const u_int16_t *, 107 u_int16_t); 108 int qlw_load_firmware(struct qlw_softc *); 109 int qlw_read_nvram(struct qlw_softc *); 110 
void qlw_parse_nvram_1040(struct qlw_softc *, int); 111 void qlw_parse_nvram_1080(struct qlw_softc *, int); 112 void qlw_init_defaults(struct qlw_softc *, int); 113 114 struct qlw_dmamem *qlw_dmamem_alloc(struct qlw_softc *, size_t); 115 void qlw_dmamem_free(struct qlw_softc *, struct qlw_dmamem *); 116 117 int qlw_alloc_ccbs(struct qlw_softc *); 118 void qlw_free_ccbs(struct qlw_softc *); 119 void *qlw_get_ccb(void *); 120 void qlw_put_ccb(void *, void *); 121 122 #ifdef QLW_DEBUG 123 void qlw_dump_iocb(struct qlw_softc *, void *, int); 124 void qlw_dump_iocb_segs(struct qlw_softc *, void *, int); 125 #else 126 #define qlw_dump_iocb(sc, h, fl) do { /* nothing */ } while (0) 127 #define qlw_dump_iocb_segs(sc, h, fl) do { /* nothing */ } while (0) 128 #endif 129 130 static inline int 131 qlw_xs_bus(struct qlw_softc *sc, struct scsi_xfer *xs) 132 { 133 return ((xs->sc_link->scsibus == sc->sc_link[0].scsibus) ? 0 : 1); 134 } 135 136 static inline u_int16_t 137 qlw_swap16(struct qlw_softc *sc, u_int16_t value) 138 { 139 if (sc->sc_isp_gen == QLW_GEN_ISP1000) 140 return htobe16(value); 141 else 142 return htole16(value); 143 } 144 145 static inline u_int32_t 146 qlw_swap32(struct qlw_softc *sc, u_int32_t value) 147 { 148 if (sc->sc_isp_gen == QLW_GEN_ISP1000) 149 return htobe32(value); 150 else 151 return htole32(value); 152 } 153 154 static inline u_int16_t 155 qlw_queue_read(struct qlw_softc *sc, bus_size_t offset) 156 { 157 return qlw_read(sc, sc->sc_mbox_base + offset); 158 } 159 160 static inline void 161 qlw_queue_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value) 162 { 163 qlw_write(sc, sc->sc_mbox_base + offset, value); 164 } 165 166 struct scsi_adapter qlw_switch = { 167 qlw_scsi_cmd, NULL, qlw_scsi_probe, NULL, NULL 168 }; 169 170 int 171 qlw_attach(struct qlw_softc *sc) 172 { 173 struct scsibus_attach_args saa; 174 void (*parse_nvram)(struct qlw_softc *, int); 175 int reset_delay; 176 int bus; 177 178 task_set(&sc->sc_update_task, 
qlw_update_task, sc); 179 180 switch (sc->sc_isp_gen) { 181 case QLW_GEN_ISP1000: 182 sc->sc_nvram_size = 0; 183 break; 184 case QLW_GEN_ISP1040: 185 sc->sc_nvram_size = 128; 186 sc->sc_nvram_minversion = 2; 187 parse_nvram = qlw_parse_nvram_1040; 188 break; 189 case QLW_GEN_ISP1080: 190 case QLW_GEN_ISP12160: 191 sc->sc_nvram_size = 256; 192 sc->sc_nvram_minversion = 1; 193 parse_nvram = qlw_parse_nvram_1080; 194 break; 195 196 default: 197 printf("unknown isp type\n"); 198 return (ENXIO); 199 } 200 201 /* after reset, mbox registers 1-3 should contain the string "ISP " */ 202 if (qlw_read_mbox(sc, 1) != 0x4953 || 203 qlw_read_mbox(sc, 2) != 0x5020 || 204 qlw_read_mbox(sc, 3) != 0x2020) { 205 /* try releasing the risc processor */ 206 qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE); 207 } 208 209 qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE); 210 if (qlw_softreset(sc) != 0) { 211 printf("softreset failed\n"); 212 return (ENXIO); 213 } 214 215 for (bus = 0; bus < sc->sc_numbusses; bus++) 216 qlw_init_defaults(sc, bus); 217 218 if (qlw_read_nvram(sc) == 0) { 219 for (bus = 0; bus < sc->sc_numbusses; bus++) 220 parse_nvram(sc, bus); 221 } 222 223 #ifndef ISP_NOFIRMWARE 224 if (sc->sc_firmware && qlw_load_firmware(sc)) { 225 printf("firmware load failed\n"); 226 return (ENXIO); 227 } 228 #endif 229 230 /* execute firmware */ 231 sc->sc_mbox[0] = QLW_MBOX_EXEC_FIRMWARE; 232 sc->sc_mbox[1] = QLW_CODE_ORG; 233 if (qlw_mbox(sc, 0x0003, 0x0001)) { 234 printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]); 235 return (ENXIO); 236 } 237 238 delay(250000); /* from isp(4) */ 239 240 sc->sc_mbox[0] = QLW_MBOX_ABOUT_FIRMWARE; 241 if (qlw_mbox(sc, QLW_MBOX_ABOUT_FIRMWARE_IN, 242 QLW_MBOX_ABOUT_FIRMWARE_OUT)) { 243 printf("ISP not talking after firmware exec: %x\n", 244 sc->sc_mbox[0]); 245 return (ENXIO); 246 } 247 /* The ISP1000 firmware we use doesn't return a version number. 
*/ 248 if (sc->sc_isp_gen == QLW_GEN_ISP1000 && sc->sc_firmware) { 249 sc->sc_mbox[1] = 1; 250 sc->sc_mbox[2] = 37; 251 sc->sc_mbox[3] = 0; 252 sc->sc_mbox[6] = 0; 253 } 254 printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc), 255 sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]); 256 257 /* work out how many ccbs to allocate */ 258 sc->sc_mbox[0] = QLW_MBOX_GET_FIRMWARE_STATUS; 259 if (qlw_mbox(sc, 0x0001, 0x0007)) { 260 printf("couldn't get firmware status: %x\n", sc->sc_mbox[0]); 261 return (ENXIO); 262 } 263 sc->sc_maxrequests = sc->sc_mbox[2]; 264 if (sc->sc_maxrequests > 512) 265 sc->sc_maxrequests = 512; 266 for (bus = 0; bus < sc->sc_numbusses; bus++) { 267 if (sc->sc_max_queue_depth[bus] > sc->sc_maxrequests) 268 sc->sc_max_queue_depth[bus] = sc->sc_maxrequests; 269 } 270 271 /* 272 * On some 1020/1040 variants the response queue is limited to 273 * 256 entries. We don't really need all that many anyway. 274 */ 275 sc->sc_maxresponses = sc->sc_maxrequests / 2; 276 if (sc->sc_maxresponses < 64) 277 sc->sc_maxresponses = 64; 278 279 /* We may need up to 3 request entries per SCSI command. */ 280 sc->sc_maxccbs = sc->sc_maxrequests / 3; 281 282 /* Allegedly the FIFO is busted on the 1040A. 
*/ 283 if (sc->sc_isp_type == QLW_ISP1040A) 284 sc->sc_isp_config &= ~QLW_PCI_FIFO_MASK; 285 qlw_write(sc, QLW_CFG1, sc->sc_isp_config); 286 287 if (sc->sc_isp_config & QLW_BURST_ENABLE) 288 qlw_dma_burst_enable(sc); 289 290 sc->sc_mbox[0] = QLW_MBOX_SET_FIRMWARE_FEATURES; 291 sc->sc_mbox[1] = 0; 292 if (sc->sc_fw_features & QLW_FW_FEATURE_LVD_NOTIFY) 293 sc->sc_mbox[1] |= QLW_FW_FEATURE_LVD_NOTIFY; 294 if (sc->sc_mbox[1] != 0 && qlw_mbox(sc, 0x0003, 0x0001)) { 295 printf("couldn't set firmware features: %x\n", sc->sc_mbox[0]); 296 return (ENXIO); 297 } 298 299 sc->sc_mbox[0] = QLW_MBOX_SET_CLOCK_RATE; 300 sc->sc_mbox[1] = sc->sc_clock; 301 if (qlw_mbox(sc, 0x0003, 0x0001)) { 302 printf("couldn't set clock rate: %x\n", sc->sc_mbox[0]); 303 return (ENXIO); 304 } 305 306 sc->sc_mbox[0] = QLW_MBOX_SET_RETRY_COUNT; 307 sc->sc_mbox[1] = sc->sc_retry_count[0]; 308 sc->sc_mbox[2] = sc->sc_retry_delay[0]; 309 sc->sc_mbox[6] = sc->sc_retry_count[1]; 310 sc->sc_mbox[7] = sc->sc_retry_delay[1]; 311 if (qlw_mbox(sc, 0x00c7, 0x0001)) { 312 printf("couldn't set retry count: %x\n", sc->sc_mbox[0]); 313 return (ENXIO); 314 } 315 316 sc->sc_mbox[0] = QLW_MBOX_SET_ASYNC_DATA_SETUP; 317 sc->sc_mbox[1] = sc->sc_async_data_setup[0]; 318 sc->sc_mbox[2] = sc->sc_async_data_setup[1]; 319 if (qlw_mbox(sc, 0x0007, 0x0001)) { 320 printf("couldn't set async data setup: %x\n", sc->sc_mbox[0]); 321 return (ENXIO); 322 } 323 324 sc->sc_mbox[0] = QLW_MBOX_SET_ACTIVE_NEGATION; 325 sc->sc_mbox[1] = sc->sc_req_ack_active_neg[0] << 5; 326 sc->sc_mbox[1] |= sc->sc_data_line_active_neg[0] << 4; 327 sc->sc_mbox[2] = sc->sc_req_ack_active_neg[1] << 5; 328 sc->sc_mbox[2] |= sc->sc_data_line_active_neg[1] << 4; 329 if (qlw_mbox(sc, 0x0007, 0x0001)) { 330 printf("couldn't set active negation: %x\n", sc->sc_mbox[0]); 331 return (ENXIO); 332 } 333 334 sc->sc_mbox[0] = QLW_MBOX_SET_TAG_AGE_LIMIT; 335 sc->sc_mbox[1] = sc->sc_tag_age_limit[0]; 336 sc->sc_mbox[2] = sc->sc_tag_age_limit[1]; 337 if (qlw_mbox(sc, 
0x0007, 0x0001)) { 338 printf("couldn't set tag age limit: %x\n", sc->sc_mbox[0]); 339 return (ENXIO); 340 } 341 342 sc->sc_mbox[0] = QLW_MBOX_SET_SELECTION_TIMEOUT; 343 sc->sc_mbox[1] = sc->sc_selection_timeout[0]; 344 sc->sc_mbox[2] = sc->sc_selection_timeout[1]; 345 if (qlw_mbox(sc, 0x0007, 0x0001)) { 346 printf("couldn't set selection timeout: %x\n", sc->sc_mbox[0]); 347 return (ENXIO); 348 } 349 350 for (bus = 0; bus < sc->sc_numbusses; bus++) { 351 if (qlw_config_bus(sc, bus)) 352 return (ENXIO); 353 } 354 355 if (qlw_alloc_ccbs(sc)) { 356 /* error already printed */ 357 return (ENOMEM); 358 } 359 360 sc->sc_mbox[0] = QLW_MBOX_INIT_REQ_QUEUE; 361 sc->sc_mbox[1] = sc->sc_maxrequests; 362 qlw_mbox_putaddr(sc->sc_mbox, sc->sc_requests); 363 sc->sc_mbox[4] = 0; 364 if (qlw_mbox(sc, 0x00df, 0x0001)) { 365 printf("couldn't init request queue: %x\n", sc->sc_mbox[0]); 366 goto free_ccbs; 367 } 368 369 sc->sc_mbox[0] = QLW_MBOX_INIT_RSP_QUEUE; 370 sc->sc_mbox[1] = sc->sc_maxresponses; 371 qlw_mbox_putaddr(sc->sc_mbox, sc->sc_responses); 372 sc->sc_mbox[5] = 0; 373 if (qlw_mbox(sc, 0x00ef, 0x0001)) { 374 printf("couldn't init response queue: %x\n", sc->sc_mbox[0]); 375 goto free_ccbs; 376 } 377 378 reset_delay = 0; 379 for (bus = 0; bus < sc->sc_numbusses; bus++) { 380 sc->sc_mbox[0] = QLW_MBOX_BUS_RESET; 381 sc->sc_mbox[1] = sc->sc_reset_delay[bus]; 382 sc->sc_mbox[2] = bus; 383 if (qlw_mbox(sc, 0x0007, 0x0001)) { 384 printf("couldn't reset bus: %x\n", sc->sc_mbox[0]); 385 goto free_ccbs; 386 } 387 sc->sc_marker_required[bus] = 1; 388 sc->sc_update_required[bus] = 0xffff; 389 390 if (sc->sc_reset_delay[bus] > reset_delay) 391 reset_delay = sc->sc_reset_delay[bus]; 392 } 393 394 /* wait for the busses to settle */ 395 delay(reset_delay * 1000000); 396 397 /* we should be good to go now, attach scsibus */ 398 for (bus = 0; bus < sc->sc_numbusses; bus++) { 399 sc->sc_link[bus].adapter = &qlw_switch; 400 sc->sc_link[bus].adapter_softc = sc; 401 
sc->sc_link[bus].adapter_target = sc->sc_initiator[bus]; 402 sc->sc_link[bus].adapter_buswidth = QLW_MAX_TARGETS; 403 sc->sc_link[bus].openings = sc->sc_max_queue_depth[bus]; 404 sc->sc_link[bus].pool = &sc->sc_iopool; 405 406 memset(&saa, 0, sizeof(saa)); 407 saa.saa_sc_link = &sc->sc_link[bus]; 408 409 /* config_found() returns the scsibus attached to us */ 410 sc->sc_scsibus[bus] = (struct scsibus_softc *) 411 config_found(&sc->sc_dev, &saa, scsiprint); 412 413 qlw_update_bus(sc, bus); 414 } 415 416 sc->sc_running = 1; 417 return(0); 418 419 free_ccbs: 420 qlw_free_ccbs(sc); 421 return (ENXIO); 422 } 423 424 int 425 qlw_detach(struct qlw_softc *sc, int flags) 426 { 427 return (0); 428 } 429 430 int 431 qlw_config_bus(struct qlw_softc *sc, int bus) 432 { 433 int target, err; 434 435 sc->sc_mbox[0] = QLW_MBOX_SET_INITIATOR_ID; 436 sc->sc_mbox[1] = (bus << 7) | sc->sc_initiator[bus]; 437 438 if (qlw_mbox(sc, 0x0003, 0x0001)) { 439 printf("couldn't set initiator id: %x\n", sc->sc_mbox[0]); 440 return (ENXIO); 441 } 442 443 for (target = 0; target < QLW_MAX_TARGETS; target++) { 444 err = qlw_config_target(sc, bus, target); 445 if (err) 446 return (err); 447 } 448 449 return (0); 450 } 451 452 int 453 qlw_config_target(struct qlw_softc *sc, int bus, int target) 454 { 455 int lun; 456 457 sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS; 458 sc->sc_mbox[1] = (((bus << 7) | target) << 8); 459 sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params; 460 sc->sc_mbox[2] &= QLW_TARGET_SAFE; 461 sc->sc_mbox[2] |= QLW_TARGET_NARROW | QLW_TARGET_ASYNC; 462 sc->sc_mbox[3] = 0; 463 464 if (qlw_mbox(sc, 0x000f, 0x0001)) { 465 printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]); 466 return (ENXIO); 467 } 468 469 for (lun = 0; lun < QLW_MAX_LUNS; lun++) { 470 sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE; 471 sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun; 472 sc->sc_mbox[2] = sc->sc_max_queue_depth[bus]; 473 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle; 
474 if (qlw_mbox(sc, 0x000f, 0x0001)) { 475 printf("couldn't set lun parameters: %x\n", 476 sc->sc_mbox[0]); 477 return (ENXIO); 478 } 479 } 480 481 return (0); 482 } 483 484 void 485 qlw_update_bus(struct qlw_softc *sc, int bus) 486 { 487 int target; 488 489 for (target = 0; target < QLW_MAX_TARGETS; target++) 490 qlw_update_target(sc, bus, target); 491 } 492 493 void 494 qlw_update_target(struct qlw_softc *sc, int bus, int target) 495 { 496 struct scsi_link *link; 497 int lun; 498 499 if ((sc->sc_update_required[bus] & (1 << target)) == 0) 500 return; 501 atomic_clearbits_int(&sc->sc_update_required[bus], (1 << target)); 502 503 link = scsi_get_link(sc->sc_scsibus[bus], target, 0); 504 if (link == NULL) 505 return; 506 507 sc->sc_mbox[0] = QLW_MBOX_SET_TARGET_PARAMETERS; 508 sc->sc_mbox[1] = (((bus << 7) | target) << 8); 509 sc->sc_mbox[2] = sc->sc_target[bus][target].qt_params; 510 sc->sc_mbox[2] |= QLW_TARGET_RENEG; 511 sc->sc_mbox[2] &= ~QLW_TARGET_QFRZ; 512 if (link->quirks & SDEV_NOSYNC) 513 sc->sc_mbox[2] &= ~QLW_TARGET_SYNC; 514 if (link->quirks & SDEV_NOWIDE) 515 sc->sc_mbox[2] &= ~QLW_TARGET_WIDE; 516 if (link->quirks & SDEV_NOTAGS) 517 sc->sc_mbox[2] &= ~QLW_TARGET_TAGS; 518 519 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_sync_period; 520 sc->sc_mbox[3] |= (sc->sc_target[bus][target].qt_sync_offset << 8); 521 522 if (qlw_mbox(sc, 0x000f, 0x0001)) { 523 printf("couldn't set target parameters: %x\n", sc->sc_mbox[0]); 524 return; 525 } 526 527 /* XXX do PPR detection */ 528 529 for (lun = 0; lun < QLW_MAX_LUNS; lun++) { 530 sc->sc_mbox[0] = QLW_MBOX_SET_DEVICE_QUEUE; 531 sc->sc_mbox[1] = (((bus << 7) | target) << 8) | lun; 532 sc->sc_mbox[2] = sc->sc_max_queue_depth[bus]; 533 sc->sc_mbox[3] = sc->sc_target[bus][target].qt_exec_throttle; 534 if (qlw_mbox(sc, 0x000f, 0x0001)) { 535 printf("couldn't set lun parameters: %x\n", 536 sc->sc_mbox[0]); 537 return; 538 } 539 } 540 } 541 542 void 543 qlw_update_task(void *xsc) 544 { 545 struct qlw_softc *sc = xsc; 
546 int bus; 547 548 for (bus = 0; bus < sc->sc_numbusses; bus++) 549 qlw_update_bus(sc, bus); 550 } 551 552 struct qlw_ccb * 553 qlw_handle_resp(struct qlw_softc *sc, u_int16_t id) 554 { 555 struct qlw_ccb *ccb; 556 struct qlw_iocb_hdr *hdr; 557 struct qlw_iocb_status *status; 558 struct scsi_xfer *xs; 559 u_int32_t handle; 560 int entry_type; 561 int flags; 562 int bus; 563 564 ccb = NULL; 565 hdr = QLW_DMA_KVA(sc->sc_responses) + (id * QLW_QUEUE_ENTRY_SIZE); 566 567 bus_dmamap_sync(sc->sc_dmat, 568 QLW_DMA_MAP(sc->sc_responses), id * QLW_QUEUE_ENTRY_SIZE, 569 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD); 570 571 qlw_get_header(sc, hdr, &entry_type, &flags); 572 switch (entry_type) { 573 case QLW_IOCB_STATUS: 574 status = (struct qlw_iocb_status *)hdr; 575 handle = qlw_swap32(sc, status->handle); 576 if (handle > sc->sc_maxccbs) { 577 panic("bad completed command handle: %d (> %d)", 578 handle, sc->sc_maxccbs); 579 } 580 581 ccb = &sc->sc_ccbs[handle]; 582 xs = ccb->ccb_xs; 583 if (xs == NULL) { 584 DPRINTF(QLW_D_INTR, "%s: got status for inactive" 585 " ccb %d\n", DEVNAME(sc), handle); 586 qlw_dump_iocb(sc, hdr, QLW_D_INTR); 587 ccb = NULL; 588 break; 589 } 590 if (xs->io != ccb) { 591 panic("completed command handle doesn't match xs " 592 "(handle %d, ccb %p, xs->io %p)", handle, ccb, 593 xs->io); 594 } 595 596 if (xs->datalen > 0) { 597 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0, 598 ccb->ccb_dmamap->dm_mapsize, 599 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_POSTREAD : 600 BUS_DMASYNC_POSTWRITE); 601 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap); 602 } 603 604 bus = qlw_xs_bus(sc, xs); 605 xs->status = qlw_swap16(sc, status->scsi_status); 606 switch (qlw_swap16(sc, status->completion)) { 607 case QLW_IOCB_STATUS_COMPLETE: 608 if (qlw_swap16(sc, status->scsi_status) & 609 QLW_SCSI_STATUS_SENSE_VALID) { 610 memcpy(&xs->sense, status->sense_data, 611 sizeof(xs->sense)); 612 xs->error = XS_SENSE; 613 } else { 614 xs->error = XS_NOERROR; 615 } 616 xs->resid = 0; 617 break; 618 619 case QLW_IOCB_STATUS_INCOMPLETE: 620 if (flags & QLW_STATE_GOT_TARGET) { 621 xs->error = XS_DRIVER_STUFFUP; 622 } else { 623 xs->error = XS_SELTIMEOUT; 624 } 625 break; 626 627 case QLW_IOCB_STATUS_DMA_ERROR: 628 DPRINTF(QLW_D_INTR, "%s: dma error\n", DEVNAME(sc)); 629 /* set resid apparently? */ 630 break; 631 632 case QLW_IOCB_STATUS_RESET: 633 DPRINTF(QLW_D_INTR, "%s: reset destroyed command\n", 634 DEVNAME(sc)); 635 sc->sc_marker_required[bus] = 1; 636 xs->error = XS_RESET; 637 break; 638 639 case QLW_IOCB_STATUS_ABORTED: 640 DPRINTF(QLW_D_INTR, "%s: aborted\n", DEVNAME(sc)); 641 sc->sc_marker_required[bus] = 1; 642 xs->error = XS_DRIVER_STUFFUP; 643 break; 644 645 case QLW_IOCB_STATUS_TIMEOUT: 646 DPRINTF(QLW_D_INTR, "%s: command timed out\n", 647 DEVNAME(sc)); 648 xs->error = XS_TIMEOUT; 649 break; 650 651 case QLW_IOCB_STATUS_DATA_OVERRUN: 652 case QLW_IOCB_STATUS_DATA_UNDERRUN: 653 xs->resid = qlw_swap32(sc, status->resid); 654 xs->error = XS_NOERROR; 655 break; 656 657 case QLW_IOCB_STATUS_QUEUE_FULL: 658 DPRINTF(QLW_D_INTR, "%s: queue full\n", DEVNAME(sc)); 659 xs->error = XS_BUSY; 660 break; 661 662 case QLW_IOCB_STATUS_WIDE_FAILED: 663 DPRINTF(QLW_D_INTR, "%s: wide failed\n", DEVNAME(sc)); 664 sc->sc_link->quirks |= SDEV_NOWIDE; 665 atomic_setbits_int(&sc->sc_update_required[bus], 666 1 << xs->sc_link->target); 667 task_add(systq, &sc->sc_update_task); 668 xs->resid = qlw_swap32(sc, status->resid); 669 xs->error = 
XS_NOERROR; 670 break; 671 672 case QLW_IOCB_STATUS_SYNCXFER_FAILED: 673 DPRINTF(QLW_D_INTR, "%s: sync failed\n", DEVNAME(sc)); 674 sc->sc_link->quirks |= SDEV_NOSYNC; 675 atomic_setbits_int(&sc->sc_update_required[bus], 676 1 << xs->sc_link->target); 677 task_add(systq, &sc->sc_update_task); 678 xs->resid = qlw_swap32(sc, status->resid); 679 xs->error = XS_NOERROR; 680 break; 681 682 default: 683 DPRINTF(QLW_D_INTR, "%s: unexpected completion" 684 " status %x\n", DEVNAME(sc), 685 qlw_swap16(sc, status->completion)); 686 qlw_dump_iocb(sc, hdr, QLW_D_INTR); 687 xs->error = XS_DRIVER_STUFFUP; 688 break; 689 } 690 break; 691 692 default: 693 DPRINTF(QLW_D_INTR, "%s: unexpected response entry type %x\n", 694 DEVNAME(sc), entry_type); 695 qlw_dump_iocb(sc, hdr, QLW_D_INTR); 696 break; 697 } 698 699 return (ccb); 700 } 701 702 void 703 qlw_handle_intr(struct qlw_softc *sc, u_int16_t isr, u_int16_t info) 704 { 705 int i; 706 u_int16_t rspin; 707 struct qlw_ccb *ccb; 708 709 switch (isr) { 710 case QLW_INT_TYPE_ASYNC: 711 qlw_async(sc, info); 712 qlw_clear_isr(sc, isr); 713 break; 714 715 case QLW_INT_TYPE_IO: 716 qlw_clear_isr(sc, isr); 717 rspin = qlw_queue_read(sc, QLW_RESP_IN); 718 if (rspin == sc->sc_last_resp_id) { 719 /* seems to happen a lot on 2200s when mbox commands 720 * complete but it doesn't want to give us the register 721 * semaphore, or something. 722 * 723 * if we're waiting on a mailbox command, don't ack 724 * the interrupt yet. 
725 */ 726 if (sc->sc_mbox_pending) { 727 DPRINTF(QLW_D_MBOX, "%s: ignoring premature" 728 " mbox int\n", DEVNAME(sc)); 729 return; 730 } 731 732 break; 733 } 734 735 if (sc->sc_responses == NULL) 736 break; 737 738 DPRINTF(QLW_D_IO, "%s: response queue %x=>%x\n", 739 DEVNAME(sc), sc->sc_last_resp_id, rspin); 740 741 do { 742 ccb = qlw_handle_resp(sc, sc->sc_last_resp_id); 743 if (ccb) 744 scsi_done(ccb->ccb_xs); 745 746 sc->sc_last_resp_id++; 747 sc->sc_last_resp_id %= sc->sc_maxresponses; 748 } while (sc->sc_last_resp_id != rspin); 749 750 qlw_queue_write(sc, QLW_RESP_OUT, rspin); 751 break; 752 753 case QLW_INT_TYPE_MBOX: 754 if (sc->sc_mbox_pending) { 755 if (info == QLW_MBOX_COMPLETE) { 756 for (i = 1; i < nitems(sc->sc_mbox); i++) { 757 sc->sc_mbox[i] = qlw_read_mbox(sc, i); 758 } 759 } else { 760 sc->sc_mbox[0] = info; 761 } 762 wakeup(sc->sc_mbox); 763 } else { 764 DPRINTF(QLW_D_MBOX, "%s: unexpected mbox interrupt:" 765 " %x\n", DEVNAME(sc), info); 766 } 767 qlw_clear_isr(sc, isr); 768 break; 769 770 default: 771 /* maybe log something? 
*/ 772 break; 773 } 774 } 775 776 int 777 qlw_intr(void *xsc) 778 { 779 struct qlw_softc *sc = xsc; 780 u_int16_t isr; 781 u_int16_t info; 782 783 if (qlw_read_isr(sc, &isr, &info) == 0) 784 return (0); 785 786 qlw_handle_intr(sc, isr, info); 787 return (1); 788 } 789 790 int 791 qlw_scsi_probe(struct scsi_link *link) 792 { 793 if (link->lun >= QLW_MAX_LUNS) 794 return (EINVAL); 795 796 return (0); 797 } 798 799 void 800 qlw_scsi_cmd(struct scsi_xfer *xs) 801 { 802 struct scsi_link *link = xs->sc_link; 803 struct qlw_softc *sc = link->adapter_softc; 804 struct qlw_ccb *ccb; 805 struct qlw_iocb_req0 *iocb; 806 struct qlw_ccb_list list; 807 u_int16_t req, rspin; 808 int offset, error, done; 809 bus_dmamap_t dmap; 810 int bus; 811 int seg; 812 813 if (xs->cmdlen > sizeof(iocb->cdb)) { 814 DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc), 815 xs->cmdlen); 816 memset(&xs->sense, 0, sizeof(xs->sense)); 817 xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT; 818 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 819 xs->sense.add_sense_code = 0x20; 820 xs->error = XS_SENSE; 821 scsi_done(xs); 822 return; 823 } 824 825 ccb = xs->io; 826 dmap = ccb->ccb_dmamap; 827 if (xs->datalen > 0) { 828 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, 829 xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ? 830 BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 831 if (error) { 832 xs->error = XS_DRIVER_STUFFUP; 833 scsi_done(xs); 834 return; 835 } 836 837 bus_dmamap_sync(sc->sc_dmat, dmap, 0, 838 dmap->dm_mapsize, 839 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_PREREAD : 840 BUS_DMASYNC_PREWRITE); 841 } 842 843 mtx_enter(&sc->sc_queue_mtx); 844 845 /* put in a sync marker if required */ 846 bus = qlw_xs_bus(sc, xs); 847 if (sc->sc_marker_required[bus]) { 848 req = sc->sc_next_req_id++; 849 if (sc->sc_next_req_id == sc->sc_maxrequests) 850 sc->sc_next_req_id = 0; 851 852 DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n", 853 DEVNAME(sc), req); 854 offset = (req * QLW_QUEUE_ENTRY_SIZE); 855 iocb = QLW_DMA_KVA(sc->sc_requests) + offset; 856 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), 857 offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 858 qlw_put_marker(sc, bus, iocb); 859 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), 860 offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE); 861 qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id); 862 sc->sc_marker_required[bus] = 0; 863 } 864 865 req = sc->sc_next_req_id++; 866 if (sc->sc_next_req_id == sc->sc_maxrequests) 867 sc->sc_next_req_id = 0; 868 869 offset = (req * QLW_QUEUE_ENTRY_SIZE); 870 iocb = QLW_DMA_KVA(sc->sc_requests) + offset; 871 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset, 872 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 873 874 ccb->ccb_xs = xs; 875 876 DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc), req); 877 qlw_put_cmd(sc, iocb, xs, ccb); 878 seg = QLW_IOCB_SEGS_PER_CMD; 879 880 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset, 881 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE); 882 883 while (seg < ccb->ccb_dmamap->dm_nsegs) { 884 req = sc->sc_next_req_id++; 885 if (sc->sc_next_req_id == sc->sc_maxrequests) 886 sc->sc_next_req_id = 0; 887 888 offset = (req * QLW_QUEUE_ENTRY_SIZE); 889 iocb = QLW_DMA_KVA(sc->sc_requests) + offset; 890 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset, 891 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE); 892 893 DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n", DEVNAME(sc), req); 894 qlw_put_cont(sc, iocb, xs, 
ccb, seg); 895 seg += QLW_IOCB_SEGS_PER_CONT; 896 897 bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset, 898 QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE); 899 } 900 901 qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id); 902 903 if (!ISSET(xs->flags, SCSI_POLL)) { 904 mtx_leave(&sc->sc_queue_mtx); 905 return; 906 } 907 908 done = 0; 909 SIMPLEQ_INIT(&list); 910 do { 911 u_int16_t isr, info; 912 913 delay(100); 914 915 if (qlw_read_isr(sc, &isr, &info) == 0) { 916 continue; 917 } 918 919 if (isr != QLW_INT_TYPE_IO) { 920 qlw_handle_intr(sc, isr, info); 921 continue; 922 } 923 924 qlw_clear_isr(sc, isr); 925 926 rspin = qlw_queue_read(sc, QLW_RESP_IN); 927 while (rspin != sc->sc_last_resp_id) { 928 ccb = qlw_handle_resp(sc, sc->sc_last_resp_id); 929 930 sc->sc_last_resp_id++; 931 if (sc->sc_last_resp_id == sc->sc_maxresponses) 932 sc->sc_last_resp_id = 0; 933 934 if (ccb != NULL) 935 SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link); 936 if (ccb == xs->io) 937 done = 1; 938 } 939 qlw_queue_write(sc, QLW_RESP_OUT, rspin); 940 } while (done == 0); 941 942 mtx_leave(&sc->sc_queue_mtx); 943 944 while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) { 945 SIMPLEQ_REMOVE_HEAD(&list, ccb_link); 946 scsi_done(ccb->ccb_xs); 947 } 948 } 949 950 u_int16_t 951 qlw_read(struct qlw_softc *sc, bus_size_t offset) 952 { 953 u_int16_t v; 954 v = bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 955 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2, 956 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 957 return (v); 958 } 959 960 void 961 qlw_write(struct qlw_softc *sc, bus_size_t offset, u_int16_t value) 962 { 963 bus_space_write_2(sc->sc_iot, sc->sc_ioh, offset, value); 964 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 2, 965 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 966 } 967 968 u_int16_t 969 qlw_read_mbox(struct qlw_softc *sc, int mbox) 970 { 971 /* could range-check mboxes according to chip type? 
*/ 972 return (qlw_read(sc, sc->sc_mbox_base + (mbox * 2))); 973 } 974 975 void 976 qlw_write_mbox(struct qlw_softc *sc, int mbox, u_int16_t value) 977 { 978 qlw_write(sc, sc->sc_mbox_base + (mbox * 2), value); 979 } 980 981 void 982 qlw_host_cmd(struct qlw_softc *sc, u_int16_t cmd) 983 { 984 qlw_write(sc, sc->sc_host_cmd_ctrl, cmd << QLW_HOST_CMD_SHIFT); 985 } 986 987 #define MBOX_COMMAND_TIMEOUT 4000 988 989 int 990 qlw_mbox(struct qlw_softc *sc, int maskin, int maskout) 991 { 992 int i; 993 int result = 0; 994 int rv; 995 996 sc->sc_mbox_pending = 1; 997 for (i = 0; i < nitems(sc->sc_mbox); i++) { 998 if (maskin & (1 << i)) { 999 qlw_write_mbox(sc, i, sc->sc_mbox[i]); 1000 } 1001 } 1002 qlw_host_cmd(sc, QLW_HOST_CMD_SET_HOST_INT); 1003 1004 if (sc->sc_running == 0) { 1005 for (i = 0; i < MBOX_COMMAND_TIMEOUT && result == 0; i++) { 1006 u_int16_t isr, info; 1007 1008 delay(100); 1009 1010 if (qlw_read_isr(sc, &isr, &info) == 0) 1011 continue; 1012 1013 switch (isr) { 1014 case QLW_INT_TYPE_MBOX: 1015 result = info; 1016 break; 1017 1018 default: 1019 qlw_handle_intr(sc, isr, info); 1020 break; 1021 } 1022 } 1023 } else { 1024 tsleep_nsec(sc->sc_mbox, PRIBIO, "qlw_mbox", INFSLP); 1025 result = sc->sc_mbox[0]; 1026 } 1027 1028 switch (result) { 1029 case QLW_MBOX_COMPLETE: 1030 for (i = 1; i < nitems(sc->sc_mbox); i++) { 1031 sc->sc_mbox[i] = (maskout & (1 << i)) ? 1032 qlw_read_mbox(sc, i) : 0; 1033 } 1034 rv = 0; 1035 break; 1036 1037 case 0: 1038 /* timed out; do something? 
*/ 1039 DPRINTF(QLW_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc)); 1040 rv = 1; 1041 break; 1042 1043 default: 1044 sc->sc_mbox[0] = result; 1045 rv = result; 1046 break; 1047 } 1048 1049 qlw_clear_isr(sc, QLW_INT_TYPE_MBOX); 1050 sc->sc_mbox_pending = 0; 1051 return (rv); 1052 } 1053 1054 void 1055 qlw_mbox_putaddr(u_int16_t *mbox, struct qlw_dmamem *mem) 1056 { 1057 mbox[2] = (QLW_DMA_DVA(mem) >> 16) & 0xffff; 1058 mbox[3] = (QLW_DMA_DVA(mem) >> 0) & 0xffff; 1059 mbox[6] = (QLW_DMA_DVA(mem) >> 48) & 0xffff; 1060 mbox[7] = (QLW_DMA_DVA(mem) >> 32) & 0xffff; 1061 } 1062 1063 void 1064 qlw_set_ints(struct qlw_softc *sc, int enabled) 1065 { 1066 u_int16_t v = enabled ? (QLW_INT_REQ | QLW_RISC_INT_REQ) : 0; 1067 qlw_write(sc, QLW_INT_CTRL, v); 1068 } 1069 1070 int 1071 qlw_read_isr(struct qlw_softc *sc, u_int16_t *isr, u_int16_t *info) 1072 { 1073 u_int16_t int_status; 1074 1075 if (qlw_read(sc, QLW_SEMA) & QLW_SEMA_LOCK) { 1076 *info = qlw_read_mbox(sc, 0); 1077 if (*info & QLW_MBOX_HAS_STATUS) 1078 *isr = QLW_INT_TYPE_MBOX; 1079 else 1080 *isr = QLW_INT_TYPE_ASYNC; 1081 } else { 1082 int_status = qlw_read(sc, QLW_INT_STATUS); 1083 if ((int_status & (QLW_INT_REQ | QLW_RISC_INT_REQ)) == 0) 1084 return (0); 1085 1086 *isr = QLW_INT_TYPE_IO; 1087 } 1088 1089 return (1); 1090 } 1091 1092 void 1093 qlw_clear_isr(struct qlw_softc *sc, u_int16_t isr) 1094 { 1095 qlw_host_cmd(sc, QLW_HOST_CMD_CLR_RISC_INT); 1096 switch (isr) { 1097 case QLW_INT_TYPE_MBOX: 1098 case QLW_INT_TYPE_ASYNC: 1099 qlw_write(sc, QLW_SEMA, 0); 1100 break; 1101 default: 1102 break; 1103 } 1104 } 1105 1106 int 1107 qlw_softreset(struct qlw_softc *sc) 1108 { 1109 int i; 1110 1111 qlw_set_ints(sc, 0); 1112 1113 /* reset */ 1114 qlw_write(sc, QLW_INT_CTRL, QLW_RESET); 1115 delay(100); 1116 /* clear data and control dma engines? 
*/ 1117 1118 /* wait for soft reset to clear */ 1119 for (i = 0; i < 1000; i++) { 1120 if ((qlw_read(sc, QLW_INT_CTRL) & QLW_RESET) == 0) 1121 break; 1122 1123 delay(100); 1124 } 1125 1126 if (i == 1000) { 1127 DPRINTF(QLW_D_INTR, "%s: reset didn't clear\n", DEVNAME(sc)); 1128 qlw_set_ints(sc, 0); 1129 return (ENXIO); 1130 } 1131 1132 qlw_write(sc, QLW_CFG1, 0); 1133 1134 /* reset risc processor */ 1135 qlw_host_cmd(sc, QLW_HOST_CMD_RESET); 1136 delay(100); 1137 qlw_write(sc, QLW_SEMA, 0); 1138 qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE); 1139 1140 /* reset queue pointers */ 1141 qlw_queue_write(sc, QLW_REQ_IN, 0); 1142 qlw_queue_write(sc, QLW_REQ_OUT, 0); 1143 qlw_queue_write(sc, QLW_RESP_IN, 0); 1144 qlw_queue_write(sc, QLW_RESP_OUT, 0); 1145 1146 qlw_set_ints(sc, 1); 1147 qlw_host_cmd(sc, QLW_HOST_CMD_BIOS); 1148 1149 /* do a basic mailbox operation to check we're alive */ 1150 sc->sc_mbox[0] = QLW_MBOX_NOP; 1151 if (qlw_mbox(sc, 0x0001, 0x0001)) { 1152 DPRINTF(QLW_D_INTR, "%s: ISP not responding after reset\n", 1153 DEVNAME(sc)); 1154 return (ENXIO); 1155 } 1156 1157 return (0); 1158 } 1159 1160 void 1161 qlw_dma_burst_enable(struct qlw_softc *sc) 1162 { 1163 if (sc->sc_isp_gen == QLW_GEN_ISP1000 || 1164 sc->sc_isp_gen == QLW_GEN_ISP1040) { 1165 qlw_write(sc, QLW_CDMA_CFG, 1166 qlw_read(sc, QLW_CDMA_CFG) | QLW_DMA_BURST_ENABLE); 1167 qlw_write(sc, QLW_DDMA_CFG, 1168 qlw_read(sc, QLW_DDMA_CFG) | QLW_DMA_BURST_ENABLE); 1169 } else { 1170 qlw_host_cmd(sc, QLW_HOST_CMD_PAUSE); 1171 qlw_write(sc, QLW_CFG1, 1172 qlw_read(sc, QLW_CFG1) | QLW_DMA_BANK); 1173 qlw_write(sc, QLW_CDMA_CFG_1080, 1174 qlw_read(sc, QLW_CDMA_CFG_1080) | QLW_DMA_BURST_ENABLE); 1175 qlw_write(sc, QLW_DDMA_CFG_1080, 1176 qlw_read(sc, QLW_DDMA_CFG_1080) | QLW_DMA_BURST_ENABLE); 1177 qlw_write(sc, QLW_CFG1, 1178 qlw_read(sc, QLW_CFG1) & ~QLW_DMA_BANK); 1179 qlw_host_cmd(sc, QLW_HOST_CMD_RELEASE); 1180 } 1181 } 1182 1183 void 1184 qlw_update(struct qlw_softc *sc, int task) 1185 { 1186 /* do things */ 
1187 } 1188 1189 int 1190 qlw_async(struct qlw_softc *sc, u_int16_t info) 1191 { 1192 int bus; 1193 1194 switch (info) { 1195 case QLW_ASYNC_BUS_RESET: 1196 DPRINTF(QLW_D_PORT, "%s: bus reset\n", DEVNAME(sc)); 1197 bus = qlw_read_mbox(sc, 6); 1198 sc->sc_marker_required[bus] = 1; 1199 break; 1200 1201 #if 0 1202 case QLW_ASYNC_SYSTEM_ERROR: 1203 qla_update(sc, QLW_UPDATE_SOFTRESET); 1204 break; 1205 1206 case QLW_ASYNC_REQ_XFER_ERROR: 1207 qla_update(sc, QLW_UPDATE_SOFTRESET); 1208 break; 1209 1210 case QLW_ASYNC_RSP_XFER_ERROR: 1211 qla_update(sc, QLW_UPDATE_SOFTRESET); 1212 break; 1213 #endif 1214 1215 case QLW_ASYNC_SCSI_CMD_COMPLETE: 1216 /* shouldn't happen, we disable fast posting */ 1217 break; 1218 1219 case QLW_ASYNC_CTIO_COMPLETE: 1220 /* definitely shouldn't happen, we don't do target mode */ 1221 break; 1222 1223 default: 1224 DPRINTF(QLW_D_INTR, "%s: unknown async %x\n", DEVNAME(sc), 1225 info); 1226 break; 1227 } 1228 return (1); 1229 } 1230 1231 #ifdef QLW_DEBUG 1232 void 1233 qlw_dump_iocb(struct qlw_softc *sc, void *buf, int flags) 1234 { 1235 u_int8_t *iocb = buf; 1236 int l; 1237 int b; 1238 1239 if ((qlwdebug & flags) == 0) 1240 return; 1241 1242 printf("%s: iocb:\n", DEVNAME(sc)); 1243 for (l = 0; l < 4; l++) { 1244 for (b = 0; b < 16; b++) { 1245 printf(" %2.2x", iocb[(l*16)+b]); 1246 } 1247 printf("\n"); 1248 } 1249 } 1250 1251 void 1252 qlw_dump_iocb_segs(struct qlw_softc *sc, void *segs, int n) 1253 { 1254 u_int8_t *buf = segs; 1255 int s, b; 1256 if ((qlwdebug & QLW_D_IOCB) == 0) 1257 return; 1258 1259 printf("%s: iocb segs:\n", DEVNAME(sc)); 1260 for (s = 0; s < n; s++) { 1261 for (b = 0; b < sizeof(struct qlw_iocb_seg); b++) { 1262 printf(" %2.2x", buf[(s*(sizeof(struct qlw_iocb_seg))) 1263 + b]); 1264 } 1265 printf("\n"); 1266 } 1267 } 1268 #endif 1269 1270 /* 1271 * The PCI bus is little-endian whereas SBus is big-endian. 
This 1272 * leads to some differences in byte twisting of DMA transfers of 1273 * request and response queue entries. Most fields can be treated as 1274 * 16-bit or 32-bit with the endianness of the bus, but the header 1275 * fields end up being swapped by the ISP1000's SBus interface. 1276 */ 1277 1278 void 1279 qlw_get_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr, 1280 int *type, int *flags) 1281 { 1282 if (sc->sc_isp_gen == QLW_GEN_ISP1000) { 1283 *type = hdr->entry_count; 1284 *flags = hdr->seqno; 1285 } else { 1286 *type = hdr->entry_type; 1287 *flags = hdr->flags; 1288 } 1289 } 1290 1291 void 1292 qlw_put_header(struct qlw_softc *sc, struct qlw_iocb_hdr *hdr, 1293 int type, int count) 1294 { 1295 if (sc->sc_isp_gen == QLW_GEN_ISP1000) { 1296 hdr->entry_type = count; 1297 hdr->entry_count = type; 1298 hdr->seqno = 0; 1299 hdr->flags = 0; 1300 } else { 1301 hdr->entry_type = type; 1302 hdr->entry_count = count; 1303 hdr->seqno = 0; 1304 hdr->flags = 0; 1305 } 1306 } 1307 1308 void 1309 qlw_put_data_seg(struct qlw_softc *sc, struct qlw_iocb_seg *seg, 1310 bus_dmamap_t dmap, int num) 1311 { 1312 seg->seg_addr = qlw_swap32(sc, dmap->dm_segs[num].ds_addr); 1313 seg->seg_len = qlw_swap32(sc, dmap->dm_segs[num].ds_len); 1314 } 1315 1316 void 1317 qlw_put_marker(struct qlw_softc *sc, int bus, void *buf) 1318 { 1319 struct qlw_iocb_marker *marker = buf; 1320 1321 qlw_put_header(sc, &marker->hdr, QLW_IOCB_MARKER, 1); 1322 1323 /* could be more specific here; isp(4) isn't */ 1324 marker->device = qlw_swap16(sc, (bus << 7) << 8); 1325 marker->modifier = qlw_swap16(sc, QLW_IOCB_MARKER_SYNC_ALL); 1326 qlw_dump_iocb(sc, buf, QLW_D_IOCB); 1327 } 1328 1329 void 1330 qlw_put_cmd(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs, 1331 struct qlw_ccb *ccb) 1332 { 1333 struct qlw_iocb_req0 *req = buf; 1334 int entry_count = 1; 1335 u_int16_t dir; 1336 int seg, nsegs; 1337 int seg_count; 1338 int timeout = 0; 1339 int bus, target, lun; 1340 1341 if (xs->datalen == 0) { 
		dir = QLW_IOCB_CMD_NO_DATA;
		seg_count = 1;
	} else {
		dir = xs->flags & SCSI_DATA_IN ? QLW_IOCB_CMD_READ_DATA :
		    QLW_IOCB_CMD_WRITE_DATA;
		seg_count = ccb->ccb_dmamap->dm_nsegs;
		/*
		 * segments beyond QLW_IOCB_SEGS_PER_CMD spill over into
		 * continuation IOCBs, QLW_IOCB_SEGS_PER_CONT apiece
		 */
		nsegs = ccb->ccb_dmamap->dm_nsegs - QLW_IOCB_SEGS_PER_CMD;
		while (nsegs > 0) {
			entry_count++;
			nsegs -= QLW_IOCB_SEGS_PER_CONT;
		}
		/* only the first QLW_IOCB_SEGS_PER_CMD segs go here */
		for (seg = 0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
			if (seg >= QLW_IOCB_SEGS_PER_CMD)
				break;
			qlw_put_data_seg(sc, &req->segs[seg],
			    ccb->ccb_dmamap, seg);
		}
	}

	if (sc->sc_running && (xs->sc_link->quirks & SDEV_NOTAGS) == 0)
		dir |= QLW_IOCB_CMD_SIMPLE_QUEUE;

	qlw_put_header(sc, &req->hdr, QLW_IOCB_CMD_TYPE_0, entry_count);

	/*
	 * timeout is in seconds.  make sure it's at least 1 if a timeout
	 * was specified in xs
	 */
	if (xs->timeout != 0)
		timeout = MAX(1, xs->timeout/1000);

	req->flags = qlw_swap16(sc, dir);
	req->seg_count = qlw_swap16(sc, seg_count);
	req->timeout = qlw_swap16(sc, timeout);

	bus = qlw_xs_bus(sc, xs);
	target = xs->sc_link->target;
	lun = xs->sc_link->lun;
	req->device = qlw_swap16(sc, (((bus << 7) | target) << 8) | lun);

	memcpy(req->cdb, xs->cmd, xs->cmdlen);
	req->ccblen = qlw_swap16(sc, xs->cmdlen);

	req->handle = qlw_swap32(sc, ccb->ccb_id);

	qlw_dump_iocb(sc, buf, QLW_D_IOCB);
}

/*
 * Build a type-0 continuation IOCB carrying the data segments,
 * starting at seg0, that did not fit in the command IOCB.
 */
void
qlw_put_cont(struct qlw_softc *sc, void *buf, struct scsi_xfer *xs,
    struct qlw_ccb *ccb, int seg0)
{
	struct qlw_iocb_cont0 *cont = buf;
	int seg;

	qlw_put_header(sc, &cont->hdr, QLW_IOCB_CONT_TYPE_0, 1);

	for (seg = seg0; seg < ccb->ccb_dmamap->dm_nsegs; seg++) {
		if ((seg - seg0) >= QLW_IOCB_SEGS_PER_CONT)
			break;
		qlw_put_data_seg(sc, &cont->segs[seg - seg0],
		    ccb->ccb_dmamap, seg);
	}
}

#ifndef ISP_NOFIRMWARE
int
qlw_load_firmware_words(struct
    qlw_softc *sc, const u_int16_t *src,
    u_int16_t dest)
{
	u_int16_t i;

	/* src[3] is used as the image length in words */
	for (i = 0; i < src[3]; i++) {
		sc->sc_mbox[0] = QLW_MBOX_WRITE_RAM_WORD;
		sc->sc_mbox[1] = i + dest;
		sc->sc_mbox[2] = src[i];
		if (qlw_mbox(sc, 0x07, 0x01)) {
			printf("firmware load failed\n");
			return (1);
		}
	}

	/* have the firmware checksum what we just loaded */
	sc->sc_mbox[0] = QLW_MBOX_VERIFY_CSUM;
	sc->sc_mbox[1] = dest;
	if (qlw_mbox(sc, 0x0003, 0x0003)) {
		printf("verification of chunk at %x failed: %x\n",
		    dest, sc->sc_mbox[1]);
		return (1);
	}

	return (0);
}

/* Load the compiled-in firmware image at its load address. */
int
qlw_load_firmware(struct qlw_softc *sc)
{
	return qlw_load_firmware_words(sc, sc->sc_firmware, QLW_CODE_ORG);
}

#endif	/* !ISP_NOFIRMWARE */

/*
 * Bit-bang the serial nvram chip and copy its contents into
 * sc_nvram.  Returns 1 if there is no nvram or its contents fail
 * validation, 0 on success.
 */
int
qlw_read_nvram(struct qlw_softc *sc)
{
	u_int16_t data[sizeof(sc->sc_nvram) >> 1];
	u_int16_t req, cmd, val;
	u_int8_t csum;
	int i, bit;
	int reqcmd;
	int nbits;

	if (sc->sc_nvram_size == 0)
		return (1);

	/* smaller parts take a shorter command/address sequence */
	if (sc->sc_nvram_size == 128) {
		reqcmd = (QLW_NVRAM_CMD_READ << 6);
		nbits = 8;
	} else {
		reqcmd = (QLW_NVRAM_CMD_READ << 8);
		nbits = 10;
	}

	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
	delay(10);
	qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL | QLW_NVRAM_CLOCK);
	delay(10);

	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		req = i | reqcmd;

		/* write each bit out through the nvram register */
		for (bit = nbits; bit >= 0; bit--) {
			cmd = QLW_NVRAM_CHIP_SEL;
			if ((req >> bit) & 1) {
				cmd |= QLW_NVRAM_DATA_OUT;
			}
			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd | QLW_NVRAM_CLOCK);
			delay(10);
			qlw_read(sc, QLW_NVRAM);

			qlw_write(sc, QLW_NVRAM, cmd);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* read the result back */
		val = 0;
		/* clock 16 result bits in, MSB first */
		for (bit = 0; bit < 16; bit++) {
			val <<= 1;
			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL |
			    QLW_NVRAM_CLOCK);
			delay(10);
			if (qlw_read(sc, QLW_NVRAM) & QLW_NVRAM_DATA_IN)
				val |= 1;
			delay(10);

			qlw_write(sc, QLW_NVRAM, QLW_NVRAM_CHIP_SEL);
			delay(10);
			qlw_read(sc, QLW_NVRAM);
		}

		/* deselect the chip between words */
		qlw_write(sc, QLW_NVRAM, 0);
		delay(10);
		qlw_read(sc, QLW_NVRAM);

		data[i] = letoh16(val);
	}

	/* byte-wise sum over the whole image must come out to zero */
	csum = 0;
	for (i = 0; i < (sc->sc_nvram_size >> 1); i++) {
		csum += data[i] & 0xff;
		csum += data[i] >> 8;
	}

	memcpy(&sc->sc_nvram, data, sizeof(sc->sc_nvram));
	/* id field should be 'ISP ', version should be high enough */
	if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
	    sc->sc_nvram.id[2] != 'P' || sc->sc_nvram.id[3] != ' ' ||
	    sc->sc_nvram.nvram_version < sc->sc_nvram_minversion ||
	    (csum != 0)) {
		printf("%s: nvram corrupt\n", DEVNAME(sc));
		return (1);
	}
	return (0);
}

/*
 * Parse ISP1040-style nvram into the bus 0 settings (these are
 * single-bus controllers; the KASSERT enforces that).
 */
void
qlw_parse_nvram_1040(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1040 *nv = (struct qlw_nvram_1040 *)&sc->sc_nvram;
	int target;

	KASSERT(bus == 0);

	/*
	 * NOTE(review): presumably QLW_FLAG_INITIATOR means the
	 * initiator id was fixed elsewhere, so only take it from
	 * nvram when the flag is clear — confirm against attach code
	 */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[0] = (nv->config1 >> 4);

	sc->sc_retry_count[0] = nv->retry_count;
	sc->sc_retry_delay[0] = nv->retry_delay;
	sc->sc_reset_delay[0] = nv->reset_delay;
	sc->sc_tag_age_limit[0] = nv->tag_age_limit;
	sc->sc_selection_timeout[0] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[0] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[0] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[0] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[0] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[0][target];

		qt->qt_params =
		    (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

/*
 * Parse ISP1080/12160-style nvram, which carries one settings block
 * per bus, into the softc settings for the given bus.
 */
void
qlw_parse_nvram_1080(struct qlw_softc *sc, int bus)
{
	struct qlw_nvram_1080 *nvram = (struct qlw_nvram_1080 *)&sc->sc_nvram;
	struct qlw_nvram_bus *nv = &nvram->bus[bus];
	int target;

	sc->sc_isp_config = nvram->isp_config;
	sc->sc_fw_features = nvram->fw_features;

	/* see the note in qlw_parse_nvram_1040() about this flag */
	if (!ISSET(sc->sc_flags, QLW_FLAG_INITIATOR))
		sc->sc_initiator[bus] = (nv->config1 & 0x0f);

	sc->sc_retry_count[bus] = nv->retry_count;
	sc->sc_retry_delay[bus] = nv->retry_delay;
	sc->sc_reset_delay[bus] = nv->reset_delay;
	sc->sc_selection_timeout[bus] = letoh16(nv->selection_timeout);
	sc->sc_max_queue_depth[bus] = letoh16(nv->max_queue_depth);
	sc->sc_async_data_setup[bus] = (nv->config2 & 0x0f);
	sc->sc_req_ack_active_neg[bus] = ((nv->config2 & 0x10) >> 4);
	sc->sc_data_line_active_neg[bus] = ((nv->config2 & 0x20) >> 5);

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = (nv->target[target].parameter << 8);
		qt->qt_exec_throttle = nv->target[target].execution_throttle;
		qt->qt_sync_period = nv->target[target].sync_period;
		/* the 12160 uses a wider sync offset field */
		if (sc->sc_isp_gen == QLW_GEN_ISP12160)
			qt->qt_sync_offset = nv->target[target].flags & 0x1f;
		else
			qt->qt_sync_offset = nv->target[target].flags & 0x0f;
	}
}

/*
 * Install default settings for a bus — presumably used when the
 * nvram is missing or fails validation (confirm against the attach
 * path).
 */
void
qlw_init_defaults(struct qlw_softc *sc, int bus)
{
	int target;

	switch (sc->sc_isp_gen) {
	case QLW_GEN_ISP1000:
		break;
	case QLW_GEN_ISP1040:
		sc->sc_isp_config = QLW_BURST_ENABLE | QLW_PCI_FIFO_64;
		break;
	case QLW_GEN_ISP1080:
	case QLW_GEN_ISP12160:
		sc->sc_isp_config = QLW_BURST_ENABLE
		    | QLW_PCI_FIFO_128;
		sc->sc_fw_features = QLW_FW_FEATURE_LVD_NOTIFY;
		break;
	}

	sc->sc_retry_count[bus] = 0;
	sc->sc_retry_delay[bus] = 0;
	sc->sc_reset_delay[bus] = 3;
	sc->sc_tag_age_limit[bus] = 8;
	sc->sc_selection_timeout[bus] = 250;
	sc->sc_max_queue_depth[bus] = 32;
	if (sc->sc_clock > 40)
		sc->sc_async_data_setup[bus] = 9;
	else
		sc->sc_async_data_setup[bus] = 6;
	sc->sc_req_ack_active_neg[bus] = 1;
	sc->sc_data_line_active_neg[bus] = 1;

	for (target = 0; target < QLW_MAX_TARGETS; target++) {
		struct qlw_target *qt = &sc->sc_target[bus][target];

		qt->qt_params = QLW_TARGET_DEFAULT;
		qt->qt_exec_throttle = 16;
		qt->qt_sync_period = 10;
		qt->qt_sync_offset = 12;
	}
}

/*
 * Allocate, map and load a single-segment DMA memory region of the
 * given size.  Returns NULL on failure with everything acquired
 * along the way released again (goto-chain cleanup).
 */
struct qlw_dmamem *
qlw_dmamem_alloc(struct qlw_softc *sc, size_t size)
{
	struct qlw_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->qdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
		goto qdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
qdmfree:
	free(m, M_DEVBUF, sizeof(*m));

	return (NULL);
}

/* Release a region obtained from qlw_dmamem_alloc(). */
void
qlw_dmamem_free(struct qlw_softc *sc, struct qlw_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
	free(m, M_DEVBUF, sizeof(*m));
}

/*
 * Allocate the ccb array, the request/response queue DMA memory and
 * a DMA map per ccb, and place every ccb on the free list.  Returns
 * 0 on success, 1 on failure with everything torn down again.
 */
int
qlw_alloc_ccbs(struct qlw_softc *sc)
{
	struct qlw_ccb *ccb;
	u_int8_t *cmd;
	int i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	mtx_init(&sc->sc_queue_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxccbs, sizeof(struct qlw_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = qlw_dmamem_alloc(sc, sc->sc_maxrequests *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	sc->sc_responses = qlw_dmamem_alloc(sc, sc->sc_maxresponses *
	    QLW_QUEUE_ENTRY_SIZE);
	if (sc->sc_responses == NULL) {
		printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
		goto free_req;
	}

	cmd = QLW_DMA_KVA(sc->sc_requests);
	/*
	 * NOTE(review): zeroes sc_maxccbs queue entries but the region
	 * was sized by sc_maxrequests — presumably the two are equal
	 * here; confirm where they are initialized
	 */
	memset(cmd, 0, QLW_QUEUE_ENTRY_SIZE * sc->sc_maxccbs);
	for (i = 0; i < sc->sc_maxccbs; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    QLW_MAX_SEGS, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;

		qlw_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, qlw_get_ccb, qlw_put_ccb);
	return (0);

free_maps:
	while ((ccb = qlw_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	qlw_dmamem_free(sc, sc->sc_responses);
free_req:
	qlw_dmamem_free(sc,
sc->sc_requests); 1751 free_ccbs: 1752 free(sc->sc_ccbs, M_DEVBUF, 0); 1753 1754 return (1); 1755 } 1756 1757 void 1758 qlw_free_ccbs(struct qlw_softc *sc) 1759 { 1760 struct qlw_ccb *ccb; 1761 1762 scsi_iopool_destroy(&sc->sc_iopool); 1763 while ((ccb = qlw_get_ccb(sc)) != NULL) 1764 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 1765 qlw_dmamem_free(sc, sc->sc_responses); 1766 qlw_dmamem_free(sc, sc->sc_requests); 1767 free(sc->sc_ccbs, M_DEVBUF, 0); 1768 } 1769 1770 void * 1771 qlw_get_ccb(void *xsc) 1772 { 1773 struct qlw_softc *sc = xsc; 1774 struct qlw_ccb *ccb; 1775 1776 mtx_enter(&sc->sc_ccb_mtx); 1777 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free); 1778 if (ccb != NULL) { 1779 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 1780 } 1781 mtx_leave(&sc->sc_ccb_mtx); 1782 return (ccb); 1783 } 1784 1785 void 1786 qlw_put_ccb(void *xsc, void *io) 1787 { 1788 struct qlw_softc *sc = xsc; 1789 struct qlw_ccb *ccb = io; 1790 1791 ccb->ccb_xs = NULL; 1792 mtx_enter(&sc->sc_ccb_mtx); 1793 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 1794 mtx_leave(&sc->sc_ccb_mtx); 1795 } 1796