/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/aac/aac.c,v 1.170 2012/02/13 16:48:49 emaste Exp $
 */

/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
 */
#define AAC_DRIVERNAME	"aac"

#include "opt_aac.h"

/* #include <stddef.h> */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/poll.h>

#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/eventhandler.h>
#include <sys/rman.h>

#include <sys/bus_dma.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/raid/aac/aacreg.h>
#include <dev/raid/aac/aac_ioctl.h>
#include <dev/raid/aac/aacvar.h>
#include <dev/raid/aac/aac_tables.h>

static void	aac_startup(void *arg);
static void	aac_add_container(struct aac_softc *sc,
				  struct aac_mntinforesp *mir, int f);
static void	aac_get_bus_info(struct aac_softc *sc);
static void	aac_daemon(void *arg);

/* Command Processing */
static void	aac_timeout(struct aac_softc *sc);
static void	aac_complete(void *context, int pending);
static int	aac_bio_command(struct aac_softc *sc, struct aac_command **cmp);
static void	aac_bio_complete(struct aac_command *cm);
static int	aac_wait_command(struct aac_command *cm);
static void	aac_command_thread(void *arg);

/* Command Buffer Management */
static void	aac_map_command_sg(void *arg, bus_dma_segment_t *segs,
				   int nseg, int error);
static void	aac_map_command_helper(void *arg, bus_dma_segment_t *segs,
				       int nseg, int error);
static int	aac_alloc_commands(struct aac_softc *sc);
static void	aac_free_commands(struct aac_softc *sc);
static void	aac_unmap_command(struct aac_command *cm);

/* Hardware Interface */
static int	aac_alloc(struct aac_softc *sc);
static void	aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg,
			       int error);
static int	aac_check_firmware(struct aac_softc *sc);
static int	aac_init(struct aac_softc *sc);
static int	aac_sync_command(struct aac_softc *sc, u_int32_t command,
				 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2,
				 u_int32_t arg3, u_int32_t *sp);
static int	aac_setup_intr(struct aac_softc *sc);
static int	aac_enqueue_fib(struct aac_softc *sc, int queue,
				struct aac_command *cm);
static int	aac_dequeue_fib(struct aac_softc *sc, int queue,
				u_int32_t *fib_size, struct aac_fib **fib_addr);
static int	aac_enqueue_response(struct aac_softc *sc, int queue,
				     struct aac_fib *fib);

/* StrongARM interface */
static int	aac_sa_get_fwstatus(struct aac_softc *sc);
static void	aac_sa_qnotify(struct aac_softc *sc, int qbit);
static int	aac_sa_get_istatus(struct aac_softc *sc);
static void	aac_sa_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_sa_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_sa_set_interrupts(struct aac_softc *sc, int enable);

struct aac_interface aac_sa_interface = {
	aac_sa_get_fwstatus,
	aac_sa_qnotify,
	aac_sa_get_istatus,
	aac_sa_clear_istatus,
	aac_sa_set_mailbox,
	aac_sa_get_mailbox,
	aac_sa_set_interrupts,
	NULL, NULL, NULL
};

/* i960Rx interface */
static int	aac_rx_get_fwstatus(struct aac_softc *sc);
static void	aac_rx_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rx_get_istatus(struct aac_softc *sc);
static void	aac_rx_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command,
				   u_int32_t arg0, u_int32_t arg1,
				   u_int32_t arg2, u_int32_t arg3);
static int	aac_rx_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rx_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rx_get_outb_queue(struct aac_softc *sc);
static void	aac_rx_set_outb_queue(struct aac_softc *sc, int index);

struct aac_interface aac_rx_interface = {
	aac_rx_get_fwstatus,
	aac_rx_qnotify,
	aac_rx_get_istatus,
	aac_rx_clear_istatus,
	aac_rx_set_mailbox,
	aac_rx_get_mailbox,
	aac_rx_set_interrupts,
	aac_rx_send_command,
	aac_rx_get_outb_queue,
	aac_rx_set_outb_queue
};

/* Rocket/MIPS interface */
static int	aac_rkt_get_fwstatus(struct aac_softc *sc);
static void	aac_rkt_qnotify(struct aac_softc *sc, int qbit);
static int	aac_rkt_get_istatus(struct aac_softc *sc);
static void	aac_rkt_clear_istatus(struct aac_softc *sc, int mask);
static void	aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command,
				    u_int32_t arg0, u_int32_t arg1,
				    u_int32_t arg2, u_int32_t arg3);
static int	aac_rkt_get_mailbox(struct aac_softc *sc, int mb);
static void	aac_rkt_set_interrupts(struct aac_softc *sc, int enable);
static int	aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm);
static int	aac_rkt_get_outb_queue(struct aac_softc *sc);
static void	aac_rkt_set_outb_queue(struct aac_softc *sc, int index);

struct aac_interface aac_rkt_interface = {
	aac_rkt_get_fwstatus,
	aac_rkt_qnotify,
	aac_rkt_get_istatus,
	aac_rkt_clear_istatus,
	aac_rkt_set_mailbox,
	aac_rkt_get_mailbox,
	aac_rkt_set_interrupts,
	aac_rkt_send_command,
	aac_rkt_get_outb_queue,
	aac_rkt_set_outb_queue
};

/* Debugging and Diagnostics */
static void	aac_describe_controller(struct aac_softc *sc);
static char	*aac_describe_code(struct aac_code_lookup *table,
				   u_int32_t code);

/* Management Interface */
static d_open_t		aac_open;
static d_close_t	aac_close;
static d_ioctl_t	aac_ioctl;
static d_kqfilter_t	aac_kqfilter;
static void		aac_filter_detach(struct knote *kn);
static int		aac_filter_read(struct knote *kn, long hint);
static int		aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib);
static int		aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg);
static void		aac_handle_aif(struct aac_softc *sc,
				       struct aac_fib *fib);
static int		aac_rev_check(struct aac_softc *sc, caddr_t udata);
static int		aac_open_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_close_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_getnext_aif(struct aac_softc *sc, caddr_t arg);
static int		aac_return_aif(struct aac_softc *sc,
				       struct aac_fib_context *ctx, caddr_t uptr);
static int		aac_query_disk(struct aac_softc *sc, caddr_t uptr);
static int		aac_get_pci_info(struct aac_softc *sc, caddr_t uptr);
static int		aac_supported_features(struct aac_softc *sc, caddr_t uptr);
static void		aac_ioctl_event(struct aac_softc *sc,
					struct aac_event *event, void *arg);
static struct aac_mntinforesp *
	aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid);

static struct dev_ops aac_ops = {
	{ "aac", 0, 0 },
	.d_open = aac_open,
	.d_close = aac_close,
	.d_ioctl = aac_ioctl,
	.d_kqfilter = aac_kqfilter
};

static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver");

/* sysctl node */
static SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD, 0, "AAC driver parameters");

/*
 * Device Interface
 */

/*
 * Initialize the controller and softc
 */
int
aac_attach(struct aac_softc *sc)
{
	int error, unit;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Initialize per-controller queues.
	 */
	aac_initq_free(sc);
	aac_initq_ready(sc);
	aac_initq_busy(sc);
	aac_initq_bio(sc);

	/*
	 * Initialize command-completion task.
	 */
	TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc);

	/* mark controller as suspended until we get ourselves organised */
	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Check that the firmware on the card is supported.
	 */
	if ((error = aac_check_firmware(sc)) != 0)
		return(error);

	/*
	 * Initialize locks
	 */
	lockinit(&sc->aac_aifq_lock, "AAC AIF lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_io_lock, "AAC I/O lock", 0, LK_CANRECURSE);
	lockinit(&sc->aac_container_lock, "AAC container lock", 0, LK_CANRECURSE);
	TAILQ_INIT(&sc->aac_container_tqh);
	TAILQ_INIT(&sc->aac_ev_cmfree);

	/* Initialize the clock daemon callout. */
	callout_init(&sc->aac_daemontime);

	/*
	 * Initialize the adapter.
	 */
	if ((error = aac_alloc(sc)) != 0)
		return(error);
	if ((error = aac_init(sc)) != 0)
		return(error);

	/*
	 * Allocate and connect our interrupt.
	 */
	if ((error = aac_setup_intr(sc)) != 0)
		return(error);

	/*
	 * Print a little information about the controller.
	 */
	aac_describe_controller(sc);

	/*
	 * Add sysctls.
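	 * The node is named after the device (hw.aacN) and currently exports
	 * only the adapter firmware build number.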
	 */
	sysctl_ctx_init(&sc->aac_sysctl_ctx);
	sc->aac_sysctl_tree = SYSCTL_ADD_NODE(&sc->aac_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->aac_dev), CTLFLAG_RD, 0, "");
	if (sc->aac_sysctl_tree == NULL) {
		device_printf(sc->aac_dev, "can't add sysctl node\n");
		return (EINVAL);
	}
	SYSCTL_ADD_INT(&sc->aac_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->aac_sysctl_tree),
	    OID_AUTO, "firmware_build", CTLFLAG_RD,
	    &sc->aac_revision.buildNumber, 0,
	    "firmware build number");

	/*
	 * Register to probe our containers later.
	 */
	sc->aac_ich.ich_func = aac_startup;
	sc->aac_ich.ich_arg = sc;
	sc->aac_ich.ich_desc = "aac";
	if (config_intrhook_establish(&sc->aac_ich) != 0) {
		device_printf(sc->aac_dev,
			      "can't establish configuration hook\n");
		return(ENXIO);
	}

	/*
	 * Make the control device.
	 */
	unit = device_get_unit(sc->aac_dev);
	sc->aac_dev_t = make_dev(&aac_ops, unit, UID_ROOT, GID_OPERATOR,
				 0640, "aac%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "afa%d", unit);
	(void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit);
	sc->aac_dev_t->si_drv1 = sc;

	/* Create the AIF thread */
	if (kthread_create(aac_command_thread, sc,
			   &sc->aifthread, "aac%daif", unit))
		panic("Could not create AIF thread");

	/* Register the shutdown method to only be called post-dump */
	if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown,
	    sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL)
		device_printf(sc->aac_dev,
			      "shutdown event registration failed\n");

	/* Register with CAM for the non-DASD devices */
	if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) {
		TAILQ_INIT(&sc->aac_sim_tqh);
		aac_get_bus_info(sc);
	}

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

static void
aac_daemon(void *arg)
{
	struct timeval tv;
	struct aac_softc *sc;
	struct aac_fib *fib;

	sc = arg;
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);

	if (callout_pending(&sc->aac_daemontime) ||
	    callout_active(&sc->aac_daemontime) == 0) {
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return;
	}
	getmicrotime(&tv);
	aac_alloc_sync_fib(sc, &fib);
	*(uint32_t *)fib->data = tv.tv_sec;
	aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t));
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	callout_reset(&sc->aac_daemontime, 30 * 60 * hz, aac_daemon, sc);
}

void
aac_add_event(struct aac_softc *sc, struct aac_event *event)
{

	switch (event->ev_type & AAC_EVENT_MASK) {
	case AAC_EVENT_CMFREE:
		TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links);
		break;
	default:
		device_printf(sc->aac_dev, "aac_add event: unknown event %d\n",
			      event->ev_type);
		break;
	}

	return;
}

/*
 * Request information about container #cid
 */
static struct aac_mntinforesp *
aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid)
{
	struct aac_mntinfo *mi;

	mi = (struct aac_mntinfo *)&fib->data[0];
	/* use 64-bit LBA if enabled */
	mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ?
	    VM_NameServe64 : VM_NameServe;
	mi->MntType = FT_FILESYS;
	mi->MntCount = cid;

	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
			 sizeof(struct aac_mntinfo))) {
		device_printf(sc->aac_dev, "Error probing container %d\n", cid);
		return (NULL);
	}

	return ((struct aac_mntinforesp *)&fib->data[0]);
}

/*
 * Probe for containers, create disks.
 */
static void
aac_startup(void *arg)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_mntinforesp *mir;
	int count = 0, i = 0;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* disconnect ourselves from the intrhook chain */
	config_intrhook_disestablish(&sc->aac_ich);

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);

	/* loop over possible containers */
	do {
		if ((mir = aac_get_container_info(sc, fib, i)) == NULL)
			continue;
		if (i == 0)
			count = mir->MntRespCount;
		aac_add_container(sc, mir, 0);
		i++;
	} while ((i < count) && (i < AAC_MAX_CONTAINERS));

	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	/* poke the bus to actually attach the child devices */
	if (bus_generic_attach(sc->aac_dev))
		device_printf(sc->aac_dev, "bus_generic_attach failed\n");

	/* mark the controller up */
	sc->aac_state &= ~AAC_STATE_SUSPEND;

	/* enable interrupts now */
	AAC_UNMASK_INTERRUPTS(sc);
}

/*
 * Create a device to represent a new container
 */
static void
aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
{
	struct aac_container *co;
	device_t child;

	/*
	 * Check container volume type for validity.  Note that many of
	 * the possible types may never show up.
	 */
	if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) {
		co = (struct aac_container *)kmalloc(sizeof *co, M_AACBUF,
		    M_INTWAIT | M_ZERO);
		fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d",
			 mir->MntTable[0].ObjectId,
			 mir->MntTable[0].FileSystemName,
			 mir->MntTable[0].Capacity, mir->MntTable[0].VolType);

		if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
			device_printf(sc->aac_dev, "device_add_child failed\n");
		else
			device_set_ivars(child, co);
		device_set_desc(child, aac_describe_code(aac_container_types,
				mir->MntTable[0].VolType));
		co->co_disk = child;
		co->co_found = f;
		bcopy(&mir->MntTable[0], &co->co_mntobj,
		      sizeof(struct aac_mntobj));
		lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE);
		TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link);
		lockmgr(&sc->aac_container_lock, LK_RELEASE);
	}
}

/*
 * Allocate resources associated with (sc)
 */
static int
aac_alloc(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Create DMA tag for mapping buffers into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_SG_64BIT) ?
			       BUS_SPACE_MAXADDR :
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       MAXBSIZE,		/* maxsize */
			       sc->aac_sg_tablesize,	/* nsegments */
			       MAXBSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->aac_buffer_dmat)) {
		device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for mapping FIBs into controller-addressable space.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsize */
			       1,			/* nsegments */
			       sc->aac_max_fibs_alloc *
			       sc->aac_max_fib_size,	/* maxsegsize */
			       0,			/* flags */
			       &sc->aac_fib_dmat)) {
		device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * Create DMA tag for the common structure and allocate it.
	 */
	if (bus_dma_tag_create(sc->aac_parent_dmat,	/* parent */
			       1, 0,			/* algnmnt, boundary */
			       (sc->flags & AAC_FLAGS_4GB_WINDOW) ?
			       BUS_SPACE_MAXADDR_32BIT :
			       0x7fffffff,		/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       8192 + sizeof(struct aac_common), /* maxsize */
			       1,			/* nsegments */
			       BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			       0,			/* flags */
			       &sc->aac_common_dmat)) {
		device_printf(sc->aac_dev,
			      "can't allocate common structure DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common,
			     BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) {
		device_printf(sc->aac_dev, "can't allocate common structure\n");
		return (ENOMEM);
	}

	/*
	 * Work around a bug in the 2120 and 2200 that cannot DMA commands
	 * below address 8192 in physical memory.
	 * XXX If the padding is not needed, can it be put to use instead
	 * of ignored?
	 */
	(void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap,
			sc->aac_common, 8192 + sizeof(*sc->aac_common),
			aac_common_map, sc, 0);

	if (sc->aac_common_busaddr < 8192) {
		sc->aac_common = (struct aac_common *)
		    ((uint8_t *)sc->aac_common + 8192);
		sc->aac_common_busaddr += 8192;
	}
	bzero(sc->aac_common, sizeof(*sc->aac_common));

	/* Allocate some FIBs and associated command structs */
	TAILQ_INIT(&sc->aac_fibmap_tqh);
	sc->aac_commands = kmalloc(sc->aac_max_fibs * sizeof(struct aac_command),
				   M_AACBUF, M_WAITOK|M_ZERO);
	while (sc->total_fibs < sc->aac_max_fibs) {
		if (aac_alloc_commands(sc) != 0)
			break;
	}
	if (sc->total_fibs == 0)
		return (ENOMEM);

	return (0);
}

/*
 * Free all of the resources associated with (sc)
 *
 * Should not be called if the controller is active.
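 * (aac_detach() quiesces the adapter via aac_shutdown() before calling this.)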
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	kfree(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
				sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid,
				     sc->aac_irq);

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid0, sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
				     sc->aac_regs_rid1, sc->aac_regs_res1);
	dev_ops_remove_minor(&aac_ops, device_get_unit(sc->aac_dev));

	sysctl_ctx_free(&sc->aac_sysctl_ctx);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim *sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

#if 0 /* XXX swildner */
	callout_drain(&sc->aac_daemontime);
#else
	callout_stop(&sc->aac_daemontime);
#endif

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		lksleep(sc->aac_dev, &sc->aac_io_lock, 0, "aacdch", 0);
	}
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
		error = device_delete_child(dev, co->co_disk);
		if (error)
			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		kfree(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
		error = device_delete_child(dev, sim->sim_dev);
		if (error)
			return (error);
		kfree(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	lockuninit(&sc->aac_aifq_lock);
	lockuninit(&sc->aac_io_lock);
	lockuninit(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
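 * It sends a VM_CloseAll container command to the controller before
 * masking interrupts.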
 *
 * Note that we can assume that the bioq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
	 * We've been closed and all I/O completed already.
	 */
	device_printf(sc->aac_dev, "shutting down controller...");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	aac_alloc_sync_fib(sc, &fib);
	cc = (struct aac_close_command *)&fib->data[0];

	bzero(cc, sizeof(struct aac_close_command));
	cc->Command = VM_CloseAll;
	cc->ContainerId = 0xffffffff;
	if (aac_sync_fib(sc, ContainerCommand, 0, fib,
	    sizeof(struct aac_close_command)))
		kprintf("FAILED.\n");
	else
		kprintf("done\n");
#if 0
	else {
		fib->data[0] = 0;
		/*
		 * XXX Issuing this command to the controller makes it shut down
		 * but also keeps it from coming back up without a reset of the
		 * PCI bus.  This is not desirable if you are just unloading the
		 * driver module with the intent to reload it later.
		 */
		if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN,
		    fib, 1)) {
			kprintf("FAILED.\n");
		} else {
			kprintf("done.\n");
		}
	}
#endif

	AAC_MASK_INTERRUPTS(sc);
	aac_release_sync_fib(sc);
	lockmgr(&sc->aac_io_lock, LK_RELEASE);

	return(0);
}

/*
 * Bring the controller to a quiescent state, ready for system suspend.
 */
int
aac_suspend(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state |= AAC_STATE_SUSPEND;

	AAC_MASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Bring the controller back to a state ready for operation.
 */
int
aac_resume(device_t dev)
{
	struct aac_softc *sc;

	sc = device_get_softc(dev);

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	sc->aac_state &= ~AAC_STATE_SUSPEND;
	AAC_UNMASK_INTERRUPTS(sc);
	return(0);
}

/*
 * Interrupt handler for NEW_COMM interface.
 */
void
aac_new_intr(void *arg)
{
	struct aac_softc *sc;
	u_int32_t index, fast;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	while (1) {
		index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			index = AAC_GET_OUTB_QUEUE(sc);
		if (index == 0xffffffff)
			break;
		if (index & 2) {
			if (index == 0xfffffffe) {
				/* XXX This means that the controller wants
				 * more work.  Ignore it for now.
				 */
				continue;
			}
			/* AIF */
			fib = (struct aac_fib *)kmalloc(sizeof *fib, M_AACBUF,
				   M_INTWAIT | M_ZERO);
			index &= ~2;
			for (i = 0; i < sizeof(struct aac_fib)/4; ++i)
				((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4);
			aac_handle_aif(sc, fib);
			kfree(fib, M_AACBUF);

			/*
			 * AIF memory is owned by the adapter, so let it
			 * know that we are done with it.
			 */
			AAC_SET_OUTB_QUEUE(sc, index);
			AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY);
		} else {
			fast = index & 1;
			cm = sc->aac_commands + (index >> 2);
			fib = cm->cm_fib;
			if (fast) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEADAP;
				*((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL;
			}
			aac_remove_busy(cm);
			aac_unmap_command(cm);
			cm->cm_flags |= AAC_CMD_COMPLETED;

			/* is there a completion handler? */
			if (cm->cm_complete != NULL) {
				cm->cm_complete(cm);
			} else {
				/* assume that someone is sleeping on this
				 * command
				 */
				wakeup(cm);
			}
			sc->flags &= ~AAC_QUEUE_FRZN;
		}
	}
	/* see if we can start some more I/O */
	if ((sc->flags & AAC_QUEUE_FRZN) == 0)
		aac_startio(sc);

	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Interrupt filter for !NEW_COMM interface.
 */
void
aac_filter(void *arg)
{
	struct aac_softc *sc;
	u_int16_t reason;

	sc = (struct aac_softc *)arg;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Read the status register directly.  This is faster than taking the
	 * driver lock and reading the queues directly.  It also saves having
	 * to turn parts of the driver lock into a spin mutex, which would be
	 * ugly.
	 */
	reason = AAC_GET_ISTATUS(sc);
	AAC_CLEAR_ISTATUS(sc, reason);

	/* handle completion processing */
	if (reason & AAC_DB_RESPONSE_READY)
		taskqueue_enqueue(taskqueue_swi, &sc->aac_task_complete);

	/* controller wants to talk to us */
	if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) {
		/*
		 * XXX Make sure that we don't get fooled by strange messages
		 * that start with a NULL.
		 */
		if ((reason & AAC_DB_PRINTF) &&
		    (sc->aac_common->ac_printf[0] == 0))
			sc->aac_common->ac_printf[0] = 32;

		/*
		 * This might miss doing the actual wakeup.  However, the
		 * lksleep that this is waking up has a timeout, so it will
		 * wake up eventually.  AIFs and printfs are low enough
		 * priority that they can handle hanging out for a few seconds
		 * if needed.
		 */
		wakeup(sc->aifthread);
	}
}

/*
 * Command Processing
 */

/*
 * Start as much queued I/O as possible on the controller
 */
void
aac_startio(struct aac_softc *sc)
{
	struct aac_command *cm;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	for (;;) {
		/*
		 * This flag might be set if the card is out of resources.
		 * Checking it here prevents an infinite loop of deferrals.
		 */
		if (sc->flags & AAC_QUEUE_FRZN)
			break;

		/*
		 * Try to get a command that's been put off for lack of
		 * resources
		 */
		cm = aac_dequeue_ready(sc);

		/*
		 * Try to build a command off the bio queue (ignore error
		 * return)
		 */
		if (cm == NULL)
			aac_bio_command(sc, &cm);

		/* nothing to do? */
		if (cm == NULL)
			break;

		/* don't map more than once */
		if (cm->cm_flags & AAC_CMD_MAPPED)
			panic("aac: command %p already mapped", cm);

		/*
		 * Set up the command to go to the controller.  If there are no
		 * data buffers associated with the command then it can bypass
		 * busdma.
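		 * In that case the map callback is invoked directly with a
		 * NULL segment list.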
		 */
		if (cm->cm_datalen != 0) {
			error = bus_dmamap_load(sc->aac_buffer_dmat,
						cm->cm_datamap, cm->cm_data,
						cm->cm_datalen,
						aac_map_command_sg, cm, 0);
			if (error == EINPROGRESS) {
				fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n");
				sc->flags |= AAC_QUEUE_FRZN;
				error = 0;
			} else if (error != 0)
				panic("aac_startio: unexpected error %d from "
				      "busdma", error);
		} else
			aac_map_command_sg(cm, NULL, 0, 0);
	}
}

/*
 * Handle notification of one or more FIBs coming from the controller.
 */
static void
aac_command_thread(void *arg)
{
	struct aac_softc *sc = arg;
	struct aac_fib *fib;
	u_int32_t fib_size;
	int size, retval;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
	sc->aifflags = AAC_AIFFLAGS_RUNNING;

	while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) {

		retval = 0;
		if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0)
			retval = lksleep(sc->aifthread, &sc->aac_io_lock, 0,
					 "aifthd", AAC_PERIODIC_INTERVAL * hz);

		/*
		 * First see if any FIBs need to be allocated.  This needs
		 * to be called without the driver lock because contigmalloc
		 * can sleep.
		 */
		if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) {
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			aac_alloc_commands(sc);
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS;
			aac_startio(sc);
		}

		/*
		 * While we're here, check to see if any commands are stuck.
		 * This is pretty low-priority, so it's ok if it doesn't
		 * always fire.
		 */
		if (retval == EWOULDBLOCK)
			aac_timeout(sc);

		/* Check the hardware printf message buffer */
		if (sc->aac_common->ac_printf[0] != 0)
			aac_print_printf(sc);

		/* Also check to see if the adapter has a command for us. */
		if (sc->flags & AAC_FLAGS_NEW_COMM)
			continue;
		for (;;) {
			if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE,
					    &fib_size, &fib))
				break;

			AAC_PRINT_FIB(sc, fib);

			switch (fib->Header.Command) {
			case AifRequest:
				aac_handle_aif(sc, fib);
				break;
			default:
				device_printf(sc->aac_dev, "unknown command "
					      "from controller\n");
				break;
			}

			if ((fib->Header.XferState == 0) ||
			    (fib->Header.StructType != AAC_FIBTYPE_TFIB)) {
				break;
			}

			/* Return the AIF to the controller. */
			if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) {
				fib->Header.XferState |= AAC_FIBSTATE_DONEHOST;
				*(AAC_FSAStatus*)fib->data = ST_OK;

				/* XXX Compute the Size field? */
				size = fib->Header.Size;
				if (size > sizeof(struct aac_fib)) {
					size = sizeof(struct aac_fib);
					fib->Header.Size = size;
				}
				/*
				 * Since we did not generate this command, it
				 * cannot go through the normal
				 * enqueue->startio chain.
				 */
				aac_enqueue_response(sc,
						     AAC_ADAP_NORM_RESP_QUEUE,
						     fib);
			}
		}
	}
	sc->aifflags &= ~AAC_AIFFLAGS_RUNNING;
	lockmgr(&sc->aac_io_lock, LK_RELEASE);
	wakeup(sc->aac_dev);
}

/*
 * Process completed commands.
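 * This runs from the swi taskqueue in response to AAC_DB_RESPONSE_READY
 * (see aac_filter()).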
 */
static void
aac_complete(void *context, int pending)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	u_int32_t fib_size;

	sc = (struct aac_softc *)context;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);

	/* pull completed commands off the queue */
	for (;;) {
		/* look for completed FIBs on our queue */
		if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size,
				    &fib))
			break;	/* nothing to do */

		/* get the command, unmap and hand off for processing */
		cm = sc->aac_commands + fib->Header.SenderData;
		if (cm == NULL) {
			AAC_PRINT_FIB(sc, fib);
			break;
		}
		if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0)
			device_printf(sc->aac_dev,
			    "COMMAND %p COMPLETED AFTER %d SECONDS\n",
			    cm, (int)(time_second-cm->cm_timestamp));

		aac_remove_busy(cm);

		aac_unmap_command(cm);
		cm->cm_flags |= AAC_CMD_COMPLETED;

		/* is there a completion handler? */
		if (cm->cm_complete != NULL) {
			cm->cm_complete(cm);
		} else {
			/* assume that someone is sleeping on this command */
			wakeup(cm);
		}
	}

	/* see if we can start some more I/O */
	sc->flags &= ~AAC_QUEUE_FRZN;
	aac_startio(sc);

	lockmgr(&sc->aac_io_lock, LK_RELEASE);
}

/*
 * Handle a bio submitted from a disk device.
 */
void
aac_submit_bio(struct aac_disk *ad, struct bio *bio)
{
	struct aac_softc *sc;

	bio->bio_driver_info = ad;
	sc = ad->ad_controller;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* queue the BIO and try to get some work done */
	aac_enqueue_bio(sc, bio);
	aac_startio(sc);
}

/*
 * Get a bio and build a command to go with it.
 */
static int
aac_bio_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;
	struct aac_fib *fib;
	struct aac_disk *ad;
	struct bio *bio;
	struct buf *bp;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* get the resources we will need */
	cm = NULL;
	bio = NULL;
	if (aac_alloc_command(sc, &cm))	/* get a command */
		goto fail;
	if ((bio = aac_dequeue_bio(sc)) == NULL)
		goto fail;

	/* fill out the command */
	bp = bio->bio_buf;
	cm->cm_data = (void *)bp->b_data;
	cm->cm_datalen = bp->b_bcount;
	cm->cm_complete = aac_bio_complete;
	cm->cm_private = bio;
	cm->cm_timestamp = time_second;

	/* build the FIB */
	fib = cm->cm_fib;
	fib->Header.Size = sizeof(struct aac_fib_header);
	fib->Header.XferState =
		AAC_FIBSTATE_HOSTOWNED   |
		AAC_FIBSTATE_INITIALISED |
		AAC_FIBSTATE_EMPTY	 |
		AAC_FIBSTATE_FROMHOST	 |
		AAC_FIBSTATE_REXPECTED   |
		AAC_FIBSTATE_NORM	 |
		AAC_FIBSTATE_ASYNC	 |
		AAC_FIBSTATE_FAST_RESPONSE;

	/* build the read/write request */
	ad = (struct aac_disk *)bio->bio_driver_info;

	if (sc->flags & AAC_FLAGS_RAW_IO) {
		struct aac_raw_io *raw;
		raw = (struct aac_raw_io *)&fib->data[0];
		fib->Header.Command = RawIo;
		raw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
		raw->ByteCount = bp->b_bcount;
		raw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
		raw->BpTotal = 0;
		raw->BpComplete = 0;
		fib->Header.Size += sizeof(struct aac_raw_io);
		cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw;
		if (bp->b_cmd == BUF_CMD_READ) {
			raw->Flags = 1;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			raw->Flags = 0;
			cm->cm_flags |= AAC_CMD_DATAOUT;
		}
	} else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
		fib->Header.Command = ContainerCommand;
		if (bp->b_cmd == BUF_CMD_READ) {
			struct aac_blockread *br;
			br = (struct aac_blockread *)&fib->data[0];
			br->Command = VM_CtBlockRead;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			br->ByteCount = bp->b_bcount;
			fib->Header.Size += sizeof(struct aac_blockread);
			cm->cm_sgtable = &br->SgMap;
			cm->cm_flags |= AAC_CMD_DATAIN;
		} else {
			struct aac_blockwrite *bw;
			bw = (struct aac_blockwrite *)&fib->data[0];
			bw->Command = VM_CtBlockWrite;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			bw->ByteCount = bp->b_bcount;
			bw->Stable = CUNSTABLE;
			fib->Header.Size += sizeof(struct aac_blockwrite);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = &bw->SgMap;
		}
	} else {
		fib->Header.Command = ContainerCommand64;
		if (bp->b_cmd == BUF_CMD_READ) {
			struct aac_blockread64 *br;
			br = (struct aac_blockread64 *)&fib->data[0];
			br->Command = VM_CtHostRead64;
			br->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			br->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
			br->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			br->Pad = 0;
			br->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockread64);
			cm->cm_flags |= AAC_CMD_DATAIN;
			cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64;
		} else {
			struct aac_blockwrite64 *bw;
			bw = (struct aac_blockwrite64 *)&fib->data[0];
			bw->Command = VM_CtHostWrite64;
			bw->ContainerId = ad->ad_container->co_mntobj.ObjectId;
			bw->SectorCount = bp->b_bcount / AAC_BLOCK_SIZE;
			bw->BlockNumber = bio->bio_offset / AAC_BLOCK_SIZE;
			bw->Pad = 0;
			bw->Flags = 0;
			fib->Header.Size += sizeof(struct aac_blockwrite64);
			cm->cm_flags |= AAC_CMD_DATAOUT;
			cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64;
		}
	}

	*cmp = cm;
	return(0);

fail:
	if (bio != NULL)
		aac_enqueue_bio(sc, bio);
	if (cm != NULL)
		aac_release_command(cm);
	return(ENOMEM);
}

/*
 * Handle a bio-instigated command that has been completed.
 */
static void
aac_bio_complete(struct aac_command *cm)
{
	struct aac_blockread_response *brr;
	struct aac_blockwrite_response *bwr;
	struct bio *bio;
	struct buf *bp;
	const char *code;
	AAC_FSAStatus status;

	/* fetch relevant status and then release the command */
	bio = (struct bio *)cm->cm_private;
	bp = bio->bio_buf;
	if (bp->b_cmd == BUF_CMD_READ) {
		brr = (struct aac_blockread_response *)&cm->cm_fib->data[0];
		status = brr->Status;
	} else {
		bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0];
		status = bwr->Status;
	}
	aac_release_command(cm);

	/* fix up the bio based on status */
	if (status == ST_OK) {
		bp->b_resid = 0;
		code = NULL;
	} else {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		/* pass an error string out to the disk layer */
		code = aac_describe_code(aac_command_status_table, status);
	}
	aac_biodone(bio, code);
}

/*
 * Submit a command to the controller, return when it completes.
 * XXX This is very dangerous!  If the card has gone out to lunch, we could
 *     be stuck here forever.  At the same time, signals are not caught
 *     because there is a risk that a signal could wakeup the sleep before
 *     the card has a chance to complete the command.  Since there is no way
 *     to cancel a command that is in progress, we can't protect against the
 *     card completing a command late and spamming the command and data
 *     memory.  So, we are held hostage until the command completes.
 */
static int
aac_wait_command(struct aac_command *cm)
{
	struct aac_softc *sc;
	int error;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* Put the command on the ready queue and get things going */
	aac_enqueue_ready(cm);
	aac_startio(sc);
	error = lksleep(cm, &sc->aac_io_lock, 0, "aacwait", 0);
	return(error);
}

/*
 * Command Buffer Management
 */

/*
 * Allocate a command.
 */
int
aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp)
{
	struct aac_command *cm;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if ((cm = aac_dequeue_free(sc)) == NULL) {
		if (sc->total_fibs < sc->aac_max_fibs) {
			lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
			sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS;
			lockmgr(&sc->aac_io_lock, LK_RELEASE);
			wakeup(sc->aifthread);
		}
		return (EBUSY);
	}

	*cmp = cm;
	return(0);
}

/*
 * Release a command back to the freelist.
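 * If anyone has registered interest in a free command (aac_ev_cmfree),
 * the first such event callback is fired here.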
 */
void
aac_release_command(struct aac_command *cm)
{
	struct aac_event *event;
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* (re)initialize the command/FIB */
	cm->cm_sgtable = NULL;
	cm->cm_flags = 0;
	cm->cm_complete = NULL;
	cm->cm_private = NULL;
	cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE;
	cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY;
	cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB;
	cm->cm_fib->Header.Flags = 0;
	cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size;

	/*
	 * These are duplicated in aac_start to cover the case where an
	 * intermediate stage may have destroyed them.  They're left
	 * initialized here for debugging purposes only.
	 */
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;
	cm->cm_fib->Header.SenderData = 0;

	aac_enqueue_free(cm);

	if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) {
		TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links);
		event->ev_callback(sc, event, event->ev_arg);
	}
}

/*
 * Map helper for command/FIB allocation.
 */
static void
aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint64_t *fibphys;

	fibphys = (uint64_t *)arg;

	*fibphys = segs[0].ds_addr;
}

/*
 * Allocate and initialize commands/FIBs for this adapter.
 */
static int
aac_alloc_commands(struct aac_softc *sc)
{
	struct aac_command *cm;
	struct aac_fibmap *fm;
	uint64_t fibphys;
	int i, error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs)
		return (ENOMEM);

	fm = kmalloc(sizeof(struct aac_fibmap), M_AACBUF, M_INTWAIT | M_ZERO);

	/* allocate the FIBs in DMAable memory and load them */
	if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs,
			     BUS_DMA_NOWAIT, &fm->aac_fibmap)) {
		device_printf(sc->aac_dev,
			      "Not enough contiguous memory available.\n");
		kfree(fm, M_AACBUF);
		return (ENOMEM);
	}

	/* Ignore errors since this doesn't bounce */
	(void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs,
			      sc->aac_max_fibs_alloc * sc->aac_max_fib_size,
			      aac_map_command_helper, &fibphys, 0);

	/* initialize constant fields in the command structure */
	bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size);
	for (i = 0; i < sc->aac_max_fibs_alloc; i++) {
		cm = sc->aac_commands + sc->total_fibs;
		fm->aac_commands = cm;
		cm->cm_sc = sc;
		cm->cm_fib = (struct aac_fib *)
			((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size);
		cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size;
		cm->cm_index = sc->total_fibs;

		if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0,
					       &cm->cm_datamap)) != 0)
			break;
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		aac_release_command(cm);
		sc->total_fibs++;
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
	}

	if (i > 0) {
		lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE);
		TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link);
		fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs);
		lockmgr(&sc->aac_io_lock, LK_RELEASE);
		return (0);
	}

	bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
	bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
	kfree(fm, M_AACBUF);
	return (ENOMEM);
}

/*
 * Free FIBs owned by this adapter.
 */
static void
aac_free_commands(struct aac_softc *sc)
{
	struct aac_fibmap *fm;
	struct aac_command *cm;
	int i;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) {

		TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link);
		/*
		 * We check against total_fibs to handle partially
		 * allocated blocks.
		 */
		for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) {
			cm = fm->aac_commands + i;
			bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap);
		}
		bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap);
		bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap);
		kfree(fm, M_AACBUF);
	}
}

/*
 * Command-mapping helper function - populate this command's s/g table.
 */
static void
aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;
	struct aac_command *cm;
	struct aac_fib *fib;
	int i;

	cm = (struct aac_command *)arg;
	sc = cm->cm_sc;
	fib = cm->cm_fib;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* copy into the FIB */
	if (cm->cm_sgtable != NULL) {
		if (fib->Header.Command == RawIo) {
			struct aac_sg_tableraw *sg;
			sg = (struct aac_sg_tableraw *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr;
				sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len;
				sg->SgEntryRaw[i].Next = 0;
				sg->SgEntryRaw[i].Prev = 0;
				sg->SgEntryRaw[i].Flags = 0;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.SenderData = cm->cm_index;

	if (cm->cm_flags & AAC_CMD_DATAIN)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREREAD);
	if (cm->cm_flags & AAC_CMD_DATAOUT)
		bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
				BUS_DMASYNC_PREWRITE);
	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);		/* wait 5 usec. */
		}
	} else {
		/* Put the FIB on the outbound queue */
		if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) {
			aac_unmap_command(cm);
			sc->flags |= AAC_QUEUE_FRZN;
			aac_requeue_ready(cm);
		}
	}

	return;
}

/*
 * Unmap a command from controller-visible space.
 */
static void
aac_unmap_command(struct aac_command *cm)
{
	struct aac_softc *sc;

	sc = cm->cm_sc;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	if (!(cm->cm_flags & AAC_CMD_MAPPED))
		return;

	if (cm->cm_datalen != 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
					BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap);
	}
	cm->cm_flags &= ~AAC_CMD_MAPPED;
}

/*
 * Hardware Interface
 */

/*
 * Record the bus address of the common area (bus_dmamap_load callback).
 */
static void
aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct aac_softc *sc;

	sc = (struct aac_softc *)arg;
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_common_busaddr = segs[0].ds_addr;
}

static int
aac_check_firmware(struct aac_softc *sc)
{
	u_int32_t code, major, minor, options = 0, atu_size = 0;
	int status;
	time_t then;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
	/*
	 * Wait for the adapter to come ready.
	 */
	then = time_second;
	do {
		code = AAC_GET_FWSTATUS(sc);
		if (code & AAC_SELF_TEST_FAILED) {
			device_printf(sc->aac_dev, "FATAL: selftest failed\n");
			return(ENXIO);
		}
		if (code & AAC_KERNEL_PANIC) {
			device_printf(sc->aac_dev,
				      "FATAL: controller kernel panic");
			return(ENXIO);
		}
		if (time_second > (then + AAC_BOOT_TIMEOUT)) {
			device_printf(sc->aac_dev,
				      "FATAL: controller not coming ready, "
				      "status %x\n", code);
			return(ENXIO);
		}
	} while (!(code & AAC_UP_AND_RUNNING));

	/*
	 * Retrieve the firmware version numbers.  Dell PERC2/QC cards with
	 * firmware version 1.x are not compatible with this driver.
	 */
	if (sc->flags & AAC_FLAGS_PERC2QC) {
		if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0,
				     NULL)) {
			device_printf(sc->aac_dev,
				      "Error reading firmware version\n");
			return (EIO);
		}

		/* These numbers are stored as ASCII! */
		major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30;
		minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30;
		if (major == 1) {
			device_printf(sc->aac_dev,
			    "Firmware version %d.%d is not supported.\n",
			    major, minor);
			return (EINVAL);
		}
	}

	/*
	 * Retrieve the capabilities/supported options word so we know what
	 * work-arounds to enable.  Some firmware revs don't support this
	 * command.
	 */
	if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) {
		if (status != AAC_SRB_STS_INVALID_REQUEST) {
			device_printf(sc->aac_dev,
			     "RequestAdapterInfo failed\n");
			return (EIO);
		}
	} else {
		options = AAC_GET_MAILBOX(sc, 1);
		atu_size = AAC_GET_MAILBOX(sc, 2);
		sc->supported_options = options;

		if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
		    (sc->flags & AAC_FLAGS_NO4GB) == 0)
			sc->flags |= AAC_FLAGS_4GB_WINDOW;
		if (options & AAC_SUPPORTED_NONDASD)
			sc->flags |= AAC_FLAGS_ENABLE_CAM;
		if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0
		     && (sizeof(bus_addr_t) > 4)) {
			device_printf(sc->aac_dev,
			    "Enabling 64-bit address support\n");
			sc->flags |= AAC_FLAGS_SG_64BIT;
		}
		if ((options & AAC_SUPPORTED_NEW_COMM)
		     && sc->aac_if.aif_send_command)
			sc->flags |= AAC_FLAGS_NEW_COMM;
		if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE)
			sc->flags |= AAC_FLAGS_ARRAY_64BIT;
	}

	/* Check for broken hardware that does a lower number of commands */
	sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512);

	/* Remap mem. resource, if required */
	if ((sc->flags & AAC_FLAGS_NEW_COMM) &&
	    atu_size > rman_get_size(sc->aac_regs_res1)) {
		bus_release_resource(
			sc->aac_dev, SYS_RES_MEMORY,
			sc->aac_regs_rid1, sc->aac_regs_res1);
		sc->aac_regs_res1 = bus_alloc_resource(
			sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid1,
			0ul, ~0ul, atu_size, RF_ACTIVE);
		if (sc->aac_regs_res1 == NULL) {
			sc->aac_regs_res1 = bus_alloc_resource_any(
				sc->aac_dev, SYS_RES_MEMORY,
				&sc->aac_regs_rid1, RF_ACTIVE);
			if (sc->aac_regs_res1 == NULL) {
				device_printf(sc->aac_dev,
				    "couldn't allocate register window\n");
				return (ENXIO);
			}
			sc->flags &= ~AAC_FLAGS_NEW_COMM;
		}
		sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1);
		sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1);

		if (sc->aac_hwif == AAC_HWIF_NARK) {
			sc->aac_regs_res0 = sc->aac_regs_res1;
			sc->aac_regs_rid0 = sc->aac_regs_rid1;
			sc->aac_btag0 = sc->aac_btag1;
			sc->aac_bhandle0 = sc->aac_bhandle1;
		}
	}

	/* Read preferred settings */
	sc->aac_max_fib_size = sizeof(struct aac_fib);
	sc->aac_max_sectors = 128;				/* 64KB */
	if (sc->flags & AAC_FLAGS_SG_64BIT)
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite64))
					/ sizeof(struct aac_sg_entry64);
	else
		sc->aac_sg_tablesize = (AAC_FIB_DATASIZE
					- sizeof(struct aac_blockwrite))
					/ sizeof(struct aac_sg_entry);

	if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) {
		options = AAC_GET_MAILBOX(sc, 1);
		sc->aac_max_fib_size = (options & 0xFFFF);
		sc->aac_max_sectors = (options >> 16) << 1;
		options = AAC_GET_MAILBOX(sc, 2);
		sc->aac_sg_tablesize = (options >> 16);
		options = AAC_GET_MAILBOX(sc, 3);
		sc->aac_max_fibs = (options & 0xFFFF);
	}
	if (sc->aac_max_fib_size > PAGE_SIZE)
		sc->aac_max_fib_size = PAGE_SIZE;
	sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size;

	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		sc->flags |= AAC_FLAGS_RAW_IO;
		device_printf(sc->aac_dev, "Enable Raw I/O\n");
	}
	if ((sc->flags & AAC_FLAGS_RAW_IO) &&
	    (sc->flags & AAC_FLAGS_ARRAY_64BIT)) {
		sc->flags |= AAC_FLAGS_LBA_64BIT;
		device_printf(sc->aac_dev, "Enable 64-bit array\n");
	}

	return (0);
}

static int
aac_init(struct aac_softc *sc)
{
	struct aac_adapter_init *ip;
	u_int32_t qoffset;
	int error;

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/*
	 * Fill in the init structure.  This tells the adapter about the
	 * physical location of various important shared data structures.
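	 * (adapter FIBs, the printf buffer and the communication queue area,
	 * all carved out of the common area allocated in aac_alloc())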
	 */
	ip = &sc->aac_common->ac_init;
	ip->InitStructRevision = AAC_INIT_STRUCT_REVISION;
	if (sc->aac_max_fib_size > sizeof(struct aac_fib)) {
		ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4;
		sc->flags |= AAC_FLAGS_RAW_IO;
	}
	ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION;

	ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr +
					 offsetof(struct aac_common, ac_fibs);
	ip->AdapterFibsVirtualAddress = 0;
	ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib);
	ip->AdapterFibAlign = sizeof(struct aac_fib);

	ip->PrintfBufferAddress = sc->aac_common_busaddr +
				  offsetof(struct aac_common, ac_printf);
	ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE;

	/*
	 * The adapter assumes that pages are 4K in size, except on some
	 * broken firmware versions that do the page->byte conversion twice,
	 * therefore 'assuming' that this value is in 16MB units (2^24).
	 * Round up since the granularity is so high.
	 */
	ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE;
	if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) {
		ip->HostPhysMemPages =
		    (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE;
	}
	ip->HostElapsedSeconds = time_second;	/* reset later if invalid */

	ip->InitFlags = 0;
	if (sc->flags & AAC_FLAGS_NEW_COMM) {
		ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED;
		device_printf(sc->aac_dev, "New comm. interface enabled\n");
	}

	ip->MaxIoCommands = sc->aac_max_fibs;
	ip->MaxIoSize = sc->aac_max_sectors << 9;
	ip->MaxFibSize = sc->aac_max_fib_size;

	/*
	 * Initialize FIB queues.  Note that it appears that the layout of the
	 * indexes and the segmentation of the entries may be mandated by the
	 * adapter, which is only told about the base of the queue index fields.
	 *
	 * The initial values of the indices are assumed to inform the adapter
	 * of the sizes of the respective queues, and theoretically it could
	 * work out the entire layout of the queue structures from this.  We
	 * take the easy route and just lay this area out like everyone else
	 * does.
	 *
	 * The Linux driver uses a much more complex scheme whereby several
	 * header records are kept for each queue.  We use a couple of generic
	 * list manipulation functions which 'know' the size of each list by
	 * virtue of a table.
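	 * Each queue gets a producer/consumer index pair in qt_qindex; both
	 * are primed below with the queue's entry count.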
1918 */ 1919 qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; 1920 qoffset &= ~(AAC_QUEUE_ALIGN - 1); 1921 sc->aac_queues = 1922 (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); 1923 ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; 1924 1925 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1926 AAC_HOST_NORM_CMD_ENTRIES; 1927 sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1928 AAC_HOST_NORM_CMD_ENTRIES; 1929 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1930 AAC_HOST_HIGH_CMD_ENTRIES; 1931 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1932 AAC_HOST_HIGH_CMD_ENTRIES; 1933 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1934 AAC_ADAP_NORM_CMD_ENTRIES; 1935 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1936 AAC_ADAP_NORM_CMD_ENTRIES; 1937 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = 1938 AAC_ADAP_HIGH_CMD_ENTRIES; 1939 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = 1940 AAC_ADAP_HIGH_CMD_ENTRIES; 1941 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1942 AAC_HOST_NORM_RESP_ENTRIES; 1943 sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1944 AAC_HOST_NORM_RESP_ENTRIES; 1945 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1946 AAC_HOST_HIGH_RESP_ENTRIES; 1947 sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1948 AAC_HOST_HIGH_RESP_ENTRIES; 1949 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1950 AAC_ADAP_NORM_RESP_ENTRIES; 1951 sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1952 AAC_ADAP_NORM_RESP_ENTRIES; 1953 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= 1954 AAC_ADAP_HIGH_RESP_ENTRIES; 1955 sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= 1956 AAC_ADAP_HIGH_RESP_ENTRIES; 1957 sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = 1958 &sc->aac_queues->qt_HostNormCmdQueue[0]; 1959 sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = 1960 &sc->aac_queues->qt_HostHighCmdQueue[0]; 1961 sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = 1962 &sc->aac_queues->qt_AdapNormCmdQueue[0]; 1963 sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = 1964 &sc->aac_queues->qt_AdapHighCmdQueue[0]; 1965 sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = 1966 &sc->aac_queues->qt_HostNormRespQueue[0]; 1967 sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = 1968 &sc->aac_queues->qt_HostHighRespQueue[0]; 1969 sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = 1970 &sc->aac_queues->qt_AdapNormRespQueue[0]; 1971 sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = 1972 &sc->aac_queues->qt_AdapHighRespQueue[0]; 1973 1974 /* 1975 * Do controller-type-specific initialisation 1976 */ 1977 switch (sc->aac_hwif) { 1978 case AAC_HWIF_I960RX: 1979 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); 1980 break; 1981 case AAC_HWIF_RKT: 1982 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); 1983 break; 1984 default: 1985 break; 1986 } 1987 1988 /* 1989 * Give the init structure to the controller. 
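 * This is done with the AAC_MONKER_INITSTRUCT synchronous command below;
 * its only argument is the bus address of the ac_init area within the
 * common block.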
1990 */ 1991 if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, 1992 sc->aac_common_busaddr + 1993 offsetof(struct aac_common, ac_init), 0, 0, 0, 1994 NULL)) { 1995 device_printf(sc->aac_dev, 1996 "error establishing init structure\n"); 1997 error = EIO; 1998 goto out; 1999 } 2000 2001 error = 0; 2002 out: 2003 return(error); 2004 } 2005 2006 static int 2007 aac_setup_intr(struct aac_softc *sc) 2008 { 2009 sc->aac_irq_rid = 0; 2010 if ((sc->aac_irq = bus_alloc_resource_any(sc->aac_dev, SYS_RES_IRQ, 2011 &sc->aac_irq_rid, 2012 RF_SHAREABLE | 2013 RF_ACTIVE)) == NULL) { 2014 device_printf(sc->aac_dev, "can't allocate interrupt\n"); 2015 return (EINVAL); 2016 } 2017 if (sc->flags & AAC_FLAGS_NEW_COMM) { 2018 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2019 INTR_MPSAFE, 2020 aac_new_intr, sc, &sc->aac_intr, NULL)) { 2021 device_printf(sc->aac_dev, "can't set up interrupt\n"); 2022 return (EINVAL); 2023 } 2024 } else { 2025 if (bus_setup_intr(sc->aac_dev, sc->aac_irq, 2026 0, aac_filter, 2027 sc, &sc->aac_intr, NULL)) { 2028 device_printf(sc->aac_dev, 2029 "can't set up interrupt filter\n"); 2030 return (EINVAL); 2031 } 2032 } 2033 return (0); 2034 } 2035 2036 /* 2037 * Send a synchronous command to the controller and wait for a result. 2038 * Indicate if the controller completed the command with an error status. 2039 */ 2040 static int 2041 aac_sync_command(struct aac_softc *sc, u_int32_t command, 2042 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, 2043 u_int32_t *sp) 2044 { 2045 time_t then; 2046 u_int32_t status; 2047 2048 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2049 2050 /* populate the mailbox */ 2051 AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); 2052 2053 /* ensure the sync command doorbell flag is cleared */ 2054 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2055 2056 /* then set it to signal the adapter */ 2057 AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); 2058 2059 /* spin waiting for the command to complete */ 2060 then = time_second; 2061 do { 2062 if (time_second > (then + AAC_IMMEDIATE_TIMEOUT)) { 2063 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); 2064 return(EIO); 2065 } 2066 } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); 2067 2068 /* clear the completion flag */ 2069 AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); 2070 2071 /* get the command status */ 2072 status = AAC_GET_MAILBOX(sc, 0); 2073 if (sp != NULL) 2074 *sp = status; 2075 2076 if (status != AAC_SRB_STS_SUCCESS) 2077 return (-1); 2078 return(0); 2079 } 2080 2081 int 2082 aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, 2083 struct aac_fib *fib, u_int16_t datasize) 2084 { 2085 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2086 #if 0 /* XXX swildner */ 2087 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0); 2088 #endif 2089 2090 if (datasize > AAC_FIB_DATASIZE) 2091 return(EINVAL); 2092 2093 /* 2094 * Set up the sync FIB 2095 */ 2096 fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | 2097 AAC_FIBSTATE_INITIALISED | 2098 AAC_FIBSTATE_EMPTY; 2099 fib->Header.XferState |= xferstate; 2100 fib->Header.Command = command; 2101 fib->Header.StructType = AAC_FIBTYPE_TFIB; 2102 fib->Header.Size = sizeof(struct aac_fib_header) + datasize; 2103 fib->Header.SenderSize = sizeof(struct aac_fib); 2104 fib->Header.SenderFibAddress = 0; /* Not needed */ 2105 fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + 2106 offsetof(struct aac_common, 2107 ac_sync_fib); 2108 2109 /* 2110 * Give the FIB to the controller, wait for a response. 
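 *
 * For reference, a typical caller (compare aac_describe_controller()
 * further down, which runs with sc->aac_io_lock held) looks roughly like:
 *
 *	aac_alloc_sync_fib(sc, &fib);
 *	fib->data[0] = 0;
 *	if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1) == 0)
 *		info = (struct aac_adapter_info *)&fib->data[0];
 *	aac_release_sync_fib(sc);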
2111 */ 2112 if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, 2113 fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { 2114 fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); 2115 return(EIO); 2116 } 2117 2118 return (0); 2119 } 2120 2121 /* 2122 * Adapter-space FIB queue manipulation 2123 * 2124 * Note that the queue implementation here is a little funky; neither the PI or 2125 * CI will ever be zero. This behaviour is a controller feature. 2126 */ 2127 static struct { 2128 int size; 2129 int notify; 2130 } aac_qinfo[] = { 2131 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, 2132 {AAC_HOST_HIGH_CMD_ENTRIES, 0}, 2133 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, 2134 {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, 2135 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, 2136 {AAC_HOST_HIGH_RESP_ENTRIES, 0}, 2137 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, 2138 {AAC_ADAP_HIGH_RESP_ENTRIES, 0} 2139 }; 2140 2141 /* 2142 * Atomically insert an entry into the nominated queue, returns 0 on success or 2143 * EBUSY if the queue is full. 2144 * 2145 * Note: it would be more efficient to defer notifying the controller in 2146 * the case where we may be inserting several entries in rapid succession, 2147 * but implementing this usefully may be difficult (it would involve a 2148 * separate queue/notify interface). 2149 */ 2150 static int 2151 aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) 2152 { 2153 u_int32_t pi, ci; 2154 int error; 2155 u_int32_t fib_size; 2156 u_int32_t fib_addr; 2157 2158 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2159 2160 fib_size = cm->cm_fib->Header.Size; 2161 fib_addr = cm->cm_fib->Header.ReceiverFibAddress; 2162 2163 /* get the producer/consumer indices */ 2164 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2165 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2166 2167 /* wrap the queue? */ 2168 if (pi >= aac_qinfo[queue].size) 2169 pi = 0; 2170 2171 /* check for queue full */ 2172 if ((pi + 1) == ci) { 2173 error = EBUSY; 2174 goto out; 2175 } 2176 2177 /* 2178 * To avoid a race with its completion interrupt, place this command on 2179 * the busy queue prior to advertising it to the controller. 2180 */ 2181 aac_enqueue_busy(cm); 2182 2183 /* populate queue entry */ 2184 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2185 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2186 2187 /* update producer index */ 2188 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2189 2190 /* notify the adapter if we know how */ 2191 if (aac_qinfo[queue].notify != 0) 2192 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2193 2194 error = 0; 2195 2196 out: 2197 return(error); 2198 } 2199 2200 /* 2201 * Atomically remove one entry from the nominated queue, returns 0 on 2202 * success or ENOENT if the queue is empty. 
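 *
 * Note on the entry format consumed here: aq_fib_addr is only 32 bits, so
 * for the host command queues it carries a byte offset into ac_fibs[],
 * while for the host response queues it carries the value the driver put
 * in SenderFibAddress -- essentially the command index shifted left by
 * two, with bit 0 borrowed by the adapter to flag a "fast response" whose
 * FIB was not DMA'd back (see the switch below).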
2203 */ 2204 static int 2205 aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, 2206 struct aac_fib **fib_addr) 2207 { 2208 u_int32_t pi, ci; 2209 u_int32_t fib_index; 2210 int error; 2211 int notify; 2212 2213 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2214 2215 /* get the producer/consumer indices */ 2216 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2217 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2218 2219 /* check for queue empty */ 2220 if (ci == pi) { 2221 error = ENOENT; 2222 goto out; 2223 } 2224 2225 /* wrap the pi so the following test works */ 2226 if (pi >= aac_qinfo[queue].size) 2227 pi = 0; 2228 2229 notify = 0; 2230 if (ci == pi + 1) 2231 notify++; 2232 2233 /* wrap the queue? */ 2234 if (ci >= aac_qinfo[queue].size) 2235 ci = 0; 2236 2237 /* fetch the entry */ 2238 *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; 2239 2240 switch (queue) { 2241 case AAC_HOST_NORM_CMD_QUEUE: 2242 case AAC_HOST_HIGH_CMD_QUEUE: 2243 /* 2244 * The aq_fib_addr is only 32 bits wide so it can't be counted 2245 * on to hold an address. For AIFs, the adapter assumes 2246 * that it's giving us an address into the array of AIF fibs. 2247 * Therefore, we have to convert it to an index. 2248 */ 2249 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / 2250 sizeof(struct aac_fib); 2251 *fib_addr = &sc->aac_common->ac_fibs[fib_index]; 2252 break; 2253 2254 case AAC_HOST_NORM_RESP_QUEUE: 2255 case AAC_HOST_HIGH_RESP_QUEUE: 2256 { 2257 struct aac_command *cm; 2258 2259 /* 2260 * As above, an index is used instead of an actual address. 2261 * Gotta shift the index to account for the fast response 2262 * bit. No other correction is needed since this value was 2263 * originally provided by the driver via the SenderFibAddress 2264 * field. 2265 */ 2266 fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; 2267 cm = sc->aac_commands + (fib_index >> 2); 2268 *fib_addr = cm->cm_fib; 2269 2270 /* 2271 * Is this a fast response? If it is, update the fib fields in 2272 * local memory since the whole fib isn't DMA'd back up. 2273 */ 2274 if (fib_index & 0x01) { 2275 (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; 2276 *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; 2277 } 2278 break; 2279 } 2280 default: 2281 panic("Invalid queue in aac_dequeue_fib()"); 2282 break; 2283 } 2284 2285 /* update consumer index */ 2286 sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; 2287 2288 /* if we have made the queue un-full, notify the adapter */ 2289 if (notify && (aac_qinfo[queue].notify != 0)) 2290 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2291 error = 0; 2292 2293 out: 2294 return(error); 2295 } 2296 2297 /* 2298 * Put our response to an Adapter Initiated Fib on the response queue 2299 */ 2300 static int 2301 aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) 2302 { 2303 u_int32_t pi, ci; 2304 int error; 2305 u_int32_t fib_size; 2306 u_int32_t fib_addr; 2307 2308 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2309 2310 /* Tell the adapter where the FIB is */ 2311 fib_size = fib->Header.Size; 2312 fib_addr = fib->Header.SenderFibAddress; 2313 fib->Header.ReceiverFibAddress = fib_addr; 2314 2315 /* get the producer/consumer indices */ 2316 pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; 2317 ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; 2318 2319 /* wrap the queue?
*/ 2320 if (pi >= aac_qinfo[queue].size) 2321 pi = 0; 2322 2323 /* check for queue full */ 2324 if ((pi + 1) == ci) { 2325 error = EBUSY; 2326 goto out; 2327 } 2328 2329 /* populate queue entry */ 2330 (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; 2331 (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; 2332 2333 /* update producer index */ 2334 sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; 2335 2336 /* notify the adapter if we know how */ 2337 if (aac_qinfo[queue].notify != 0) 2338 AAC_QNOTIFY(sc, aac_qinfo[queue].notify); 2339 2340 error = 0; 2341 2342 out: 2343 return(error); 2344 } 2345 2346 /* 2347 * Check for commands that have been outstanding for a suspiciously long time, 2348 * and complain about them. 2349 */ 2350 static void 2351 aac_timeout(struct aac_softc *sc) 2352 { 2353 struct aac_command *cm; 2354 time_t deadline; 2355 int timedout, code; 2356 2357 /* 2358 * Traverse the busy command list, bitch about late commands once 2359 * only. 2360 */ 2361 timedout = 0; 2362 deadline = time_second - AAC_CMD_TIMEOUT; 2363 TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { 2364 if ((cm->cm_timestamp < deadline) 2365 && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) { 2366 cm->cm_flags |= AAC_CMD_TIMEDOUT; 2367 device_printf(sc->aac_dev, 2368 "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", 2369 cm, cm->cm_fib->Header.Command, 2370 (int)(time_second-cm->cm_timestamp)); 2371 AAC_PRINT_FIB(sc, cm->cm_fib); 2372 timedout++; 2373 } 2374 } 2375 2376 if (timedout) { 2377 code = AAC_GET_FWSTATUS(sc); 2378 if (code != AAC_UP_AND_RUNNING) { 2379 device_printf(sc->aac_dev, "WARNING! Controller is no " 2380 "longer running! code= 0x%x\n", code); 2381 } 2382 } 2383 return; 2384 } 2385 2386 /* 2387 * Interface Function Vectors 2388 */ 2389 2390 /* 2391 * Read the current firmware status word. 2392 */ 2393 static int 2394 aac_sa_get_fwstatus(struct aac_softc *sc) 2395 { 2396 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2397 2398 return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); 2399 } 2400 2401 static int 2402 aac_rx_get_fwstatus(struct aac_softc *sc) 2403 { 2404 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2405 2406 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 2407 AAC_RX_OMR0 : AAC_RX_FWSTATUS)); 2408 } 2409 2410 static int 2411 aac_rkt_get_fwstatus(struct aac_softc *sc) 2412 { 2413 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2414 2415 return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? 
2416 AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); 2417 } 2418 2419 /* 2420 * Notify the controller of a change in a given queue 2421 */ 2422 2423 static void 2424 aac_sa_qnotify(struct aac_softc *sc, int qbit) 2425 { 2426 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2427 2428 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); 2429 } 2430 2431 static void 2432 aac_rx_qnotify(struct aac_softc *sc, int qbit) 2433 { 2434 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2435 2436 AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); 2437 } 2438 2439 static void 2440 aac_rkt_qnotify(struct aac_softc *sc, int qbit) 2441 { 2442 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2443 2444 AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); 2445 } 2446 2447 /* 2448 * Get the interrupt reason bits 2449 */ 2450 static int 2451 aac_sa_get_istatus(struct aac_softc *sc) 2452 { 2453 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2454 2455 return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); 2456 } 2457 2458 static int 2459 aac_rx_get_istatus(struct aac_softc *sc) 2460 { 2461 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2462 2463 return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); 2464 } 2465 2466 static int 2467 aac_rkt_get_istatus(struct aac_softc *sc) 2468 { 2469 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2470 2471 return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); 2472 } 2473 2474 /* 2475 * Clear some interrupt reason bits 2476 */ 2477 static void 2478 aac_sa_clear_istatus(struct aac_softc *sc, int mask) 2479 { 2480 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2481 2482 AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); 2483 } 2484 2485 static void 2486 aac_rx_clear_istatus(struct aac_softc *sc, int mask) 2487 { 2488 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2489 2490 AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); 2491 } 2492 2493 static void 2494 aac_rkt_clear_istatus(struct aac_softc *sc, int mask) 2495 { 2496 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2497 2498 AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); 2499 } 2500 2501 /* 2502 * Populate the mailbox and set the command word 2503 */ 2504 static void 2505 aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, 2506 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2507 { 2508 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2509 2510 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); 2511 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); 2512 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); 2513 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); 2514 AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); 2515 } 2516 2517 static void 2518 aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, 2519 u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2520 { 2521 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2522 2523 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); 2524 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); 2525 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); 2526 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); 2527 AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); 2528 } 2529 2530 static void 2531 aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, 2532 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 2533 { 2534 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2535 2536 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); 2537 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); 2538 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); 2539 AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); 2540 AAC_MEM1_SETREG4(sc, 
AAC_RKT_MAILBOX + 16, arg3); 2541 } 2542 2543 /* 2544 * Fetch the immediate command status word 2545 */ 2546 static int 2547 aac_sa_get_mailbox(struct aac_softc *sc, int mb) 2548 { 2549 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2550 2551 return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); 2552 } 2553 2554 static int 2555 aac_rx_get_mailbox(struct aac_softc *sc, int mb) 2556 { 2557 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2558 2559 return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); 2560 } 2561 2562 static int 2563 aac_rkt_get_mailbox(struct aac_softc *sc, int mb) 2564 { 2565 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2566 2567 return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); 2568 } 2569 2570 /* 2571 * Set/clear interrupt masks 2572 */ 2573 static void 2574 aac_sa_set_interrupts(struct aac_softc *sc, int enable) 2575 { 2576 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2577 2578 if (enable) { 2579 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); 2580 } else { 2581 AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); 2582 } 2583 } 2584 2585 static void 2586 aac_rx_set_interrupts(struct aac_softc *sc, int enable) 2587 { 2588 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2589 2590 if (enable) { 2591 if (sc->flags & AAC_FLAGS_NEW_COMM) 2592 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); 2593 else 2594 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); 2595 } else { 2596 AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); 2597 } 2598 } 2599 2600 static void 2601 aac_rkt_set_interrupts(struct aac_softc *sc, int enable) 2602 { 2603 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); 2604 2605 if (enable) { 2606 if (sc->flags & AAC_FLAGS_NEW_COMM) 2607 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); 2608 else 2609 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); 2610 } else { 2611 AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); 2612 } 2613 } 2614 2615 /* 2616 * New comm. 
interface: Send command functions 2617 */ 2618 static int 2619 aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) 2620 { 2621 u_int32_t index, device; 2622 2623 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2624 2625 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2626 if (index == 0xffffffffL) 2627 index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); 2628 if (index == 0xffffffffL) 2629 return index; 2630 aac_enqueue_busy(cm); 2631 device = index; 2632 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2633 device += 4; 2634 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2635 device += 4; 2636 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2637 AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); 2638 return 0; 2639 } 2640 2641 static int 2642 aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) 2643 { 2644 u_int32_t index, device; 2645 2646 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); 2647 2648 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2649 if (index == 0xffffffffL) 2650 index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); 2651 if (index == 0xffffffffL) 2652 return index; 2653 aac_enqueue_busy(cm); 2654 device = index; 2655 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); 2656 device += 4; 2657 AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); 2658 device += 4; 2659 AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); 2660 AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); 2661 return 0; 2662 } 2663 2664 /* 2665 * New comm. interface: get, set outbound queue index 2666 */ 2667 static int 2668 aac_rx_get_outb_queue(struct aac_softc *sc) 2669 { 2670 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2671 2672 return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); 2673 } 2674 2675 static int 2676 aac_rkt_get_outb_queue(struct aac_softc *sc) 2677 { 2678 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2679 2680 return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); 2681 } 2682 2683 static void 2684 aac_rx_set_outb_queue(struct aac_softc *sc, int index) 2685 { 2686 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2687 2688 AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); 2689 } 2690 2691 static void 2692 aac_rkt_set_outb_queue(struct aac_softc *sc, int index) 2693 { 2694 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2695 2696 AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); 2697 } 2698 2699 /* 2700 * Debugging and Diagnostics 2701 */ 2702 2703 /* 2704 * Print some information about the controller. 
2705 */ 2706 static void 2707 aac_describe_controller(struct aac_softc *sc) 2708 { 2709 struct aac_fib *fib; 2710 struct aac_adapter_info *info; 2711 char *adapter_type = "Adaptec RAID controller"; 2712 2713 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2714 2715 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 2716 aac_alloc_sync_fib(sc, &fib); 2717 2718 fib->data[0] = 0; 2719 if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { 2720 device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); 2721 aac_release_sync_fib(sc); 2722 lockmgr(&sc->aac_io_lock, LK_RELEASE); 2723 return; 2724 } 2725 2726 /* save the kernel revision structure for later use */ 2727 info = (struct aac_adapter_info *)&fib->data[0]; 2728 sc->aac_revision = info->KernelRevision; 2729 2730 if (bootverbose) { 2731 device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " 2732 "(%dMB cache, %dMB execution), %s\n", 2733 aac_describe_code(aac_cpu_variant, info->CpuVariant), 2734 info->ClockSpeed, info->TotalMem / (1024 * 1024), 2735 info->BufferMem / (1024 * 1024), 2736 info->ExecutionMem / (1024 * 1024), 2737 aac_describe_code(aac_battery_platform, 2738 info->batteryPlatform)); 2739 2740 device_printf(sc->aac_dev, 2741 "Kernel %d.%d-%d, Build %d, S/N %6X\n", 2742 info->KernelRevision.external.comp.major, 2743 info->KernelRevision.external.comp.minor, 2744 info->KernelRevision.external.comp.dash, 2745 info->KernelRevision.buildNumber, 2746 (u_int32_t)(info->SerialNumber & 0xffffff)); 2747 2748 device_printf(sc->aac_dev, "Supported Options=%b\n", 2749 sc->supported_options, 2750 "\20" 2751 "\1SNAPSHOT" 2752 "\2CLUSTERS" 2753 "\3WCACHE" 2754 "\4DATA64" 2755 "\5HOSTTIME" 2756 "\6RAID50" 2757 "\7WINDOW4GB" 2758 "\10SCSIUPGD" 2759 "\11SOFTERR" 2760 "\12NORECOND" 2761 "\13SGMAP64" 2762 "\14ALARM" 2763 "\15NONDASD" 2764 "\16SCSIMGT" 2765 "\17RAIDSCSI" 2766 "\21ADPTINFO" 2767 "\22NEWCOMM" 2768 "\23ARRAY64BIT" 2769 "\24HEATSENSOR"); 2770 } 2771 2772 if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { 2773 fib->data[0] = 0; 2774 if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) 2775 device_printf(sc->aac_dev, 2776 "RequestSupplementAdapterInfo failed\n"); 2777 else 2778 adapter_type = ((struct aac_supplement_adapter_info *) 2779 &fib->data[0])->AdapterTypeText; 2780 } 2781 device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", 2782 adapter_type, 2783 AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, 2784 AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); 2785 2786 aac_release_sync_fib(sc); 2787 lockmgr(&sc->aac_io_lock, LK_RELEASE); 2788 } 2789 2790 /* 2791 * Look up a text description of a numeric error code and return a pointer to 2792 * same. 
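 * The lookup tables (see aac_tables.h) are expected to end with a
 * NULL-string sentinel followed by a final catch-all entry; when no code
 * matches, the loop stops on the sentinel and the catch-all string at
 * table[i + 1] is returned.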
2793 */ 2794 static char * 2795 aac_describe_code(struct aac_code_lookup *table, u_int32_t code) 2796 { 2797 int i; 2798 2799 for (i = 0; table[i].string != NULL; i++) 2800 if (table[i].code == code) 2801 return(table[i].string); 2802 return(table[i + 1].string); 2803 } 2804 2805 /* 2806 * Management Interface 2807 */ 2808 2809 static int 2810 aac_open(struct dev_open_args *ap) 2811 { 2812 cdev_t dev = ap->a_head.a_dev; 2813 struct aac_softc *sc; 2814 2815 sc = dev->si_drv1; 2816 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2817 device_busy(sc->aac_dev); 2818 2819 return 0; 2820 } 2821 2822 static int 2823 aac_ioctl(struct dev_ioctl_args *ap) 2824 { 2825 caddr_t arg = ap->a_data; 2826 cdev_t dev = ap->a_head.a_dev; 2827 u_long cmd = ap->a_cmd; 2828 union aac_statrequest *as; 2829 struct aac_softc *sc; 2830 int error = 0; 2831 2832 as = (union aac_statrequest *)arg; 2833 sc = dev->si_drv1; 2834 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 2835 2836 switch (cmd) { 2837 case AACIO_STATS: 2838 switch (as->as_item) { 2839 case AACQ_FREE: 2840 case AACQ_BIO: 2841 case AACQ_READY: 2842 case AACQ_BUSY: 2843 bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, 2844 sizeof(struct aac_qstat)); 2845 break; 2846 default: 2847 error = ENOENT; 2848 break; 2849 } 2850 break; 2851 2852 case FSACTL_SENDFIB: 2853 case FSACTL_SEND_LARGE_FIB: 2854 arg = *(caddr_t*)arg; 2855 case FSACTL_LNX_SENDFIB: 2856 case FSACTL_LNX_SEND_LARGE_FIB: 2857 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); 2858 error = aac_ioctl_sendfib(sc, arg); 2859 break; 2860 case FSACTL_SEND_RAW_SRB: 2861 arg = *(caddr_t*)arg; 2862 case FSACTL_LNX_SEND_RAW_SRB: 2863 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); 2864 error = aac_ioctl_send_raw_srb(sc, arg); 2865 break; 2866 case FSACTL_AIF_THREAD: 2867 case FSACTL_LNX_AIF_THREAD: 2868 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); 2869 error = EINVAL; 2870 break; 2871 case FSACTL_OPEN_GET_ADAPTER_FIB: 2872 arg = *(caddr_t*)arg; 2873 case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: 2874 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); 2875 error = aac_open_aif(sc, arg); 2876 break; 2877 case FSACTL_GET_NEXT_ADAPTER_FIB: 2878 arg = *(caddr_t*)arg; 2879 case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: 2880 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); 2881 error = aac_getnext_aif(sc, arg); 2882 break; 2883 case FSACTL_CLOSE_GET_ADAPTER_FIB: 2884 arg = *(caddr_t*)arg; 2885 case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: 2886 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); 2887 error = aac_close_aif(sc, arg); 2888 break; 2889 case FSACTL_MINIPORT_REV_CHECK: 2890 arg = *(caddr_t*)arg; 2891 case FSACTL_LNX_MINIPORT_REV_CHECK: 2892 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); 2893 error = aac_rev_check(sc, arg); 2894 break; 2895 case FSACTL_QUERY_DISK: 2896 arg = *(caddr_t*)arg; 2897 case FSACTL_LNX_QUERY_DISK: 2898 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); 2899 error = aac_query_disk(sc, arg); 2900 break; 2901 case FSACTL_DELETE_DISK: 2902 case FSACTL_LNX_DELETE_DISK: 2903 /* 2904 * We don't trust the underland to tell us when to delete a 2905 * container, rather we rely on an AIF coming from the 2906 * controller 2907 */ 2908 error = 0; 2909 break; 2910 case FSACTL_GET_PCI_INFO: 2911 arg = *(caddr_t*)arg; 2912 case FSACTL_LNX_GET_PCI_INFO: 2913 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); 2914 error 
= aac_get_pci_info(sc, arg); 2915 break; 2916 case FSACTL_GET_FEATURES: 2917 arg = *(caddr_t*)arg; 2918 case FSACTL_LNX_GET_FEATURES: 2919 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); 2920 error = aac_supported_features(sc, arg); 2921 break; 2922 default: 2923 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); 2924 error = EINVAL; 2925 break; 2926 } 2927 return(error); 2928 } 2929 2930 static struct filterops aac_filterops = 2931 { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, aac_filter_detach, aac_filter_read }; 2932 2933 static int 2934 aac_kqfilter(struct dev_kqfilter_args *ap) 2935 { 2936 cdev_t dev = ap->a_head.a_dev; 2937 struct aac_softc *sc = dev->si_drv1; 2938 struct knote *kn = ap->a_kn; 2939 struct klist *klist; 2940 2941 ap->a_result = 0; 2942 2943 switch (kn->kn_filter) { 2944 case EVFILT_READ: 2945 kn->kn_fop = &aac_filterops; 2946 kn->kn_hook = (caddr_t)sc; 2947 break; 2948 default: 2949 ap->a_result = EOPNOTSUPP; 2950 return (0); 2951 } 2952 2953 klist = &sc->rcv_kq.ki_note; 2954 knote_insert(klist, kn); 2955 2956 return (0); 2957 } 2958 2959 static void 2960 aac_filter_detach(struct knote *kn) 2961 { 2962 struct aac_softc *sc = (struct aac_softc *)kn->kn_hook; 2963 struct klist *klist; 2964 2965 klist = &sc->rcv_kq.ki_note; 2966 knote_remove(klist, kn); 2967 } 2968 2969 static int 2970 aac_filter_read(struct knote *kn, long hint) 2971 { 2972 struct aac_softc *sc; 2973 struct aac_fib_context *ctx; 2974 2975 sc = (struct aac_softc *)kn->kn_hook; 2976 2977 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 2978 for (ctx = sc->fibctx; ctx; ctx = ctx->next) 2979 if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { 2980 /* don't leak the AIF queue lock on the non-empty path */ lockmgr(&sc->aac_aifq_lock, LK_RELEASE); return(1); } 2981 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 2982 2983 return (0); 2984 } 2985 2986 static void 2987 aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) 2988 { 2989 2990 switch (event->ev_type) { 2991 case AAC_EVENT_CMFREE: 2992 KKASSERT(lockstatus(&sc->aac_io_lock, curthread) != 0); 2993 if (aac_alloc_command(sc, (struct aac_command **)arg)) { 2994 aac_add_event(sc, event); 2995 return; 2996 } 2997 kfree(event, M_AACBUF); 2998 wakeup(arg); 2999 break; 3000 default: 3001 break; 3002 } 3003 } 3004 3005 /* 3006 * Send a FIB supplied from userspace 3007 */ 3008 static int 3009 aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) 3010 { 3011 struct aac_command *cm; 3012 int size, error; 3013 3014 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3015 3016 cm = NULL; 3017 3018 /* 3019 * Get a command 3020 */ 3021 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3022 if (aac_alloc_command(sc, &cm)) { 3023 struct aac_event *event; 3024 3025 event = kmalloc(sizeof(struct aac_event), M_AACBUF, 3026 M_INTWAIT | M_ZERO); 3027 event->ev_type = AAC_EVENT_CMFREE; 3028 event->ev_callback = aac_ioctl_event; 3029 event->ev_arg = &cm; 3030 aac_add_event(sc, event); 3031 lksleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); 3032 } 3033 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3034 3035 /* 3036 * Fetch the FIB header, then re-copy to get data as well.
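 * (The header alone is copied first so that Header.Size is known; the
 * second copyin then fetches the full FIB, clamped to aac_max_fib_size.)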
3037 */ 3038 if ((error = copyin(ufib, cm->cm_fib, 3039 sizeof(struct aac_fib_header))) != 0) 3040 goto out; 3041 size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); 3042 if (size > sc->aac_max_fib_size) { 3043 device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", 3044 size, sc->aac_max_fib_size); 3045 size = sc->aac_max_fib_size; 3046 } 3047 if ((error = copyin(ufib, cm->cm_fib, size)) != 0) 3048 goto out; 3049 cm->cm_fib->Header.Size = size; 3050 cm->cm_timestamp = time_second; 3051 3052 /* 3053 * Pass the FIB to the controller, wait for it to complete. 3054 */ 3055 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3056 error = aac_wait_command(cm); 3057 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3058 if (error != 0) { 3059 device_printf(sc->aac_dev, 3060 "aac_wait_command return %d\n", error); 3061 goto out; 3062 } 3063 3064 /* 3065 * Copy the FIB and data back out to the caller. 3066 */ 3067 size = cm->cm_fib->Header.Size; 3068 if (size > sc->aac_max_fib_size) { 3069 device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", 3070 size, sc->aac_max_fib_size); 3071 size = sc->aac_max_fib_size; 3072 } 3073 error = copyout(cm->cm_fib, ufib, size); 3074 3075 out: 3076 if (cm != NULL) { 3077 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3078 aac_release_command(cm); 3079 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3080 } 3081 return(error); 3082 } 3083 3084 /* 3085 * Send a passthrough FIB supplied from userspace 3086 */ 3087 static int 3088 aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) 3089 { 3090 struct aac_command *cm; 3091 struct aac_event *event; 3092 struct aac_fib *fib; 3093 struct aac_srb *srbcmd, *user_srb; 3094 struct aac_sg_entry *sge; 3095 struct aac_sg_entry64 *sge64; 3096 void *srb_sg_address, *ureply; 3097 uint32_t fibsize, srb_sg_bytecount; 3098 int error, transfer_data; 3099 3100 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3101 3102 cm = NULL; 3103 transfer_data = 0; 3104 fibsize = 0; 3105 user_srb = (struct aac_srb *)arg; 3106 3107 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3108 if (aac_alloc_command(sc, &cm)) { 3109 event = kmalloc(sizeof(struct aac_event), M_AACBUF, 3110 M_NOWAIT | M_ZERO); 3111 if (event == NULL) { 3112 error = EBUSY; 3113 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3114 goto out; 3115 } 3116 event->ev_type = AAC_EVENT_CMFREE; 3117 event->ev_callback = aac_ioctl_event; 3118 event->ev_arg = &cm; 3119 aac_add_event(sc, event); 3120 lksleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); 3121 } 3122 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3123 3124 cm->cm_data = NULL; 3125 fib = cm->cm_fib; 3126 srbcmd = (struct aac_srb *)fib->data; 3127 error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); 3128 if (error != 0) 3129 goto out; 3130 if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { 3131 error = EINVAL; 3132 goto out; 3133 } 3134 error = copyin(user_srb, srbcmd, fibsize); 3135 if (error != 0) 3136 goto out; 3137 srbcmd->function = 0; 3138 srbcmd->retry_limit = 0; 3139 if (srbcmd->sg_map.SgCount > 1) { 3140 error = EINVAL; 3141 goto out; 3142 } 3143 3144 /* Retrieve correct SG entries. 
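 * The caller implicitly tells us which layout it used through fibsize:
 * sizeof(struct aac_srb) plus SgCount 32-bit aac_sg_entry elements, or
 * (on 64-bit kernels) plus SgCount aac_sg_entry64 elements.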
*/ 3145 if (fibsize == (sizeof(struct aac_srb) + 3146 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { 3147 sge = srbcmd->sg_map.SgEntry; 3148 sge64 = NULL; 3149 srb_sg_bytecount = sge->SgByteCount; 3150 srb_sg_address = (void *)(uintptr_t)sge->SgAddress; 3151 } 3152 #ifdef __amd64__ 3153 else if (fibsize == (sizeof(struct aac_srb) + 3154 srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { 3155 sge = NULL; 3156 sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; 3157 srb_sg_bytecount = sge64->SgByteCount; 3158 srb_sg_address = (void *)sge64->SgAddress; 3159 if (sge64->SgAddress > 0xffffffffull && 3160 (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { 3161 error = EINVAL; 3162 goto out; 3163 } 3164 } 3165 #endif 3166 else { 3167 error = EINVAL; 3168 goto out; 3169 } 3170 ureply = (char *)arg + fibsize; 3171 srbcmd->data_len = srb_sg_bytecount; 3172 if (srbcmd->sg_map.SgCount == 1) 3173 transfer_data = 1; 3174 3175 cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; 3176 if (transfer_data) { 3177 cm->cm_datalen = srb_sg_bytecount; 3178 cm->cm_data = kmalloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); 3179 if (cm->cm_data == NULL) { 3180 error = ENOMEM; 3181 goto out; 3182 } 3183 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) 3184 cm->cm_flags |= AAC_CMD_DATAIN; 3185 if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { 3186 cm->cm_flags |= AAC_CMD_DATAOUT; 3187 error = copyin(srb_sg_address, cm->cm_data, 3188 cm->cm_datalen); 3189 if (error != 0) 3190 goto out; 3191 } 3192 } 3193 3194 fib->Header.Size = sizeof(struct aac_fib_header) + 3195 sizeof(struct aac_srb); 3196 fib->Header.XferState = 3197 AAC_FIBSTATE_HOSTOWNED | 3198 AAC_FIBSTATE_INITIALISED | 3199 AAC_FIBSTATE_EMPTY | 3200 AAC_FIBSTATE_FROMHOST | 3201 AAC_FIBSTATE_REXPECTED | 3202 AAC_FIBSTATE_NORM | 3203 AAC_FIBSTATE_ASYNC | 3204 AAC_FIBSTATE_FAST_RESPONSE; 3205 fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 3206 ScsiPortCommandU64 : ScsiPortCommand; 3207 3208 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3209 aac_wait_command(cm); 3210 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3211 3212 if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { 3213 error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); 3214 if (error != 0) 3215 goto out; 3216 } 3217 error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); 3218 out: 3219 if (cm != NULL) { 3220 if (cm->cm_data != NULL) 3221 kfree(cm->cm_data, M_AACBUF); 3222 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3223 aac_release_command(cm); 3224 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3225 } 3226 return(error); 3227 } 3228 3229 static int 3230 aac_close(struct dev_close_args *ap) 3231 { 3232 cdev_t dev = ap->a_head.a_dev; 3233 struct aac_softc *sc; 3234 3235 sc = dev->si_drv1; 3236 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3237 get_mplock(); 3238 device_unbusy(sc->aac_dev); 3239 rel_mplock(); 3240 3241 return 0; 3242 } 3243 3244 /* 3245 * Handle an AIF sent to us by the controller; queue it for later reference. 3246 * If the queue fills up, then drop the older entries. 
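 *
 * The queue itself is a simple ring of AAC_AIFQ_LENGTH FIB-sized slots
 * indexed by aifq_idx; roughly, the producer side at the bottom of this
 * function does
 *
 *	next = (current + 1) % AAC_AIFQ_LENGTH;
 *	bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib));
 *	sc->aifq_idx = next;
 *
 * while every reader keeps its own ctx_idx/ctx_wrap cursor into the same
 * ring (see aac_return_aif()).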
3247 */ 3248 static void 3249 aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) 3250 { 3251 struct aac_aif_command *aif; 3252 struct aac_container *co, *co_next; 3253 struct aac_fib_context *ctx; 3254 struct aac_mntinforesp *mir; 3255 int next, current, found; 3256 int count = 0, added = 0, i = 0; 3257 uint32_t channel; 3258 3259 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3260 3261 aif = (struct aac_aif_command*)&fib->data[0]; 3262 aac_print_aif(sc, aif); 3263 3264 /* Is it an event that we should care about? */ 3265 switch (aif->command) { 3266 case AifCmdEventNotify: 3267 switch (aif->data.EN.type) { 3268 case AifEnAddContainer: 3269 case AifEnDeleteContainer: 3270 /* 3271 * A container was added or deleted, but the message 3272 * doesn't tell us anything else! Re-enumerate the 3273 * containers and sort things out. 3274 */ 3275 aac_alloc_sync_fib(sc, &fib); 3276 do { 3277 /* 3278 * Ask the controller for its containers one at 3279 * a time. 3280 * XXX What if the controller's list changes 3281 * midway through this enumeration? 3282 * XXX This should be done async. 3283 */ 3284 if ((mir = aac_get_container_info(sc, fib, i)) == NULL) 3285 continue; 3286 if (i == 0) 3287 count = mir->MntRespCount; 3288 /* 3289 * Check the container against our list. 3290 * co->co_found was already set to 0 in a 3291 * previous run. 3292 */ 3293 if ((mir->Status == ST_OK) && 3294 (mir->MntTable[0].VolType != CT_NONE)) { 3295 found = 0; 3296 TAILQ_FOREACH(co, 3297 &sc->aac_container_tqh, 3298 co_link) { 3299 if (co->co_mntobj.ObjectId == 3300 mir->MntTable[0].ObjectId) { 3301 co->co_found = 1; 3302 found = 1; 3303 break; 3304 } 3305 } 3306 /* 3307 * If the container matched, continue 3308 * in the list. 3309 */ 3310 if (found) { 3311 i++; 3312 continue; 3313 } 3314 3315 /* 3316 * This is a new container. Do all the 3317 * appropriate things to set it up. 3318 */ 3319 aac_add_container(sc, mir, 1); 3320 added = 1; 3321 } 3322 i++; 3323 } while ((i < count) && (i < AAC_MAX_CONTAINERS)); 3324 aac_release_sync_fib(sc); 3325 3326 /* 3327 * Go through our list of containers and see which ones 3328 * were not marked 'found'. Since the controller didn't 3329 * list them they must have been deleted. Do the 3330 * appropriate steps to destroy the device. Also reset 3331 * the co->co_found field.
3332 */ 3333 co = TAILQ_FIRST(&sc->aac_container_tqh); 3334 while (co != NULL) { 3335 if (co->co_found == 0) { 3336 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3337 get_mplock(); 3338 device_delete_child(sc->aac_dev, 3339 co->co_disk); 3340 rel_mplock(); 3341 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3342 co_next = TAILQ_NEXT(co, co_link); 3343 lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE); 3344 TAILQ_REMOVE(&sc->aac_container_tqh, co, 3345 co_link); 3346 lockmgr(&sc->aac_container_lock, LK_RELEASE); 3347 kfree(co, M_AACBUF); 3348 co = co_next; 3349 } else { 3350 co->co_found = 0; 3351 co = TAILQ_NEXT(co, co_link); 3352 } 3353 } 3354 3355 /* Attach the newly created containers */ 3356 if (added) { 3357 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3358 get_mplock(); 3359 bus_generic_attach(sc->aac_dev); 3360 rel_mplock(); 3361 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3362 } 3363 3364 break; 3365 3366 case AifEnEnclosureManagement: 3367 switch (aif->data.EN.data.EEE.eventType) { 3368 case AIF_EM_DRIVE_INSERTION: 3369 case AIF_EM_DRIVE_REMOVAL: 3370 channel = aif->data.EN.data.EEE.unitID; 3371 if (sc->cam_rescan_cb != NULL) 3372 sc->cam_rescan_cb(sc, 3373 (channel >> 24) & 0xF, 3374 (channel & 0xFFFF)); 3375 break; 3376 } 3377 break; 3378 3379 case AifEnAddJBOD: 3380 case AifEnDeleteJBOD: 3381 channel = aif->data.EN.data.ECE.container; 3382 if (sc->cam_rescan_cb != NULL) 3383 sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, 3384 AAC_CAM_TARGET_WILDCARD); 3385 break; 3386 3387 default: 3388 break; 3389 } 3390 3391 default: 3392 break; 3393 } 3394 3395 /* Copy the AIF data to the AIF queue for ioctl retrieval */ 3396 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3397 current = sc->aifq_idx; 3398 next = (current + 1) % AAC_AIFQ_LENGTH; 3399 if (next == 0) 3400 sc->aifq_filled = 1; 3401 bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); 3402 /* modify AIF contexts */ 3403 if (sc->aifq_filled) { 3404 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3405 if (next == ctx->ctx_idx) 3406 ctx->ctx_wrap = 1; 3407 else if (current == ctx->ctx_idx && ctx->ctx_wrap) 3408 ctx->ctx_idx = next; 3409 } 3410 } 3411 sc->aifq_idx = next; 3412 /* On the off chance that someone is sleeping for an aif... */ 3413 if (sc->aac_state & AAC_STATE_AIF_SLEEPER) 3414 wakeup(sc->aac_aifq); 3415 /* token may have been lost */ 3416 /* Wakeup any poll()ers */ 3417 KNOTE(&sc->rcv_kq.ki_note, 0); 3418 /* token may have been lost */ 3419 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3420 3421 return; 3422 } 3423 3424 /* 3425 * Return the Revision of the driver to userspace and check to see if the 3426 * userspace app is possibly compatible. This is extremely bogus since 3427 * our driver doesn't follow Adaptec's versioning system. Cheat by just 3428 * returning what the card reported. 3429 */ 3430 static int 3431 aac_rev_check(struct aac_softc *sc, caddr_t udata) 3432 { 3433 struct aac_rev_check rev_check; 3434 struct aac_rev_check_resp rev_check_resp; 3435 int error = 0; 3436 3437 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3438 3439 /* 3440 * Copyin the revision struct from userspace 3441 */ 3442 if ((error = copyin(udata, (caddr_t)&rev_check, 3443 sizeof(struct aac_rev_check))) != 0) { 3444 return error; 3445 } 3446 3447 fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", 3448 rev_check.callingRevision.buildNumber); 3449 3450 /* 3451 * Doctor up the response struct. 
3452 */ 3453 rev_check_resp.possiblyCompatible = 1; 3454 rev_check_resp.adapterSWRevision.external.comp.major = 3455 AAC_DRIVER_MAJOR_VERSION; 3456 rev_check_resp.adapterSWRevision.external.comp.minor = 3457 AAC_DRIVER_MINOR_VERSION; 3458 rev_check_resp.adapterSWRevision.external.comp.type = 3459 AAC_DRIVER_TYPE; 3460 rev_check_resp.adapterSWRevision.external.comp.dash = 3461 AAC_DRIVER_BUGFIX_LEVEL; 3462 rev_check_resp.adapterSWRevision.buildNumber = 3463 AAC_DRIVER_BUILD; 3464 3465 return(copyout((caddr_t)&rev_check_resp, udata, 3466 sizeof(struct aac_rev_check_resp))); 3467 } 3468 3469 /* 3470 * Pass the fib context to the caller 3471 */ 3472 static int 3473 aac_open_aif(struct aac_softc *sc, caddr_t arg) 3474 { 3475 struct aac_fib_context *fibctx, *ctx; 3476 int error = 0; 3477 3478 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3479 3480 fibctx = kmalloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); 3481 if (fibctx == NULL) 3482 return (ENOMEM); 3483 3484 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3485 /* all elements are already 0, add to queue */ 3486 if (sc->fibctx == NULL) 3487 sc->fibctx = fibctx; 3488 else { 3489 for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) 3490 ; 3491 ctx->next = fibctx; 3492 fibctx->prev = ctx; 3493 } 3494 3495 /* evaluate unique value */ 3496 fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); 3497 ctx = sc->fibctx; 3498 while (ctx != fibctx) { 3499 if (ctx->unique == fibctx->unique) { 3500 fibctx->unique++; 3501 ctx = sc->fibctx; 3502 } else { 3503 ctx = ctx->next; 3504 } 3505 } 3506 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3507 3508 error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); 3509 if (error) 3510 aac_close_aif(sc, (caddr_t)ctx); 3511 return error; 3512 } 3513 3514 /* 3515 * Close the caller's fib context 3516 */ 3517 static int 3518 aac_close_aif(struct aac_softc *sc, caddr_t arg) 3519 { 3520 struct aac_fib_context *ctx; 3521 3522 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3523 3524 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3525 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3526 if (ctx->unique == *(uint32_t *)&arg) { 3527 if (ctx == sc->fibctx) 3528 sc->fibctx = NULL; 3529 else { 3530 ctx->prev->next = ctx->next; 3531 if (ctx->next) 3532 ctx->next->prev = ctx->prev; 3533 } 3534 break; 3535 } 3536 } 3537 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3538 if (ctx) 3539 kfree(ctx, M_AACBUF); 3540 3541 return 0; 3542 } 3543 3544 /* 3545 * Pass the caller the next AIF in their queue 3546 */ 3547 static int 3548 aac_getnext_aif(struct aac_softc *sc, caddr_t arg) 3549 { 3550 struct get_adapter_fib_ioctl agf; 3551 struct aac_fib_context *ctx; 3552 int error; 3553 3554 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3555 3556 if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { 3557 for (ctx = sc->fibctx; ctx; ctx = ctx->next) { 3558 if (agf.AdapterFibContext == ctx->unique) 3559 break; 3560 } 3561 if (!ctx) 3562 return (EFAULT); 3563 3564 error = aac_return_aif(sc, ctx, agf.AifFib); 3565 if (error == EAGAIN && agf.Wait) { 3566 fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); 3567 sc->aac_state |= AAC_STATE_AIF_SLEEPER; 3568 while (error == EAGAIN) { 3569 error = tsleep(sc->aac_aifq, 3570 PCATCH, "aacaif", 0); 3571 if (error == 0) 3572 error = aac_return_aif(sc, ctx, agf.AifFib); 3573 } 3574 sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; 3575 } 3576 } 3577 return(error); 3578 } 3579 3580 /* 3581 * Hand the next AIF off the top of the queue out to userspace. 
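 * Returns EAGAIN when the caller's cursor (ctx_idx) has caught up with
 * aifq_idx and no wrap is pending, i.e. its view of the queue is empty.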
3582 */ 3583 static int 3584 aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) 3585 { 3586 int current, error; 3587 3588 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3589 3590 lockmgr(&sc->aac_aifq_lock, LK_EXCLUSIVE); 3591 current = ctx->ctx_idx; 3592 if (current == sc->aifq_idx && !ctx->ctx_wrap) { 3593 /* empty */ 3594 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3595 return (EAGAIN); 3596 } 3597 error = 3598 copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); 3599 if (error) 3600 device_printf(sc->aac_dev, 3601 "aac_return_aif: copyout returned %d\n", error); 3602 else { 3603 ctx->ctx_wrap = 0; 3604 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; 3605 } 3606 lockmgr(&sc->aac_aifq_lock, LK_RELEASE); 3607 return(error); 3608 } 3609 3610 static int 3611 aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) 3612 { 3613 struct aac_pci_info { 3614 u_int32_t bus; 3615 u_int32_t slot; 3616 } pciinf; 3617 int error; 3618 3619 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3620 3621 pciinf.bus = pci_get_bus(sc->aac_dev); 3622 pciinf.slot = pci_get_slot(sc->aac_dev); 3623 3624 error = copyout((caddr_t)&pciinf, uptr, 3625 sizeof(struct aac_pci_info)); 3626 3627 return (error); 3628 } 3629 3630 static int 3631 aac_supported_features(struct aac_softc *sc, caddr_t uptr) 3632 { 3633 struct aac_features f; 3634 int error; 3635 3636 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3637 3638 if ((error = copyin(uptr, &f, sizeof (f))) != 0) 3639 return (error); 3640 3641 /* 3642 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3643 * ALL zero in the featuresState, the driver will return the current 3644 * state of all the supported features, the data field will not be 3645 * valid. 3646 * When the management driver receives FSACTL_GET_FEATURES ioctl with 3647 * a specific bit set in the featuresState, the driver will return the 3648 * current state of this specific feature and whatever data that are 3649 * associated with the feature in the data field or perform whatever 3650 * action needed indicates in the data field. 3651 */ 3652 if (f.feat.fValue == 0) { 3653 f.feat.fBits.largeLBA = 3654 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3655 /* TODO: In the future, add other features state here as well */ 3656 } else { 3657 if (f.feat.fBits.largeLBA) 3658 f.feat.fBits.largeLBA = 3659 (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; 3660 /* TODO: Add other features state and data in the future */ 3661 } 3662 3663 error = copyout(&f, uptr, sizeof (f)); 3664 return (error); 3665 } 3666 3667 /* 3668 * Give the userland some information about the container. The AAC arch 3669 * expects the driver to be a SCSI passthrough type driver, so it expects 3670 * the containers to have b:t:l numbers. Fake it. 
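 * The fake mapping used below is simply bus = the controller's unit
 * number, target = the aac_disk unit and lun = 0.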
3671 */ 3672 static int 3673 aac_query_disk(struct aac_softc *sc, caddr_t uptr) 3674 { 3675 struct aac_query_disk query_disk; 3676 struct aac_container *co; 3677 struct aac_disk *disk; 3678 int error, id; 3679 3680 fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); 3681 3682 disk = NULL; 3683 3684 error = copyin(uptr, (caddr_t)&query_disk, 3685 sizeof(struct aac_query_disk)); 3686 if (error) 3687 return (error); 3688 3689 id = query_disk.ContainerNumber; 3690 if (id == -1) 3691 return (EINVAL); 3692 3693 lockmgr(&sc->aac_container_lock, LK_EXCLUSIVE); 3694 TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { 3695 if (co->co_mntobj.ObjectId == id) 3696 break; 3697 } 3698 3699 if (co == NULL) { 3700 query_disk.Valid = 0; 3701 query_disk.Locked = 0; 3702 query_disk.Deleted = 1; /* XXX is this right? */ 3703 } else { 3704 disk = device_get_softc(co->co_disk); 3705 query_disk.Valid = 1; 3706 query_disk.Locked = 3707 (disk->ad_flags & AAC_DISK_OPEN) ? 1 : 0; 3708 query_disk.Deleted = 0; 3709 query_disk.Bus = device_get_unit(sc->aac_dev); 3710 query_disk.Target = disk->unit; 3711 query_disk.Lun = 0; 3712 query_disk.UnMapped = 0; 3713 bcopy(disk->ad_dev_t->si_name, 3714 &query_disk.diskDeviceName[0], 10); 3715 } 3716 lockmgr(&sc->aac_container_lock, LK_RELEASE); 3717 3718 error = copyout((caddr_t)&query_disk, uptr, 3719 sizeof(struct aac_query_disk)); 3720 3721 return (error); 3722 } 3723 3724 static void 3725 aac_get_bus_info(struct aac_softc *sc) 3726 { 3727 struct aac_fib *fib; 3728 struct aac_ctcfg *c_cmd; 3729 struct aac_ctcfg_resp *c_resp; 3730 struct aac_vmioctl *vmi; 3731 struct aac_vmi_businf_resp *vmi_resp; 3732 struct aac_getbusinf businfo; 3733 struct aac_sim *caminf; 3734 device_t child; 3735 int i, found, error; 3736 3737 lockmgr(&sc->aac_io_lock, LK_EXCLUSIVE); 3738 aac_alloc_sync_fib(sc, &fib); 3739 c_cmd = (struct aac_ctcfg *)&fib->data[0]; 3740 bzero(c_cmd, sizeof(struct aac_ctcfg)); 3741 3742 c_cmd->Command = VM_ContainerConfig; 3743 c_cmd->cmd = CT_GET_SCSI_METHOD; 3744 c_cmd->param = 0; 3745 3746 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3747 sizeof(struct aac_ctcfg)); 3748 if (error) { 3749 device_printf(sc->aac_dev, "Error %d sending " 3750 "VM_ContainerConfig command\n", error); 3751 aac_release_sync_fib(sc); 3752 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3753 return; 3754 } 3755 3756 c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; 3757 if (c_resp->Status != ST_OK) { 3758 device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", 3759 c_resp->Status); 3760 aac_release_sync_fib(sc); 3761 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3762 return; 3763 } 3764 3765 sc->scsi_method_id = c_resp->param; 3766 3767 vmi = (struct aac_vmioctl *)&fib->data[0]; 3768 bzero(vmi, sizeof(struct aac_vmioctl)); 3769 3770 vmi->Command = VM_Ioctl; 3771 vmi->ObjType = FT_DRIVE; 3772 vmi->MethId = sc->scsi_method_id; 3773 vmi->ObjId = 0; 3774 vmi->IoctlCmd = GetBusInfo; 3775 3776 error = aac_sync_fib(sc, ContainerCommand, 0, fib, 3777 sizeof(struct aac_vmi_businf_resp)); 3778 if (error) { 3779 device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", 3780 error); 3781 aac_release_sync_fib(sc); 3782 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3783 return; 3784 } 3785 3786 vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; 3787 if (vmi_resp->Status != ST_OK) { 3788 device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", 3789 vmi_resp->Status); 3790 aac_release_sync_fib(sc); 3791 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3792 return; 3793 } 3794 3795 bcopy(&vmi_resp->BusInf, &businfo, 
sizeof(struct aac_getbusinf)); 3796 aac_release_sync_fib(sc); 3797 lockmgr(&sc->aac_io_lock, LK_RELEASE); 3798 3799 found = 0; 3800 for (i = 0; i < businfo.BusCount; i++) { 3801 if (businfo.BusValid[i] != AAC_BUS_VALID) 3802 continue; 3803 3804 caminf = (struct aac_sim *)kmalloc(sizeof(struct aac_sim), 3805 M_AACBUF, M_INTWAIT | M_ZERO); 3806 3807 child = device_add_child(sc->aac_dev, "aacp", -1); 3808 if (child == NULL) { 3809 device_printf(sc->aac_dev, 3810 "device_add_child failed for passthrough bus %d\n", 3811 i); 3812 kfree(caminf, M_AACBUF); 3813 break; 3814 } 3815 3816 caminf->TargetsPerBus = businfo.TargetsPerBus; 3817 caminf->BusNumber = i; 3818 caminf->InitiatorBusId = businfo.InitiatorBusId[i]; 3819 caminf->aac_sc = sc; 3820 caminf->sim_dev = child; 3821 3822 device_set_ivars(child, caminf); 3823 device_set_desc(child, "SCSI Passthrough Bus"); 3824 TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); 3825 3826 found = 1; 3827 } 3828 3829 if (found) 3830 bus_generic_attach(sc->aac_dev); 3831 3832 return; 3833 } 3834
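#if 0
/*
 * Illustrative sketch only (not compiled): how the "aacp" passthrough bus
 * driver attached above might recover the per-bus information handed to it
 * through device_set_ivars().  The function name is hypothetical; the real
 * consumer lives in the aacp/CAM attachment, not in this file.
 */
static int
aacp_example_attach(device_t dev)
{
	struct aac_sim *sim = device_get_ivars(dev);

	device_printf(dev, "bus %d: %d targets, initiator id %d\n",
	    (int)sim->BusNumber, (int)sim->TargetsPerBus,
	    (int)sim->InitiatorBusId);
	return (0);
}
#endif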