/*-
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2005 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002, 2004 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.99 2012/08/31 09:42:46 scottl Exp $
 */

/*
 * Driver for the AMI MegaRaid family of controllers.
 */

#include "opt_amr.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <machine/cpu.h>
#include <sys/rman.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/raid/amr/amrio.h>
#include <dev/raid/amr/amrreg.h>
#include <dev/raid/amr/amrvar.h>
#define AMR_DEFINE_TABLES
#include <dev/raid/amr/amr_tables.h>

SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");

static d_open_t		amr_open;
static d_close_t	amr_close;
static d_ioctl_t	amr_ioctl;

static struct dev_ops amr_ops = {
    { "amr", 0, 0 },
    .d_open =	amr_open,
    .d_close =	amr_close,
    .d_ioctl =	amr_ioctl,
};

int linux_no_adapter = 0;
/*
 * Initialisation, bus interface.
 */
static void	amr_startup(void *arg);

/*
 * Command wrappers
 */
static int	amr_query_controller(struct amr_softc *sc);
static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
			     u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
static void	amr_completeio(struct amr_command *ac);
static int	amr_support_ext_cdb(struct amr_softc *sc);

/*
 * Command buffer allocation.
 */
static void	amr_alloccmd_cluster(struct amr_softc *sc);
static void	amr_freecmd_cluster(struct amr_command_cluster *acc);

/*
 * Command processing.
 */
static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
static int	amr_wait_command(struct amr_command *ac);
static int	amr_mapcmd(struct amr_command *ac);
static void	amr_unmapcmd(struct amr_command *ac);
static int	amr_start(struct amr_command *ac);
static void	amr_complete(void *context, ac_qhead_t *head);
static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_abort_load(struct amr_command *ac);

/*
 * Interface-specific shims
 */
static int	amr_quartz_submit_command(struct amr_command *ac);
static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_quartz_poll_command(struct amr_command *ac);
static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);

static int	amr_std_submit_command(struct amr_command *ac);
static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_std_poll_command(struct amr_command *ac);
static void	amr_std_attach_mailbox(struct amr_softc *sc);

#ifdef AMR_BOARD_INIT
static int	amr_quartz_init(struct amr_softc *sc);
static int	amr_std_init(struct amr_softc *sc);
#endif

/*
 * Debugging
 */
static void	amr_describe_controller(struct amr_softc *sc);
#ifdef AMR_DEBUG
#if 0
static void	amr_printcommand(struct amr_command *ac);
#endif
#endif

static void	amr_init_sysctl(struct amr_softc *sc);
static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
		    int32_t flag, struct sysmsg *sm);

static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");

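/*
 * Rough sketch of the command flow through this driver (see the individual
 * functions below for the details):
 *
 *	amr_submit_bio() queues an incoming bio and calls amr_startio(), which
 *	turns queued bios (amr_bio_command()) or CAM ccbs into commands and
 *	hands them to amr_start().  amr_start() reserves a command slot and
 *	calls amr_mapcmd(); the bus_dma callback (amr_setup_data() or
 *	amr_setup_ccb()) then fills in the s/g list and submits the mailbox
 *	through the per-interface submit routine.  Completions are harvested
 *	by amr_done() and finished off in amr_complete(), which unmaps the
 *	data buffer and restarts queued I/O.
 */
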
/********************************************************************************
 ********************************************************************************
                                                                     Inline Glue
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 ********************************************************************************
                                                               Public Interfaces
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Initialise the controller and softc.
 */
int
amr_attach(struct amr_softc *sc)
{
    device_t	child;

    debug_called(1);

    /*
     * Initialise per-controller queues.
     */
    amr_init_qhead(&sc->amr_freecmds);
    amr_init_qhead(&sc->amr_ready);
    TAILQ_INIT(&sc->amr_cmd_clusters);
    bioq_init(&sc->amr_bioq);

    debug(2, "queue init done");

    /*
     * Configure for this controller type.
     */
    if (AMR_IS_QUARTZ(sc)) {
	sc->amr_submit_command = amr_quartz_submit_command;
	sc->amr_get_work = amr_quartz_get_work;
	sc->amr_poll_command = amr_quartz_poll_command;
	sc->amr_poll_command1 = amr_quartz_poll_command1;
    } else {
	sc->amr_submit_command = amr_std_submit_command;
	sc->amr_get_work = amr_std_get_work;
	sc->amr_poll_command = amr_std_poll_command;
	amr_std_attach_mailbox(sc);
    }

#ifdef AMR_BOARD_INIT
    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
	return(ENXIO);
#endif

    /*
     * Allocate initial commands.
     */
    amr_alloccmd_cluster(sc);

    /*
     * Quiz controller for features and limits.
     */
    if (amr_query_controller(sc))
	return(ENXIO);

    debug(2, "controller query complete");

    /*
     * preallocate the remaining commands.
     */
    while (sc->amr_nextslot < sc->amr_maxio)
	amr_alloccmd_cluster(sc);

    /*
     * Setup sysctls.
     */
    sysctl_ctx_init(&sc->amr_sysctl_ctx);
    sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx,
	SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, "");
    if (sc->amr_sysctl_tree == NULL) {
	device_printf(sc->amr_dev, "can't add sysctl node\n");
	return (EINVAL);
    }
    amr_init_sysctl(sc);

    /*
     * Attach our 'real' SCSI channels to CAM.
     */
    child = device_add_child(sc->amr_dev, "amrp", -1);
    sc->amr_pass = child;
    if (child != NULL) {
	device_set_softc(child, sc);
	device_set_desc(child, "SCSI Passthrough Bus");
	bus_generic_attach(sc->amr_dev);
    }

    /*
     * Create the control device.
     */
    sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
    sc->amr_dev_t->si_drv1 = sc;
    linux_no_adapter++;
    if (device_get_unit(sc->amr_dev) == 0)
	make_dev_alias(sc->amr_dev_t, "megadev0");

    /*
     * Schedule ourselves to bring the controller up once interrupts are
     * available.
     */
    bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
    sc->amr_ich.ich_func = amr_startup;
    sc->amr_ich.ich_arg = sc;
    sc->amr_ich.ich_desc = "amr";
    if (config_intrhook_establish(&sc->amr_ich) != 0) {
	device_printf(sc->amr_dev, "can't establish configuration hook\n");
	return(ENOMEM);
    }

    /*
     * Print a little information about the controller.
     */
    amr_describe_controller(sc);

    debug(2, "attach complete");
    return(0);
}

/********************************************************************************
 * Locate disk resources and attach children to them.
 */
static void
amr_startup(void *arg)
{
    struct amr_softc	*sc = (struct amr_softc *)arg;
    struct amr_logdrive	*dr;
    int			i, error;

    debug_called(1);

    /* pull ourselves off the intrhook chain */
    if (sc->amr_ich.ich_func)
	config_intrhook_disestablish(&sc->amr_ich);
    sc->amr_ich.ich_func = NULL;

    /* get up-to-date drive information */
    if (amr_query_controller(sc)) {
	device_printf(sc->amr_dev, "can't scan controller for drives\n");
	return;
    }

    /* iterate over available drives */
    for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
	/* are we already attached to this drive? */
	if (dr->al_disk == 0) {
	    /* generate geometry information */
	    if (dr->al_size > 0x200000) {	/* extended translation? */
		dr->al_heads = 255;
		dr->al_sectors = 63;
	    } else {
		dr->al_heads = 64;
		dr->al_sectors = 32;
	    }
	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);

	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
	    if (dr->al_disk == 0)
		device_printf(sc->amr_dev, "device_add_child failed\n");
	    device_set_ivars(dr->al_disk, dr);
	}
    }

    if ((error = bus_generic_attach(sc->amr_dev)) != 0)
	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);

    /* mark controller back up */
    sc->amr_state &= ~AMR_STATE_SHUTDOWN;

    /* interrupts will be enabled before we do anything more */
    sc->amr_state |= AMR_STATE_INTEN;

    return;
}

static void
amr_init_sysctl(struct amr_softc *sc)
{

    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
	"");
}


/*******************************************************************************
 * Free resources associated with a controller instance
 */
void
amr_free(struct amr_softc *sc)
{
    struct amr_command_cluster	*acc;

    /* detach from CAM */
    if (sc->amr_pass != NULL)
	device_delete_child(sc->amr_dev, sc->amr_pass);

    /* throw away any command buffers */
    while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
	amr_freecmd_cluster(acc);
    }

    /* destroy control device */
    if(sc->amr_dev_t != NULL)
	destroy_dev(sc->amr_dev_t);
    dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));

#if 0 /* XXX swildner */
    if (mtx_initialized(&sc->amr_hw_lock))
	mtx_destroy(&sc->amr_hw_lock);

    if (mtx_initialized(&sc->amr_list_lock))
	mtx_destroy(&sc->amr_list_lock);
#endif

    if (sc->amr_sysctl_tree != NULL)
	sysctl_ctx_free(&sc->amr_sysctl_ctx);

    lockuninit(&sc->amr_hw_lock);
    lockuninit(&sc->amr_list_lock);
}

/*******************************************************************************
 * Receive a bio structure from a child device and queue it on a particular
 * disk resource, then poke the disk resource to start as much work as it can.
 */
int
amr_submit_bio(struct amr_softc *sc, struct bio *bio)
{
    debug_called(2);

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    amr_enqueue_bio(sc, bio);
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(0);
}

/********************************************************************************
 * Accept an open operation on the control device.
 */
static int
amr_open(struct dev_open_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state |= AMR_STATE_OPEN;
    return(0);
}

/********************************************************************************
 * Accept the last close on the control device.
 */
static int
amr_close(struct dev_close_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    int			unit = minor(dev);
    struct amr_softc	*sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_OPEN;
    return (0);
}

/********************************************************************************
 * Handle controller-specific control operations.
 */
static void
amr_rescan_drives(struct cdev *dev)
{
    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
    int			i, error = 0;

    sc->amr_state |= AMR_STATE_REMAP_LD;
    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "idle controller\n");
	amr_done(sc);
    }

    /* mark ourselves as in-shutdown */
    sc->amr_state |= AMR_STATE_SHUTDOWN;

    /* flush controller */
    device_printf(sc->amr_dev, "flushing cache...");
    kprintf("%s\n", amr_flush(sc) ? "failed" : "done");

    /* delete all our child devices */
    for(i = 0 ; i < AMR_MAXLD; i++) {
	if(sc->amr_drive[i].al_disk != 0) {
	    if((error = device_delete_child(sc->amr_dev,
		sc->amr_drive[i].al_disk)) != 0)
		goto shutdown_out;

	    sc->amr_drive[i].al_disk = 0;
	}
    }

 shutdown_out:
    amr_startup(sc);
}

/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transferred to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyway, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.
 * Force a minimum buffer size of 32k and round sizes between 32k and 64k
 * up to 64k as a workaround.
 */
static unsigned long
amr_ioctl_buffer_length(unsigned long len)
{

    if (len <= 32 * 1024)
	return (32 * 1024);
    if (len <= 64 * 1024)
	return (64 * 1024);
    return (len);
}

int
amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
    struct sysmsg *sm)
{
    struct amr_softc		*sc = (struct amr_softc *)dev->si_drv1;
    struct amr_command		*ac;
    struct amr_mailbox		*mb;
    struct amr_linux_ioctl	ali;
    void			*dp, *temp;
    int				error;
    int				len, ac_flags = 0;
    int				logical_drives_changed = 0;
    u_int32_t			linux_version = 0x02100000;
    u_int8_t			status;
    struct amr_passthrough	*ap;	/* 60 bytes */

    error = 0;
    dp = NULL;
    ac = NULL;
    ap = NULL;

    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
	return (error);
    switch (ali.ui.fcs.opcode) {
    case 0x82:
	switch(ali.ui.fcs.subopcode) {
	case 'e':
	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
		sizeof(linux_version));
	    error = 0;
	    break;

	case 'm':
	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
		sizeof(linux_no_adapter));
	    sm->sm_result.iresult = linux_no_adapter;
	    error = 0;
	    break;

	default:
	    kprintf("Unknown subopcode\n");
	    error = ENOIOCTL;
	    break;
	}
	break;

    case 0x80:
    case 0x81:
	if (ali.ui.fcs.opcode == 0x80)
	    len = max(ali.outlen, ali.inlen);
	else
	    len = ali.ui.fcs.length;

	mb = (void *)&ali.mbox[0];

	if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
	    if (sc->amr_allow_vol_config == 0) {
		error = EPERM;
		break;
	    }
	    logical_drives_changed = 1;
	}

	if (ali.mbox[0] == AMR_CMD_PASS) {
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    ap = &ac->ac_ccb->ccb_pthru;

	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
		sizeof(struct amr_passthrough));
	    if (error)
		break;

	    if (ap->ap_data_transfer_length)
		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
		    M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
		    dp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	    ac->ac_flags = ac_flags;

	    ac->ac_data = dp;
	    ac->ac_length = ap->ap_data_transfer_length;
	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
	    if (error)
		break;

	    if (ali.outlen) {
		error = copyout(dp, temp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }
	    error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
	    if (error)
		break;

	    error = 0;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
	    kprintf("No AMR_CMD_PASS_64\n");
	    error = ENOIOCTL;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
	    kprintf("No AMR_CMD_EXTPASS\n");
	    error = ENOIOCTL;
	    break;
	} else {
	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));

	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
		if (error)
		    break;
	    }

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));

	    ac->ac_length = len;
	    ac->ac_data = dp;
	    ac->ac_flags = ac_flags;

	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
	    if (ali.outlen) {
		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
		if (error)
		    break;
	    }

	    error = 0;
	    if (logical_drives_changed)
		amr_rescan_drives(dev);
	    break;
	}
	break;

    default:
	debug(1, "unknown linux ioctl 0x%lx", cmd);
	kprintf("unknown linux ioctl 0x%lx\n", cmd);
	error = ENOIOCTL;
	break;
    }

    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);
    return(error);
}

static int
amr_ioctl(struct dev_ioctl_args *ap)
{
    cdev_t		dev = ap->a_head.a_dev;
    caddr_t		addr = ap->a_data;
    u_long		cmd = ap->a_cmd;
    struct amr_softc	*sc = (struct amr_softc *)dev->si_drv1;
    union {
	void			*_p;
	struct amr_user_ioctl	*au;
#ifdef AMR_IO_COMMAND32
	struct amr_user_ioctl32	*au32;
#endif
	int			*result;
    } arg;
    struct amr_command		*ac;
    struct amr_mailbox_ioctl	*mbi;
    void			*dp, *au_buffer;
    unsigned long		au_length, real_length;
    unsigned char		*au_cmd;
    int				*au_statusp;
    int				error;
    struct amr_passthrough	*_ap;	/* 60 bytes */
    int				logical_drives_changed = 0;

    debug_called(1);

    arg._p = (void *)addr;

    error = 0;
    dp = NULL;
    ac = NULL;
    _ap = NULL;

    switch(cmd) {

    case AMR_IO_VERSION:
	debug(1, "AMR_IO_VERSION");
	*arg.result = AMR_IO_VERSION_NUMBER;
	return(0);

#ifdef AMR_IO_COMMAND32
    /*
     * Accept ioctl-s from 32-bit binaries on non-32-bit
     * platforms, such as AMD.  LSI's MEGAMGR utility is
     * the only example known today...
     *							-mi
     */
    case AMR_IO_COMMAND32:
	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
	au_cmd = arg.au32->au_cmd;
	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
	au_length = arg.au32->au_length;
	au_statusp = &arg.au32->au_status;
	break;
#endif

    case AMR_IO_COMMAND:
	debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
	au_cmd = arg.au->au_cmd;
	au_buffer = (void *)arg.au->au_buffer;
	au_length = arg.au->au_length;
	au_statusp = &arg.au->au_status;
	break;

    case 0xc0046d00:
    case 0xc06e6d00:	/* Linux emulation */
	{
	    devclass_t			devclass;
	    struct amr_linux_ioctl	ali;
	    int				adapter, error;

	    devclass = devclass_find("amr");
	    if (devclass == NULL)
		return (ENOENT);

	    error = copyin(addr, &ali, sizeof(ali));
	    if (error)
		return (error);
	    if (ali.ui.fcs.opcode == 0x82)
		adapter = 0;
	    else
		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	    sc = devclass_get_softc(devclass, adapter);
	    if (sc == NULL)
		return (ENOENT);

	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
	}
    default:
	debug(1, "unknown ioctl 0x%lx", cmd);
	return(ENOIOCTL);
    }

    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
	if (sc->amr_allow_vol_config == 0) {
	    error = EPERM;
	    goto out;
	}
	logical_drives_changed = 1;
    }

    /* handle inbound data buffer */
    real_length = amr_ioctl_buffer_length(au_length);
    if (au_length != 0 && au_cmd[0] != 0x06) {
	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
	    error = ENOMEM;
	    goto out;
	}
	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
	    kfree(dp, M_AMR);
	    return (error);
	}
	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
    }

    /* Allocate this now before the mutex gets held */

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    while ((ac = amr_alloccmd(sc)) == NULL)
	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

    /* handle SCSI passthrough command */
    if (au_cmd[0] == AMR_CMD_PASS) {
	int len;

	_ap = &ac->ac_ccb->ccb_pthru;
	bzero(_ap, sizeof(struct amr_passthrough));

	/* copy cdb */
	len = au_cmd[2];
	_ap->ap_cdb_length = len;
	bcopy(au_cmd + 3, _ap->ap_cdb, len);

	/* build passthrough */
	_ap->ap_timeout = au_cmd[len + 3] & 0x07;
	_ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
	_ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
	_ap->ap_logical_drive_no = au_cmd[len + 4];
	_ap->ap_channel = au_cmd[len + 5];
	_ap->ap_scsi_id = au_cmd[len + 6];
	_ap->ap_request_sense_length = 14;
	_ap->ap_data_transfer_length = au_length;
	/* XXX what about the request-sense area?  does the caller want it? */

	/* build command */
	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	ac->ac_flags = AMR_CMD_CCB;

    } else {
	/* direct command to controller */
	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;

	/* copy pertinent mailbox items */
	mbi->mb_command = au_cmd[0];
	mbi->mb_channel = au_cmd[1];
	mbi->mb_param = au_cmd[2];
	mbi->mb_pad[0] = au_cmd[3];
	mbi->mb_drive = au_cmd[4];
	ac->ac_flags = 0;
    }

    /* build the command */
    ac->ac_data = dp;
    ac->ac_length = real_length;
    ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;

    /* run the command */
    error = amr_wait_command(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (error)
	goto out;

    /* copy out data and set status */
    if (au_length != 0) {
	error = copyout(dp, au_buffer, au_length);
    }
    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
    if (dp != NULL)
	debug(2, "%p status 0x%x", dp, ac->ac_status);
    *au_statusp = ac->ac_status;

out:
    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);

    if (logical_drives_changed)
	amr_rescan_drives(dev);

    return(error);
}

/********************************************************************************
 ********************************************************************************
                                                                Command Wrappers
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softc *sc)
{
    struct amr_enquiry3	*aex;
    struct amr_prodinfo	*ap;
    struct amr_enquiry	*ae;
    int			ldrv;
    int			status;

    /*
     * Greater than 10 byte cdb support
     */
    sc->support_ext_cdb = amr_support_ext_cdb(sc);

    if(sc->support_ext_cdb) {
	debug(2,"supports extended CDBs.");
    }

    /*
     * Try to issue an ENQUIRY3 command
     */
    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
	    sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
	    debug(2, "  drive %d: %d state %x properties %x", ldrv, sc->amr_drive[ldrv].al_size,
		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}
	kfree(aex, M_AMR);

	/*
	 * Get product info for channel count.
964 */ 965 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) { 966 device_printf(sc->amr_dev, "can't obtain product data from controller\n"); 967 return(1); 968 } 969 sc->amr_maxdrives = 40; 970 sc->amr_maxchan = ap->ap_nschan; 971 sc->amr_maxio = ap->ap_maxio; 972 sc->amr_type |= AMR_TYPE_40LD; 973 kfree(ap, M_AMR); 974 975 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status); 976 if (ap != NULL) 977 kfree(ap, M_AMR); 978 if (!status) { 979 sc->amr_ld_del_supported = 1; 980 device_printf(sc->amr_dev, "delete logical drives supported by controller\n"); 981 } 982 } else { 983 984 /* failed, try the 8LD ENQUIRY commands */ 985 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) { 986 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) { 987 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n"); 988 return(1); 989 } 990 ae->ae_signature = 0; 991 } 992 993 /* 994 * Fetch current state of logical drives. 995 */ 996 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) { 997 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv]; 998 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv]; 999 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv]; 1000 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size, 1001 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties); 1002 } 1003 1004 sc->amr_maxdrives = 8; 1005 sc->amr_maxchan = ae->ae_adapter.aa_channels; 1006 sc->amr_maxio = ae->ae_adapter.aa_maxio; 1007 kfree(ae, M_AMR); 1008 } 1009 1010 /* 1011 * Mark remaining drives as unused. 1012 */ 1013 for (; ldrv < AMR_MAXLD; ldrv++) 1014 sc->amr_drive[ldrv].al_size = 0xffffffff; 1015 1016 /* 1017 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust 1018 * the controller's reported value, and lockups have been seen when we do. 1019 */ 1020 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD); 1021 1022 return(0); 1023 } 1024 1025 /******************************************************************************** 1026 * Run a generic enquiry-style command. 1027 */ 1028 static void * 1029 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status) 1030 { 1031 struct amr_command *ac; 1032 void *result; 1033 u_int8_t *mbox; 1034 int error; 1035 1036 debug_called(1); 1037 1038 error = 1; 1039 result = NULL; 1040 1041 /* get ourselves a command buffer */ 1042 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1043 ac = amr_alloccmd(sc); 1044 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1045 if (ac == NULL) 1046 goto out; 1047 /* allocate the response structure */ 1048 if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL) 1049 goto out; 1050 /* set command flags */ 1051 1052 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN; 1053 1054 /* point the command at our data */ 1055 ac->ac_data = result; 1056 ac->ac_length = bufsize; 1057 1058 /* build the command proper */ 1059 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? 
    mbox[0] = cmd;
    mbox[2] = cmdsub;
    mbox[3] = cmdqual;
    *status = 0;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;
    *status = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if ((error != 0) && (result != NULL)) {
	kfree(result, M_AMR);
	result = NULL;
    }
    return(result);
}

/********************************************************************************
 * Flush the controller's internal cache, return status.
 */
int
amr_flush(struct amr_softc *sc)
{
    struct amr_command	*ac;
    int			error;

    /* get ourselves a command buffer */
    error = 1;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;

    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Detect extended CDB (greater than 10 byte CDB) support.
 * Returns '1' if this support exists, '0' if it does not.
 */
static int
amr_support_ext_cdb(struct amr_softc *sc)
{
    struct amr_command	*ac;
    u_int8_t		*mbox;
    int			error;

    /* get ourselves a command buffer */
    error = 0;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = 0xA4;
    mbox[2] = 0x16;


    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    if (ac->ac_status == AMR_STATUS_SUCCESS) {
	error = 1;
    }

 out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Try to find I/O work for the controller from one or more of the work queues.
 *
 * We make the assumption that if the controller is not ready to take a command
 * at some given time, it will generate an interrupt at some later time when
 * it is.
 */
void
amr_startio(struct amr_softc *sc)
{
    struct amr_command	*ac;

    /* spin until something prevents us from doing any work */
    for (;;) {

	/* Don't bother to queue commands if no bounce buffers are available. */
	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
	    break;

	/* try to get a ready command */
	ac = amr_dequeue_ready(sc);

	/* if that failed, build a command from a bio */
	if (ac == NULL)
	    (void)amr_bio_command(sc, &ac);

	/* if that failed, build a command from a ccb */
	if ((ac == NULL) && (sc->amr_cam_command != NULL))
	    sc->amr_cam_command(sc, &ac);

	/* if we don't have anything to do, give up */
	if (ac == NULL)
	    break;

	/* try to give the command to the controller; if this fails save it for later and give up */
	if (amr_start(ac)) {
	    debug(2, "controller busy, command deferred");
	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
	    break;
	}
    }
}

/********************************************************************************
 * Handle completion of an I/O command.
 */
static void
amr_completeio(struct amr_command *ac)
{
    struct amr_softc		*sc = ac->ac_sc;
    static struct timeval	lastfail;
    static int			curfail;
    struct buf			*bp = ac->ac_bio->bio_buf;

    if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;

	if (ppsratecheck(&lastfail, &curfail, 1))
	    device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
	/* amr_printcommand(ac); */
    }
    amrd_intr(ac->ac_bio);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
    amr_releasecmd(ac);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
}

/********************************************************************************
 ********************************************************************************
                                                              Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Convert a bio off the top of the bio queue into a command.
 */
static int
amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
{
    struct amr_command	*ac;
    struct amrd_softc	*amrd;
    struct bio		*bio;
    struct buf		*bp;
    int			error;
    int			blkcount;
    int			driveno;
    int			cmd;

    ac = NULL;
    error = 0;

    /* get a command */
    if ((ac = amr_alloccmd(sc)) == NULL)
	return (ENOMEM);

    /* get a bio to work on */
    if ((bio = amr_dequeue_bio(sc)) == NULL) {
	amr_releasecmd(ac);
	return (0);
    }

    /* connect the bio to the command */
    bp = bio->bio_buf;
    ac->ac_complete = amr_completeio;
    ac->ac_bio = bio;
    ac->ac_data = bp->b_data;
    ac->ac_length = bp->b_bcount;
    cmd = 0;
    switch (bp->b_cmd) {
    case BUF_CMD_READ:
	ac->ac_flags |= AMR_CMD_DATAIN;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LREAD64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LREAD;
	break;
    case BUF_CMD_WRITE:
	ac->ac_flags |= AMR_CMD_DATAOUT;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LWRITE64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LWRITE;
	break;
    case BUF_CMD_FLUSH:
	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
	cmd = AMR_CMD_FLUSH;
	break;
    default:
	panic("Invalid bio command");
    }
    amrd = (struct amrd_softc *)bio->bio_driver_info;
    driveno = amrd->amrd_drive - sc->amr_drive;
    blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;

    ac->ac_mailbox.mb_command = cmd;
    if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
	ac->ac_mailbox.mb_blkcount = blkcount;
	ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
	if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
	    device_printf(sc->amr_dev,
			  "I/O beyond end of unit (%lld,%d > %lu)\n",
			  (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
			  (u_long)sc->amr_drive[driveno].al_size);
	}
    }
    ac->ac_mailbox.mb_drive = driveno;
    if (sc->amr_state & AMR_STATE_REMAP_LD)
	ac->ac_mailbox.mb_drive |= 0x80;

    /* we fill in the s/g related data when the command is mapped */


    *acp = ac;
    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and sleep until it completes
 * or fails.  Interrupts must be enabled, returns nonzero on error.
 */
static int
amr_wait_command(struct amr_command *ac)
{
    int			error = 0;
    struct amr_softc	*sc = ac->ac_sc;

    debug_called(1);

    ac->ac_complete = NULL;
    ac->ac_flags |= AMR_CMD_SLEEP;
    if ((error = amr_start(ac)) != 0) {
	return(error);
    }

    while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
	error = lksleep(ac, &sc->amr_list_lock, 0, "amrwcmd", 0);
    }

    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_std_poll_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    int			error, count;

    debug_called(2);

    ac->ac_complete = NULL;
    if ((error = amr_start(ac)) != 0)
	return(error);

    count = 0;
    do {
	/*
	 * Poll for completion, although the interrupt handler may beat us to it.
	 * Note that the timeout here is somewhat arbitrary.
	 */
	amr_done(sc);
	DELAY(1000);
    } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
    if (!(ac->ac_flags & AMR_CMD_BUSY)) {
	error = 0;
    } else {
	/* XXX the slot is now marked permanently busy */
	error = EIO;
	device_printf(sc->amr_dev, "polled command timeout\n");
    }
    return(error);
}

static void
amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	ac->ac_status = AMR_STATUS_ABORTED;
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
	((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
	(mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    sc->amr_poll_command1(sc, ac);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_quartz_poll_command(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    int			error;

    debug_called(2);

    error = 0;

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    /* now we have a slot, we can map the command (unmapped in amr_complete) */
    if (ac->ac_data != NULL && ac->ac_length != 0) {
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
	    error = 1;
	}
    } else {
	error = amr_quartz_poll_command1(sc, ac);
    }

    return (error);
}

static int
amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
{
    int count, error;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
	count = 0;
	while (sc->amr_busyslots) {
	    lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
	    if (count++ > 10) {
		break;
	    }
	}

	if (sc->amr_busyslots) {
	    device_printf(sc->amr_dev, "adapter is busy\n");
	    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	    if (ac->ac_data != NULL) {
		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	    }
	    ac->ac_status = 0;
	    return(1);
	}
    }

    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);

    /* clear the poll/ack fields in the mailbox */
    sc->amr_mailbox->mb_ident = 0xFE;
    sc->amr_mailbox->mb_nstatus = 0xFF;
    sc->amr_mailbox->mb_status = 0xFF;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;
    sc->amr_mailbox->mb_busy = 1;

    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);

    while (sc->amr_mailbox->mb_nstatus == 0xFF)
	DELAY(1);
    while (sc->amr_mailbox->mb_status == 0xFF)
	DELAY(1);
    ac->ac_status = sc->amr_mailbox->mb_status;
    error = (ac->ac_status != AMR_STATUS_SUCCESS) ? 1 : 0;
    while (sc->amr_mailbox->mb_poll != 0x77)
	DELAY(1);
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0x77;

    /* acknowledge that we have the commands */
    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
    while (AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
	DELAY(1);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);

    /* unmap the command's data buffer */
    if (ac->ac_flags & AMR_CMD_DATAIN) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
    }
    if (ac->ac_flags & AMR_CMD_DATAOUT) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
    }
    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);

    return(error);
}

static __inline int
amr_freeslot(struct amr_command *ac)
{
    struct amr_softc	*sc = ac->ac_sc;
    int			slot;

    debug_called(3);

    slot = ac->ac_slot;
    if (sc->amr_busycmd[slot] == NULL)
	panic("amr: slot %d not busy?", slot);

    sc->amr_busycmd[slot] = NULL;
    atomic_subtract_int(&sc->amr_busyslots, 1);

    return (0);
}

/********************************************************************************
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct amr_command	*ac = (struct amr_command *)arg;
    struct amr_sgentry	*sg;
    struct amr_sg64entry *sg64;
    int			flags, i;

    debug_called(3);

    /* get base address of s/g table */
    sg = ac->ac_sg.sg32;
    sg64 = ac->ac_sg.sg64;

    if (AC_IS_SG64(ac)) {
	ac->ac_nsegments = nsegments;
	ac->ac_mb_physaddr = 0xffffffff;
	for (i = 0; i < nsegments; i++, sg64++) {
	    sg64->sg_addr = segs[i].ds_addr;
	    sg64->sg_count = segs[i].ds_len;
	}
    } else {
	/* decide whether we need to populate the s/g table */
	if (nsegments < 2) {
	    ac->ac_nsegments = 0;
	    ac->ac_mb_physaddr = segs[0].ds_addr;
	} else {
	    ac->ac_nsegments = nsegments;
	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
	    for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = segs[i].ds_addr;
		sg->sg_count = segs[i].ds_len;
	    }
	}
    }

    flags = 0;
    if (ac->ac_flags & AMR_CMD_DATAIN)
	flags |= BUS_DMASYNC_PREREAD;
    if (ac->ac_flags & AMR_CMD_DATAOUT)
	flags |= BUS_DMASYNC_PREWRITE;
    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
    ac->ac_flags |= AMR_CMD_MAPPED;
}

static void
amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	amr_abort_load(ac);
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
	((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
	(mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

static void
amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
    struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	amr_abort_load(ac);
	return;
    }

    /* Set up the mailbox portion of the command to point at the ccb */
    ac->ac_mailbox.mb_nsgelem = 0;
    ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;

    amr_setup_sg(arg, segs, nsegs, err);

    switch (ac->ac_mailbox.mb_command) {
    case AMR_CMD_EXTPASS:
	aep->ap_no_sg_elements = ac->ac_nsegments;
	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
	break;
    case AMR_CMD_PASS:
	ap->ap_no_sg_elements = ac->ac_nsegments;
	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
	break;
    default:
	panic("Unknown ccb command");
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

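/********************************************************************************
 * Map the command's data into bus space and hand the command to the controller.
 *
 * Selects the 32- or 64-bit DMA tag/map for the command.  If the command has
 * data that is not yet mapped, a bus_dmamap_load() is started and submission
 * continues in the amr_setup_ccb()/amr_setup_data() callbacks above; commands
 * without data are submitted directly.  A deferred load (EINPROGRESS) freezes
 * the ready queue until the callback runs.
 */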
static int
amr_mapcmd(struct amr_command *ac)
{
    bus_dmamap_callback_t	*cb;
    struct amr_softc		*sc = ac->ac_sc;

    debug_called(3);

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    if (ac->ac_flags & AMR_CMD_CCB)
	cb = amr_setup_ccb;
    else
	cb = amr_setup_data;

    /* if the command involves data at all, and hasn't been mapped */
    if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
	/* map the data buffers into bus space and build the s/g list */
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	    ac->ac_length, cb, ac, 0) == EINPROGRESS) {
	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
	}
    } else {
	if (sc->amr_submit_command(ac) == EBUSY) {
	    amr_freeslot(ac);
	    amr_requeue_ready(ac);
	}
    }

    return (0);
}

static void
amr_unmapcmd(struct amr_command *ac)
{
    int			flag;

    debug_called(3);

    /* if the command involved data at all and was mapped */
    if (ac->ac_flags & AMR_CMD_MAPPED) {

	if (ac->ac_data != NULL) {

	    flag = 0;
	    if (ac->ac_flags & AMR_CMD_DATAIN)
		flag |= BUS_DMASYNC_POSTREAD;
	    if (ac->ac_flags & AMR_CMD_DATAOUT)
		flag |= BUS_DMASYNC_POSTWRITE;

	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
    }
}

static void
amr_abort_load(struct amr_command *ac)
{
    ac_qhead_t		head;
    struct amr_softc	*sc = ac->ac_sc;

    KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);

    ac->ac_status = AMR_STATUS_ABORTED;
    amr_init_qhead(&head);
    amr_enqueue_completed(ac, &head);

    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    amr_complete(sc, &head);
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
}

/********************************************************************************
 * Take a command and give it to the controller, returns 0 if successful, or
 * EBUSY if the command should be retried later.
 */
static int
amr_start(struct amr_command *ac)
{
    struct amr_softc	*sc;
    int			error = 0;
    int			slot;

    debug_called(3);

    /* mark command as busy so that polling consumer can tell */
    sc = ac->ac_sc;
    ac->ac_flags |= AMR_CMD_BUSY;

    /* get a command slot (freed in amr_done) */
    slot = ac->ac_slot;
    if (sc->amr_busycmd[slot] != NULL)
	panic("amr: slot %d busy?", slot);
    sc->amr_busycmd[slot] = ac;
    atomic_add_int(&sc->amr_busyslots, 1);

    /* Now we have a slot, we can map the command (unmapped in amr_complete). */
    if ((error = amr_mapcmd(ac)) == ENOMEM) {
	/*
	 * Memory resources are short, so free the slot and let this be tried
	 * later.
	 */
	amr_freeslot(ac);
    }

    return (error);
}

/********************************************************************************
 * Extract one or more completed commands from the controller (sc)
 *
 * Returns nonzero if any commands on the work queue were marked as completed.
 */

int
amr_done(struct amr_softc *sc)
{
    ac_qhead_t		head;
    struct amr_command	*ac;
    struct amr_mailbox	mbox;
    int			i, idx, result;

    debug_called(3);

    /* See if there's anything for us to do */
    result = 0;
    amr_init_qhead(&head);

    /* loop collecting completed commands */
    for (;;) {
	/* poll for a completed command's identifier and status */
	if (sc->amr_get_work(sc, &mbox)) {
	    result = 1;

	    /* iterate over completed commands in this result */
	    for (i = 0; i < mbox.mb_nstatus; i++) {
		/* get pointer to busy command */
		idx = mbox.mb_completed[i] - 1;
		ac = sc->amr_busycmd[idx];

		/* really a busy command? */
		if (ac != NULL) {

		    /* pull the command from the busy index */
		    amr_freeslot(ac);

		    /* save status for later use */
		    ac->ac_status = mbox.mb_status;
		    amr_enqueue_completed(ac, &head);
		    debug(3, "completed command with status %x", mbox.mb_status);
		} else {
		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
		}
	    }
	} else
	    break;	/* no work */
    }

    /* handle completion and timeouts */
    amr_complete(sc, &head);

    return(result);
}

/********************************************************************************
 * Do completion processing on done commands on (sc)
 */

static void
amr_complete(void *context, ac_qhead_t *head)
{
    struct amr_softc	*sc = (struct amr_softc *)context;
    struct amr_command	*ac;

    debug_called(3);

    /* pull completed commands off the queue */
    for (;;) {
	ac = amr_dequeue_completed(sc, head);
	if (ac == NULL)
	    break;

	/* unmap the command's data buffer */
	amr_unmapcmd(ac);

	/*
	 * Is there a completion handler?
	 */
	if (ac->ac_complete != NULL) {
	    /* unbusy the command */
	    ac->ac_flags &= ~AMR_CMD_BUSY;
	    ac->ac_complete(ac);

	    /*
	     * Is someone sleeping on this one?
	     */
	} else {
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    ac->ac_flags &= ~AMR_CMD_BUSY;
	    if (ac->ac_flags & AMR_CMD_SLEEP) {
		/* unbusy the command */
		wakeup(ac);
	    }
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	}

	if (!sc->amr_busyslots) {
	    wakeup(sc);
	}
    }

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
}

/********************************************************************************
 ********************************************************************************
                                                       Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Get a new command buffer.
 *
 * This may return NULL in low-memory cases.
 *
 * If possible, we recycle a command buffer that's been used before.
/********************************************************************************
 ********************************************************************************
                                                      Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Get a new command buffer.
 *
 * This may return NULL in low-memory cases.
 *
 * If possible, we recycle a command buffer that's been used before.
 */
struct amr_command *
amr_alloccmd(struct amr_softc *sc)
{
    struct amr_command *ac;

    debug_called(3);

    ac = amr_dequeue_free(sc);
    if (ac == NULL) {
        sc->amr_state |= AMR_STATE_QUEUE_FRZN;
        return(NULL);
    }

    /* clear out significant fields */
    ac->ac_status = 0;
    bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
    ac->ac_flags = 0;
    ac->ac_bio = NULL;
    ac->ac_data = NULL;
    ac->ac_complete = NULL;
    ac->ac_retries = 0;
    ac->ac_tag = NULL;
    ac->ac_datamap = NULL;
    return(ac);
}

/********************************************************************************
 * Release a command buffer for recycling.
 */
void
amr_releasecmd(struct amr_command *ac)
{
    debug_called(3);

    amr_enqueue_free(ac);
}
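#if 0
/*
 * Illustrative sketch (not compiled in) of the allocate/build/poll/release
 * cycle that amr_alloccmd()/amr_releasecmd() support; amr_dump_blocks() at
 * the end of this file follows the same shape.  The AMR_CMD_FLUSH opcode is
 * assumed to come from amrreg.h and is used here purely as an example.
 */
static int
amr_example_flush(struct amr_softc *sc)
{
    struct amr_command *ac;
    int error;

    if ((ac = amr_alloccmd(sc)) == NULL)        /* may fail when memory is short */
        return (ENOMEM);
    ac->ac_flags |= AMR_CMD_PRIORITY;           /* no data phase for a flush */
    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
    if (sc->amr_poll_command(ac))               /* polled, as in amr_dump_blocks() */
        error = EIO;
    else
        error = ac->ac_status;
    amr_releasecmd(ac);                         /* always recycle the buffer */
    return (error);
}
#endif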
/********************************************************************************
 * Allocate a new command cluster and initialise it.
 */
static void
amr_alloccmd_cluster(struct amr_softc *sc)
{
    struct amr_command_cluster *acc;
    struct amr_command *ac;
    int i, nextslot;

    /*
     * If we haven't found the real limit yet, let us have a couple of
     * commands in order to be able to probe.
     */
    if (sc->amr_maxio == 0)
        sc->amr_maxio = 2;

    if (sc->amr_nextslot > sc->amr_maxio)
        return;
    acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
    if (acc != NULL) {
        nextslot = sc->amr_nextslot;
        lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
        TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
        lockmgr(&sc->amr_list_lock, LK_RELEASE);
        for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
            ac = &acc->acc_command[i];
            ac->ac_sc = sc;
            ac->ac_slot = nextslot;

            /*
             * The SG table for each slot is a fixed size and is assumed
             * to hold 64-bit s/g objects when the driver is configured to do
             * 64-bit DMA.  32-bit DMA commands still use the same table, but
             * cast down to 32-bit objects.
             */
            if (AMR_IS_SG64(sc)) {
                ac->ac_sgbusaddr = sc->amr_sgbusaddr +
                    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
                ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
            } else {
                ac->ac_sgbusaddr = sc->amr_sgbusaddr +
                    (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
                ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
            }

            ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
            ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
                (ac->ac_slot * sizeof(union amr_ccb));

            if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
                break;
            if (AMR_IS_SG64(sc) &&
                (bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_dma64map)))
                break;
            amr_releasecmd(ac);
            if (++nextslot > sc->amr_maxio)
                break;
        }
        sc->amr_nextslot = nextslot;
    }
}
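/*
 * Worked example of the per-slot carving done in amr_alloccmd_cluster()
 * above (numbers are illustrative only; AMR_NSEG and the entry sizes come
 * from the headers): every slot owns AMR_NSEG consecutive entries of the
 * shared s/g table, so slot N starts N * AMR_NSEG * sizeof(entry) bytes
 * into it.  With, say, AMR_NSEG == 32 and 8-byte 32-bit entries, slot 2's
 * segment list would begin 512 bytes past amr_sgbusaddr.  The per-command
 * CCB uses the same slot-based stride (slot * sizeof(union amr_ccb)).
 */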
/********************************************************************************
 * Free a command cluster
 */
static void
amr_freecmd_cluster(struct amr_command_cluster *acc)
{
    struct amr_softc *sc = acc->acc_command[0].ac_sc;
    int i;

    for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
        if (acc->acc_command[i].ac_sc == NULL)
            break;
        bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
        if (AMR_IS_SG64(sc))
            bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
    }
    kfree(acc, M_AMR);
}

/********************************************************************************
 ********************************************************************************
                                                       Interface-specific Shims
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Tell the controller that the mailbox contains a valid command
 */
static int
amr_quartz_submit_command(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    static struct timeval lastfail;
    static int curfail;
    int i = 0;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
        DELAY(1);
        /* This is a no-op read that flushes pending mailbox updates */
        AMR_QGET_ODB(sc);
    }
    if (sc->amr_mailbox->mb_busy) {
        lockmgr(&sc->amr_hw_lock, LK_RELEASE);
        if (ac->ac_retries++ > 1000) {
            if (ppsratecheck(&lastfail, &curfail, 1))
                device_printf(sc->amr_dev, "Too many retries on command %p. "
                              "Controller is likely dead\n", ac);
            ac->ac_retries = 0;
        }
        return (EBUSY);
    }

    /*
     * Save the slot number so that we can locate this command when complete.
     * Note that ident = 0 seems to be special, so we don't use it.
     */
    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;  /* will be copied into mbox */
    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
    sc->amr_mailbox->mb_busy = 1;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;
    sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
    sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;

    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
    return(0);
}

static int
amr_std_submit_command(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    static struct timeval lastfail;
    static int curfail;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
        lockmgr(&sc->amr_hw_lock, LK_RELEASE);
        if (ac->ac_retries++ > 1000) {
            if (ppsratecheck(&lastfail, &curfail, 1))
                device_printf(sc->amr_dev, "Too many retries on command %p. "
                              "Controller is likely dead\n", ac);
            ac->ac_retries = 0;
        }
        return (EBUSY);
    }

    /*
     * Save the slot number so that we can locate this command when complete.
     * Note that ident = 0 seems to be special, so we don't use it.
     */
    ac->ac_mailbox.mb_ident = ac->ac_slot + 1;  /* will be copied into mbox */
    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
    sc->amr_mailbox->mb_busy = 1;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;

    AMR_SPOST_COMMAND(sc);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
    return(0);
}
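/*
 * Both submit paths above follow the same mailbox hand-off: the first 14
 * bytes of the driver's mailbox copy (the command proper) are copied into
 * the controller-visible mailbox, mb_busy is set and the poll/ack bytes
 * cleared, and only then is the command posted (doorbell write on Quartz,
 * AMR_SPOST_COMMAND on standard controllers).  The rest of the mailbox is
 * left alone since the firmware writes completion status back into it; see
 * amr_quartz_get_work() below.
 */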
/********************************************************************************
 * Claim any work that the controller has completed; acknowledge completion,
 * save details of the completion in (mbsave)
 */
static int
amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
{
    int worked, i;
    u_int32_t outd;
    u_int8_t nstatus;
    u_int8_t completed[46];

    debug_called(3);

    worked = 0;

    /* work waiting for us? */
    if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {

        /* acknowledge interrupt */
        AMR_QPUT_ODB(sc, AMR_QODB_READY);

        while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
            DELAY(1);
        sc->amr_mailbox->mb_nstatus = 0xff;

        /* wait until fw wrote out all completions */
        for (i = 0; i < nstatus; i++) {
            while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
                DELAY(1);
            sc->amr_mailbox->mb_completed[i] = 0xff;
        }

        /* Save information for later processing */
        mbsave->mb_nstatus = nstatus;
        mbsave->mb_status = sc->amr_mailbox->mb_status;
        sc->amr_mailbox->mb_status = 0xff;

        for (i = 0; i < nstatus; i++)
            mbsave->mb_completed[i] = completed[i];

        /* acknowledge that we have the commands */
        AMR_QPUT_IDB(sc, AMR_QIDB_ACK);

#if 0
#ifndef AMR_QUARTZ_GOFASTER
        /*
         * This waits for the controller to notice that we've taken the
         * command from it.  It's very inefficient, and we shouldn't do it,
         * but if we remove this code, we stop completing commands under
         * load.
         *
         * Peter J says we shouldn't do this.  The documentation says we
         * should.  Who is right?
         */
        while (AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
            ;           /* XXX aiee!  what if it dies? */
#endif
#endif

        worked = 1;     /* got some work */
    }

    return(worked);
}

static int
amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
{
    int worked;
    u_int8_t istat;

    debug_called(3);

    worked = 0;

    /* check for valid interrupt status */
    istat = AMR_SGET_ISTAT(sc);
    if ((istat & AMR_SINTR_VALID) != 0) {
        AMR_SPUT_ISTAT(sc, istat);      /* ack interrupt status */

        /* save mailbox, which contains a list of completed commands */
        bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));

        AMR_SACK_INTERRUPT(sc);         /* acknowledge we have the mailbox */
        worked = 1;
    }

    return(worked);
}

/********************************************************************************
 * Notify the controller of the mailbox location.
 */
static void
amr_std_attach_mailbox(struct amr_softc *sc)
{

    /* program the mailbox physical address */
    AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
    AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);

    /* clear any outstanding interrupt and enable interrupts proper */
    AMR_SACK_INTERRUPT(sc);
    AMR_SENABLE_INTR(sc);
}
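/*
 * The mailbox physical address is handed to 'standard' class controllers a
 * byte at a time, least significant byte first.  For example (address
 * chosen purely for illustration), a mailbox at physical address 0x12345678
 * would be programmed as SMBOX_0 = 0x78, SMBOX_1 = 0x56, SMBOX_2 = 0x34 and
 * SMBOX_3 = 0x12 before AMR_SMBOX_ADDR is written to the enable register.
 */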
#ifdef AMR_BOARD_INIT
/********************************************************************************
 * Initialise the controller
 */
static int
amr_quartz_init(struct amr_softc *sc)
{
    int status, ostatus;

    device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));

    AMR_QRESET(sc);

    ostatus = 0xff;
    while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
        if (status != ostatus) {
            device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
            ostatus = status;
        }
        switch (status) {
        case AMR_QINIT_NOMEM:
            return(ENOMEM);

        case AMR_QINIT_SCAN:
            /* XXX we could print channel/target here */
            break;
        }
    }
    return(0);
}

static int
amr_std_init(struct amr_softc *sc)
{
    int status, ostatus;

    device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));

    AMR_SRESET(sc);

    ostatus = 0xff;
    while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
        if (status != ostatus) {
            device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
            ostatus = status;
        }
        switch (status) {
        case AMR_SINIT_NOMEM:
            return(ENOMEM);

        case AMR_SINIT_INPROG:
            /* XXX we could print channel/target here? */
            break;
        }
    }
    return(0);
}
#endif
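/*
 * XXX note that both init routines above busy-wait on the INITSTATUS
 * register with no delay or timeout; a board that never reaches the DONE
 * state would hang the probe here.
 */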
/********************************************************************************
 ********************************************************************************
                                                                      Debugging
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Identify the controller and print some information about it.
 */
static void
amr_describe_controller(struct amr_softc *sc)
{
    struct amr_prodinfo *ap;
    struct amr_enquiry *ae;
    char *prod;
    int status;

    /*
     * Try to get 40LD product info, which tells us what the card is labelled as.
     */
    if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
        device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
                      ap->ap_product, ap->ap_firmware, ap->ap_bios,
                      ap->ap_memsize);

        kfree(ap, M_AMR);
        return;
    }

    /*
     * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
     */
    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
        prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);

    } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {

        /*
         * Try to work it out based on the PCI signatures.
         */
        switch (pci_get_device(sc->amr_dev)) {
        case 0x9010:
            prod = "Series 428";
            break;
        case 0x9060:
            prod = "Series 434";
            break;
        default:
            prod = "unknown controller";
            break;
        }
    } else {
        device_printf(sc->amr_dev, "<unsupported controller>\n");
        return;
    }

    /*
     * HP NetRaid controllers have a special encoding of the firmware and
     * BIOS versions.  The AMI version seems to have it as strings whereas
     * the HP version does it with a leading uppercase character and two
     * binary numbers.
     */
    if (ae->ae_adapter.aa_firmware[2] >= 'A' &&
        ae->ae_adapter.aa_firmware[2] <= 'Z' &&
        ae->ae_adapter.aa_firmware[1] < ' ' &&
        ae->ae_adapter.aa_firmware[0] < ' ' &&
        ae->ae_adapter.aa_bios[2] >= 'A' &&
        ae->ae_adapter.aa_bios[2] <= 'Z' &&
        ae->ae_adapter.aa_bios[1] < ' ' &&
        ae->ae_adapter.aa_bios[0] < ' ') {

        /* this looks like we have an HP NetRaid version of the MegaRaid */

        if (ae->ae_signature == AMR_SIG_438) {
            /* the AMI 438 is a NetRaid 3si in HP-land */
            prod = "HP NetRaid 3si";
        }

        device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
                      prod, ae->ae_adapter.aa_firmware[2],
                      ae->ae_adapter.aa_firmware[1],
                      ae->ae_adapter.aa_firmware[0],
                      ae->ae_adapter.aa_bios[2],
                      ae->ae_adapter.aa_bios[1],
                      ae->ae_adapter.aa_bios[0],
                      ae->ae_adapter.aa_memorysize);
    } else {
        device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
                      prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
                      ae->ae_adapter.aa_memorysize);
    }
    kfree(ae, M_AMR);
}

int
amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
{
    struct amr_command *ac;
    int error = EIO;

    debug_called(1);

    sc->amr_state |= AMR_STATE_INTEN;

    /* get ourselves a command buffer */
    if ((ac = amr_alloccmd(sc)) == NULL)
        goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* point the command at our data */
    ac->ac_data = data;
    ac->ac_length = blks * AMR_BLKSIZE;

    /* build the command proper */
    ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
    ac->ac_mailbox.mb_blkcount = blks;
    ac->ac_mailbox.mb_lba = lba;
    ac->ac_mailbox.mb_drive = unit;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
        goto out;
    error = ac->ac_status;

out:
    if (ac != NULL)
        amr_releasecmd(ac);

    sc->amr_state &= ~AMR_STATE_INTEN;
    return (error);
}

#ifdef AMR_DEBUG
/********************************************************************************
 * Print the command (ac) in human-readable format
 */
#if 0
static void
amr_printcommand(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    struct amr_sgentry *sg;
    int i;

    device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
                  ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
    device_printf(sc->amr_dev, "blkcount %d lba %d\n",
                  ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
    device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
    device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
                  ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
    device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);

    /* get base address of s/g table */
    sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
    for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
        device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);
}
#endif
#endif
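#if 0
/*
 * Illustrative sketch (not compiled in) of how a disk-level crash dump
 * routine might drive amr_dump_blocks() above.  The function name and
 * parameters are assumptions made for the example; the real consumer is
 * expected to be the amrd disk front-end, which supplies the logical drive
 * number and a physically contiguous buffer.
 */
static int
amrd_dump_example(struct amr_softc *sc, int unit, void *virtual,
                  off_t offset, size_t length)
{
    u_int32_t lba = offset / AMR_BLKSIZE;
    int blks = length / AMR_BLKSIZE;

    /* amr_dump_blocks() polls the controller, so no interrupts are needed */
    return (amr_dump_blocks(sc, unit, lba, virtual, blks));
}
#endif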