/*-
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2005 Scott Long
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002, 2004 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/amr/amr.c,v 1.99 2012/08/31 09:42:46 scottl Exp $
 */

/*
 * Driver for the AMI MegaRaid family of controllers.
 */

#include "opt_amr.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/sysmsg.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <machine/cpu.h>
#include <sys/rman.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <dev/raid/amr/amrio.h>
#include <dev/raid/amr/amrreg.h>
#include <dev/raid/amr/amrvar.h>
#define AMR_DEFINE_TABLES
#include <dev/raid/amr/amr_tables.h>

SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");

static d_open_t		amr_open;
static d_close_t	amr_close;
static d_ioctl_t	amr_ioctl;

static struct dev_ops amr_ops = {
    { "amr", 0, 0 },
    .d_open =	amr_open,
    .d_close =	amr_close,
    .d_ioctl =	amr_ioctl,
};

int linux_no_adapter = 0;
/*
 * Initialisation, bus interface.
 */
static void	amr_startup(void *arg);

/*
 * Command wrappers
 */
static int	amr_query_controller(struct amr_softc *sc);
static void	*amr_enquiry(struct amr_softc *sc, size_t bufsize,
		    u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
static void	amr_completeio(struct amr_command *ac);
static int	amr_support_ext_cdb(struct amr_softc *sc);

/*
 * Command buffer allocation.
 */
static void	amr_alloccmd_cluster(struct amr_softc *sc);
static void	amr_freecmd_cluster(struct amr_command_cluster *acc);

/*
 * Command processing.
 */
static int	amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
static int	amr_wait_command(struct amr_command *ac);
static int	amr_mapcmd(struct amr_command *ac);
static void	amr_unmapcmd(struct amr_command *ac);
static int	amr_start(struct amr_command *ac);
static void	amr_complete(void *context, ac_qhead_t *head);
static void	amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
static void	amr_abort_load(struct amr_command *ac);

/*
 * Interface-specific shims
 */
static int	amr_quartz_submit_command(struct amr_command *ac);
static int	amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_quartz_poll_command(struct amr_command *ac);
static int	amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);

static int	amr_std_submit_command(struct amr_command *ac);
static int	amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
static int	amr_std_poll_command(struct amr_command *ac);
static void	amr_std_attach_mailbox(struct amr_softc *sc);

#ifdef AMR_BOARD_INIT
static int	amr_quartz_init(struct amr_softc *sc);
static int	amr_std_init(struct amr_softc *sc);
#endif

/*
 * Debugging
 */
static void	amr_describe_controller(struct amr_softc *sc);
#ifdef AMR_DEBUG
#if 0
static void	amr_printcommand(struct amr_command *ac);
#endif
#endif

static void	amr_init_sysctl(struct amr_softc *sc);
static int	amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
		    int32_t flag, struct sysmsg *sm);

static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");

/********************************************************************************
 ********************************************************************************
                                                                     Inline Glue
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 ********************************************************************************
                                                                Public Interfaces
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Initialise the controller and softc.
 */
int
amr_attach(struct amr_softc *sc)
{
    device_t child;

    debug_called(1);

    /*
     * Initialise per-controller queues.
     */
    amr_init_qhead(&sc->amr_freecmds);
    amr_init_qhead(&sc->amr_ready);
    TAILQ_INIT(&sc->amr_cmd_clusters);
    bioq_init(&sc->amr_bioq);

    debug(2, "queue init done");

    /*
     * Configure for this controller type.
     */
    if (AMR_IS_QUARTZ(sc)) {
	sc->amr_submit_command = amr_quartz_submit_command;
	sc->amr_get_work = amr_quartz_get_work;
	sc->amr_poll_command = amr_quartz_poll_command;
	sc->amr_poll_command1 = amr_quartz_poll_command1;
    } else {
	sc->amr_submit_command = amr_std_submit_command;
	sc->amr_get_work = amr_std_get_work;
	sc->amr_poll_command = amr_std_poll_command;
	amr_std_attach_mailbox(sc);
    }

#ifdef AMR_BOARD_INIT
    if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
	return(ENXIO);
#endif

    /*
     * Allocate initial commands.
     */
    amr_alloccmd_cluster(sc);

    /*
     * Quiz controller for features and limits.
     */
    if (amr_query_controller(sc))
	return(ENXIO);

    debug(2, "controller query complete");

    /*
     * Preallocate the remaining commands.
     */
    while (sc->amr_nextslot < sc->amr_maxio)
	amr_alloccmd_cluster(sc);

    /*
     * Setup sysctls.
     */
    sysctl_ctx_init(&sc->amr_sysctl_ctx);
    sc->amr_sysctl_tree = SYSCTL_ADD_NODE(&sc->amr_sysctl_ctx,
	SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	device_get_nameunit(sc->amr_dev), CTLFLAG_RD, 0, "");
    if (sc->amr_sysctl_tree == NULL) {
	device_printf(sc->amr_dev, "can't add sysctl node\n");
	return (EINVAL);
    }
    amr_init_sysctl(sc);

    /*
     * Attach our 'real' SCSI channels to CAM.
     */
    child = device_add_child(sc->amr_dev, "amrp", -1);
    sc->amr_pass = child;
    if (child != NULL) {
	device_set_softc(child, sc);
	device_set_desc(child, "SCSI Passthrough Bus");
	bus_generic_attach(sc->amr_dev);
    }

    /*
     * Create the control device.
     */
    sc->amr_dev_t = make_dev(&amr_ops, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
			     S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
    sc->amr_dev_t->si_drv1 = sc;
    linux_no_adapter++;
    if (device_get_unit(sc->amr_dev) == 0)
	make_dev_alias(sc->amr_dev_t, "megadev0");

    /*
     * Schedule ourselves to bring the controller up once interrupts are
     * available.
     */
    bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
    sc->amr_ich.ich_func = amr_startup;
    sc->amr_ich.ich_arg = sc;
    sc->amr_ich.ich_desc = "amr";
    if (config_intrhook_establish(&sc->amr_ich) != 0) {
	device_printf(sc->amr_dev, "can't establish configuration hook\n");
	return(ENOMEM);
    }

    /*
     * Print a little information about the controller.
     */
    amr_describe_controller(sc);

    debug(2, "attach complete");
    return(0);
}

/********************************************************************************
 * Locate disk resources and attach children to them.
 */
static void
amr_startup(void *arg)
{
    struct amr_softc *sc = (struct amr_softc *)arg;
    struct amr_logdrive *dr;
    int i, error;

    debug_called(1);

    /* pull ourselves off the intrhook chain */
    if (sc->amr_ich.ich_func)
	config_intrhook_disestablish(&sc->amr_ich);
    sc->amr_ich.ich_func = NULL;

    /* get up-to-date drive information */
    if (amr_query_controller(sc)) {
	device_printf(sc->amr_dev, "can't scan controller for drives\n");
	return;
    }

    /* iterate over available drives */
    for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
	/* are we already attached to this drive? */
	if (dr->al_disk == 0) {
	    /* generate geometry information */
	    if (dr->al_size > 0x200000) {	/* extended translation? */
		dr->al_heads = 255;
		dr->al_sectors = 63;
	    } else {
		dr->al_heads = 64;
		dr->al_sectors = 32;
	    }
	    dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);

	    dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
	    if (dr->al_disk == 0)
		device_printf(sc->amr_dev, "device_add_child failed\n");
	    device_set_ivars(dr->al_disk, dr);
	}
    }

    if ((error = bus_generic_attach(sc->amr_dev)) != 0)
	device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);

    /* mark controller back up */
    sc->amr_state &= ~AMR_STATE_SHUTDOWN;

    /* interrupts will be enabled before we do anything more */
    sc->amr_state |= AMR_STATE_INTEN;

    return;
}

static void
amr_init_sysctl(struct amr_softc *sc)
{

    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
	"");
    SYSCTL_ADD_INT(&sc->amr_sysctl_ctx,
	SYSCTL_CHILDREN(sc->amr_sysctl_tree),
	OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
	"");
}

/*******************************************************************************
 * Free resources associated with a controller instance
 */
void
amr_free(struct amr_softc *sc)
{
    struct amr_command_cluster *acc;

    /* detach from CAM */
    if (sc->amr_pass != NULL)
	device_delete_child(sc->amr_dev, sc->amr_pass);

    /* throw away any command buffers */
    while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
	TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
	amr_freecmd_cluster(acc);
    }

    /* destroy control device */
    if (sc->amr_dev_t != NULL)
	destroy_dev(sc->amr_dev_t);
    dev_ops_remove_minor(&amr_ops, device_get_unit(sc->amr_dev));

#if 0 /* XXX swildner */
    if (mtx_initialized(&sc->amr_hw_lock))
	mtx_destroy(&sc->amr_hw_lock);

    if (mtx_initialized(&sc->amr_list_lock))
	mtx_destroy(&sc->amr_list_lock);
#endif

    if (sc->amr_sysctl_tree != NULL)
	sysctl_ctx_free(&sc->amr_sysctl_ctx);

    lockuninit(&sc->amr_hw_lock);
    lockuninit(&sc->amr_list_lock);
}

/*******************************************************************************
 * Receive a bio structure from a child device and queue it on a particular
 * disk resource, then poke the disk resource to start as much work as it can.
 */
int
amr_submit_bio(struct amr_softc *sc, struct bio *bio)
{
    debug_called(2);

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    amr_enqueue_bio(sc, bio);
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(0);
}

/********************************************************************************
 * Accept an open operation on the control device.
 */
static int
amr_open(struct dev_open_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    int unit = minor(dev);
    struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state |= AMR_STATE_OPEN;
    return(0);
}

/********************************************************************************
 * Accept the last close on the control device.
 */
static int
amr_close(struct dev_close_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    int unit = minor(dev);
    struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);

    debug_called(1);

    sc->amr_state &= ~AMR_STATE_OPEN;
    return (0);
}

/********************************************************************************
 * Handle controller-specific control operations.
 */
static void
amr_rescan_drives(struct cdev *dev)
{
    struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
    int i, error = 0;

    sc->amr_state |= AMR_STATE_REMAP_LD;
    while (sc->amr_busyslots) {
	device_printf(sc->amr_dev, "idle controller\n");
	amr_done(sc);
    }

    /* mark ourselves as in-shutdown */
    sc->amr_state |= AMR_STATE_SHUTDOWN;

    /* flush controller */
    device_printf(sc->amr_dev, "flushing cache...");
    kprintf("%s\n", amr_flush(sc) ? "failed" : "done");

    /* delete all our child devices */
    for (i = 0; i < AMR_MAXLD; i++) {
	if (sc->amr_drive[i].al_disk != 0) {
	    if ((error = device_delete_child(sc->amr_dev,
		sc->amr_drive[i].al_disk)) != 0)
		goto shutdown_out;

	    sc->amr_drive[i].al_disk = 0;
	}
    }

shutdown_out:
    amr_startup(sc);
}

/*
 * Bug-for-bug compatibility with Linux!
 * Some apps will send commands with inlen and outlen set to 0,
 * even though they expect data to be transferred to them from the
 * card.  Linux accidentally allows this by allocating a 4KB
 * buffer for the transfer anyway, but it then throws it away
 * without copying it back to the app.
 *
 * The amr(4) firmware relies on this feature.  In fact, it assumes
 * the buffer is always a power of 2 up to a max of 64k.  There is
 * also at least one case where it assumes a buffer less than 16k is
 * greater than 16k.  Force a minimum buffer size of 32k and round
 * sizes between 32k and 64k up to 64k as a workaround.
 */
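/*
 * Concretely (matching the helper below): any request of 32k bytes or less
 * gets a 32k buffer, requests between 32k and 64k are rounded up to 64k,
 * and anything larger is used unchanged.
 */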
static unsigned long
amr_ioctl_buffer_length(unsigned long len)
{

    if (len <= 32 * 1024)
	return (32 * 1024);
    if (len <= 64 * 1024)
	return (64 * 1024);
    return (len);
}

int
amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
    struct sysmsg *sm)
{
    struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
    struct amr_command *ac;
    struct amr_mailbox *mb;
    struct amr_linux_ioctl ali;
    void *dp, *temp;
    int error;
    int adapter, len, ac_flags = 0;
    int logical_drives_changed = 0;
    u_int32_t linux_version = 0x02100000;
    u_int8_t status;
    struct amr_passthrough *ap;	/* 60 bytes */

    error = 0;
    dp = NULL;
    ac = NULL;
    ap = NULL;

    if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
	return (error);
    switch (ali.ui.fcs.opcode) {
    case 0x82:
	switch (ali.ui.fcs.subopcode) {
	case 'e':
	    copyout(&linux_version, (void *)(uintptr_t)ali.data,
		sizeof(linux_version));
	    error = 0;
	    break;

	case 'm':
	    copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
		sizeof(linux_no_adapter));
	    sm->sm_result.iresult = linux_no_adapter;
	    error = 0;
	    break;

	default:
	    kprintf("Unknown subopcode\n");
	    error = ENOIOCTL;
	    break;
	}
	break;

    case 0x80:
    case 0x81:
	if (ali.ui.fcs.opcode == 0x80)
	    len = max(ali.outlen, ali.inlen);
	else
	    len = ali.ui.fcs.length;

	adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	mb = (void *)&ali.mbox[0];

	if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) ||	/* delete */
	    (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) {		/* create */
	    if (sc->amr_allow_vol_config == 0) {
		error = EPERM;
		break;
	    }
	    logical_drives_changed = 1;
	}

	if (ali.mbox[0] == AMR_CMD_PASS) {
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    ap = &ac->ac_ccb->ccb_pthru;

	    error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
		sizeof(struct amr_passthrough));
	    if (error)
		break;

	    if (ap->ap_data_transfer_length)
		dp = kmalloc(ap->ap_data_transfer_length, M_AMR,
		    M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
		    dp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	    ac->ac_flags = ac_flags;

	    ac->ac_data = dp;
	    ac->ac_length = ap->ap_data_transfer_length;
	    temp = (void *)(uintptr_t)ap->ap_data_transfer_address;

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status,
		&((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status,
		sizeof(status));
	    if (error)
		break;

	    if (ali.outlen) {
		error = copyout(dp, temp, ap->ap_data_transfer_length);
		if (error)
		    break;
	    }
	    error = copyout(ap->ap_request_sense_area,
		((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area,
		ap->ap_request_sense_length);
	    if (error)
		break;

	    error = 0;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_PASS_64) {
	    kprintf("No AMR_CMD_PASS_64\n");
	    error = ENOIOCTL;
	    break;
	} else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
	    kprintf("No AMR_CMD_EXTPASS\n");
	    error = ENOIOCTL;
	    break;
	} else {
	    len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));

	    dp = kmalloc(len, M_AMR, M_WAITOK | M_ZERO);

	    if (ali.inlen) {
		error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
		if (error)
		    break;
	    }

	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    while ((ac = amr_alloccmd(sc)) == NULL)
		lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

	    ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
	    bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
	    bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));

	    ac->ac_length = len;
	    ac->ac_data = dp;
	    ac->ac_flags = ac_flags;

	    error = amr_wait_command(ac);
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	    if (error)
		break;

	    status = ac->ac_status;
	    error = copyout(&status,
		&((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status,
		sizeof(status));
	    if (ali.outlen) {
		error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
		if (error)
		    break;
	    }

	    error = 0;
	    if (logical_drives_changed)
		amr_rescan_drives(dev);
	    break;
	}
	break;

    default:
	debug(1, "unknown linux ioctl 0x%lx", cmd);
	kprintf("unknown linux ioctl 0x%lx\n", cmd);
	error = ENOIOCTL;
	break;
    }

    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);
    return(error);
}

static int
amr_ioctl(struct dev_ioctl_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    caddr_t addr = ap->a_data;
    u_long cmd = ap->a_cmd;
    struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
    union {
	void *_p;
	struct amr_user_ioctl *au;
#ifdef AMR_IO_COMMAND32
	struct amr_user_ioctl32 *au32;
#endif
	int *result;
    } arg;
    struct amr_command *ac;
    struct amr_mailbox_ioctl *mbi;
    void *dp, *au_buffer;
    unsigned long au_length, real_length;
    unsigned char *au_cmd;
    int *au_statusp, au_direction;
    int error;
    struct amr_passthrough *_ap;	/* 60 bytes */
    int logical_drives_changed = 0;

    debug_called(1);

    arg._p = (void *)addr;

    error = 0;
    dp = NULL;
    ac = NULL;
    _ap = NULL;

    switch (cmd) {

    case AMR_IO_VERSION:
	debug(1, "AMR_IO_VERSION");
	*arg.result = AMR_IO_VERSION_NUMBER;
	return(0);

#ifdef AMR_IO_COMMAND32
    /*
     * Accept ioctl-s from 32-bit binaries on non-32-bit
     * platforms, such as AMD. LSI's MEGAMGR utility is
     * the only example known today... -mi
     */
    case AMR_IO_COMMAND32:
	debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
	au_cmd = arg.au32->au_cmd;
	au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
	au_length = arg.au32->au_length;
	au_direction = arg.au32->au_direction;
	au_statusp = &arg.au32->au_status;
	break;
#endif

    case AMR_IO_COMMAND:
	debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
	au_cmd = arg.au->au_cmd;
	au_buffer = (void *)arg.au->au_buffer;
	au_length = arg.au->au_length;
	au_direction = arg.au->au_direction;
	au_statusp = &arg.au->au_status;
	break;

    case 0xc0046d00:
    case 0xc06e6d00:	/* Linux emulation */
	{
	    devclass_t devclass;
	    struct amr_linux_ioctl ali;
	    int adapter, error;

	    devclass = devclass_find("amr");
	    if (devclass == NULL)
		return (ENOENT);

	    error = copyin(addr, &ali, sizeof(ali));
	    if (error)
		return (error);
	    if (ali.ui.fcs.opcode == 0x82)
		adapter = 0;
	    else
		adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;

	    sc = devclass_get_softc(devclass, adapter);
	    if (sc == NULL)
		return (ENOENT);

	    return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, ap->a_sysmsg));
	}
    default:
	debug(1, "unknown ioctl 0x%lx", cmd);
	return(ENOIOCTL);
    }

    if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) ||	/* delete */
	(au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) {		/* create */
	if (sc->amr_allow_vol_config == 0) {
	    error = EPERM;
	    goto out;
	}
	logical_drives_changed = 1;
    }

    /* handle inbound data buffer */
    real_length = amr_ioctl_buffer_length(au_length);
    if (au_length != 0 && au_cmd[0] != 0x06) {
	if ((dp = kmalloc(real_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
	    error = ENOMEM;
	    goto out;
	}
	if ((error = copyin(au_buffer, dp, au_length)) != 0) {
	    kfree(dp, M_AMR);
	    return (error);
	}
	debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
    }

    /* Allocate this now before the mutex gets held */

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    while ((ac = amr_alloccmd(sc)) == NULL)
	lksleep(sc, &sc->amr_list_lock, 0, "amrioc", hz);

    /* handle SCSI passthrough command */
    if (au_cmd[0] == AMR_CMD_PASS) {
	int len;

	_ap = &ac->ac_ccb->ccb_pthru;
	bzero(_ap, sizeof(struct amr_passthrough));

	/* copy cdb */
	len = au_cmd[2];
	_ap->ap_cdb_length = len;
	bcopy(au_cmd + 3, _ap->ap_cdb, len);

	/* build passthrough */
	_ap->ap_timeout = au_cmd[len + 3] & 0x07;
	_ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
	_ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
	_ap->ap_logical_drive_no = au_cmd[len + 4];
	_ap->ap_channel = au_cmd[len + 5];
	_ap->ap_scsi_id = au_cmd[len + 6];
	_ap->ap_request_sense_length = 14;
	_ap->ap_data_transfer_length = au_length;
	/* XXX what about the request-sense area? does the caller want it? */

	/* build command */
	ac->ac_mailbox.mb_command = AMR_CMD_PASS;
	ac->ac_flags = AMR_CMD_CCB;

    } else {
	/* direct command to controller */
	mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;

	/* copy pertinent mailbox items */
	mbi->mb_command = au_cmd[0];
	mbi->mb_channel = au_cmd[1];
	mbi->mb_param = au_cmd[2];
	mbi->mb_pad[0] = au_cmd[3];
	mbi->mb_drive = au_cmd[4];
	ac->ac_flags = 0;
    }

    /* build the command */
    ac->ac_data = dp;
    ac->ac_length = real_length;
    ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;

    /* run the command */
    error = amr_wait_command(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (error)
	goto out;

    /* copy out data and set status */
    if (au_length != 0) {
	error = copyout(dp, au_buffer, au_length);
    }
    debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
    if (dp != NULL)
	debug(2, "%p status 0x%x", dp, ac->ac_status);
    *au_statusp = ac->ac_status;

out:
    /*
     * At this point, we know that there is a lock held and that these
     * objects have been allocated.
     */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (dp != NULL)
	kfree(dp, M_AMR);

    if (logical_drives_changed)
	amr_rescan_drives(dev);

    return(error);
}

/********************************************************************************
 ********************************************************************************
                                                                 Command Wrappers
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softc *sc)
{
    struct amr_enquiry3 *aex;
    struct amr_prodinfo *ap;
    struct amr_enquiry *ae;
    int ldrv;
    int status;

    /*
     * Greater than 10 byte cdb support
     */
    sc->support_ext_cdb = amr_support_ext_cdb(sc);

    if (sc->support_ext_cdb) {
	debug(2, "supports extended CDBs.");
    }

    /*
     * Try to issue an ENQUIRY3 command
     */
    if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
			   AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
	    sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
	    sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
	    debug(2, " drive %d: %d state %x properties %x", ldrv, sc->amr_drive[ldrv].al_size,
		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}
	kfree(aex, M_AMR);

	/*
	 * Get product info for channel count.
	 */
	if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
	    device_printf(sc->amr_dev, "can't obtain product data from controller\n");
	    return(1);
	}
	sc->amr_maxdrives = 40;
	sc->amr_maxchan = ap->ap_nschan;
	sc->amr_maxio = ap->ap_maxio;
	sc->amr_type |= AMR_TYPE_40LD;
	kfree(ap, M_AMR);

	ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
	if (ap != NULL)
	    kfree(ap, M_AMR);
	if (!status) {
	    sc->amr_ld_del_supported = 1;
	    device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
	}
    } else {

	/* failed, try the 8LD ENQUIRY commands */
	if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
	    if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
		device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
		return(1);
	    }
	    ae->ae_signature = 0;
	}

	/*
	 * Fetch current state of logical drives.
	 */
	for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
	    sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
	    sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
	    sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
	    debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
		  sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
	}

	sc->amr_maxdrives = 8;
	sc->amr_maxchan = ae->ae_adapter.aa_channels;
	sc->amr_maxio = ae->ae_adapter.aa_maxio;
	kfree(ae, M_AMR);
    }

    /*
     * Mark remaining drives as unused.
     */
    for (; ldrv < AMR_MAXLD; ldrv++)
	sc->amr_drive[ldrv].al_size = 0xffffffff;

    /*
     * Cap the maximum number of outstanding I/Os.  AMI's Linux driver doesn't trust
     * the controller's reported value, and lockups have been seen when we do.
     */
    sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);

    return(0);
}

/********************************************************************************
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
{
    struct amr_command *ac;
    void *result;
    u_int8_t *mbox;
    int error;

    debug_called(1);

    error = 1;
    result = NULL;

    /* get ourselves a command buffer */
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* allocate the response structure */
    if ((result = kmalloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
	goto out;
    /* set command flags */

    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;

    /* point the command at our data */
    ac->ac_data = result;
    ac->ac_length = bufsize;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = cmd;
    mbox[2] = cmdsub;
    mbox[3] = cmdqual;
    *status = 0;

    /* can't assume that interrupts are going to work here, so play it safe */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;
    *status = ac->ac_status;

out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if ((error != 0) && (result != NULL)) {
	kfree(result, M_AMR);
	result = NULL;
    }
    return(result);
}

/********************************************************************************
 * Flush the controller's internal cache, return status.
 */
int
amr_flush(struct amr_softc *sc)
{
    struct amr_command *ac;
    int error;

    /* get ourselves a command buffer */
    error = 1;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;

    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    error = ac->ac_status;

out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Detect extended CDB (greater than 10-byte CDB) support.
 * Returns 1 if the controller supports it, 0 if it does not.
 */
static int
amr_support_ext_cdb(struct amr_softc *sc)
{
    struct amr_command *ac;
    u_int8_t *mbox;
    int error;

    /* get ourselves a command buffer */
    error = 0;
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    ac = amr_alloccmd(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    if (ac == NULL)
	goto out;
    /* set command flags */
    ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;

    /* build the command proper */
    mbox = (u_int8_t *)&ac->ac_mailbox;		/* XXX want a real structure for this? */
    mbox[0] = 0xA4;
    mbox[2] = 0x16;

    /* we have to poll, as the system may be going down or otherwise damaged */
    if (sc->amr_poll_command(ac))
	goto out;
    if (ac->ac_status == AMR_STATUS_SUCCESS) {
	error = 1;
    }

out:
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    if (ac != NULL)
	amr_releasecmd(ac);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    return(error);
}

/********************************************************************************
 * Try to find I/O work for the controller from one or more of the work queues.
 *
 * We make the assumption that if the controller is not ready to take a command
 * at some given time, it will generate an interrupt at some later time when
 * it is.
 */
void
amr_startio(struct amr_softc *sc)
{
    struct amr_command *ac;

    /* spin until something prevents us from doing any work */
    for (;;) {

	/* Don't bother to queue commands if no bounce buffers are available. */
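	/*
	 * AMR_STATE_QUEUE_FRZN is set by amr_alloccmd() when the free command
	 * list runs dry and by amr_mapcmd() when a DMA load is deferred; it is
	 * cleared again in amr_complete() once commands finish.
	 */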
	if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
	    break;

	/* try to get a ready command */
	ac = amr_dequeue_ready(sc);

	/* if that failed, build a command from a bio */
	if (ac == NULL)
	    (void)amr_bio_command(sc, &ac);

	/* if that failed, build a command from a ccb */
	if ((ac == NULL) && (sc->amr_cam_command != NULL))
	    sc->amr_cam_command(sc, &ac);

	/* if we don't have anything to do, give up */
	if (ac == NULL)
	    break;

	/* try to give the command to the controller; if this fails save it for later and give up */
	if (amr_start(ac)) {
	    debug(2, "controller busy, command deferred");
	    amr_requeue_ready(ac);	/* XXX schedule retry very soon? */
	    break;
	}
    }
}

/********************************************************************************
 * Handle completion of an I/O command.
 */
static void
amr_completeio(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    static struct timeval lastfail;
    static int curfail;
    struct buf *bp = ac->ac_bio->bio_buf;

    if (ac->ac_status != AMR_STATUS_SUCCESS) {	/* could be more verbose here? */
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;

	if (ppsratecheck(&lastfail, &curfail, 1))
	    device_printf(sc->amr_dev, "I/O error - 0x%x\n", ac->ac_status);
	/* amr_printcommand(ac); */
    }
    amrd_intr(ac->ac_bio);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_EXCLUSIVE);
    amr_releasecmd(ac);
    lockmgr(&ac->ac_sc->amr_list_lock, LK_RELEASE);
}

/********************************************************************************
 ********************************************************************************
                                                               Command Processing
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Convert a bio off the top of the bio queue into a command.
 */
static int
amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
{
    struct amr_command *ac;
    struct amrd_softc *amrd;
    struct bio *bio;
    struct buf *bp;
    int error;
    int blkcount;
    int driveno;
    int cmd;

    ac = NULL;
    error = 0;

    /* get a command */
    if ((ac = amr_alloccmd(sc)) == NULL)
	return (ENOMEM);

    /* get a bio to work on */
    if ((bio = amr_dequeue_bio(sc)) == NULL) {
	amr_releasecmd(ac);
	return (0);
    }

    /* connect the bio to the command */
    bp = bio->bio_buf;
    ac->ac_complete = amr_completeio;
    ac->ac_bio = bio;
    ac->ac_data = bp->b_data;
    ac->ac_length = bp->b_bcount;
    cmd = 0;
    switch (bp->b_cmd) {
    case BUF_CMD_READ:
	ac->ac_flags |= AMR_CMD_DATAIN;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LREAD64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LREAD;
	break;
    case BUF_CMD_WRITE:
	ac->ac_flags |= AMR_CMD_DATAOUT;
	if (AMR_IS_SG64(sc)) {
	    cmd = AMR_CMD_LWRITE64;
	    ac->ac_flags |= AMR_CMD_SG64;
	} else
	    cmd = AMR_CMD_LWRITE;
	break;
    case BUF_CMD_FLUSH:
	ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
	cmd = AMR_CMD_FLUSH;
	break;
    default:
	panic("Invalid bio command");
    }
    amrd = (struct amrd_softc *)bio->bio_driver_info;
    driveno = amrd->amrd_drive - sc->amr_drive;
    blkcount = (bp->b_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;

    ac->ac_mailbox.mb_command = cmd;
    if (bp->b_cmd & (BUF_CMD_READ|BUF_CMD_WRITE)) {
	ac->ac_mailbox.mb_blkcount = blkcount;
	ac->ac_mailbox.mb_lba = bio->bio_offset / AMR_BLKSIZE;
	if (((bio->bio_offset / AMR_BLKSIZE) + blkcount) > sc->amr_drive[driveno].al_size) {
	    device_printf(sc->amr_dev,
			  "I/O beyond end of unit (%lld,%d > %lu)\n",
			  (long long)(bio->bio_offset / AMR_BLKSIZE), blkcount,
			  (u_long)sc->amr_drive[driveno].al_size);
	}
    }
    ac->ac_mailbox.mb_drive = driveno;
    if (sc->amr_state & AMR_STATE_REMAP_LD)
	ac->ac_mailbox.mb_drive |= 0x80;

    /* we fill in the s/g related data when the command is mapped */

    *acp = ac;
    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and sleep until it completes
 * or fails.  Interrupts must be enabled, returns nonzero on error.
 */
static int
amr_wait_command(struct amr_command *ac)
{
    int error = 0;
    struct amr_softc *sc = ac->ac_sc;

    debug_called(1);

    ac->ac_complete = NULL;
    ac->ac_flags |= AMR_CMD_SLEEP;
    if ((error = amr_start(ac)) != 0) {
	return(error);
    }

    while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
	error = lksleep(ac, &sc->amr_list_lock, 0, "amrwcmd", 0);
    }

    return(error);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_std_poll_command(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    int error, count;

    debug_called(2);

    ac->ac_complete = NULL;
    if ((error = amr_start(ac)) != 0)
	return(error);

    count = 0;
    do {
	/*
	 * Poll for completion, although the interrupt handler may beat us to it.
	 * Note that the timeout here is somewhat arbitrary.
	 */
	amr_done(sc);
	DELAY(1000);
    } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
    if (!(ac->ac_flags & AMR_CMD_BUSY)) {
	error = 0;
    } else {
	/* XXX the slot is now marked permanently busy */
	error = EIO;
	device_printf(sc->amr_dev, "polled command timeout\n");
    }
    return(error);
}

static void
amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	ac->ac_status = AMR_STATUS_ABORTED;
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
	((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
	 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    sc->amr_poll_command1(sc, ac);
}

/********************************************************************************
 * Take a command, submit it to the controller and busy-wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
static int
amr_quartz_poll_command(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    int error;

    debug_called(2);

    error = 0;

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    /* now we have a slot, we can map the command (unmapped in amr_complete) */
    if (ac->ac_data != NULL && ac->ac_length != 0) {
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	    ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
	    error = 1;
	}
    } else {
	error = amr_quartz_poll_command1(sc, ac);
    }

    return (error);
}

static int
amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
{
    int count, error;

    lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE);
    if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
	count = 0;
	while (sc->amr_busyslots) {
	    lksleep(sc, &sc->amr_hw_lock, PCATCH, "amrpoll", hz);
	    if (count++ > 10) {
		break;
	    }
	}

	if (sc->amr_busyslots) {
	    device_printf(sc->amr_dev, "adapter is busy\n");
	    lockmgr(&sc->amr_hw_lock, LK_RELEASE);
	    if (ac->ac_data != NULL) {
		bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	    }
	    ac->ac_status = 0;
	    return(1);
	}
    }

    bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);

    /* clear the poll/ack fields in the mailbox */
    sc->amr_mailbox->mb_ident = 0xFE;
    sc->amr_mailbox->mb_nstatus = 0xFF;
    sc->amr_mailbox->mb_status = 0xFF;
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0;
    sc->amr_mailbox->mb_busy = 1;

    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);

    while (sc->amr_mailbox->mb_nstatus == 0xFF)
	DELAY(1);
    while (sc->amr_mailbox->mb_status == 0xFF)
	DELAY(1);
    ac->ac_status = sc->amr_mailbox->mb_status;
    error = (ac->ac_status != AMR_STATUS_SUCCESS) ? 1 : 0;
    while (sc->amr_mailbox->mb_poll != 0x77)
	DELAY(1);
    sc->amr_mailbox->mb_poll = 0;
    sc->amr_mailbox->mb_ack = 0x77;

    /* acknowledge that we have the commands */
    AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
    while (AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
	DELAY(1);
    lockmgr(&sc->amr_hw_lock, LK_RELEASE);

    /* unmap the command's data buffer */
    if (ac->ac_flags & AMR_CMD_DATAIN) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
    }
    if (ac->ac_flags & AMR_CMD_DATAOUT) {
	bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
    }
    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);

    return(error);
}

static __inline int
amr_freeslot(struct amr_command *ac)
{
    struct amr_softc *sc = ac->ac_sc;
    int slot;

    debug_called(3);

    slot = ac->ac_slot;
    if (sc->amr_busycmd[slot] == NULL)
	panic("amr: slot %d not busy?", slot);

    sc->amr_busycmd[slot] = NULL;
    atomic_subtract_int(&sc->amr_busyslots, 1);

    return (0);
}

/********************************************************************************
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct amr_command *ac = (struct amr_command *)arg;
    struct amr_sgentry *sg;
    struct amr_sg64entry *sg64;
    int flags, i;

    debug_called(3);

    /* get base address of s/g table */
    sg = ac->ac_sg.sg32;
    sg64 = ac->ac_sg.sg64;

    if (AC_IS_SG64(ac)) {
	ac->ac_nsegments = nsegments;
	ac->ac_mb_physaddr = 0xffffffff;
	for (i = 0; i < nsegments; i++, sg64++) {
	    sg64->sg_addr = segs[i].ds_addr;
	    sg64->sg_count = segs[i].ds_len;
	}
    } else {
	/* decide whether we need to populate the s/g table */
	if (nsegments < 2) {
	    ac->ac_nsegments = 0;
	    ac->ac_mb_physaddr = segs[0].ds_addr;
	} else {
	    ac->ac_nsegments = nsegments;
	    ac->ac_mb_physaddr = ac->ac_sgbusaddr;
	    for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = segs[i].ds_addr;
		sg->sg_count = segs[i].ds_len;
	    }
	}
    }

    flags = 0;
    if (ac->ac_flags & AMR_CMD_DATAIN)
	flags |= BUS_DMASYNC_PREREAD;
    if (ac->ac_flags & AMR_CMD_DATAOUT)
	flags |= BUS_DMASYNC_PREWRITE;
    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
    ac->ac_flags |= AMR_CMD_MAPPED;
}

static void
amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    int mb_channel;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	amr_abort_load(ac);
	return;
    }

    amr_setup_sg(arg, segs, nsegs, err);

    /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
    mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
    if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
	((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
	 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
	((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;

    ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
    ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
    if (AC_IS_SG64(ac)) {
	ac->ac_sg64_hi = 0;
	ac->ac_sg64_lo = ac->ac_sgbusaddr;
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

static void
amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
{
    struct amr_command *ac = arg;
    struct amr_softc *sc = ac->ac_sc;
    struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
    struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;

    if (err) {
	device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
	amr_abort_load(ac);
	return;
    }

    /* Set up the mailbox portion of the command to point at the ccb */
    ac->ac_mailbox.mb_nsgelem = 0;
    ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;

    amr_setup_sg(arg, segs, nsegs, err);

    switch (ac->ac_mailbox.mb_command) {
    case AMR_CMD_EXTPASS:
	aep->ap_no_sg_elements = ac->ac_nsegments;
	aep->ap_data_transfer_address = ac->ac_mb_physaddr;
	break;
    case AMR_CMD_PASS:
	ap->ap_no_sg_elements = ac->ac_nsegments;
	ap->ap_data_transfer_address = ac->ac_mb_physaddr;
	break;
    default:
	panic("Unknown ccb command");
    }

    if (sc->amr_submit_command(ac) == EBUSY) {
	amr_freeslot(ac);
	amr_requeue_ready(ac);
    }
}

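/*
 * Select the 32- or 64-bit DMA tag for the command and load its data buffer.
 * The callbacks above (amr_setup_data/amr_setup_ccb) finish the mailbox and
 * hand the command to the controller once the segment list is known; if the
 * load is deferred (EINPROGRESS) the ready queue is frozen until amr_complete()
 * thaws it.
 */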
static int
amr_mapcmd(struct amr_command *ac)
{
    bus_dmamap_callback_t *cb;
    struct amr_softc *sc = ac->ac_sc;

    debug_called(3);

    if (AC_IS_SG64(ac)) {
	ac->ac_tag = sc->amr_buffer64_dmat;
	ac->ac_datamap = ac->ac_dma64map;
    } else {
	ac->ac_tag = sc->amr_buffer_dmat;
	ac->ac_datamap = ac->ac_dmamap;
    }

    if (ac->ac_flags & AMR_CMD_CCB)
	cb = amr_setup_ccb;
    else
	cb = amr_setup_data;

    /* if the command involves data at all, and hasn't been mapped */
    if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
	/* map the data buffers into bus space and build the s/g list */
	if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
	    ac->ac_length, cb, ac, 0) == EINPROGRESS) {
	    sc->amr_state |= AMR_STATE_QUEUE_FRZN;
	}
    } else {
	if (sc->amr_submit_command(ac) == EBUSY) {
	    amr_freeslot(ac);
	    amr_requeue_ready(ac);
	}
    }

    return (0);
}

static void
amr_unmapcmd(struct amr_command *ac)
{
    int flag;

    debug_called(3);

    /* if the command involved data at all and was mapped */
    if (ac->ac_flags & AMR_CMD_MAPPED) {

	if (ac->ac_data != NULL) {

	    flag = 0;
	    if (ac->ac_flags & AMR_CMD_DATAIN)
		flag |= BUS_DMASYNC_POSTREAD;
	    if (ac->ac_flags & AMR_CMD_DATAOUT)
		flag |= BUS_DMASYNC_POSTWRITE;

	    bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
	    bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
    }
}

static void
amr_abort_load(struct amr_command *ac)
{
    ac_qhead_t head;
    struct amr_softc *sc = ac->ac_sc;

    KKASSERT(lockstatus(&sc->amr_list_lock, curthread) != 0);

    ac->ac_status = AMR_STATUS_ABORTED;
    amr_init_qhead(&head);
    amr_enqueue_completed(ac, &head);

    lockmgr(&sc->amr_list_lock, LK_RELEASE);
    amr_complete(sc, &head);
    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
}

/********************************************************************************
 * Take a command and give it to the controller, returns 0 if successful, or
 * EBUSY if the command should be retried later.
 */
static int
amr_start(struct amr_command *ac)
{
    struct amr_softc *sc;
    int error = 0;
    int slot;

    debug_called(3);

    /* mark command as busy so that polling consumer can tell */
    sc = ac->ac_sc;
    ac->ac_flags |= AMR_CMD_BUSY;

    /* get a command slot (freed in amr_done) */
    slot = ac->ac_slot;
    if (sc->amr_busycmd[slot] != NULL)
	panic("amr: slot %d busy?", slot);
    sc->amr_busycmd[slot] = ac;
    atomic_add_int(&sc->amr_busyslots, 1);

    /* Now we have a slot, we can map the command (unmapped in amr_complete). */
    if ((error = amr_mapcmd(ac)) == ENOMEM) {
	/*
	 * Memory resources are short, so free the slot and let this be tried
	 * later.
	 */
	amr_freeslot(ac);
    }

    return (error);
}

/********************************************************************************
 * Extract one or more completed commands from the controller (sc)
 *
 * Returns nonzero if any commands on the work queue were marked as completed.
 */

int
amr_done(struct amr_softc *sc)
{
    ac_qhead_t head;
    struct amr_command *ac;
    struct amr_mailbox mbox;
    int i, idx, result;

    debug_called(3);

    /* See if there's anything for us to do */
    result = 0;
    amr_init_qhead(&head);

    /* loop collecting completed commands */
    for (;;) {
	/* poll for a completed command's identifier and status */
	if (sc->amr_get_work(sc, &mbox)) {
	    result = 1;

	    /* iterate over completed commands in this result */
	    for (i = 0; i < mbox.mb_nstatus; i++) {
		/* get pointer to busy command */
		idx = mbox.mb_completed[i] - 1;
		ac = sc->amr_busycmd[idx];

		/* really a busy command? */
		if (ac != NULL) {

		    /* pull the command from the busy index */
		    amr_freeslot(ac);

		    /* save status for later use */
		    ac->ac_status = mbox.mb_status;
		    amr_enqueue_completed(ac, &head);
		    debug(3, "completed command with status %x", mbox.mb_status);
		} else {
		    device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
		}
	    }
	} else
	    break;	/* no work */
    }

    /* handle completion and timeouts */
    amr_complete(sc, &head);

    return(result);
}

/********************************************************************************
 * Do completion processing on done commands on (sc)
 */

static void
amr_complete(void *context, ac_qhead_t *head)
{
    struct amr_softc *sc = (struct amr_softc *)context;
    struct amr_command *ac;

    debug_called(3);

    /* pull completed commands off the queue */
    for (;;) {
	ac = amr_dequeue_completed(sc, head);
	if (ac == NULL)
	    break;

	/* unmap the command's data buffer */
	amr_unmapcmd(ac);

	/*
	 * Is there a completion handler?
	 */
	if (ac->ac_complete != NULL) {
	    /* unbusy the command */
	    ac->ac_flags &= ~AMR_CMD_BUSY;
	    ac->ac_complete(ac);

	/*
	 * Is someone sleeping on this one?
	 */
	} else {
	    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
	    ac->ac_flags &= ~AMR_CMD_BUSY;
	    if (ac->ac_flags & AMR_CMD_SLEEP) {
		/* unbusy the command */
		wakeup(ac);
	    }
	    lockmgr(&sc->amr_list_lock, LK_RELEASE);
	}

	if (!sc->amr_busyslots) {
	    wakeup(sc);
	}
    }

    lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE);
    sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
    amr_startio(sc);
    lockmgr(&sc->amr_list_lock, LK_RELEASE);
}

/********************************************************************************
 ********************************************************************************
                                                        Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Get a new command buffer.
 *
 * This may return NULL in low-memory cases.
 *
 * If possible, we recycle a command buffer that's been used before.
1898 */ 1899 struct amr_command * 1900 amr_alloccmd(struct amr_softc *sc) 1901 { 1902 struct amr_command *ac; 1903 1904 debug_called(3); 1905 1906 ac = amr_dequeue_free(sc); 1907 if (ac == NULL) { 1908 sc->amr_state |= AMR_STATE_QUEUE_FRZN; 1909 return(NULL); 1910 } 1911 1912 /* clear out significant fields */ 1913 ac->ac_status = 0; 1914 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox)); 1915 ac->ac_flags = 0; 1916 ac->ac_bio = NULL; 1917 ac->ac_data = NULL; 1918 ac->ac_complete = NULL; 1919 ac->ac_retries = 0; 1920 ac->ac_tag = NULL; 1921 ac->ac_datamap = NULL; 1922 return(ac); 1923 } 1924 1925 /******************************************************************************** 1926 * Release a command buffer for recycling. 1927 */ 1928 void 1929 amr_releasecmd(struct amr_command *ac) 1930 { 1931 debug_called(3); 1932 1933 amr_enqueue_free(ac); 1934 } 1935 1936 /******************************************************************************** 1937 * Allocate a new command cluster and initialise it. 1938 */ 1939 static void 1940 amr_alloccmd_cluster(struct amr_softc *sc) 1941 { 1942 struct amr_command_cluster *acc; 1943 struct amr_command *ac; 1944 int i, nextslot; 1945 1946 /* 1947 * If we haven't found the real limit yet, let us have a couple of 1948 * commands in order to be able to probe. 1949 */ 1950 if (sc->amr_maxio == 0) 1951 sc->amr_maxio = 2; 1952 1953 if (sc->amr_nextslot > sc->amr_maxio) 1954 return; 1955 acc = kmalloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO); 1956 if (acc != NULL) { 1957 nextslot = sc->amr_nextslot; 1958 lockmgr(&sc->amr_list_lock, LK_EXCLUSIVE); 1959 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link); 1960 lockmgr(&sc->amr_list_lock, LK_RELEASE); 1961 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { 1962 ac = &acc->acc_command[i]; 1963 ac->ac_sc = sc; 1964 ac->ac_slot = nextslot; 1965 1966 /* 1967 * The SG table for each slot is a fixed size and is assumed to 1968 * hold 64-bit s/g objects when the driver is configured to do 1969 * 64-bit DMA. 32-bit DMA commands still use the same table, but 1970 * cast down to 32-bit objects.
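 *
 * Concretely, slot n's table begins at
 *	amr_sgbusaddr + n * AMR_NSEG * sizeof(struct amr_sg64entry)
 * on an SG64-configured controller (sizeof(struct amr_sgentry) otherwise),
 * so every slot owns a fixed window of AMR_NSEG entries.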
1971 */ 1972 if (AMR_IS_SG64(sc)) { 1973 ac->ac_sgbusaddr = sc->amr_sgbusaddr + 1974 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry)); 1975 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG); 1976 } else { 1977 ac->ac_sgbusaddr = sc->amr_sgbusaddr + 1978 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry)); 1979 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); 1980 } 1981 1982 ac->ac_ccb = sc->amr_ccb + ac->ac_slot; 1983 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr + 1984 (ac->ac_slot * sizeof(union amr_ccb)); 1985 1986 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap)) 1987 break; 1988 if (AMR_IS_SG64(sc) && 1989 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map))) 1990 break; 1991 amr_releasecmd(ac); 1992 if (++nextslot > sc->amr_maxio) 1993 break; 1994 } 1995 sc->amr_nextslot = nextslot; 1996 } 1997 } 1998 1999 /******************************************************************************** 2000 * Free a command cluster 2001 */ 2002 static void 2003 amr_freecmd_cluster(struct amr_command_cluster *acc) 2004 { 2005 struct amr_softc *sc = acc->acc_command[0].ac_sc; 2006 int i; 2007 2008 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) { 2009 if (acc->acc_command[i].ac_sc == NULL) 2010 break; 2011 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap); 2012 if (AMR_IS_SG64(sc)) 2013 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map); 2014 } 2015 kfree(acc, M_AMR); 2016 } 2017 2018 /******************************************************************************** 2019 ******************************************************************************** 2020 Interface-specific Shims 2021 ******************************************************************************** 2022 ********************************************************************************/ 2023 2024 /******************************************************************************** 2025 * Tell the controller that the mailbox contains a valid command 2026 */ 2027 static int 2028 amr_quartz_submit_command(struct amr_command *ac) 2029 { 2030 struct amr_softc *sc = ac->ac_sc; 2031 static struct timeval lastfail; 2032 static int curfail; 2033 int i = 0; 2034 2035 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE); 2036 while (sc->amr_mailbox->mb_busy && (i++ < 10)) { 2037 DELAY(1); 2038 /* This is a no-op read that flushes pending mailbox updates */ 2039 AMR_QGET_ODB(sc); 2040 } 2041 if (sc->amr_mailbox->mb_busy) { 2042 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 2043 if (ac->ac_retries++ > 1000) { 2044 if (ppsratecheck(&lastfail, &curfail, 1)) 2045 device_printf(sc->amr_dev, "Too many retries on command %p. " 2046 "Controller is likely dead\n", ac); 2047 ac->ac_retries = 0; 2048 } 2049 return (EBUSY); 2050 } 2051 2052 /* 2053 * Save the slot number so that we can locate this command when complete. 2054 * Note that ident = 0 seems to be special, so we don't use it. 
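 *
 * Note that the bcopy() below copies only the first 14 bytes of the mailbox
 * (the command fields); mb_busy, mb_poll and mb_ack are then set explicitly
 * before the submit doorbell is written.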
2055 */ 2056 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */ 2057 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14); 2058 sc->amr_mailbox->mb_busy = 1; 2059 sc->amr_mailbox->mb_poll = 0; 2060 sc->amr_mailbox->mb_ack = 0; 2061 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi; 2062 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo; 2063 2064 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT); 2065 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 2066 return(0); 2067 } 2068 2069 static int 2070 amr_std_submit_command(struct amr_command *ac) 2071 { 2072 struct amr_softc *sc = ac->ac_sc; 2073 static struct timeval lastfail; 2074 static int curfail; 2075 2076 lockmgr(&sc->amr_hw_lock, LK_EXCLUSIVE); 2077 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) { 2078 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 2079 if (ac->ac_retries++ > 1000) { 2080 if (ppsratecheck(&lastfail, &curfail, 1)) 2081 device_printf(sc->amr_dev, "Too many retries on command %p. " 2082 "Controller is likely dead\n", ac); 2083 ac->ac_retries = 0; 2084 } 2085 return (EBUSY); 2086 } 2087 2088 /* 2089 * Save the slot number so that we can locate this command when complete. 2090 * Note that ident = 0 seems to be special, so we don't use it. 2091 */ 2092 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */ 2093 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14); 2094 sc->amr_mailbox->mb_busy = 1; 2095 sc->amr_mailbox->mb_poll = 0; 2096 sc->amr_mailbox->mb_ack = 0; 2097 2098 AMR_SPOST_COMMAND(sc); 2099 lockmgr(&sc->amr_hw_lock, LK_RELEASE); 2100 return(0); 2101 } 2102 2103 /******************************************************************************** 2104 * Claim any work that the controller has completed; acknowledge completion, 2105 * save details of the completion in (mbsave) 2106 */ 2107 static int 2108 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) 2109 { 2110 int worked, i; 2111 u_int32_t outd; 2112 u_int8_t nstatus; 2113 u_int8_t completed[46]; 2114 2115 debug_called(3); 2116 2117 worked = 0; 2118 2119 /* work waiting for us? */ 2120 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) { 2121 2122 /* acknowledge interrupt */ 2123 AMR_QPUT_ODB(sc, AMR_QODB_READY); 2124 2125 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff) 2126 DELAY(1); 2127 sc->amr_mailbox->mb_nstatus = 0xff; 2128 2129 /* wait until fw wrote out all completions */ 2130 for (i = 0; i < nstatus; i++) { 2131 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff) 2132 DELAY(1); 2133 sc->amr_mailbox->mb_completed[i] = 0xff; 2134 } 2135 2136 /* Save information for later processing */ 2137 mbsave->mb_nstatus = nstatus; 2138 mbsave->mb_status = sc->amr_mailbox->mb_status; 2139 sc->amr_mailbox->mb_status = 0xff; 2140 2141 for (i = 0; i < nstatus; i++) 2142 mbsave->mb_completed[i] = completed[i]; 2143 2144 /* acknowledge that we have the commands */ 2145 AMR_QPUT_IDB(sc, AMR_QIDB_ACK); 2146 2147 #if 0 2148 #ifndef AMR_QUARTZ_GOFASTER 2149 /* 2150 * This waits for the controller to notice that we've taken the 2151 * command from it. It's very inefficient, and we shouldn't do it, 2152 * but if we remove this code, we stop completing commands under 2153 * load. 2154 * 2155 * Peter J says we shouldn't do this. The documentation says we 2156 * should. Who is right? 2157 */ 2158 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) 2159 ; /* XXX aiee! what if it dies?
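 * (If this wait were ever re-enabled it would presumably need a bounded
 * DELAY()/retry count so a dead controller could not wedge the completion
 * path.)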
*/ 2160 #endif 2161 #endif 2162 2163 worked = 1; /* got some work */ 2164 } 2165 2166 return(worked); 2167 } 2168 2169 static int 2170 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave) 2171 { 2172 int worked; 2173 u_int8_t istat; 2174 2175 debug_called(3); 2176 2177 worked = 0; 2178 2179 /* check for valid interrupt status */ 2180 istat = AMR_SGET_ISTAT(sc); 2181 if ((istat & AMR_SINTR_VALID) != 0) { 2182 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */ 2183 2184 /* save mailbox, which contains a list of completed commands */ 2185 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave)); 2186 2187 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */ 2188 worked = 1; 2189 } 2190 2191 return(worked); 2192 } 2193 2194 /******************************************************************************** 2195 * Notify the controller of the mailbox location. 2196 */ 2197 static void 2198 amr_std_attach_mailbox(struct amr_softc *sc) 2199 { 2200 2201 /* program the mailbox physical address */ 2202 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff); 2203 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff); 2204 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff); 2205 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff); 2206 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR); 2207 2208 /* clear any outstanding interrupt and enable interrupts proper */ 2209 AMR_SACK_INTERRUPT(sc); 2210 AMR_SENABLE_INTR(sc); 2211 } 2212 2213 #ifdef AMR_BOARD_INIT 2214 /******************************************************************************** 2215 * Initialise the controller 2216 */ 2217 static int 2218 amr_quartz_init(struct amr_softc *sc) 2219 { 2220 int status, ostatus; 2221 2222 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc)); 2223 2224 AMR_QRESET(sc); 2225 2226 ostatus = 0xff; 2227 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) { 2228 if (status != ostatus) { 2229 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status)); 2230 ostatus = status; 2231 } 2232 switch (status) { 2233 case AMR_QINIT_NOMEM: 2234 return(ENOMEM); 2235 2236 case AMR_QINIT_SCAN: 2237 /* XXX we could print channel/target here */ 2238 break; 2239 } 2240 } 2241 return(0); 2242 } 2243 2244 static int 2245 amr_std_init(struct amr_softc *sc) 2246 { 2247 int status, ostatus; 2248 2249 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc)); 2250 2251 AMR_SRESET(sc); 2252 2253 ostatus = 0xff; 2254 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) { 2255 if (status != ostatus) { 2256 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status)); 2257 ostatus = status; 2258 } 2259 switch (status) { 2260 case AMR_SINIT_NOMEM: 2261 return(ENOMEM); 2262 2263 case AMR_SINIT_INPROG: 2264 /* XXX we could print channel/target here? 
*/ 2265 break; 2266 } 2267 } 2268 return(0); 2269 } 2270 #endif 2271 2272 /******************************************************************************** 2273 ******************************************************************************** 2274 Debugging 2275 ******************************************************************************** 2276 ********************************************************************************/ 2277 2278 /******************************************************************************** 2279 * Identify the controller and print some information about it. 2280 */ 2281 static void 2282 amr_describe_controller(struct amr_softc *sc) 2283 { 2284 struct amr_prodinfo *ap; 2285 struct amr_enquiry *ae; 2286 char *prod; 2287 int status; 2288 2289 /* 2290 * Try to get 40LD product info, which tells us what the card is labelled as. 2291 */ 2292 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) { 2293 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n", 2294 ap->ap_product, ap->ap_firmware, ap->ap_bios, 2295 ap->ap_memsize); 2296 2297 kfree(ap, M_AMR); 2298 return; 2299 } 2300 2301 /* 2302 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table. 2303 */ 2304 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) { 2305 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature); 2306 2307 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) { 2308 2309 /* 2310 * Try to work it out based on the PCI signatures. 2311 */ 2312 switch (pci_get_device(sc->amr_dev)) { 2313 case 0x9010: 2314 prod = "Series 428"; 2315 break; 2316 case 0x9060: 2317 prod = "Series 434"; 2318 break; 2319 default: 2320 prod = "unknown controller"; 2321 break; 2322 } 2323 } else { 2324 device_printf(sc->amr_dev, "<unsupported controller>\n"); 2325 return; 2326 } 2327 2328 /* 2329 * HP NetRaid controllers have a special encoding of the firmware and 2330 * BIOS versions. The AMI version seems to have it as strings whereas 2331 * the HP version does it with a leading uppercase character and two 2332 * binary numbers. 
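 *
 * For example (hypothetical bytes), an HP-style aa_firmware[] of
 * { 0x02, 0x0b, 'H' } satisfies the test below and is reported as
 * "H.11.02", whereas an AMI-style version is an ASCII string such as
 * "1.04" and is printed verbatim with %.4s.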
2333 */ 2334 2335 if(ae->ae_adapter.aa_firmware[2] >= 'A' && 2336 ae->ae_adapter.aa_firmware[2] <= 'Z' && 2337 ae->ae_adapter.aa_firmware[1] < ' ' && 2338 ae->ae_adapter.aa_firmware[0] < ' ' && 2339 ae->ae_adapter.aa_bios[2] >= 'A' && 2340 ae->ae_adapter.aa_bios[2] <= 'Z' && 2341 ae->ae_adapter.aa_bios[1] < ' ' && 2342 ae->ae_adapter.aa_bios[0] < ' ') { 2343 2344 /* this looks like we have an HP NetRaid version of the MegaRaid */ 2345 2346 if(ae->ae_signature == AMR_SIG_438) { 2347 /* the AMI 438 is a NetRaid 3si in HP-land */ 2348 prod = "HP NetRaid 3si"; 2349 } 2350 2351 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n", 2352 prod, ae->ae_adapter.aa_firmware[2], 2353 ae->ae_adapter.aa_firmware[1], 2354 ae->ae_adapter.aa_firmware[0], 2355 ae->ae_adapter.aa_bios[2], 2356 ae->ae_adapter.aa_bios[1], 2357 ae->ae_adapter.aa_bios[0], 2358 ae->ae_adapter.aa_memorysize); 2359 } else { 2360 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n", 2361 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios, 2362 ae->ae_adapter.aa_memorysize); 2363 } 2364 kfree(ae, M_AMR); 2365 } 2366 2367 int 2368 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks) 2369 { 2370 struct amr_command *ac; 2371 int error = EIO; 2372 2373 debug_called(1); 2374 2375 sc->amr_state |= AMR_STATE_INTEN; 2376 2377 /* get ourselves a command buffer */ 2378 if ((ac = amr_alloccmd(sc)) == NULL) 2379 goto out; 2380 /* set command flags */ 2381 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT; 2382 2383 /* point the command at our data */ 2384 ac->ac_data = data; 2385 ac->ac_length = blks * AMR_BLKSIZE; 2386 2387 /* build the command proper */ 2388 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE; 2389 ac->ac_mailbox.mb_blkcount = blks; 2390 ac->ac_mailbox.mb_lba = lba; 2391 ac->ac_mailbox.mb_drive = unit; 2392 2393 /* can't assume that interrupts are going to work here, so play it safe */ 2394 if (sc->amr_poll_command(ac)) 2395 goto out; 2396 error = ac->ac_status; 2397 2398 out: 2399 if (ac != NULL) 2400 amr_releasecmd(ac); 2401 2402 sc->amr_state &= ~AMR_STATE_INTEN; 2403 return (error); 2404 } 2405 2406 2407 2408 #ifdef AMR_DEBUG 2409 /******************************************************************************** 2410 * Print the command (ac) in human-readable format 2411 */ 2412 #if 0 2413 static void 2414 amr_printcommand(struct amr_command *ac) 2415 { 2416 struct amr_softc *sc = ac->ac_sc; 2417 struct amr_sgentry *sg; 2418 int i; 2419 2420 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n", 2421 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive); 2422 device_printf(sc->amr_dev, "blkcount %d lba %d\n", 2423 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba); 2424 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length); 2425 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n", 2426 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem); 2427 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio); 2428 2429 /* get base address of s/g table */ 2430 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG); 2431 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++) 2432 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count); 2433 } 2434 #endif 2435 #endif 2436
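
/*
 * Usage note (a sketch, not code from elsewhere in this file):
 * amr_dump_blocks() is written to be safe when interrupts may not be
 * running, which is why it polls via sc->amr_poll_command; a disk-level
 * crash-dump handler would typically call it in a loop, e.g.
 *
 *	error = amr_dump_blocks(sc, unit, lba, buf, nblks);
 *
 * advancing lba and buf between calls and checking error each time.
 */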