/*
*****************************************************************************************
** O.S         : FreeBSD
** FILE NAME   : arcmsr.c
** BY          : Erich Chen, Ching Huang
** Description : SCSI RAID Device Driver for
**               ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
**               ARCMSR RAID Host adapter
**               [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
******************************************************************************************
************************************************************************
**
** Copyright (c) 2004-2010 ARECA Co. Ltd.
**        Erich Chen, Taipei Taiwan All rights reserved.
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**************************************************************************
** History
**
**    REV#        DATE         NAME         DESCRIPTION
** 1.00.00.00  3/31/2004   Erich Chen   First release
** 1.20.00.02  11/29/2004  Erich Chen   bug fix with arcmsr_bus_reset when PHY error
** 1.20.00.03  4/19/2005   Erich Chen   add SATA 24 Ports adapter type support
**                                      clean unused function
** 1.20.00.12  9/12/2005   Erich Chen   bug fix with abort command handling,
**                                      firmware version check
**                                      and firmware update notify for hardware bug fix
**                                      handling of non-zero high part physical address
**                                      of srb resource
** 1.20.00.13  8/18/2006   Erich Chen   remove pending srb and report busy
**                                      add iop message xfer
**                                      with scsi pass-through command
**                                      add new device id of sas raid adapters
**                                      code fit for SPARC64 & PPC
** 1.20.00.14  02/05/2007  Erich Chen   bug fix for incorrect ccb_h.status report
**                                      and cause g_vfs_done() read write error
** 1.20.00.15  10/10/2007  Erich Chen   support new RAID adapter type ARC120x
** 1.20.00.16  10/10/2009  Erich Chen   Bug fix for RAID adapter type ARC120x
**                                      bus_dmamem_alloc() with BUS_DMA_ZERO
** 1.20.00.17  07/15/2010  Ching Huang  Added support for ARC1880
**                                      report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
**                                      prevent cam_periph_error removing all LUN devices of one Target id
**                                      when any one LUN device failed
** 1.20.00.18  10/14/2010  Ching Huang  Fixed "inquiry data fails comparison at DV1 step"
**             10/25/2010  Ching Huang  Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
** 1.20.00.19  11/11/2010  Ching Huang  Fixed arcmsr driver preventing arcsas support for Areca SAS HBA ARC13x0
******************************************************************************************
* $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.35 2010/11/13 08:58:36 delphij Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/devicestat.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/poll.h>
#include <sys/device.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <sys/conf.h>
#include <sys/rman.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>
/*
**************************************************************************
**************************************************************************
*/
#include <sys/endian.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>
#define ARCMSR_LOCK_INIT(l, s)      lockinit(l, s, 0, LK_CANRECURSE)
#define ARCMSR_LOCK_DESTROY(l)      lockuninit(l)
#define ARCMSR_LOCK_ACQUIRE(l)      lockmgr(l, LK_EXCLUSIVE)
#define ARCMSR_LOCK_RELEASE(l)      lockmgr(l, LK_RELEASE)
#define ARCMSR_LOCK_TRY(l)          lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
#define arcmsr_htole32(x)           htole32(x)
typedef struct lock arcmsr_lock_t;

#if !defined(CAM_NEW_TRAN_CODE)
#define CAM_NEW_TRAN_CODE 1
#endif

#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.19 2010-11-11"
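/*
** Added note (not in the original source): the ARCMSR_LOCK_* wrappers above
** map onto DragonFly's lockmgr(9) API.  The driver serializes its interrupt
** and CAM paths with them, e.g.
**
**      ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
**      arcmsr_interrupt(acb);
**      ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
**
** as done in arcmsr_intr_handler() and arcmsr_poll() below.
*/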
#include <dev/raid/arcmsr/arcmsr.h>
#define ARCMSR_SRBS_POOL_SIZE ((sizeof(struct CommandControlBlock) * ARCMSR_MAX_FREESRB_NUM))
/*
**************************************************************************
**************************************************************************
*/
#define CHIP_REG_READ32(s, b, r)     bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
#define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
/*
**************************************************************************
**************************************************************************
*/
static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb);
static u_int8_t arcmsr_seek_cmd2abort(union ccb *abortccb);
static int arcmsr_probe(device_t dev);
static int arcmsr_attach(device_t dev);
static int arcmsr_detach(device_t dev);
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
static int arcmsr_shutdown(device_t dev);
static void arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
static void arcmsr_free_resource(struct AdapterControlBlock *acb);
static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t *dm_segs, u_int32_t nseg);
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb *pccb);
static int arcmsr_resume(device_t dev);
static int arcmsr_suspend(device_t dev);
static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
static void arcmsr_polling_devmap(void *arg);
/*
**************************************************************************
**************************************************************************
*/
static void UDELAY(u_int32_t us) { DELAY(us); }
/*
**************************************************************************
**************************************************************************
*/
static bus_dmamap_callback_t arcmsr_map_free_srb;
static bus_dmamap_callback_t arcmsr_execute_srb;
/*
**************************************************************************
**************************************************************************
*/
static d_open_t  arcmsr_open;
static d_close_t arcmsr_close;
static d_ioctl_t arcmsr_ioctl;

static device_method_t arcmsr_methods[]={
    DEVMETHOD(device_probe, arcmsr_probe),
    DEVMETHOD(device_attach, arcmsr_attach),
    DEVMETHOD(device_detach, arcmsr_detach),
    DEVMETHOD(device_shutdown, arcmsr_shutdown),
    DEVMETHOD(device_suspend, arcmsr_suspend),
    DEVMETHOD(device_resume, arcmsr_resume),
    DEVMETHOD(bus_print_child, bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    { 0, 0 }
};

static driver_t arcmsr_driver={
    "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
};

static devclass_t arcmsr_devclass;
DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
#ifndef BUS_DMA_COHERENT
#define BUS_DMA_COHERENT    0x04    /* hint: map memory in a coherent way */
#endif

static struct dev_ops arcmsr_ops = {
    { "arcmsr", 0, 0 },
    .d_open =  arcmsr_open,     /* open */
    .d_close = arcmsr_close,    /* close */
    .d_ioctl = arcmsr_ioctl,    /* ioctl */
};

/*
**************************************************************************
**************************************************************************
*/
static int arcmsr_open(struct dev_open_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct AdapterControlBlock *acb = dev->si_drv1;

    if(acb == NULL) {
        return ENXIO;
    }
    return 0;
}
/*
**************************************************************************
**************************************************************************
*/
static int arcmsr_close(struct dev_close_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct AdapterControlBlock *acb = dev->si_drv1;

    if(acb == NULL) {
        return ENXIO;
    }
    return 0;
}
/*
**************************************************************************
**************************************************************************
*/
static int arcmsr_ioctl(struct dev_ioctl_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    u_long ioctl_cmd = ap->a_cmd;
    caddr_t arg = ap->a_data;
    struct AdapterControlBlock *acb = dev->si_drv1;

    if(acb == NULL) {
        return ENXIO;
    }
    return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
}
/*
**********************************************************************
**********************************************************************
*/
static u_int32_t arcmsr_disable_allintr(struct AdapterControlBlock *acb)
{
    u_int32_t intmask_org = 0;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        /* disable all outbound interrupts */
        intmask_org = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
        CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        /* disable all outbound interrupts */
        intmask_org = CHIP_REG_READ32(HBB_DOORBELL,
            0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupts */
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        /* disable all outbound interrupts */
        intmask_org = CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask); /* disable outbound message0 int */
        CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
        }
        break;
    }
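    /*
    ** Added note (not in the original source): the pre-existing mask value is
    ** handed back to the caller so that arcmsr_enable_allintr() can restore
    ** it later, which is the pairing used in arcmsr_iop_reset(),
    ** arcmsr_abort_dr_ccbs() and arcmsr_iop_parking().
    */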
    return (intmask_org);
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_enable_allintr(struct AdapterControlBlock *acb, u_int32_t intmask_org)
{
    u_int32_t mask;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        /* enable outbound Post Queue, outbound doorbell Interrupt */
        mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
        CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
        acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
        mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /* 1=interrupt enable, 0=interrupt disable */
        acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        /* enable outbound Post Queue, outbound doorbell Interrupt */
        mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
        CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
        acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
        }
        break;
    }
    return;
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index = 0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
                CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT); /* clear interrupt */
                return TRUE;
            }
            UDELAY(10000);
        } /* max 1 second */
    } while(Retries++ < 20); /* max 20 seconds */
    return FALSE;
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index = 0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
                CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN); /* clear interrupt */
                CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
                return TRUE;
            }
            UDELAY(10000);
        } /* max 1 second */
    } while(Retries++ < 20); /* max 20 seconds */
    return FALSE;
}
/*
**********************************************************************
**********************************************************************
*/
static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
{
    u_int32_t Index;
    u_int8_t Retries = 0x00;

    do {
        for(Index = 0; Index < 100; Index++) {
            if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
                CHIP_REG_WRITE32(HBC_MessageUnit, 0,
                    outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR); /* clear interrupt */
                return TRUE;
            }
            UDELAY(10000);
        } /* max 1 second */
    } while(Retries++ < 20); /* max 20 seconds */
    return FALSE;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
    do {
        if(arcmsr_hba_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    } while(retry_count != 0);
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
    do {
        if(arcmsr_hbb_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    } while(retry_count != 0);
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
{
    int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minutes */

    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    do {
        if(arcmsr_hbc_wait_msgint_ready(acb)) {
            break;
        } else {
            retry_count--;
        }
    } while(retry_count != 0);
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        arcmsr_flush_hba_cache(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        arcmsr_flush_hbb_cache(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        arcmsr_flush_hbc_cache(acb);
        }
        break;
    }
    return;
}
/*
*******************************************************************************
*******************************************************************************
*/
static int arcmsr_suspend(device_t dev)
{
    struct AdapterControlBlock *acb = device_get_softc(dev);

    /* flush controller */
    arcmsr_iop_parking(acb);
    /* disable all outbound interrupts */
    arcmsr_disable_allintr(acb);
    return (0);
}
/*
*******************************************************************************
*******************************************************************************
*/
static int arcmsr_resume(device_t dev)
{
    struct AdapterControlBlock *acb = device_get_softc(dev);

    arcmsr_iop_init(acb);
    return (0);
}
/*
*********************************************************************************
*********************************************************************************
*/
static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
{
    struct AdapterControlBlock *acb;
    u_int8_t target_id, target_lun;
    struct cam_sim *sim;

    sim = (struct cam_sim *) cb_arg;
    acb = (struct AdapterControlBlock *) cam_sim_softc(sim);
    switch (code) {
    case AC_LOST_DEVICE:
        target_id = xpt_path_target_id(path);
        target_lun = xpt_path_lun_id(path);
        if((target_id > ARCMSR_MAX_TARGETID) || (target_lun > ARCMSR_MAX_TARGETLUN)) {
            break;
        }
        kprintf("%s:scsi id=%d lun=%d device lost \n", device_get_name(acb->pci_dev), target_id, target_lun);
        break;
    default:
        break;
    }
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
{
    struct AdapterControlBlock *acb = srb->acb;
    union ccb *pccb = srb->pccb;

    if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        bus_dmasync_op_t op;

        if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            op = BUS_DMASYNC_POSTREAD;
        } else {
            op = BUS_DMASYNC_POSTWRITE;
        }
        bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
        bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
    }
    if(stand_flag == 1) {
        atomic_subtract_int(&acb->srboutstandingcount, 1);
        if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) &&
            (acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
            acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
            pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        }
    }
    srb->startdone = ARCMSR_SRB_DONE;
    srb->srb_flags = 0;
    acb->srbworkingQ[acb->workingsrb_doneindex] = srb;
    acb->workingsrb_doneindex++;
    acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
    xpt_done(pccb);
    return;
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
{
    union ccb *pccb = srb->pccb;

    pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
    pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
    if(&pccb->csio.sense_data) {
        memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
        memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
            get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
        ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid, ErrorCode */
        pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    return;
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
    if(!arcmsr_hba_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
    return;
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
    if(!arcmsr_hbb_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
    return;
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
{
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    if(!arcmsr_hbc_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
    }
    return;
}
/*
*********************************************************************
*********************************************************************
*/
static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        arcmsr_abort_hba_allcmd(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        arcmsr_abort_hbb_allcmd(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        arcmsr_abort_hbc_allcmd(acb);
        }
        break;
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
{
    int target, lun;

    target = srb->pccb->ccb_h.target_id;
    lun = srb->pccb->ccb_h.target_lun;
    if(error == FALSE) {
        if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
            acb->devstate[target][lun] = ARECA_RAID_GOOD;
        }
        srb->pccb->ccb_h.status |= CAM_REQ_CMP;
        arcmsr_srb_complete(srb, 1);
    } else {
        switch(srb->arcmsr_cdb.DeviceStatus) {
        case ARCMSR_DEV_SELECT_TIMEOUT: {
            if(acb->devstate[target][lun] == ARECA_RAID_GOOD) {
                kprintf("arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
            }
            acb->devstate[target][lun] = ARECA_RAID_GONE;
            srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
            arcmsr_srb_complete(srb, 1);
            }
            break;
        case ARCMSR_DEV_ABORTED:
        case ARCMSR_DEV_INIT_FAIL: {
            acb->devstate[target][lun] = ARECA_RAID_GONE;
            srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
            arcmsr_srb_complete(srb, 1);
            }
            break;
        case SCSISTAT_CHECK_CONDITION: {
            acb->devstate[target][lun] = ARECA_RAID_GOOD;
            arcmsr_report_sense_info(srb);
            arcmsr_srb_complete(srb, 1);
            }
            break;
        default:
            kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x \n",
                acb->pci_unit, target, lun, srb->arcmsr_cdb.DeviceStatus);
            acb->devstate[target][lun] = ARECA_RAID_GONE;
            srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
            /* unknown error or CRC error, just retry */
            arcmsr_srb_complete(srb, 1);
            break;
        }
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
{
    struct CommandControlBlock *srb;

    /* check if command done with no error */
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_C:
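        /*
        ** Added note (not in the original source): the reply-queue entry
        ** appears to carry the SRB's bus address.  For type C adapters the
        ** low nibble holds status bits and is masked off; for type A/B the
        ** value is the address in 32-byte units, hence the "<< 5" below.
        ** acb->vir2phy_offset is presumably the (virtual - bus) delta of the
        ** SRB pool, so adding it recovers the driver's CommandControlBlock
        ** pointer.
        */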
        srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0)); /* frame must be 32 bytes aligned */
        break;
    case ACB_ADAPTER_TYPE_A:
    case ACB_ADAPTER_TYPE_B:
    default:
        srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5)); /* frame must be 32 bytes aligned */
        break;
    }
    if((srb->acb != acb) || (srb->startdone != ARCMSR_SRB_START)) {
        if(srb->startdone == ARCMSR_SRB_ABORTED) {
            kprintf("arcmsr%d: srb='%p' isr got aborted command \n", acb->pci_unit, srb);
            srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
            arcmsr_srb_complete(srb, 1);
            return;
        }
        kprintf("arcmsr%d: isr got an illegal srb command done "
            "acb='%p' srb='%p' srbacb='%p' startdone=0x%x srboutstandingcount=%d \n",
            acb->pci_unit, acb, srb, srb->acb, srb->startdone, acb->srboutstandingcount);
        return;
    }
    arcmsr_report_srb_state(acb, srb, error);
    return;
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
    int i = 0;
    u_int32_t flag_srb;
    u_int16_t error;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        u_int32_t outbound_intstatus;

        /* clear and abort all outbound posted Q */
        outbound_intstatus = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
        CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /* clear interrupt */
        while(((flag_srb = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
            error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
            arcmsr_drain_donequeue(acb, flag_srb, error);
        }
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

        /* clear all outbound posted Q */
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
        for(i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
            if((flag_srb = phbbmu->done_qbuffer[i]) != 0) {
                phbbmu->done_qbuffer[i] = 0;
                error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
                arcmsr_drain_donequeue(acb, flag_srb, error);
            }
            phbbmu->post_qbuffer[i] = 0;
        } /* drain reply FIFO */
        phbbmu->doneq_index = 0;
        phbbmu->postq_index = 0;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {

        while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
            flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
            error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1) ? TRUE : FALSE;
            arcmsr_drain_donequeue(acb, flag_srb, error);
        }
        }
        break;
    }
    return;
}
/*
****************************************************************************
****************************************************************************
*/
static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
    struct CommandControlBlock *srb;
    u_int32_t intmask_org;
    u_int32_t i = 0;

    if(acb->srboutstandingcount > 0) {
        /* disable all outbound interrupts */
        intmask_org = arcmsr_disable_allintr(acb);
        /* clear and abort all outbound posted Q */
        arcmsr_done4abort_postqueue(acb);
        /* tell iop 331 that outstanding commands were aborted */
        arcmsr_abort_allcmd(acb);
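        /*
        ** Added note (not in the original source): after the firmware has
        ** been told to abort, any SRB still marked ARCMSR_SRB_START is
        ** completed locally with CAM_REQ_ABORTED so that CAM gets every
        ** outstanding request back.
        */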
        for(i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
            srb = acb->psrb_pool[i];
            if(srb->startdone == ARCMSR_SRB_START) {
                srb->startdone = ARCMSR_SRB_ABORTED;
                srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
                arcmsr_srb_complete(srb, 1);
            }
        }
        /* enable all outbound interrupts */
        arcmsr_enable_allintr(acb, intmask_org);
    }
    atomic_set_int(&acb->srboutstandingcount, 0);
    acb->workingsrb_doneindex = 0;
    acb->workingsrb_startindex = 0;
    return;
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_build_srb(struct CommandControlBlock *srb,
    bus_dma_segment_t *dm_segs, u_int32_t nseg)
{
    struct ARCMSR_CDB *arcmsr_cdb = &srb->arcmsr_cdb;
    u_int8_t *psge = (u_int8_t *)&arcmsr_cdb->u;
    u_int32_t address_lo, address_hi;
    union ccb *pccb = srb->pccb;
    struct ccb_scsiio *pcsio = &pccb->csio;
    u_int32_t arccdbsize = 0x30;

    memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
    arcmsr_cdb->Bus = 0;
    arcmsr_cdb->TargetID = pccb->ccb_h.target_id;
    arcmsr_cdb->LUN = pccb->ccb_h.target_lun;
    arcmsr_cdb->Function = 1;
    arcmsr_cdb->CdbLength = (u_int8_t)pcsio->cdb_len;
    arcmsr_cdb->Context = 0;
    bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
    if(nseg != 0) {
        struct AdapterControlBlock *acb = srb->acb;
        bus_dmasync_op_t op;
        u_int32_t length, i, cdb_sgcount = 0;

        if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            op = BUS_DMASYNC_PREREAD;
        } else {
            op = BUS_DMASYNC_PREWRITE;
            arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
            srb->srb_flags |= SRB_FLAG_WRITE;
        }
        bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
        for(i = 0; i < nseg; i++) {
            /* Get the physical address of the current data pointer */
            length = arcmsr_htole32(dm_segs[i].ds_len);
            address_lo = arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
            address_hi = arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
            if(address_hi == 0) {
                struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
                pdma_sg->address = address_lo;
                pdma_sg->length = length;
                psge += sizeof(struct SG32ENTRY);
                arccdbsize += sizeof(struct SG32ENTRY);
            } else {
                u_int32_t sg64s_size = 0, tmplength = length;

                while(1) {
                    u_int64_t span4G, length0;
                    struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

                    span4G = (u_int64_t)address_lo + tmplength;
                    pdma_sg->addresshigh = address_hi;
                    pdma_sg->address = address_lo;
                    if(span4G > 0x100000000) {
                        /* see if cross 4G boundary */
                        length0 = 0x100000000 - address_lo;
                        pdma_sg->length = (u_int32_t)length0 | IS_SG64_ADDR;
                        address_hi = address_hi + 1;
                        address_lo = 0;
                        tmplength = tmplength - (u_int32_t)length0;
                        sg64s_size += sizeof(struct SG64ENTRY);
                        psge += sizeof(struct SG64ENTRY);
                        cdb_sgcount++;
                    } else {
                        pdma_sg->length = tmplength | IS_SG64_ADDR;
                        sg64s_size += sizeof(struct SG64ENTRY);
                        psge += sizeof(struct SG64ENTRY);
                        break;
                    }
                }
                arccdbsize += sg64s_size;
            }
            cdb_sgcount++;
        }
        arcmsr_cdb->sgcount = (u_int8_t)cdb_sgcount;
        arcmsr_cdb->DataLength = pcsio->dxfer_len;
        if(arccdbsize > 256) {
            arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
        }
    } else {
        arcmsr_cdb->DataLength = 0;
    }
    srb->arc_cdb_size = arccdbsize;
    return;
}
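/*
** Added note (not in the original source): the SG64 loop above splits any
** segment whose low 32 address bits would overflow, so that no single entry
** crosses a 4 GiB boundary.  A hypothetical example: ds_addr = 0x1FFFFF000
** with ds_len = 0x2000 becomes two SG64 entries,
**     { addresshigh=0x1, address=0xFFFFF000, length=0x1000|IS_SG64_ADDR }
**     { addresshigh=0x2, address=0x00000000, length=0x1000|IS_SG64_ADDR }
** presumably because the firmware treats each entry's address/length pair
** as 32-bit quantities.
*/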
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
{
    u_int32_t cdb_shifted_phyaddr = (u_int32_t) srb->cdb_shifted_phyaddr;
    struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&srb->arcmsr_cdb;

    bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
    atomic_add_int(&acb->srboutstandingcount, 1);
    srb->startdone = ARCMSR_SRB_START;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
        } else {
            CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
        }
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
        int ending_index, index;

        index = phbbmu->postq_index;
        ending_index = ((index+1) % ARCMSR_MAX_HBB_POSTQUEUE);
        phbbmu->post_qbuffer[ending_index] = 0;
        if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
            phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
        } else {
            phbbmu->post_qbuffer[index] = cdb_shifted_phyaddr;
        }
        index++;
        index %= ARCMSR_MAX_HBB_POSTQUEUE; /* if last index number, set it to 0 */
        phbbmu->postq_index = index;
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;

        arc_cdb_size = (srb->arc_cdb_size > 0x300) ? 0x300 : srb->arc_cdb_size;
        ccb_post_stamp = (cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
        cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
        if(cdb_phyaddr_hi32) {
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_high, cdb_phyaddr_hi32);
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
        } else {
            CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_queueport_low, ccb_post_stamp);
        }
        }
        break;
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
    struct QBUFFER *qbuffer = NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbamu->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbcmu->message_rbuffer;
        }
        break;
    }
    return (qbuffer);
}
/*
************************************************************************
************************************************************************
*/
static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
    struct QBUFFER *qbuffer = NULL;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        struct HBA_MessageUnit *phbamu = (struct HBA_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        struct HBC_MessageUnit *phbcmu = (struct HBC_MessageUnit *)acb->pmu;

        qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
        }
        break;
    }
    return (qbuffer);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        /* let IOP know data has been read */
        CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        /* let IOP know data has been read */
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        /* let IOP know data has been read */
        CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
        }
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        /*
        ** push the inbound doorbell to tell the iop that the driver data write is ok
        ** and wait for the reply on the next hwinterrupt for the next Qbuffer post
        */
        CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        /*
        ** push the inbound doorbell to tell the iop that the driver data write is ok
        ** and wait for the reply on the next hwinterrupt for the next Qbuffer post
        */
        CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        /*
        ** push the inbound doorbell to tell the iop that the driver data write is ok
        ** and wait for the reply on the next hwinterrupt for the next Qbuffer post
        */
        CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
        }
        break;
    }
}
/*
**********************************************************************
**********************************************************************
*/
static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
    u_int8_t *pQbuffer;
    struct QBUFFER *pwbuffer;
    u_int8_t *iop_data;
    int32_t allxfer_len = 0;

    pwbuffer = arcmsr_get_iop_wqbuffer(acb);
    iop_data = (u_int8_t *)pwbuffer->data;
    if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
        while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
            && (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
            memcpy(iop_data, pQbuffer, 1);
            acb->wqbuf_firstindex++;
            acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /* if last index number, set it to 0 */
            iop_data++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        /*
        ** push the inbound doorbell and wait for the reply at the hwinterrupt routine for the next Qbuffer post
        */
        arcmsr_iop_message_wrote(acb);
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
    if(!arcmsr_hba_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
    if(!arcmsr_hbb_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
{
    acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
    if(!arcmsr_hbc_wait_msgint_ready(acb)) {
        kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A: {
        arcmsr_stop_hba_bgrb(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_B: {
        arcmsr_stop_hbb_bgrb(acb);
        }
        break;
    case ACB_ADAPTER_TYPE_C: {
        arcmsr_stop_hbc_bgrb(acb);
        }
        break;
    }
    return;
}
/*
************************************************************************
************************************************************************
*/
static void arcmsr_poll(struct cam_sim *psim)
{
    struct AdapterControlBlock *acb;

    acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
    ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
    arcmsr_interrupt(acb);
    ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
    struct QBUFFER *prbuffer;
    u_int8_t *pQbuffer;
    u_int8_t *iop_data;
    int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

    /* check whether this iop data would overflow my rqbuffer */
    rqbuf_lastindex = acb->rqbuf_lastindex;
    rqbuf_firstindex = acb->rqbuf_firstindex;
    prbuffer = arcmsr_get_iop_rqbuffer(acb);
    iop_data = (u_int8_t *)prbuffer->data;
    iop_len = prbuffer->data_len;
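    /*
    ** Added note (not in the original source): the expression below is the
    ** usual free-space formula for a power-of-two circular buffer that keeps
    ** one slot unused, assuming ARCMSR_MAX_QBUFFER is a power of two:
    **     free = (first - last - 1) & (ARCMSR_MAX_QBUFFER - 1)
    ** e.g. first == last (empty ring) yields ARCMSR_MAX_QBUFFER - 1 free bytes.
    */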
    my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
    if(my_empty_len >= iop_len) {
        while(iop_len > 0) {
            pQbuffer = &acb->rqbuffer[rqbuf_lastindex];
            memcpy(pQbuffer, iop_data, 1);
            rqbuf_lastindex++;
            rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; /* if last index number, set it to 0 */
            iop_data++;
            iop_len--;
        }
        acb->rqbuf_lastindex = rqbuf_lastindex;
        arcmsr_iop_message_read(acb);
        /* signature, let IOP know data has been read */
    } else {
        acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
    acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
    /*
    *****************************************************************
    ** check if there are any mail packages from the user space program
    ** in my post bag; now is the time to send them into Areca's firmware
    *****************************************************************
    */
    if(acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
        u_int8_t *pQbuffer;
        struct QBUFFER *pwbuffer;
        u_int8_t *iop_data;
        int allxfer_len = 0;

        acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
        pwbuffer = arcmsr_get_iop_wqbuffer(acb);
        iop_data = (u_int8_t *)pwbuffer->data;
        while((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
            && (allxfer_len < 124)) {
            pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
            memcpy(iop_data, pQbuffer, 1);
            acb->wqbuf_firstindex++;
            acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /* if last index number, set it to 0 */
            iop_data++;
            allxfer_len++;
        }
        pwbuffer->data_len = allxfer_len;
        /*
        ** push the inbound doorbell to tell the iop that the driver data write is ok
        ** and wait for the reply on the next hwinterrupt for the next Qbuffer post
        */
        arcmsr_iop_message_wrote(acb);
    }
    if(acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
        acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
    }
    return;
}

static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
{
/*
    if (ccb->ccb_h.status != CAM_REQ_CMP)
        kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n", ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb->ccb_h.status);
    else
        kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
*/
    xpt_free_path(ccb->ccb_h.path);
}

static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
{
    struct cam_path *path;
    union ccb ccb;

    if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
        return;
    /* kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
    bzero(&ccb, sizeof(union ccb));
    xpt_setup_ccb(&ccb.ccb_h, path, 5);
    ccb.ccb_h.func_code = XPT_SCAN_LUN;
    ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
    ccb.crcn.flags = CAM_FLAG_NONE;
    xpt_action(&ccb);
    return;
}

static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
{
    struct CommandControlBlock *srb;
    u_int32_t intmask_org;
    int i;

    ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
    /* disable all outbound interrupts */
    intmask_org = arcmsr_disable_allintr(acb);
    for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++) {
        srb = acb->psrb_pool[i];
        if (srb->startdone == ARCMSR_SRB_START) {
            if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun)) {
                srb->startdone = ARCMSR_SRB_ABORTED;
                srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
                arcmsr_srb_complete(srb, 1);
            }
        }
    }
    /* enable outbound Post Queue, outbound doorbell Interrupt */
    arcmsr_enable_allintr(acb, intmask_org);
    ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
}

/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
    u_int32_t devicemap;
    u_int32_t target, lun;
    u_int32_t deviceMapCurrent[4] = {0};
    u_int8_t *pDevMap;

    switch (acb->adapter_type) {
    case ACB_ADAPTER_TYPE_A:
        devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
            devicemap += 4;
        }
        break;

    case ACB_ADAPTER_TYPE_B:
        devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target] = bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
            devicemap += 4;
        }
        break;

    case ACB_ADAPTER_TYPE_C:
        devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
        for (target = 0; target < 4; target++) {
            deviceMapCurrent[target] = bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
            devicemap += 4;
        }
        break;
    }
    if(acb->acb_flags & ACB_F_BUS_HANG_ON) {
        acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
    }
    /*
    ** adapter posted CONFIG message
    ** copy the new map, note if there are differences with the current map
    */
    pDevMap = (u_int8_t *)&deviceMapCurrent[0];
    for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
        if (*pDevMap != acb->device_map[target]) {
            u_int8_t difference, bit_check;

            difference = *pDevMap ^ acb->device_map[target];
            for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
                bit_check = (1 << lun); /* check bit from 0....31 */
                if(difference & bit_check) {
                    if(acb->device_map[target] & bit_check) {
                        /* unit departed */
                        kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n", target, lun);
                        arcmsr_abort_dr_ccbs(acb, target, lun);
                        arcmsr_rescan_lun(acb, target, lun);
                        acb->devstate[target][lun] = ARECA_RAID_GONE;
                    } else {
                        /* unit arrived */
                        kprintf("arcmsr_dr_handle: Target=%x, lun=%x, ARRIVING!!!\n", target, lun);
                        arcmsr_rescan_lun(acb, target, lun);
                        acb->devstate[target][lun] = ARECA_RAID_GOOD;
                    }
                }
            }
            /* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n", target, acb->device_map[target], target, *pDevMap); */
            acb->device_map[target] = *pDevMap;
        }
        pDevMap++;
    }
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
    u_int32_t outbound_message;

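    /*
    ** Added note (not in the original source): acknowledge the message0
    ** interrupt first, then read the firmware's message code from
    ** msgcode_rwbuffer[0]; ARCMSR_SIGNATURE_GET_CONFIG appears to mark a
    ** device-map update, which arcmsr_dr_handle() then processes.
    */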
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
    outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
    if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
        arcmsr_dr_handle(acb);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
    u_int32_t outbound_message;

    /* clear interrupts */
    CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
    outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
    if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
        arcmsr_dr_handle(acb);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
    u_int32_t outbound_message;

    CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
    outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
    if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
        arcmsr_dr_handle(acb);
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
    u_int32_t outbound_doorbell;

    /*
    *******************************************************************
    ** Maybe here we need to check whether wrqbuffer_lock is held or not
    ** DOORBELL: din! don!
    ** check if there is any mail that needs to be picked up from the firmware
    *******************************************************************
    */
    outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
    CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
    if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
        arcmsr_iop2drv_data_wrote_handle(acb);
    }
    if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
        arcmsr_iop2drv_data_read_handle(acb);
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
{
    u_int32_t outbound_doorbell;

    /*
    *******************************************************************
    ** Maybe here we need to check whether wrqbuffer_lock is held or not
    ** DOORBELL: din! don!
    ** check if there is any mail that needs to be picked up from the firmware
    *******************************************************************
    */
    outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
    CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
    if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
        arcmsr_iop2drv_data_wrote_handle(acb);
    }
    if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
        arcmsr_iop2drv_data_read_handle(acb);
    }
    if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
        arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */
    }
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
    u_int32_t flag_srb;
    u_int16_t error;

    /*
    *****************************************************************************
    ** areca cdb command done
    *****************************************************************************
    */
    bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
        BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    while((flag_srb = CHIP_REG_READ32(HBA_MessageUnit,
        0, outbound_queueport)) != 0xFFFFFFFF) {
        /* check if command done with no error */
        error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
        arcmsr_drain_donequeue(acb, flag_srb, error);
    } /* drain reply FIFO */
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
    struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
    u_int32_t flag_srb;
    int index;
    u_int16_t error;

    /*
    *****************************************************************************
    ** areca cdb command done
    *****************************************************************************
    */
    bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
        BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
    index = phbbmu->doneq_index;
    while((flag_srb = phbbmu->done_qbuffer[index]) != 0) {
        phbbmu->done_qbuffer[index] = 0;
        index++;
        index %= ARCMSR_MAX_HBB_POSTQUEUE; /* if last index number, set it to 0 */
        phbbmu->doneq_index = index;
        /* check if command done with no error */
        error = (flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0) ? TRUE : FALSE;
        arcmsr_drain_donequeue(acb, flag_srb, error);
    } /* drain reply FIFO */
    return;
}
/*
**************************************************************************
**************************************************************************
*/
static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
    u_int32_t flag_srb, throttling = 0;
    u_int16_t error;

    /*
    *****************************************************************************
    ** areca cdb command done
    *****************************************************************************
    */
    bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

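    /*
    ** Added note (not in the original source): drain the type C reply queue
    ** while the post-queue ISR bit stays set.  After
    ** ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the loop rings the inbound
    ** doorbell with ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING and bails out,
    ** apparently to keep a single interrupt from monopolizing the CPU.
    */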
host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) { 1560 1561 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 1562 /* check if command done with no error*/ 1563 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 1564 arcmsr_drain_donequeue(acb, flag_srb, error); 1565 if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) { 1566 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING); 1567 break; 1568 } 1569 throttling++; 1570 } /*drain reply FIFO*/ 1571 return; 1572 } 1573 /* 1574 ********************************************************************** 1575 ********************************************************************** 1576 */ 1577 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb) 1578 { 1579 u_int32_t outbound_intstatus; 1580 /* 1581 ********************************************* 1582 ** check outbound intstatus 1583 ********************************************* 1584 */ 1585 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; 1586 if(!outbound_intstatus) { 1587 /*it must be share irq*/ 1588 return; 1589 } 1590 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/ 1591 /* MU doorbell interrupts*/ 1592 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) { 1593 arcmsr_hba_doorbell_isr(acb); 1594 } 1595 /* MU post queue interrupts*/ 1596 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) { 1597 arcmsr_hba_postqueue_isr(acb); 1598 } 1599 if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) { 1600 arcmsr_hba_message_isr(acb); 1601 } 1602 return; 1603 } 1604 /* 1605 ********************************************************************** 1606 ********************************************************************** 1607 */ 1608 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb) 1609 { 1610 u_int32_t outbound_doorbell; 1611 /* 1612 ********************************************* 1613 ** check outbound intstatus 1614 ********************************************* 1615 */ 1616 outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable; 1617 if(!outbound_doorbell) { 1618 /*it must be share irq*/ 1619 return; 1620 } 1621 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */ 1622 CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell); 1623 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 1624 /* MU ioctl transfer doorbell interrupts*/ 1625 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) { 1626 arcmsr_iop2drv_data_wrote_handle(acb); 1627 } 1628 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) { 1629 arcmsr_iop2drv_data_read_handle(acb); 1630 } 1631 /* MU post queue interrupts*/ 1632 if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) { 1633 arcmsr_hbb_postqueue_isr(acb); 1634 } 1635 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) { 1636 arcmsr_hbb_message_isr(acb); 1637 } 1638 return; 1639 } 1640 /* 1641 ********************************************************************** 1642 ********************************************************************** 1643 */ 1644 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb) 1645 { 1646 u_int32_t host_interrupt_status; 1647 /* 1648 ********************************************* 1649 ** check outbound intstatus 1650 ********************************************* 1651 */ 1652 
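/*
** Note on the Type C (ARC1880) interrupt path: the handler below samples
** host_int_status once and dispatches doorbell and post-queue work from that
** snapshot.  The post-queue ISR above drains outbound_queueport_low while the
** post-queue bit stays asserted, but after ARCMSR_HBC_ISR_THROTTLING_LEVEL
** replies in one pass it writes ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING to
** the inbound doorbell and breaks out, presumably to bound the time spent in
** interrupt context; any remaining replies are picked up on the next
** interrupt.  The reply word carries the SRB frame address with the error
** flag in the ARCMSR_SRBREPLY_FLAG_ERROR_MODE1 bit (compare the 0xFFFFFFF0
** mask used in the polling path).
*/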
host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1653     if(!host_interrupt_status) {
1654         /*it must be a shared irq*/
1655         return;
1656     }
1657     /* MU doorbell interrupts*/
1658     if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1659         arcmsr_hbc_doorbell_isr(acb);
1660     }
1661     /* MU post queue interrupts*/
1662     if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1663         arcmsr_hbc_postqueue_isr(acb);
1664     }
1665     return;
1666 }
1667 /*
1668 ******************************************************************************
1669 ******************************************************************************
1670 */
1671 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1672 {
1673     switch (acb->adapter_type) {
1674     case ACB_ADAPTER_TYPE_A:
1675         arcmsr_handle_hba_isr(acb);
1676         break;
1677     case ACB_ADAPTER_TYPE_B:
1678         arcmsr_handle_hbb_isr(acb);
1679         break;
1680     case ACB_ADAPTER_TYPE_C:
1681         arcmsr_handle_hbc_isr(acb);
1682         break;
1683     default:
1684         kprintf("arcmsr%d: interrupt service,"
1685         " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1686         break;
1687     }
1688     return;
1689 }
1690 /*
1691 **********************************************************************
1692 **********************************************************************
1693 */
1694 static void arcmsr_intr_handler(void *arg)
1695 {
1696     struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1697 
1698     ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1699     arcmsr_interrupt(acb);
1700     ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1701 }
1702 /*
1703 ******************************************************************************
1704 ******************************************************************************
1705 */
1706 static void arcmsr_polling_devmap(void* arg)
1707 {
1708     struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1709     switch (acb->adapter_type) {
1710     case ACB_ADAPTER_TYPE_A:
1711         CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1712         break;
1713 
1714     case ACB_ADAPTER_TYPE_B:
1715         CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1716         break;
1717 
1718     case ACB_ADAPTER_TYPE_C:
1719         CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1720         CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1721         break;
1722     }
1723 
1724     if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1725     {
1726         callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb);    /* polling per 5 seconds */
1727     }
1728 }
1729 
1730 /*
1731 *******************************************************************************
1732 **
1733 *******************************************************************************
1734 */
1735 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1736 {
1737     u_int32_t intmask_org;
1738 
1739     if(acb!=NULL) {
1740         /* stop adapter background rebuild */
1741         if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1742             intmask_org = arcmsr_disable_allintr(acb);
1743             arcmsr_stop_adapter_bgrb(acb);
1744             arcmsr_flush_adapter_cache(acb);
1745             arcmsr_enable_allintr(acb, intmask_org);
1746         }
1747     }
1748 }
1749 /*
1750 ***********************************************************************
1751 **
1752 ************************************************************************
1753 */
1754 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1755 {
1756     struct CMD_MESSAGE_FIELD * pcmdmessagefld;
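/*
** Sketch of the ioctl ring bookkeeping used below (derived from this code,
** not from separate documentation): rqbuffer and wqbuffer are byte rings of
** ARCMSR_MAX_QBUFFER entries, with firstindex as the consumer index and
** lastindex as the producer index, both wrapping modulo ARCMSR_MAX_QBUFFER.
** Free space in the write ring is computed as
**     (wqbuf_firstindex - wqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1)
** which assumes ARCMSR_MAX_QBUFFER is a power of two; with the ring empty
** (firstindex == lastindex) it yields ARCMSR_MAX_QBUFFER - 1, so one byte is
** always sacrificed to tell "full" from "empty".  The 1031-byte cap in the
** READ_RQBUFFER loop presumably matches the size of messagedatabuffer.
*/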
1757     u_int32_t retvalue=EINVAL;
1758 
1759     pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
1760     if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1761         return retvalue;
1762     }
1763     ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1764     switch(ioctl_cmd) {
1765     case ARCMSR_MESSAGE_READ_RQBUFFER: {
1766             u_int8_t * pQbuffer;
1767             u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1768             u_int32_t allxfer_len=0;
1769 
1770             while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1771                 && (allxfer_len<1031)) {
1772                 /*copy READ QBUFFER to srb*/
1773                 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1774                 memcpy(ptmpQbuffer, pQbuffer, 1);
1775                 acb->rqbuf_firstindex++;
1776                 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1777                 /*if last index number set it to 0 */
1778                 ptmpQbuffer++;
1779                 allxfer_len++;
1780             }
1781             if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1782                 struct QBUFFER * prbuffer;
1783                 u_int8_t * iop_data;
1784                 u_int32_t iop_len;
1785 
1786                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1787                 prbuffer=arcmsr_get_iop_rqbuffer(acb);
1788                 iop_data=(u_int8_t *)prbuffer->data;
1789                 iop_len=(u_int32_t)prbuffer->data_len;
1790                 /*this IOP data has no chance to overflow the ring again here, so just copy it*/
1791                 while(iop_len>0) {
1792                     pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1793                     memcpy(pQbuffer, iop_data, 1);
1794                     acb->rqbuf_lastindex++;
1795                     acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1796                     /*if last index number set it to 0 */
1797                     iop_data++;
1798                     iop_len--;
1799                 }
1800                 arcmsr_iop_message_read(acb);
1801                 /*signature, let IOP know data has been read */
1802             }
1803             pcmdmessagefld->cmdmessage.Length=allxfer_len;
1804             pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1805             retvalue=ARCMSR_MESSAGE_SUCCESS;
1806         }
1807         break;
1808     case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1809             u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1810             u_int8_t * pQbuffer;
1811             u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1812 
1813             user_len=pcmdmessagefld->cmdmessage.Length;
1814             /*check if the data transfer length of this request would overflow the wqbuffer array */
1815             wqbuf_lastindex=acb->wqbuf_lastindex;
1816             wqbuf_firstindex=acb->wqbuf_firstindex;
1817             if(wqbuf_lastindex!=wqbuf_firstindex) {
1818                 arcmsr_post_ioctldata2iop(acb);
1819                 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1820             } else {
1821                 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1822                 if(my_empty_len>=user_len) {
1823                     while(user_len>0) {
1824                         /*copy srb data to wqbuffer*/
1825                         pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1826                         memcpy(pQbuffer, ptmpuserbuffer, 1);
1827                         acb->wqbuf_lastindex++;
1828                         acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1829                         /*if last index number set it to 0 */
1830                         ptmpuserbuffer++;
1831                         user_len--;
1832                     }
1833                     /*post first Qbuffer*/
1834                     if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1835                         acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1836                         arcmsr_post_ioctldata2iop(acb);
1837                     }
1838                     pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1839                 } else {
1840                     pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1841                 }
1842             }
1843             retvalue=ARCMSR_MESSAGE_SUCCESS;
1844         }
1845         break;
1846     case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1847             u_int8_t * pQbuffer=acb->rqbuffer;
1848 
1849             if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1850                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1851                 arcmsr_iop_message_read(acb);
1852                 /*signature, let IOP know data has been read */
1853             }
1854             acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
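            /*
            ** Resetting both indices and zeroing the ring below simply
            ** discards anything still queued for the ioctl reader; the
            ** CLEAR_WQBUFFER and CLEAR_ALLQBUFFER cases repeat the same
            ** pattern for the write ring.
            */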
1855             acb->rqbuf_firstindex=0;
1856             acb->rqbuf_lastindex=0;
1857             memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1858             pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1859             retvalue=ARCMSR_MESSAGE_SUCCESS;
1860         }
1861         break;
1862     case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1863         {
1864             u_int8_t * pQbuffer=acb->wqbuffer;
1865 
1866             if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1867                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1868                 arcmsr_iop_message_read(acb);
1869                 /*signature, let IOP know data has been read */
1870             }
1871             acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1872             acb->wqbuf_firstindex=0;
1873             acb->wqbuf_lastindex=0;
1874             memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1875             pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1876             retvalue=ARCMSR_MESSAGE_SUCCESS;
1877         }
1878         break;
1879     case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1880             u_int8_t * pQbuffer;
1881 
1882             if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1883                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1884                 arcmsr_iop_message_read(acb);
1885                 /*signature, let IOP know data has been read */
1886             }
1887             acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1888                 |ACB_F_MESSAGE_RQBUFFER_CLEARED
1889                 |ACB_F_MESSAGE_WQBUFFER_READ);
1890             acb->rqbuf_firstindex=0;
1891             acb->rqbuf_lastindex=0;
1892             acb->wqbuf_firstindex=0;
1893             acb->wqbuf_lastindex=0;
1894             pQbuffer=acb->rqbuffer;
1895             memset(pQbuffer, 0, sizeof(struct QBUFFER));
1896             pQbuffer=acb->wqbuffer;
1897             memset(pQbuffer, 0, sizeof(struct QBUFFER));
1898             pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1899             retvalue=ARCMSR_MESSAGE_SUCCESS;
1900         }
1901         break;
1902     case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1903             pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1904             retvalue=ARCMSR_MESSAGE_SUCCESS;
1905         }
1906         break;
1907     case ARCMSR_MESSAGE_SAY_HELLO: {
1908             u_int8_t * hello_string="Hello! 
I am ARCMSR"; 1909 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer; 1910 1911 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) { 1912 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR; 1913 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1914 return ENOIOCTL; 1915 } 1916 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK; 1917 retvalue=ARCMSR_MESSAGE_SUCCESS; 1918 } 1919 break; 1920 case ARCMSR_MESSAGE_SAY_GOODBYE: { 1921 arcmsr_iop_parking(acb); 1922 retvalue=ARCMSR_MESSAGE_SUCCESS; 1923 } 1924 break; 1925 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: { 1926 arcmsr_flush_adapter_cache(acb); 1927 retvalue=ARCMSR_MESSAGE_SUCCESS; 1928 } 1929 break; 1930 } 1931 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1932 return retvalue; 1933 } 1934 /* 1935 ************************************************************************** 1936 ************************************************************************** 1937 */ 1938 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb) 1939 { 1940 struct CommandControlBlock *srb=NULL; 1941 u_int32_t workingsrb_startindex, workingsrb_doneindex; 1942 1943 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 1944 workingsrb_doneindex=acb->workingsrb_doneindex; 1945 workingsrb_startindex=acb->workingsrb_startindex; 1946 srb=acb->srbworkingQ[workingsrb_startindex]; 1947 workingsrb_startindex++; 1948 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; 1949 if(workingsrb_doneindex!=workingsrb_startindex) { 1950 acb->workingsrb_startindex=workingsrb_startindex; 1951 } else { 1952 srb=NULL; 1953 } 1954 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 1955 return(srb); 1956 } 1957 /* 1958 ************************************************************************** 1959 ************************************************************************** 1960 */ 1961 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb) 1962 { 1963 struct CMD_MESSAGE_FIELD * pcmdmessagefld; 1964 int retvalue = 0, transfer_len = 0; 1965 char *buffer; 1966 u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 | 1967 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 | 1968 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 | 1969 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8]; 1970 /* 4 bytes: Areca io control code */ 1971 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1972 buffer = pccb->csio.data_ptr; 1973 transfer_len = pccb->csio.dxfer_len; 1974 } else { 1975 retvalue = ARCMSR_MESSAGE_FAIL; 1976 goto message_out; 1977 } 1978 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 1979 retvalue = ARCMSR_MESSAGE_FAIL; 1980 goto message_out; 1981 } 1982 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer; 1983 switch(controlcode) { 1984 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1985 u_int8_t *pQbuffer; 1986 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer; 1987 int32_t allxfer_len = 0; 1988 1989 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1990 && (allxfer_len < 1031)) { 1991 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1992 memcpy(ptmpQbuffer, pQbuffer, 1); 1993 acb->rqbuf_firstindex++; 1994 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; 1995 ptmpQbuffer++; 1996 allxfer_len++; 1997 } 1998 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 1999 struct QBUFFER *prbuffer; 2000 u_int8_t *iop_data; 2001 int32_t iop_len; 2002 2003 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2004 prbuffer=arcmsr_get_iop_rqbuffer(acb); 2005 iop_data = (u_int8_t *)prbuffer->data; 2006 iop_len 
=(u_int32_t)prbuffer->data_len; 2007 while (iop_len > 0) { 2008 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex]; 2009 memcpy(pQbuffer, iop_data, 1); 2010 acb->rqbuf_lastindex++; 2011 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 2012 iop_data++; 2013 iop_len--; 2014 } 2015 arcmsr_iop_message_read(acb); 2016 } 2017 pcmdmessagefld->cmdmessage.Length = allxfer_len; 2018 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2019 retvalue=ARCMSR_MESSAGE_SUCCESS; 2020 } 2021 break; 2022 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 2023 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 2024 u_int8_t *pQbuffer; 2025 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer; 2026 2027 user_len = pcmdmessagefld->cmdmessage.Length; 2028 wqbuf_lastindex = acb->wqbuf_lastindex; 2029 wqbuf_firstindex = acb->wqbuf_firstindex; 2030 if (wqbuf_lastindex != wqbuf_firstindex) { 2031 arcmsr_post_ioctldata2iop(acb); 2032 /* has error report sensedata */ 2033 if(&pccb->csio.sense_data) { 2034 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 2035 /* Valid,ErrorCode */ 2036 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 2037 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 2038 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 2039 /* AdditionalSenseLength */ 2040 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 2041 /* AdditionalSenseCode */ 2042 } 2043 retvalue = ARCMSR_MESSAGE_FAIL; 2044 } else { 2045 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1) 2046 &(ARCMSR_MAX_QBUFFER - 1); 2047 if (my_empty_len >= user_len) { 2048 while (user_len > 0) { 2049 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex]; 2050 memcpy(pQbuffer, ptmpuserbuffer, 1); 2051 acb->wqbuf_lastindex++; 2052 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER; 2053 ptmpuserbuffer++; 2054 user_len--; 2055 } 2056 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) { 2057 acb->acb_flags &= 2058 ~ACB_F_MESSAGE_WQBUFFER_CLEARED; 2059 arcmsr_post_ioctldata2iop(acb); 2060 } 2061 } else { 2062 /* has error report sensedata */ 2063 if(&pccb->csio.sense_data) { 2064 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); 2065 /* Valid,ErrorCode */ 2066 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05; 2067 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */ 2068 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A; 2069 /* AdditionalSenseLength */ 2070 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20; 2071 /* AdditionalSenseCode */ 2072 } 2073 retvalue = ARCMSR_MESSAGE_FAIL; 2074 } 2075 } 2076 } 2077 break; 2078 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: { 2079 u_int8_t *pQbuffer = acb->rqbuffer; 2080 2081 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2082 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2083 arcmsr_iop_message_read(acb); 2084 } 2085 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED; 2086 acb->rqbuf_firstindex = 0; 2087 acb->rqbuf_lastindex = 0; 2088 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 2089 pcmdmessagefld->cmdmessage.ReturnCode = 2090 ARCMSR_MESSAGE_RETURNCODE_OK; 2091 } 2092 break; 2093 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: { 2094 u_int8_t *pQbuffer = acb->wqbuffer; 2095 2096 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2097 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2098 arcmsr_iop_message_read(acb); 2099 } 2100 acb->acb_flags |= 2101 (ACB_F_MESSAGE_WQBUFFER_CLEARED | 2102 ACB_F_MESSAGE_WQBUFFER_READ); 2103 acb->wqbuf_firstindex = 0; 2104 acb->wqbuf_lastindex = 0; 2105 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER); 2106 pcmdmessagefld->cmdmessage.ReturnCode = 2107 ARCMSR_MESSAGE_RETURNCODE_OK; 2108 } 2109 
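    /*
    ** Note on the sense data fabricated in the WRITE_WQBUFFER case above:
    ** byte 0 = 0xF0 (current error, valid bit set), byte 2 = 0x05
    ** (ILLEGAL REQUEST), byte 7 = 0x0A (additional sense length) and
    ** byte 12 = 0x20 (ASC: invalid command operation code).  The
    ** "if(&pccb->csio.sense_data)" guard takes the address of an embedded
    ** member and is therefore always true, so the sense bytes are filled in
    ** unconditionally on that error path.
    */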
break; 2110 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: { 2111 u_int8_t *pQbuffer; 2112 2113 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) { 2114 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW; 2115 arcmsr_iop_message_read(acb); 2116 } 2117 acb->acb_flags |= 2118 (ACB_F_MESSAGE_WQBUFFER_CLEARED 2119 | ACB_F_MESSAGE_RQBUFFER_CLEARED 2120 | ACB_F_MESSAGE_WQBUFFER_READ); 2121 acb->rqbuf_firstindex = 0; 2122 acb->rqbuf_lastindex = 0; 2123 acb->wqbuf_firstindex = 0; 2124 acb->wqbuf_lastindex = 0; 2125 pQbuffer = acb->rqbuffer; 2126 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 2127 pQbuffer = acb->wqbuffer; 2128 memset(pQbuffer, 0, sizeof (struct QBUFFER)); 2129 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2130 } 2131 break; 2132 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: { 2133 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F; 2134 } 2135 break; 2136 case ARCMSR_MESSAGE_SAY_HELLO: { 2137 int8_t * hello_string = "Hello! I am ARCMSR"; 2138 2139 memcpy(pcmdmessagefld->messagedatabuffer, hello_string 2140 , (int16_t)strlen(hello_string)); 2141 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 2142 } 2143 break; 2144 case ARCMSR_MESSAGE_SAY_GOODBYE: 2145 arcmsr_iop_parking(acb); 2146 break; 2147 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: 2148 arcmsr_flush_adapter_cache(acb); 2149 break; 2150 default: 2151 retvalue = ARCMSR_MESSAGE_FAIL; 2152 } 2153 message_out: 2154 return retvalue; 2155 } 2156 /* 2157 ********************************************************************* 2158 ********************************************************************* 2159 */ 2160 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2161 { 2162 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg; 2163 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb; 2164 union ccb * pccb; 2165 int target, lun; 2166 2167 pccb=srb->pccb; 2168 target=pccb->ccb_h.target_id; 2169 lun=pccb->ccb_h.target_lun; 2170 if(error != 0) { 2171 if(error != EFBIG) { 2172 kprintf("arcmsr%d: unexpected error %x" 2173 " returned from 'bus_dmamap_load' \n" 2174 , acb->pci_unit, error); 2175 } 2176 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 2177 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 2178 } 2179 arcmsr_srb_complete(srb, 0); 2180 return; 2181 } 2182 if(nseg > ARCMSR_MAX_SG_ENTRIES) { 2183 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; 2184 arcmsr_srb_complete(srb, 0); 2185 return; 2186 } 2187 if(acb->acb_flags & ACB_F_BUS_RESET) { 2188 kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit); 2189 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET; 2190 arcmsr_srb_complete(srb, 0); 2191 return; 2192 } 2193 if(acb->devstate[target][lun]==ARECA_RAID_GONE) { 2194 u_int8_t block_cmd; 2195 2196 block_cmd=pccb->csio.cdb_io.cdb_bytes[0] & 0x0f; 2197 if(block_cmd==0x08 || block_cmd==0x0a) { 2198 kprintf("arcmsr%d:block 'read/write' command " 2199 "with gone raid volume Cmd=%2x, TargetId=%d, Lun=%d \n" 2200 , acb->pci_unit, block_cmd, target, lun); 2201 pccb->ccb_h.status |= CAM_DEV_NOT_THERE; 2202 arcmsr_srb_complete(srb, 0); 2203 return; 2204 } 2205 } 2206 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2207 if(nseg != 0) { 2208 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap); 2209 } 2210 arcmsr_srb_complete(srb, 0); 2211 return; 2212 } 2213 if(acb->srboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) { 2214 xpt_freeze_simq(acb->psim, 1); 2215 pccb->ccb_h.status = CAM_REQUEUE_REQ; 2216 acb->acb_flags |= 
ACB_F_CAM_DEV_QFRZN; 2217 arcmsr_srb_complete(srb, 0); 2218 return; 2219 } 2220 pccb->ccb_h.status |= CAM_SIM_QUEUED; 2221 arcmsr_build_srb(srb, dm_segs, nseg); 2222 /* if (pccb->ccb_h.timeout != CAM_TIME_INFINITY) 2223 callout_reset(&srb->ccb_callout, (pccb->ccb_h.timeout * hz) / 1000, arcmsr_srb_timeout, srb); 2224 */ 2225 arcmsr_post_srb(acb, srb); 2226 return; 2227 } 2228 /* 2229 ***************************************************************************************** 2230 ***************************************************************************************** 2231 */ 2232 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb) 2233 { 2234 struct CommandControlBlock *srb; 2235 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr; 2236 u_int32_t intmask_org; 2237 int i=0; 2238 2239 acb->num_aborts++; 2240 /* 2241 *************************************************************************** 2242 ** It is the upper layer do abort command this lock just prior to calling us. 2243 ** First determine if we currently own this command. 2244 ** Start by searching the device queue. If not found 2245 ** at all, and the system wanted us to just abort the 2246 ** command return success. 2247 *************************************************************************** 2248 */ 2249 if(acb->srboutstandingcount!=0) { 2250 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 2251 srb=acb->psrb_pool[i]; 2252 if(srb->startdone==ARCMSR_SRB_START) { 2253 if(srb->pccb==abortccb) { 2254 srb->startdone=ARCMSR_SRB_ABORTED; 2255 kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'" 2256 "outstanding command \n" 2257 , acb->pci_unit, abortccb->ccb_h.target_id 2258 , abortccb->ccb_h.target_lun, srb); 2259 goto abort_outstanding_cmd; 2260 } 2261 } 2262 } 2263 } 2264 return(FALSE); 2265 abort_outstanding_cmd: 2266 /* disable all outbound interrupt */ 2267 intmask_org=arcmsr_disable_allintr(acb); 2268 arcmsr_polling_srbdone(acb, srb); 2269 /* enable outbound Post Queue, outbound doorbell Interrupt */ 2270 arcmsr_enable_allintr(acb, intmask_org); 2271 return (TRUE); 2272 } 2273 /* 2274 **************************************************************************** 2275 **************************************************************************** 2276 */ 2277 static void arcmsr_bus_reset(struct AdapterControlBlock *acb) 2278 { 2279 int retry=0; 2280 2281 acb->num_resets++; 2282 acb->acb_flags |=ACB_F_BUS_RESET; 2283 while(acb->srboutstandingcount!=0 && retry < 400) { 2284 arcmsr_interrupt(acb); 2285 UDELAY(25000); 2286 retry++; 2287 } 2288 arcmsr_iop_reset(acb); 2289 acb->acb_flags &= ~ACB_F_BUS_RESET; 2290 return; 2291 } 2292 /* 2293 ************************************************************************** 2294 ************************************************************************** 2295 */ 2296 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, 2297 union ccb * pccb) 2298 { 2299 pccb->ccb_h.status |= CAM_REQ_CMP; 2300 switch (pccb->csio.cdb_io.cdb_bytes[0]) { 2301 case INQUIRY: { 2302 unsigned char inqdata[36]; 2303 char *buffer=pccb->csio.data_ptr; 2304 2305 if (pccb->ccb_h.target_lun) { 2306 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; 2307 xpt_done(pccb); 2308 return; 2309 } 2310 inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */ 2311 inqdata[1] = 0; /* rem media bit & Dev Type Modifier */ 2312 inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */ 2313 inqdata[3] = 0; 2314 inqdata[4] = 31; /* length of additional data */ 2315 inqdata[5] = 0; 2316 inqdata[6] 
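        /*
        ** The virtual device at target id 16 answers INQUIRY with a plain
        ** 36-byte T_PROCESSOR descriptor (additional length 31 -> 36 bytes
        ** total) carrying the "Areca"/"RAID controller" strings below; only
        ** LUN 0 responds, any other LUN is completed with CAM_SEL_TIMEOUT
        ** above.
        */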
= 0; 2317 inqdata[7] = 0; 2318 strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */ 2319 strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */ 2320 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 2321 memcpy(buffer, inqdata, sizeof(inqdata)); 2322 xpt_done(pccb); 2323 } 2324 break; 2325 case WRITE_BUFFER: 2326 case READ_BUFFER: { 2327 if (arcmsr_iop_message_xfer(acb, pccb)) { 2328 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 2329 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 2330 } 2331 xpt_done(pccb); 2332 } 2333 break; 2334 default: 2335 xpt_done(pccb); 2336 } 2337 } 2338 /* 2339 ********************************************************************* 2340 ********************************************************************* 2341 */ 2342 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb) 2343 { 2344 struct AdapterControlBlock * acb; 2345 2346 acb=(struct AdapterControlBlock *) cam_sim_softc(psim); 2347 if(acb==NULL) { 2348 pccb->ccb_h.status |= CAM_REQ_INVALID; 2349 xpt_done(pccb); 2350 return; 2351 } 2352 switch (pccb->ccb_h.func_code) { 2353 case XPT_SCSI_IO: { 2354 struct CommandControlBlock *srb; 2355 int target=pccb->ccb_h.target_id; 2356 2357 if(target == 16) { 2358 /* virtual device for iop message transfer */ 2359 arcmsr_handle_virtual_command(acb, pccb); 2360 return; 2361 } 2362 if((srb=arcmsr_get_freesrb(acb)) == NULL) { 2363 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; 2364 xpt_done(pccb); 2365 return; 2366 } 2367 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb; 2368 pccb->ccb_h.arcmsr_ccbacb_ptr=acb; 2369 srb->pccb=pccb; 2370 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2371 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) { 2372 /* Single buffer */ 2373 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) { 2374 /* Buffer is virtual */ 2375 u_int32_t error; 2376 2377 crit_enter(); 2378 error = bus_dmamap_load(acb->dm_segs_dmat 2379 , srb->dm_segs_dmamap 2380 , pccb->csio.data_ptr 2381 , pccb->csio.dxfer_len 2382 , arcmsr_execute_srb, srb, /*flags*/0); 2383 if(error == EINPROGRESS) { 2384 xpt_freeze_simq(acb->psim, 1); 2385 pccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2386 } 2387 crit_exit(); 2388 } 2389 else { /* Buffer is physical */ 2390 struct bus_dma_segment seg; 2391 2392 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr; 2393 seg.ds_len = pccb->csio.dxfer_len; 2394 arcmsr_execute_srb(srb, &seg, 1, 0); 2395 } 2396 } else { 2397 /* Scatter/gather list */ 2398 struct bus_dma_segment *segs; 2399 2400 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 2401 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2402 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; 2403 xpt_done(pccb); 2404 kfree(srb, M_DEVBUF); 2405 return; 2406 } 2407 segs=(struct bus_dma_segment *)pccb->csio.data_ptr; 2408 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0); 2409 } 2410 } else { 2411 arcmsr_execute_srb(srb, NULL, 0, 0); 2412 } 2413 break; 2414 } 2415 case XPT_TARGET_IO: { 2416 /* target mode not yet support vendor specific commands. 
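        Since target mode is not supported, the CCB is simply completed with
        CAM_REQ_CMP below.  For the XPT_SCSI_IO case above, data is mapped one
        of three ways: a virtual buffer goes through bus_dmamap_load()
        (EINPROGRESS freezes the SIM queue until the arcmsr_execute_srb
        callback runs), a physical buffer is wrapped in a single
        bus_dma_segment, and a scatter/gather list is accepted only when it is
        already a physical list.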
*/ 2417 pccb->ccb_h.status |= CAM_REQ_CMP; 2418 xpt_done(pccb); 2419 break; 2420 } 2421 case XPT_PATH_INQ: { 2422 struct ccb_pathinq *cpi= &pccb->cpi; 2423 2424 cpi->version_num=1; 2425 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE; 2426 cpi->target_sprt=0; 2427 cpi->hba_misc=0; 2428 cpi->hba_eng_cnt=0; 2429 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */ 2430 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */ 2431 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */ 2432 cpi->bus_id=cam_sim_bus(psim); 2433 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2434 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN); 2435 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN); 2436 cpi->unit_number=cam_sim_unit(psim); 2437 #ifdef CAM_NEW_TRAN_CODE 2438 cpi->transport = XPORT_SPI; 2439 cpi->transport_version = 2; 2440 cpi->protocol = PROTO_SCSI; 2441 cpi->protocol_version = SCSI_REV_2; 2442 #endif 2443 cpi->ccb_h.status |= CAM_REQ_CMP; 2444 xpt_done(pccb); 2445 break; 2446 } 2447 case XPT_ABORT: { 2448 union ccb *pabort_ccb; 2449 2450 pabort_ccb=pccb->cab.abort_ccb; 2451 switch (pabort_ccb->ccb_h.func_code) { 2452 case XPT_ACCEPT_TARGET_IO: 2453 case XPT_IMMED_NOTIFY: 2454 case XPT_CONT_TARGET_IO: 2455 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) { 2456 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED; 2457 xpt_done(pabort_ccb); 2458 pccb->ccb_h.status |= CAM_REQ_CMP; 2459 } else { 2460 xpt_print_path(pabort_ccb->ccb_h.path); 2461 kprintf("Not found\n"); 2462 pccb->ccb_h.status |= CAM_PATH_INVALID; 2463 } 2464 break; 2465 case XPT_SCSI_IO: 2466 pccb->ccb_h.status |= CAM_UA_ABORT; 2467 break; 2468 default: 2469 pccb->ccb_h.status |= CAM_REQ_INVALID; 2470 break; 2471 } 2472 xpt_done(pccb); 2473 break; 2474 } 2475 case XPT_RESET_BUS: 2476 case XPT_RESET_DEV: { 2477 u_int32_t i; 2478 2479 arcmsr_bus_reset(acb); 2480 for (i=0; i < 500; i++) { 2481 DELAY(1000); 2482 } 2483 pccb->ccb_h.status |= CAM_REQ_CMP; 2484 xpt_done(pccb); 2485 break; 2486 } 2487 case XPT_TERM_IO: { 2488 pccb->ccb_h.status |= CAM_REQ_INVALID; 2489 xpt_done(pccb); 2490 break; 2491 } 2492 case XPT_GET_TRAN_SETTINGS: { 2493 struct ccb_trans_settings *cts; 2494 2495 if(pccb->ccb_h.target_id == 16) { 2496 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 2497 xpt_done(pccb); 2498 break; 2499 } 2500 cts= &pccb->cts; 2501 #ifdef CAM_NEW_TRAN_CODE 2502 { 2503 struct ccb_trans_settings_scsi *scsi; 2504 struct ccb_trans_settings_spi *spi; 2505 2506 scsi = &cts->proto_specific.scsi; 2507 spi = &cts->xport_specific.spi; 2508 cts->protocol = PROTO_SCSI; 2509 cts->protocol_version = SCSI_REV_2; 2510 cts->transport = XPORT_SPI; 2511 cts->transport_version = 2; 2512 spi->flags = CTS_SPI_FLAGS_DISC_ENB; 2513 spi->sync_period=3; 2514 spi->sync_offset=32; 2515 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 2516 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 2517 spi->valid = CTS_SPI_VALID_DISC 2518 | CTS_SPI_VALID_SYNC_RATE 2519 | CTS_SPI_VALID_SYNC_OFFSET 2520 | CTS_SPI_VALID_BUS_WIDTH; 2521 scsi->valid = CTS_SCSI_VALID_TQ; 2522 } 2523 #else 2524 { 2525 cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB); 2526 cts->sync_period=3; 2527 cts->sync_offset=32; 2528 cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT; 2529 cts->valid=CCB_TRANS_SYNC_RATE_VALID | 2530 CCB_TRANS_SYNC_OFFSET_VALID | 2531 CCB_TRANS_BUS_WIDTH_VALID | 2532 CCB_TRANS_DISC_VALID | 2533 CCB_TRANS_TQ_VALID; 2534 } 2535 #endif 2536 pccb->ccb_h.status |= CAM_REQ_CMP; 2537 xpt_done(pccb); 2538 break; 2539 } 2540 case XPT_SET_TRAN_SETTINGS: { 2541 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; 2542 xpt_done(pccb); 2543 break; 2544 } 2545 case 
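    /*
    ** Illustrative numbers for the translated geometry computed in the next
    ** case (example values, not taken from hardware): with 512-byte blocks,
    ** (1024*1024)/block_size is 2048 blocks per MB, so a 4194304-block
    ** (2 GiB) volume gives size_mb = 2048 > 1024 and therefore 255 heads and
    ** 63 sectors per track; cylinders = 4194304 / (255*63) = 261.  The
    ** "(1024L*1024L)/block_size < 0" guard below can never be true for an
    ** unsigned block size and appears to be a leftover sanity check.
    */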
XPT_CALC_GEOMETRY: {
2546         struct ccb_calc_geometry *ccg;
2547         u_int32_t size_mb;
2548         u_int32_t secs_per_cylinder;
2549 
2550         if(pccb->ccb_h.target_id == 16) {
2551             pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2552             xpt_done(pccb);
2553             break;
2554         }
2555         ccg= &pccb->ccg;
2556         if (ccg->block_size == 0) {
2557             pccb->ccb_h.status = CAM_REQ_INVALID;
2558             xpt_done(pccb);
2559             break;
2560         }
2561         if(((1024L * 1024L)/ccg->block_size) < 0) {
2562             pccb->ccb_h.status = CAM_REQ_INVALID;
2563             xpt_done(pccb);
2564             break;
2565         }
2566         size_mb=ccg->volume_size/((1024L * 1024L)/ccg->block_size);
2567         if(size_mb > 1024 ) {
2568             ccg->heads=255;
2569             ccg->secs_per_track=63;
2570         } else {
2571             ccg->heads=64;
2572             ccg->secs_per_track=32;
2573         }
2574         secs_per_cylinder=ccg->heads * ccg->secs_per_track;
2575         ccg->cylinders=ccg->volume_size / secs_per_cylinder;
2576         pccb->ccb_h.status |= CAM_REQ_CMP;
2577         xpt_done(pccb);
2578         break;
2579     }
2580     default:
2581         pccb->ccb_h.status |= CAM_REQ_INVALID;
2582         xpt_done(pccb);
2583         break;
2584     }
2585     return;
2586 }
2587 /*
2588 **********************************************************************
2589 **********************************************************************
2590 */
2591 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2592 {
2593     acb->acb_flags |= ACB_F_MSG_START_BGRB;
2594     CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2595     if(!arcmsr_hba_wait_msgint_ready(acb)) {
2596         kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2597     }
2598     return;
2599 }
2600 /*
2601 **********************************************************************
2602 **********************************************************************
2603 */
2604 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2605 {
2606     acb->acb_flags |= ACB_F_MSG_START_BGRB;
2607     CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
2608     if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2609         kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2610     }
2611     return;
2612 }
2613 /*
2614 **********************************************************************
2615 **********************************************************************
2616 */
2617 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2618 {
2619     acb->acb_flags |= ACB_F_MSG_START_BGRB;
2620     CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2621     CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2622     if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2623         kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2624     }
2625     return;
2626 }
2627 /*
2628 **********************************************************************
2629 **********************************************************************
2630 */
2631 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2632 {
2633     switch (acb->adapter_type) {
2634     case ACB_ADAPTER_TYPE_A:
2635         arcmsr_start_hba_bgrb(acb);
2636         break;
2637     case ACB_ADAPTER_TYPE_B:
2638         arcmsr_start_hbb_bgrb(acb);
2639         break;
2640     case ACB_ADAPTER_TYPE_C:
2641         arcmsr_start_hbc_bgrb(acb);
2642         break;
2643     }
2644     return;
2645 }
2646 /*
2647 **********************************************************************
2648 **
2649 **********************************************************************
2650 */
2651 static void arcmsr_polling_hba_srbdone(struct 
AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2652 { 2653 struct CommandControlBlock *srb; 2654 u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0; 2655 u_int16_t error; 2656 2657 polling_ccb_retry: 2658 poll_count++; 2659 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable; 2660 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/ 2661 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2662 while(1) { 2663 if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 2664 0, outbound_queueport))==0xFFFFFFFF) { 2665 if(poll_srb_done) { 2666 break;/*chip FIFO no ccb for completion already*/ 2667 } else { 2668 UDELAY(25000); 2669 if ((poll_count > 100) && (poll_srb != NULL)) { 2670 break; 2671 } 2672 goto polling_ccb_retry; 2673 } 2674 } 2675 /* check if command done with no error*/ 2676 srb=(struct CommandControlBlock *) 2677 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ 2678 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 2679 poll_srb_done = (srb==poll_srb) ? 1:0; 2680 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2681 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2682 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 2683 "poll command abort successfully \n" 2684 , acb->pci_unit 2685 , srb->pccb->ccb_h.target_id 2686 , srb->pccb->ccb_h.target_lun, srb); 2687 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2688 arcmsr_srb_complete(srb, 1); 2689 continue; 2690 } 2691 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'" 2692 "srboutstandingcount=%d \n" 2693 , acb->pci_unit 2694 , srb, acb->srboutstandingcount); 2695 continue; 2696 } 2697 arcmsr_report_srb_state(acb, srb, error); 2698 } /*drain reply FIFO*/ 2699 return; 2700 } 2701 /* 2702 ********************************************************************** 2703 ** 2704 ********************************************************************** 2705 */ 2706 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2707 { 2708 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu; 2709 struct CommandControlBlock *srb; 2710 u_int32_t flag_srb, poll_srb_done=0, poll_count=0; 2711 int index; 2712 u_int16_t error; 2713 2714 polling_ccb_retry: 2715 poll_count++; 2716 CHIP_REG_WRITE32(HBB_DOORBELL, 2717 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */ 2718 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2719 while(1) { 2720 index=phbbmu->doneq_index; 2721 if((flag_srb=phbbmu->done_qbuffer[index]) == 0) { 2722 if(poll_srb_done) { 2723 break;/*chip FIFO no ccb for completion already*/ 2724 } else { 2725 UDELAY(25000); 2726 if ((poll_count > 100) && (poll_srb != NULL)) { 2727 break; 2728 } 2729 goto polling_ccb_retry; 2730 } 2731 } 2732 phbbmu->done_qbuffer[index]=0; 2733 index++; 2734 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */ 2735 phbbmu->doneq_index=index; 2736 /* check if command done with no error*/ 2737 srb=(struct CommandControlBlock *) 2738 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/ 2739 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE; 2740 poll_srb_done = (srb==poll_srb) ? 
1:0; 2741 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2742 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2743 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'" 2744 "poll command abort successfully \n" 2745 , acb->pci_unit 2746 , srb->pccb->ccb_h.target_id 2747 , srb->pccb->ccb_h.target_lun, srb); 2748 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2749 arcmsr_srb_complete(srb, 1); 2750 continue; 2751 } 2752 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'" 2753 "srboutstandingcount=%d \n" 2754 , acb->pci_unit 2755 , srb, acb->srboutstandingcount); 2756 continue; 2757 } 2758 arcmsr_report_srb_state(acb, srb, error); 2759 } /*drain reply FIFO*/ 2760 return; 2761 } 2762 /* 2763 ********************************************************************** 2764 ** 2765 ********************************************************************** 2766 */ 2767 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2768 { 2769 struct CommandControlBlock *srb; 2770 u_int32_t flag_srb, poll_srb_done=0, poll_count=0; 2771 u_int16_t error; 2772 2773 polling_ccb_retry: 2774 poll_count++; 2775 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2776 while(1) { 2777 if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) { 2778 if(poll_srb_done) { 2779 break;/*chip FIFO no ccb for completion already*/ 2780 } else { 2781 UDELAY(25000); 2782 if ((poll_count > 100) && (poll_srb != NULL)) { 2783 break; 2784 } 2785 if (acb->srboutstandingcount == 0) { 2786 break; 2787 } 2788 goto polling_ccb_retry; 2789 } 2790 } 2791 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low); 2792 /* check if command done with no error*/ 2793 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFF0));/*frame must be 32 bytes aligned*/ 2794 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE; 2795 if (poll_srb != NULL) 2796 poll_srb_done = (srb==poll_srb) ? 
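        /*
        ** The three polling variants here run with the adapter interrupts
        ** masked (the abort path, for instance, calls arcmsr_disable_allintr()
        ** before arcmsr_polling_srbdone()); each pass that finds the reply
        ** FIFO empty waits 25 ms (UDELAY(25000)) and gives up after roughly
        ** 100 passes, i.e. about 2.5 seconds, when a specific poll_srb is
        ** being waited on.
        */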
1:0; 2797 if((srb->acb!=acb) || (srb->startdone!=ARCMSR_SRB_START)) { 2798 if(srb->startdone==ARCMSR_SRB_ABORTED) { 2799 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n" 2800 , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb); 2801 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 2802 arcmsr_srb_complete(srb, 1); 2803 continue; 2804 } 2805 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n" 2806 , acb->pci_unit, srb, acb->srboutstandingcount); 2807 continue; 2808 } 2809 arcmsr_report_srb_state(acb, srb, error); 2810 } /*drain reply FIFO*/ 2811 return; 2812 } 2813 /* 2814 ********************************************************************** 2815 ********************************************************************** 2816 */ 2817 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb) 2818 { 2819 switch (acb->adapter_type) { 2820 case ACB_ADAPTER_TYPE_A: { 2821 arcmsr_polling_hba_srbdone(acb, poll_srb); 2822 } 2823 break; 2824 case ACB_ADAPTER_TYPE_B: { 2825 arcmsr_polling_hbb_srbdone(acb, poll_srb); 2826 } 2827 break; 2828 case ACB_ADAPTER_TYPE_C: { 2829 arcmsr_polling_hbc_srbdone(acb, poll_srb); 2830 } 2831 break; 2832 } 2833 } 2834 /* 2835 ********************************************************************** 2836 ********************************************************************** 2837 */ 2838 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb) 2839 { 2840 char *acb_firm_model=acb->firm_model; 2841 char *acb_firm_version=acb->firm_version; 2842 char *acb_device_map = acb->device_map; 2843 size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2844 size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2845 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2846 int i; 2847 2848 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 2849 if(!arcmsr_hba_wait_msgint_ready(acb)) { 2850 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 2851 } 2852 i=0; 2853 while(i<8) { 2854 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 2855 /* 8 bytes firm_model, 15, 60-67*/ 2856 acb_firm_model++; 2857 i++; 2858 } 2859 i=0; 2860 while(i<16) { 2861 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 2862 /* 16 bytes firm_version, 17, 68-83*/ 2863 acb_firm_version++; 2864 i++; 2865 } 2866 i=0; 2867 while(i<16) { 2868 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 2869 acb_device_map++; 2870 i++; 2871 } 2872 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2873 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2874 acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2875 acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2876 acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2877 acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2878 
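/*
** Layout of the GET_CONFIG reply in msgcode_rwbuffer, as read above and
** below: word 1 = firm_request_len, word 2 = outstanding command queue depth,
** word 3 = SDRAM size, word 4 = device channel count, 8 bytes of model string
** starting at word ARCMSR_FW_MODEL_OFFSET (15), 16 bytes of firmware version
** starting at ARCMSR_FW_VERS_OFFSET (17), a 16-byte device map at
** ARCMSR_FW_DEVMAP_OFFSET, and the configuration version in word
** ARCMSR_FW_CFGVER_OFFSET (25).  The HBB and HBC variants read the same
** layout through their own register windows.
*/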
acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2879 return; 2880 } 2881 /* 2882 ********************************************************************** 2883 ********************************************************************** 2884 */ 2885 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb) 2886 { 2887 char *acb_firm_model=acb->firm_model; 2888 char *acb_firm_version=acb->firm_version; 2889 char *acb_device_map = acb->device_map; 2890 size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2891 size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2892 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2893 int i; 2894 2895 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG); 2896 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 2897 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit); 2898 } 2899 i=0; 2900 while(i<8) { 2901 *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i); 2902 /* 8 bytes firm_model, 15, 60-67*/ 2903 acb_firm_model++; 2904 i++; 2905 } 2906 i=0; 2907 while(i<16) { 2908 *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i); 2909 /* 16 bytes firm_version, 17, 68-83*/ 2910 acb_firm_version++; 2911 i++; 2912 } 2913 i=0; 2914 while(i<16) { 2915 *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i); 2916 acb_device_map++; 2917 i++; 2918 } 2919 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2920 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2921 acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2922 acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2923 acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2924 acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2925 acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2926 return; 2927 } 2928 /* 2929 ********************************************************************** 2930 ********************************************************************** 2931 */ 2932 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb) 2933 { 2934 char *acb_firm_model=acb->firm_model; 2935 char *acb_firm_version=acb->firm_version; 2936 char *acb_device_map = acb->device_map; 2937 size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/ 2938 size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/ 2939 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]); 2940 int i; 2941 2942 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG); 2943 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 2944 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 2945 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous 
data' timeout \n", acb->pci_unit); 2946 } 2947 i=0; 2948 while(i<8) { 2949 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i); 2950 /* 8 bytes firm_model, 15, 60-67*/ 2951 acb_firm_model++; 2952 i++; 2953 } 2954 i=0; 2955 while(i<16) { 2956 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i); 2957 /* 16 bytes firm_version, 17, 68-83*/ 2958 acb_firm_version++; 2959 i++; 2960 } 2961 i=0; 2962 while(i<16) { 2963 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i); 2964 acb_device_map++; 2965 i++; 2966 } 2967 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION); 2968 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version); 2969 acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/ 2970 acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/ 2971 acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/ 2972 acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/ 2973 acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */ 2974 return; 2975 } 2976 /* 2977 ********************************************************************** 2978 ********************************************************************** 2979 */ 2980 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb) 2981 { 2982 switch (acb->adapter_type) { 2983 case ACB_ADAPTER_TYPE_A: { 2984 arcmsr_get_hba_config(acb); 2985 } 2986 break; 2987 case ACB_ADAPTER_TYPE_B: { 2988 arcmsr_get_hbb_config(acb); 2989 } 2990 break; 2991 case ACB_ADAPTER_TYPE_C: { 2992 arcmsr_get_hbc_config(acb); 2993 } 2994 break; 2995 } 2996 return; 2997 } 2998 /* 2999 ********************************************************************** 3000 ********************************************************************** 3001 */ 3002 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb) 3003 { 3004 int timeout=0; 3005 3006 switch (acb->adapter_type) { 3007 case ACB_ADAPTER_TYPE_A: { 3008 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) 3009 { 3010 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3011 { 3012 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit); 3013 return; 3014 } 3015 UDELAY(15000); /* wait 15 milli-seconds */ 3016 } 3017 } 3018 break; 3019 case ACB_ADAPTER_TYPE_B: { 3020 while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0) 3021 { 3022 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3023 { 3024 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit); 3025 return; 3026 } 3027 UDELAY(15000); /* wait 15 milli-seconds */ 3028 } 3029 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT); 3030 } 3031 break; 3032 case ACB_ADAPTER_TYPE_C: { 3033 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0) 3034 { 3035 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */ 3036 { 3037 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit); 3038 return; 3039 } 3040 UDELAY(15000); /* wait 15 milli-seconds */ 3041 } 3042 } 3043 break; 3044 } 3045 return; 3046 } 3047 /* 
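** arcmsr_clear_doorbell_queue_buffer() below drains whatever doorbell state
** was left over from before the driver attached: it reads and acknowledges
** the outbound doorbell (Type C uses the separate outbound_doorbell_clear
** register) and then signals DATA_READ_OK back to the IOP, so the message
** rings start out empty before interrupts are enabled in arcmsr_iop_init().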
3048 ********************************************************************** 3049 ********************************************************************** 3050 */ 3051 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb) 3052 { 3053 u_int32_t outbound_doorbell; 3054 3055 switch (acb->adapter_type) { 3056 case ACB_ADAPTER_TYPE_A: { 3057 /* empty doorbell Qbuffer if door bell ringed */ 3058 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell); 3059 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */ 3060 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK); 3061 3062 } 3063 break; 3064 case ACB_ADAPTER_TYPE_B: { 3065 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/ 3066 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK); 3067 /* let IOP know data has been read */ 3068 } 3069 break; 3070 case ACB_ADAPTER_TYPE_C: { 3071 /* empty doorbell Qbuffer if door bell ringed */ 3072 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell); 3073 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */ 3074 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK); 3075 3076 } 3077 break; 3078 } 3079 return; 3080 } 3081 /* 3082 ************************************************************************ 3083 ************************************************************************ 3084 */ 3085 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb) 3086 { 3087 unsigned long srb_phyaddr; 3088 u_int32_t srb_phyaddr_hi32; 3089 3090 /* 3091 ******************************************************************** 3092 ** here we need to tell iop 331 our freesrb.HighPart 3093 ** if freesrb.HighPart is not zero 3094 ******************************************************************** 3095 */ 3096 srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr; 3097 // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16); 3098 srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high; 3099 switch (acb->adapter_type) { 3100 case ACB_ADAPTER_TYPE_A: { 3101 if(srb_phyaddr_hi32!=0) { 3102 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 3103 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 3104 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 3105 if(!arcmsr_hba_wait_msgint_ready(acb)) { 3106 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 3107 return FALSE; 3108 } 3109 } 3110 } 3111 break; 3112 /* 3113 *********************************************************************** 3114 ** if adapter type B, set window of "post command Q" 3115 *********************************************************************** 3116 */ 3117 case ACB_ADAPTER_TYPE_B: { 3118 u_int32_t post_queue_phyaddr; 3119 struct HBB_MessageUnit *phbbmu; 3120 3121 phbbmu=(struct HBB_MessageUnit *)acb->pmu; 3122 phbbmu->postq_index=0; 3123 phbbmu->doneq_index=0; 3124 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW); 3125 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3126 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit); 3127 return FALSE; 3128 } 3129 post_queue_phyaddr = srb_phyaddr + ARCMSR_MAX_FREESRB_NUM*sizeof(struct 
CommandControlBlock) 3130 + offsetof(struct HBB_MessageUnit, post_qbuffer); 3131 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */ 3132 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */ 3133 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */ 3134 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */ 3135 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */ 3136 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG); 3137 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3138 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit); 3139 return FALSE; 3140 } 3141 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE); 3142 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3143 kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit); 3144 return FALSE; 3145 } 3146 } 3147 break; 3148 case ACB_ADAPTER_TYPE_C: { 3149 if(srb_phyaddr_hi32!=0) { 3150 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); 3151 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32); 3152 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG); 3153 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE); 3154 if(!arcmsr_hbc_wait_msgint_ready(acb)) { 3155 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit); 3156 return FALSE; 3157 } 3158 } 3159 } 3160 break; 3161 } 3162 return TRUE; 3163 } 3164 /* 3165 ************************************************************************ 3166 ************************************************************************ 3167 */ 3168 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) 3169 { 3170 switch (acb->adapter_type) 3171 { 3172 case ACB_ADAPTER_TYPE_A: 3173 case ACB_ADAPTER_TYPE_C: 3174 break; 3175 case ACB_ADAPTER_TYPE_B: { 3176 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE); 3177 if(!arcmsr_hbb_wait_msgint_ready(acb)) { 3178 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit); 3179 3180 return; 3181 } 3182 } 3183 break; 3184 } 3185 return; 3186 } 3187 /* 3188 ********************************************************************** 3189 ********************************************************************** 3190 */ 3191 static void arcmsr_iop_init(struct AdapterControlBlock *acb) 3192 { 3193 u_int32_t intmask_org; 3194 3195 /* disable all outbound interrupt */ 3196 intmask_org=arcmsr_disable_allintr(acb); 3197 arcmsr_wait_firmware_ready(acb); 3198 arcmsr_iop_confirm(acb); 3199 arcmsr_get_firmware_spec(acb); 3200 /*start background rebuild*/ 3201 arcmsr_start_adapter_bgrb(acb); 3202 /* empty doorbell Qbuffer if door bell ringed */ 3203 arcmsr_clear_doorbell_queue_buffer(acb); 3204 arcmsr_enable_eoi_mode(acb); 3205 /* enable outbound Post Queue, outbound doorbell Interrupt */ 3206 arcmsr_enable_allintr(acb, intmask_org); 3207 acb->acb_flags |=ACB_F_IOP_INITED; 3208 return; 3209 } 3210 /* 3211 ********************************************************************** 3212 ********************************************************************** 3213 */ 3214 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, 
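    /*
    ** arcmsr_map_free_srb() is the bus_dmamap_load() callback for the SRB
    ** pool: each CommandControlBlock records cdb_shifted_phyaddr as the bus
    ** address >> 5 for Type A/B (the 32-byte-aligned frame address matches
    ** the "flag_srb << 5" decode in the completion paths) and as the raw
    ** address for Type C, and vir2phy_offset caches the virtual-to-bus delta
    ** used later to turn reply tokens back into SRB pointers.
    */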
	int error)
{
	struct AdapterControlBlock *acb=arg;
	struct CommandControlBlock *srb_tmp;
	u_int8_t *dma_memptr;
	u_int32_t i;
	unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;

	dma_memptr=acb->uncacheptr;
	acb->srb_phyaddr.phyaddr=srb_phyaddr;
	srb_tmp=(struct CommandControlBlock *)dma_memptr;
	for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
		if(bus_dmamap_create(acb->dm_segs_dmat,
			/*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
			acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
			kprintf("arcmsr%d:"
				" srb dmamap bus_dmamap_create error\n", acb->pci_unit);
			return;
		}
		srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
		srb_tmp->acb=acb;
		acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
		srb_phyaddr=srb_phyaddr+sizeof(struct CommandControlBlock);
		srb_tmp++;
	}
	acb->vir2phy_offset=(unsigned long)srb_tmp-(unsigned long)srb_phyaddr;
	return;
}
/*
************************************************************************
** Release the DMA maps/tags and the ioctl control device.
**
************************************************************************
*/
static void arcmsr_free_resource(struct AdapterControlBlock *acb)
{
	/* remove the control device */
	if(acb->ioctl_dev != NULL) {
		destroy_dev(acb->ioctl_dev);
	}
	bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
	bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
	bus_dma_tag_destroy(acb->srb_dmat);
	bus_dma_tag_destroy(acb->dm_segs_dmat);
	bus_dma_tag_destroy(acb->parent_dmat);
	return;
}
/*
************************************************************************
************************************************************************
*/
static u_int32_t arcmsr_initialize(device_t dev)
{
	struct AdapterControlBlock *acb=device_get_softc(dev);
	u_int16_t pci_command;
	int i, j, max_coherent_size;

	switch (pci_get_devid(dev)) {
	case PCIDevVenIDARC1880: {
		acb->adapter_type=ACB_ADAPTER_TYPE_C;
		max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
		}
		break;
	case PCIDevVenIDARC1200:
	case PCIDevVenIDARC1201: {
		acb->adapter_type=ACB_ADAPTER_TYPE_B;
		max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
		}
		break;
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1210:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1231:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1261:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
	case PCIDevVenIDARC1212:
	case PCIDevVenIDARC1222:
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681: {
		acb->adapter_type=ACB_ADAPTER_TYPE_A;
		max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
		}
		break;
	default: {
		kprintf("arcmsr%d:"
			" unknown RAID adapter type \n", device_get_unit(dev));
		return ENOMEM;
		}
	}
	if(bus_dma_tag_create( /*parent*/ NULL,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
		/*nsegments*/ BUS_SPACE_UNRESTRICTED,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->parent_dmat) != 0)
	{
		kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENOMEM;
	}
	/* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 1,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
		/*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->dm_segs_dmat) != 0)
	{
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENOMEM;
	}
	/* DMA tag for our srb structures.... Allocate the freesrb memory */
	if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
		/*alignment*/ 0x20,
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/ NULL,
		/*filterarg*/ NULL,
		/*maxsize*/ max_coherent_size,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ 0,
		&acb->srb_dmat) != 0)
	{
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	/* Allocation for our srbs */
	if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	/* And permanently map them */
	if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
		bus_dma_tag_destroy(acb->srb_dmat);
		bus_dma_tag_destroy(acb->dm_segs_dmat);
		bus_dma_tag_destroy(acb->parent_dmat);
		kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= PCIM_CMD_BUSMASTEREN;
	pci_command |= PCIM_CMD_PERRESPEN;
	pci_command |= PCIM_CMD_MWRICEN;
	/* Enable Busmaster/Mem */
	pci_command |= PCIM_CMD_MEMEN;
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
	switch(acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		u_int32_t rid0=PCIR_BAR(0);
		vm_offset_t mem_base0;

		acb->sys_res_arcmsr[0]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
		if(acb->sys_res_arcmsr[0] == NULL) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
			return ENOMEM;
		}
		if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
			return ENXIO;
		}
		mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
		if(mem_base0==0) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
			return ENXIO;
		}
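		/*
		** Save the bus space tag/handle for later register access and
		** point pmu at the memory mapped type A MessageUnit in BAR0.
		*/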
		acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
		acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
		acb->pmu=(struct MessageUnit_UNION *)mem_base0;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct HBB_MessageUnit *phbbmu;
		struct CommandControlBlock *freesrb;
		u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
		vm_offset_t mem_base[]={0,0};

		for(i=0; i<2; i++) {
			if(i==0) {
				acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
					0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
			} else {
				acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
					0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
			}
			if(acb->sys_res_arcmsr[i] == NULL) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
				return ENOMEM;
			}
			if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
				return ENXIO;
			}
			mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
			if(mem_base[i]==0) {
				arcmsr_free_resource(acb);
				kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
				return ENXIO;
			}
			acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
			acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
		}
		freesrb=(struct CommandControlBlock *)acb->uncacheptr;
		acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
		phbbmu=(struct HBB_MessageUnit *)acb->pmu;
		phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
		phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		u_int32_t rid0=PCIR_BAR(1);
		vm_offset_t mem_base0;

		acb->sys_res_arcmsr[0]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
		if(acb->sys_res_arcmsr[0] == NULL) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
			return ENOMEM;
		}
		if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
			return ENXIO;
		}
		mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
		if(mem_base0==0) {
			arcmsr_free_resource(acb);
			kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
			return ENXIO;
		}
		acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
		acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
		acb->pmu=(struct MessageUnit_UNION *)mem_base0;
		}
		break;
	}
	if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
		arcmsr_free_resource(acb);
		kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
		return ENXIO;
	}
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	/*
	********************************************************************
	** init raid volume state
	********************************************************************
	*/
	for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
		for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
			acb->devstate[i][j]=ARECA_RAID_GONE;
		}
	}
	arcmsr_iop_init(acb);
	return(0);
}
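/*
** Attach path overview: arcmsr_initialize() (above) selects the adapter
** type, builds the DMA tags and maps the controller registers;
** arcmsr_attach() (below) then hooks the interrupt, registers the SIM
** with CAM, creates the control device node and starts the devmap
** polling callout.
*/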
/*
************************************************************************
************************************************************************
*/
static int arcmsr_attach(device_t dev)
{
	struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
	u_int32_t unit=device_get_unit(dev);
	struct ccb_setasync csa;
	struct cam_devq *devq;	/* Device Queue to use for this SIM */
	struct resource *irqres;
	int rid;

	if(acb == NULL) {
		kprintf("arcmsr%d: cannot allocate softc\n", unit);
		return (ENOMEM);
	}
	ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
	if(arcmsr_initialize(dev)) {
		kprintf("arcmsr%d: initialize failure!\n", unit);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		return ENXIO;
	}
	/* After setting up the adapter, map our interrupt */
	rid=0;
	irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE);
	if(irqres == NULL ||
		bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
		arcmsr_free_resource(acb);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
		return ENXIO;
	}
	acb->irqres=irqres;
	acb->pci_dev=dev;
	acb->pci_unit=unit;
	/*
	 * Now let the CAM generic SCSI layer find the SCSI devices on
	 * the bus and start the queue running.
	 * Create the device queue for our SIM;
	 * (MAX_START_JOB - 1) : max_sim_transactions.
	 */
	devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
	if(devq == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
		return ENXIO;
	}
	acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
	if(acb->psim == NULL) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		cam_simq_release(devq);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
		return ENXIO;
	}
	ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
	if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		cam_sim_free(acb->psim);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
		return ENXIO;
	}
	if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		arcmsr_free_resource(acb);
		bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
		xpt_bus_deregister(cam_sim_path(acb->psim));
		cam_sim_free(acb->psim);
		ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
		kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
		return ENXIO;
	}
	/*
	****************************************************
	*/
	xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
	csa.ccb_h.func_code=XPT_SASYNC_CB;
	csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
	csa.callback=arcmsr_async;
	csa.callback_arg=acb->psim;
	xpt_action((union ccb *)&csa);
	ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
	/* Create the control device. */
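	/*
	** The node is also aliased as "arc%d"; si_drv1 stores the softc so
	** the ioctl entry point can locate this adapter.
	*/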
	acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
	acb->ioctl_dev->si_drv1=acb;
	(void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
	callout_init(&acb->devmap_callout);
	callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
	return 0;
}
/*
************************************************************************
************************************************************************
*/
static int arcmsr_probe(device_t dev)
{
	u_int32_t id;
	static char buf[256];
	char x_type[]={"X-TYPE"};
	char *type;
	int raid6 = 1;

	if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
		return (ENXIO);
	}
	switch(id=pci_get_devid(dev)) {
	case PCIDevVenIDARC1110:
	case PCIDevVenIDARC1200:
	case PCIDevVenIDARC1201:
	case PCIDevVenIDARC1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCIDevVenIDARC1120:
	case PCIDevVenIDARC1130:
	case PCIDevVenIDARC1160:
	case PCIDevVenIDARC1170:
	case PCIDevVenIDARC1220:
	case PCIDevVenIDARC1230:
	case PCIDevVenIDARC1231:
	case PCIDevVenIDARC1260:
	case PCIDevVenIDARC1261:
	case PCIDevVenIDARC1270:
	case PCIDevVenIDARC1280:
		type = "SATA";
		break;
	case PCIDevVenIDARC1212:
	case PCIDevVenIDARC1222:
	case PCIDevVenIDARC1380:
	case PCIDevVenIDARC1381:
	case PCIDevVenIDARC1680:
	case PCIDevVenIDARC1681:
		type = "SAS 3G";
		break;
	case PCIDevVenIDARC1880:
		type = "SAS 6G";
		break;
	default:
		type = x_type;
		break;
	}
	if(type == x_type)
		return(ENXIO);
	ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ?
" (RAID6 capable)" : ""); 3650 device_set_desc_copy(dev, buf); 3651 return 0; 3652 } 3653 /* 3654 ************************************************************************ 3655 ************************************************************************ 3656 */ 3657 static int arcmsr_shutdown(device_t dev) 3658 { 3659 u_int32_t i; 3660 u_int32_t intmask_org; 3661 struct CommandControlBlock *srb; 3662 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 3663 3664 /* stop adapter background rebuild */ 3665 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 3666 /* disable all outbound interrupt */ 3667 intmask_org=arcmsr_disable_allintr(acb); 3668 arcmsr_stop_adapter_bgrb(acb); 3669 arcmsr_flush_adapter_cache(acb); 3670 /* abort all outstanding command */ 3671 acb->acb_flags |= ACB_F_SCSISTOPADAPTER; 3672 acb->acb_flags &= ~ACB_F_IOP_INITED; 3673 if(acb->srboutstandingcount!=0) { 3674 /*clear and abort all outbound posted Q*/ 3675 arcmsr_done4abort_postqueue(acb); 3676 /* talk to iop 331 outstanding command aborted*/ 3677 arcmsr_abort_allcmd(acb); 3678 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) { 3679 srb=acb->psrb_pool[i]; 3680 if(srb->startdone==ARCMSR_SRB_START) { 3681 srb->startdone=ARCMSR_SRB_ABORTED; 3682 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED; 3683 arcmsr_srb_complete(srb, 1); 3684 } 3685 } 3686 } 3687 atomic_set_int(&acb->srboutstandingcount, 0); 3688 acb->workingsrb_doneindex=0; 3689 acb->workingsrb_startindex=0; 3690 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 3691 return (0); 3692 } 3693 /* 3694 ************************************************************************ 3695 ************************************************************************ 3696 */ 3697 static int arcmsr_detach(device_t dev) 3698 { 3699 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev); 3700 int i; 3701 3702 callout_stop(&acb->devmap_callout); 3703 bus_teardown_intr(dev, acb->irqres, acb->ih); 3704 arcmsr_shutdown(dev); 3705 arcmsr_free_resource(acb); 3706 for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) { 3707 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]); 3708 } 3709 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres); 3710 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock); 3711 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL); 3712 xpt_free_path(acb->ppath); 3713 xpt_bus_deregister(cam_sim_path(acb->psim)); 3714 cam_sim_free(acb->psim); 3715 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock); 3716 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock); 3717 return (0); 3718 } 3719