1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */ 2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.20 2005/06/09 20:55:05 swildner Exp $ */ 3 /* 4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation 5 * Copyright (c) 2000-2001 Adaptec Corporation 6 * All rights reserved. 7 * 8 * TERMS AND CONDITIONS OF USE 9 * 10 * Redistribution and use in source form, with or without modification, are 11 * permitted provided that redistributions of source code must retain the 12 * above copyright notice, this list of conditions and the following disclaimer. 13 * 14 * This software is provided `as is' by Adaptec and any express or implied 15 * warranties, including, but not limited to, the implied warranties of 16 * merchantability and fitness for a particular purpose, are disclaimed. In no 17 * event shall Adaptec be liable for any direct, indirect, incidental, special, 18 * exemplary or consequential damages (including, but not limited to, 19 * procurement of substitute goods or services; loss of use, data, or profits; 20 * or business interruptions) however caused and on any theory of liability, 21 * whether in contract, strict liability, or tort (including negligence or 22 * otherwise) arising in any way out of the use of this driver software, even 23 * if advised of the possibility of such damage. 24 * 25 * SCSI I2O host adapter driver 26 * 27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com 28 * - The 2000S and 2005S do not initialize on some machines, 29 * increased timeout to 255ms from 50ms for the StatusGet 30 * command. 31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com 32 * - I knew this one was too good to be true. The error return 33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not 34 * to the bit masked status. 35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com 36 * - The 2005S that was supported is affectionately called the 37 * Conjoined BAR Firmware. 
In order to support RAID-5 in a 38 * 16MB low-cost configuration, Firmware was forced to go 39 * to a Split BAR Firmware. This requires a separate IOP and 40 * Messaging base address. 41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com 42 * - Handle support for 2005S Zero Channel RAID solution. 43 * - System locked up if the Adapter locked up. Do not try 44 * to send other commands if the resetIOP command fails. The 45 * fail outstanding command discovery loop was flawed as the 46 * removal of the command from the list prevented discovering 47 * all the commands. 48 * - Comment changes to clarify driver. 49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM. 50 * - We do not use the AC_FOUND_DEV event because of I2O. 51 * Removed asr_async. 52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org, 53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com. 54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0 55 * mode as this is confused with competitor adapters in run 56 * mode. 57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove 58 * to prevent operating system panic. 59 * - moved default major number to 154 from 97. 60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com 61 * - The controller is not actually an ASR (Adaptec SCSI RAID) 62 * series that is visible, it's more of an internal code name. 63 * remove any visible references within reason for now. 64 * - bus_ptr->LUN was not correctly zeroed when initially 65 * allocated causing a possible panic of the operating system 66 * during boot. 67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com 68 * - Code always fails for ASR_getTid affecting performance. 69 * - initiated a set of changes that resulted from a formal 70 * code inspection by Mark_Salyzyn@adaptec.com, 71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com, 72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com. 
73 * Their findings were focussed on the LCT & TID handler, and 74 * all resulting changes were to improve code readability, 75 * consistency or have a positive effect on performance. 76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com 77 * - Passthrough returned an incorrect error. 78 * - Passthrough did not migrate the intrinsic scsi layer wakeup 79 * on command completion. 80 * - generate control device nodes using make_dev and delete_dev. 81 * - Performance affected by TID caching reallocing. 82 * - Made suggested changes by Justin_Gibbs@adaptec.com 83 * - use splcam instead of splbio. 84 * - use cam_imask instead of bio_imask. 85 * - use u_int8_t instead of u_char. 86 * - use u_int16_t instead of u_short. 87 * - use u_int32_t instead of u_long where appropriate. 88 * - use 64 bit context handler instead of 32 bit. 89 * - create_ccb should only allocate the worst case 90 * requirements for the driver since CAM may evolve 91 * making union ccb much larger than needed here. 92 * renamed create_ccb to asr_alloc_ccb. 93 * - go nutz justifying all debug prints as macros 94 * defined at the top and remove unsightly ifdefs. 95 * - INLINE STATIC viewed as confusing. Historically 96 * utilized to affect code performance and debug 97 * issues in OS, Compiler or OEM specific situations. 98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com 99 * - Ported from FreeBSD 2.2.X DPT I2O driver. 100 * changed struct scsi_xfer to union ccb/struct ccb_hdr 101 * changed variable name xs to ccb 102 * changed struct scsi_link to struct cam_path 103 * changed struct scsibus_data to struct cam_sim 104 * stopped using fordriver for holding on to the TID 105 * use proprietary packet creation instead of scsi_inquire 106 * CAM layer sends synchronize commands. 
 */

/* Driver version stamp, folded into the dpt_sig_S signature below. */
#define ASR_VERSION	1
#define ASR_REVISION	'0'
#define ASR_SUBREVISION '8'
#define ASR_MONTH	8
#define ASR_DAY		21
#define ASR_YEAR	2001 - 1980	/* dptsig encodes the year offset from 1980 */

/*
 *	Debug macros to reduce the unsightly ifdefs
 */
#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
/* Hex-dump an I2O message frame, eight 32-bit words per output line. */
# define debug_asr_message(message)					\
	{							\
		u_int32_t * pointer = (u_int32_t *)message;	\
		u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
		u_int32_t counter = 0;				\
								\
		while (length--) {				\
			printf ("%08lx%c", (u_long)*(pointer++),	\
			  (((++counter & 7) == 0) || (length == 0))	\
			    ? '\n'					\
			    : ' ');					\
		}						\
	}
#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */

#if (defined(DEBUG_ASR))
  /* Breaks on non-STDC based compilers :-( */
# define debug_asr_printf(fmt,args...)	printf(fmt, ##args)
# define debug_asr_dump_message(message)	debug_asr_message(message)
# define debug_asr_print_path(ccb)	xpt_print_path(ccb->ccb_h.path);
  /* Non-fatal version of the ASSERT macro: prints instead of panicking. */
# if (defined(__STDC__))
#  define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
# else
#  define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
# endif
#else /* DEBUG_ASR */
# define debug_asr_printf(fmt,args...)
# define debug_asr_dump_message(message)
# define debug_asr_print_path(ccb)
# define ASSERT(x)
#endif /* DEBUG_ASR */

/*
 *	If DEBUG_ASR_CMD is defined:
 *		0 - Display incoming SCSI commands
 *		1 - add in a quick character before queueing.
 *		2 - add in outgoing message frames.
 */
#if (defined(DEBUG_ASR_CMD))
# define debug_asr_cmd_printf(fmt,args...)	printf(fmt,##args)
/*
 * Dump the CDB bytes of a ccb in hex.
 * NOTE(review): this branch defines debug_asr_dump_ccb() while the #else
 * branch below defines debug_asr_cmd_dump_ccb() -- one of the two names
 * is almost certainly a typo; verify against the call sites before
 * enabling DEBUG_ASR_CMD.
 */
# define debug_asr_dump_ccb(ccb)	{			\
	u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io);	\
	int        len = ccb->csio.cdb_len;			\
								\
	while (len) {						\
		debug_asr_cmd_printf (" %02x", *(cp++));	\
		--len;						\
	}							\
}
# if (DEBUG_ASR_CMD > 0)
#  define debug_asr_cmd1_printf			debug_asr_cmd_printf
# else
#  define debug_asr_cmd1_printf(fmt,args...)
# endif
# if (DEBUG_ASR_CMD > 1)
#  define debug_asr_cmd2_printf			debug_asr_cmd_printf
#  define debug_asr_cmd2_dump_message(message)	debug_asr_message(message)
# else
#  define debug_asr_cmd2_printf(fmt,args...)
#  define debug_asr_cmd2_dump_message(message)
# endif
#else /* DEBUG_ASR_CMD */
# define debug_asr_cmd_printf(fmt,args...)
# define debug_asr_cmd_dump_ccb(ccb)
# define debug_asr_cmd1_printf(fmt,args...)
# define debug_asr_cmd2_printf(fmt,args...)
# define debug_asr_cmd2_dump_message(message)
#endif /* DEBUG_ASR_CMD */

#if (defined(DEBUG_ASR_USR_CMD))
# define debug_usr_cmd_printf(fmt,args...)	printf(fmt,##args)
# define debug_usr_cmd_dump_message(message)	debug_usr_message(message)
#else /* DEBUG_ASR_USR_CMD */
# define debug_usr_cmd_printf(fmt,args...)
# define debug_usr_cmd_dump_message(message)
#endif /* DEBUG_ASR_USR_CMD */

#define dsDescription_size 46	/* Snug as a bug in a rug */
#include "dptsig.h"

/*
 * DPT/Adaptec driver signature record, read by the vendor management
 * utilities.  asr_attach patches the OS portion of the description
 * string at run time (see the ^^^^^ note below).
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
	/* 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};

#include <sys/param.h>	/* TRUE=1 and FALSE=0 defined here */
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/disklabel.h>
#include <sys/bus.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/stat.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_xpt_periph.h>

#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/cputypes.h>
#include <machine/clock.h>
#include <i386/include/vmparam.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

/*
 * STATIC/INLINE collapse to nothing in debug builds (below) so that all
 * routines remain visible to the debugger.
 */
#define	STATIC	static
#define	INLINE

#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
# undef STATIC
# define STATIC
# undef INLINE
# define INLINE
#endif
/* Parameter-direction annotations; documentation only, expand to nothing. */
#define	IN
#define	OUT
#define	INOUT

#define	osdSwap4(x) ((u_long)ntohl((u_long)(x)))
#define	KVTOPHYS(x) vtophys(x)	/* kernel virtual -> physical address */
#include "dptalign.h"
#include "i2oexec.h"
#include "i2obscsi.h"
#include "i2odpt.h"
#include "i2oadptr.h"
#include "sys_info.h"

/* Configuration Definitions */

#define	SG_SIZE		 58	/* Scatter Gather list Size		 */
#define	MAX_TARGET_ID	 126	/* Maximum Target ID supported		 */
#define	MAX_LUN		 255	/* Maximum LUN Supported		 */
#define	MAX_CHANNEL	 7	/* Maximum Channel # Supported by driver */
#define	MAX_INBOUND	 2000	/* Max CCBs, Also Max Queue Size	 */
#define	MAX_OUTBOUND	 256	/* Maximum outbound frames/adapter	 */
#define	MAX_INBOUND_SIZE 512	/* Maximum inbound frame size		 */
#define	MAX_MAP		 4194304L /* Maximum mapping size of IOP	 */
				  /* Also serves as the minimum map for	 */
				  /* the 2005S zero channel RAID product */

/**************************************************************************
** ASR Host Adapter structure - One Structure For Each Host Adapter That **
** Is Configured Into The System.  The Structure Supplies Configuration  **
** Information, Status Info, Queue Info And An Active CCB List Pointer.  **
***************************************************************************/

/* I2O register set */
typedef struct {
	U8	     Address[0x30];
	volatile U32 Status;
	volatile U32 Mask;
#	define Mask_InterruptsDisabled	0x08
	U32	     x[2];
	volatile U32 ToFIFO;	/* In Bound FIFO  */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;

/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/* Per-target map of LUN -> TID; `size' is the number of slots allocated. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN    */
	tid_t	  TID[1];	/* grown past one entry by over-allocation */
} lun2tid_t;

/* Per-bus map of target -> LUN table; `size' is the number of slots. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* grown past one entry by over-allocation */
} target2lun_t;

/*
 *	To ensure that we only allocate and use the worst case ccb here, lets
 *	make our own local ccb union. If asr_alloc_ccb is utilized for another
 *	ccb type, ensure that you add the additional structures into our local
 *	ccb union.
To ensure strict type checking, we will utilize the local
 *	ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;  /* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};

/*
 * Per-adapter soft state.  One instance exists for each host adapter
 * configured into the system; instances are chained through ha_next
 * (list head is the file-scope Asr_softc pointer below).
 */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void			* ha_Base;	/* base port for each board */
	u_int8_t		* volatile ha_blinkLED;	/* fault code; [1]==0xBC when valid */
	i2oRegs_t		* ha_Virt;	/* Base address of IOP	    */
	U8			* ha_Fvirt;	/* Base address of Frames   */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use		    */
	struct cam_path		* ha_path[MAX_CHANNEL+1];
	struct cam_sim		* ha_sim[MAX_CHANNEL+1];
	struct resource		* ha_mem_res;
	struct resource		* ha_mes_res;
	struct resource		* ha_irq_res;
	void			* ha_intr;
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Overlay meaning onto the four LCT IdentityTag bytes. */
#	define le_type	IdentityTag[0]
#	define I2O_BSA		0x20
#	define I2O_FCA		0x40
#	define I2O_SCSI		0x00
#	define I2O_PORT		0x80
#	define I2O_UNKNOWN	0x7F
#	define le_bus	IdentityTag[1]
#	define le_target IdentityTag[2]
#	define le_lun	IdentityTag[3]
	target2lun_t		* ha_targets[MAX_CHANNEL+1];	/* TID lookup cache */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;	/* physical address of ha_Msgs */

	u_int8_t		ha_in_reset;	/* adapter reset state machine */
#	define HA_OPERATIONAL	    0
#	define HA_IN_RESET	    1
#	define HA_OFF_LINE	    2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus		    */
	u_int8_t		ha_MaxId;	/* Maximum target ID	    */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN	    */
	u_int8_t		ha_SgSize;	/* Max SG elements	    */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc	* ha_next;	/* HBA list */
} Asr_softc_t;

/* Head of the global list of attached adapters. */
STATIC Asr_softc_t * Asr_softc;

/*
 * Prototypes of the routines we have in this object.
 */

/* Externally callable routines */
/*
 * Probe/attach glue macros: they package the newbus calling convention
 * so the probe/attach bodies below read uniformly.  PROBE_SET() builds a
 * combined (device<<16)|vendor PCI id into `id' for matching.
 */
#define PROBE_ARGS  IN device_t tag
#define PROBE_RET   int
#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
#define ATTACH_ARGS IN device_t tag
#define ATTACH_RET  int
#define ATTACH_SET() int unit = device_get_unit(tag)
#define ATTACH_RETURN(retval) return(retval)
/* I2O HDM interface */
STATIC PROBE_RET	asr_probe (PROBE_ARGS);
STATIC ATTACH_RET	asr_attach (ATTACH_ARGS);
/* DOMINO placeholder */
STATIC PROBE_RET	domino_probe (PROBE_ARGS);
STATIC ATTACH_RET	domino_attach (ATTACH_ARGS);
/* MODE0 adapter placeholder */
STATIC PROBE_RET	mode0_probe (PROBE_ARGS);
STATIC ATTACH_RET	mode0_attach (ATTACH_ARGS);

STATIC Asr_softc_t * ASR_get_sc (
			IN dev_t dev);
STATIC int asr_ioctl (
			IN dev_t dev,
			IN u_long cmd,
			INOUT caddr_t data,
			int flag,
			d_thread_t *td);
STATIC int asr_open (
			IN dev_t dev,
			int32_t flags,
			int32_t ifmt,
			IN d_thread_t *td);
STATIC int asr_close (
			dev_t dev,
			int flags,
			int ifmt,
			d_thread_t *td);
STATIC int asr_intr (
			IN Asr_softc_t * sc);
STATIC void asr_timeout (
			INOUT void * arg);
STATIC int ASR_init (
			IN Asr_softc_t * sc);
STATIC INLINE int ASR_acquireLct (
			INOUT Asr_softc_t * sc);
STATIC INLINE int ASR_acquireHrt (
			INOUT Asr_softc_t * sc);
STATIC void asr_action (
			IN struct cam_sim * sim,
			IN union ccb * ccb);
STATIC void asr_poll (
			IN struct cam_sim * sim);

/*
 * Here is the auto-probe structure used to nest our tests appropriately
 * during the startup phase of the operating system.
 */
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DECLARE_DUMMY_MODULE(asr);
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/* Placeholder driver for the DOMINO memory controller (claim only). */
STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

/* Placeholder driver that claims Mode0 (raw processor) adapters. */
STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);

/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
#define CDEV_MAJOR 154	 /* preferred default character major */
STATIC struct cdevsw asr_cdevsw = {
	"asr",		/* name	    */
	CDEV_MAJOR,	/* maj	    */
	0,		/* flags    */
	NULL,		/* port	    */
	0,		/* auto	    */

	asr_open,	/* open	    */
	asr_close,	/* close    */
	noread,		/* read	    */
	nowrite,	/* write    */
	asr_ioctl,	/* ioctl    */
	nopoll,		/* poll	    */
	nommap,		/* mmap	    */
	nostrategy,	/* strategy */
	nodump,		/* dump	    */
	nopsize		/* psize    */
};

/*
 * Initialize the dynamic cdevsw hooks.
 */
STATIC void
asr_drvinit (void * unused)
{
	static int asr_devsw_installed = 0;

	/* Idempotent: only install the cdevsw once. */
	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 *
	 * XXX this is garbage code, store a unit number in asr_cdevsw
	 * and iterate through that instead?
	 */
	/* Scan upward from CDEV_MAJOR for an unused major number... */
	while (asr_cdevsw.d_maj < NUMCDEVSW &&
	       cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
	) {
		++asr_cdevsw.d_maj;
	}
	/* ...and wrap around to scan [0, CDEV_MAJOR) if none was free. */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) {
		asr_cdevsw.d_maj = 0;
		while (asr_cdevsw.d_maj < CDEV_MAJOR &&
		       cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
		) {
			++asr_cdevsw.d_maj;
		}
	}

	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw, 0, 0);
} /* asr_drvinit */

/* Must initialize before CAM layer picks up our HBA driver */
SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)

/* I2O support routines */
/*
 * Reserve raw storage for a message frame on the stack and obtain a
 * typed pointer to it.
 */
#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))

/*
 * Fill message with default.
 */
STATIC PI2O_MESSAGE_FRAME
ASR_fillMessage (
	IN char * Message,
	IN u_int16_t size)
{
	OUT PI2O_MESSAGE_FRAME Message_Ptr;

	/*
	 * Zero the frame, then stamp in the I2O version/offset, the frame
	 * size (in 32-bit words, rounded up) and the initiator address.
	 */
	Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
	bzero ((void *)Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	return (Message_Ptr);
} /* ASR_fillMessage */

#define	EMPTY_QUEUE ((U32)-1L)

/*
 * Pop the next free inbound message frame offset from the adapter's
 * ToFIFO; a second read is attempted once if the FIFO was empty.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */

/* Issue a polled command */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32	Mask = -1L;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	/* Poll up to ~15s (1500 iterations x 10ms) for a free inbound frame. */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into adapter frame memory and post it. */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;
	}
	/* Returns the previous interrupt mask, or (U32)-1L on failure. */
	return (Mask);
} /* ASR_initiateCp */

/*
 * Reset the adapter.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	/* Reset message followed by a scratch status word (R). */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32			   R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32	       * volatile Reply_Ptr;
	U32		 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* The adapter writes its completion status to this physical address. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		/* Non-zero adapter status word; 0 means the reset timed out. */
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */

/*
 * Get the current state of the adapter
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32			     Old;

	/*
	 * Build up our copy of the Message.
 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms
		 * (raised from 50ms in V1.08 for the 2000S/2005S).
		 */
		u_int8_t Delay = 255;

		/* SyncByte is written by the adapter when the reply lands. */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		/* NULL here means the status poll timed out. */
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */

/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller.  If we find it, we will use it.
 * virtual adapters.
 */
STATIC PROBE_RET
asr_probe(PROBE_ARGS)
{
	PROBE_SET();
	/* Match on the combined (device<<16)|vendor PCI id. */
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		PROBE_RETURN ("Adaptec Caching SCSI RAID");
	}
	PROBE_RETURN (NULL);
} /* asr_probe */

/*
 * Probe/Attach for DOMINO chipset.
 */
STATIC PROBE_RET
domino_probe(PROBE_ARGS)
{
	PROBE_SET();
	if (id == 0x10121044) {
		PROBE_RETURN ("Adaptec Caching Memory Controller");
	}
	PROBE_RETURN (NULL);
} /* domino_probe */

/* Claim the device but do nothing with it. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */

/*
 * Probe/Attach for MODE0 adapters.
 */
STATIC PROBE_RET
mode0_probe(PROBE_ARGS)
{
	PROBE_SET();

	/*
	 * If/When we can get a business case to commit to a
	 * Mode0 driver here, we can make all these tests more
	 * specific and robust. Mode0 adapters have their processors
	 * turned off, thus the chips are in a raw state.
	 */

	/* This is a PLX9054 */
	if (id == 0x905410B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3757");
	}
	/* This is a PLX9080 */
	if (id == 0x908010B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
	}
	/* This is a ZION 80303 */
	if (id == 0x53098086) {
		PROBE_RETURN ("Adaptec Mode0 3010S");
	}
	/* This is an i960RS */
	if (id == 0x39628086) {
		PROBE_RETURN ("Adaptec Mode0 2100S");
	}
	/* This is an i960RN */
	if (id == 0x19648086) {
		PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
	}
#if 0 /* this would match any generic i960 -- mjs */
	/* This is an i960RP (typically also on Motherboards) */
	if (id == 0x19608086) {
		PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
	}
#endif
	PROBE_RETURN (NULL);
} /* mode0_probe */

/* Claim the device but do nothing with it. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */

/*
 * Allocate and zero a local ccb, priming its priority, queue index and
 * back-pointer to the owning softc (spriv_ptr0).  Returns NULL only if
 * the allocation fails.
 */
STATIC INLINE union asr_ccb *
asr_alloc_ccb (
	IN Asr_softc_t * sc)
{
	OUT union asr_ccb * new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
		bzero (new_ccb, sizeof(*new_ccb));
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index =
CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;
	}
	return (new_ccb);
} /* asr_alloc_ccb */

/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */

/*
 * Print inquiry data `carefully'
 */
/* Stops at NUL, blank or '-', printing at most len characters. */
STATIC void
ASR_prstring (
	u_int8_t * s,
	int len)
{
	while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
		printf ("%c", *(s++));
	}
} /* ASR_prstring */

/*
 * Prototypes
 */
STATIC INLINE int ASR_queue (
	IN Asr_softc_t * sc,
	IN PI2O_MESSAGE_FRAME Message);
/*
 * Send a message synchronously and without Interrupt to a ccb.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t	* sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	/* A full inbound queue means the caller must requeue the request. */
	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
 */
	/* Busy-poll the interrupt handler until the ccb leaves INPROG. */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */

/*
 * Send a message synchronously to a Asr_softc_t
 */
STATIC int
ASR_queue_c (
	IN Asr_softc_t	* sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	union asr_ccb * ccb;
	OUT int		status;

	/* Wrap the message in a throw-away ccb for the synchronous send. */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		return (CAM_REQUEUE_REQ);
	}

	status = ASR_queue_s (ccb, Message);

	asr_free_ccb(ccb);

	return (status);
} /* ASR_queue_c */

/*
 * Add the specified ccb to the active queue
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	/* Critical section: the list is also manipulated from timeouts. */
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;
		}
		callout_reset(&ccb->ccb_h.timeout_ch,
		  (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */

/*
 * Remove the specified ccb from the active queue.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	crit_enter();
	/* Cancel the pending timeout before unlinking. */
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */

/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t * sc)
{
	struct ccb_hdr	* ccb;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
      /* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	crit_enter();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
 *	for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
 *		xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
 *	}
 */
	/* Drain the active list, requeueing every outstanding command. */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		/* ccbs without a path are internal synchronous waiters. */
		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup ((caddr_t)ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */

/*
 * The following command causes the HBA to reset the specific bus
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t	    * sc,
	IN int bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY		    Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Walk the LCT for the bus-port entry matching `bus'. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */

/*
 * Returns the adapter's blink-LED fault code, or 0 when the adapter is
 * healthy (byte 1 of the BlinkLED area reads 0xBC when a code is valid).
 */
STATIC INLINE int
ASR_getBlinkLedCode (
	IN Asr_softc_t * sc)
{
	if ((sc != (Asr_softc_t *)NULL)
	 && (sc->ha_blinkLED != (u_int8_t *)NULL)
	 && (sc->ha_blinkLED[1] == 0xBC)) {
		return (sc->ha_blinkLED[0]);
	}
	return (0);
} /* ASR_getBlinkCode */

/*
 * Determine the address of an TID
lookup. Must be done at high priority 1063 * since the address can be changed by other threads of execution. 1064 * 1065 * Returns NULL pointer if not indexible (but will attempt to generate 1066 * an index if `new_entry' flag is set to TRUE). 1067 * 1068 * All addressible entries are to be guaranteed zero if never initialized. 1069 */ 1070 STATIC INLINE tid_t * 1071 ASR_getTidAddress( 1072 INOUT Asr_softc_t * sc, 1073 IN int bus, 1074 IN int target, 1075 IN int lun, 1076 IN int new_entry) 1077 { 1078 target2lun_t * bus_ptr; 1079 lun2tid_t * target_ptr; 1080 unsigned new_size; 1081 1082 /* 1083 * Validity checking of incoming parameters. More of a bound 1084 * expansion limit than an issue with the code dealing with the 1085 * values. 1086 * 1087 * sc must be valid before it gets here, so that check could be 1088 * dropped if speed a critical issue. 1089 */ 1090 if ((sc == (Asr_softc_t *)NULL) 1091 || (bus > MAX_CHANNEL) 1092 || (target > sc->ha_MaxId) 1093 || (lun > sc->ha_MaxLun)) { 1094 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n", 1095 (u_long)sc, bus, target, lun); 1096 return ((tid_t *)NULL); 1097 } 1098 /* 1099 * See if there is an associated bus list. 1100 * 1101 * for performance, allocate in size of BUS_CHUNK chunks. 1102 * BUS_CHUNK must be a power of two. This is to reduce 1103 * fragmentation effects on the allocations. 1104 */ 1105 # define BUS_CHUNK 8 1106 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1)); 1107 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) { 1108 /* 1109 * Allocate a new structure? 1110 * Since one element in structure, the +1 1111 * needed for size has been abstracted. 
1112 */ 1113 if ((new_entry == FALSE) 1114 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc ( 1115 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size), 1116 M_TEMP, M_WAITOK)) 1117 == (target2lun_t *)NULL)) { 1118 debug_asr_printf("failed to allocate bus list\n"); 1119 return ((tid_t *)NULL); 1120 } 1121 bzero (bus_ptr, sizeof(*bus_ptr) 1122 + (sizeof(bus_ptr->LUN) * new_size)); 1123 bus_ptr->size = new_size + 1; 1124 } else if (bus_ptr->size <= new_size) { 1125 target2lun_t * new_bus_ptr; 1126 1127 /* 1128 * Reallocate a new structure? 1129 * Since one element in structure, the +1 1130 * needed for size has been abstracted. 1131 */ 1132 if ((new_entry == FALSE) 1133 || ((new_bus_ptr = (target2lun_t *)malloc ( 1134 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size), 1135 M_TEMP, M_WAITOK)) 1136 == (target2lun_t *)NULL)) { 1137 debug_asr_printf("failed to reallocate bus list\n"); 1138 return ((tid_t *)NULL); 1139 } 1140 /* 1141 * Zero and copy the whole thing, safer, simpler coding 1142 * and not really performance critical at this point. 1143 */ 1144 bzero (new_bus_ptr, sizeof(*bus_ptr) 1145 + (sizeof(bus_ptr->LUN) * new_size)); 1146 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr) 1147 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1))); 1148 sc->ha_targets[bus] = new_bus_ptr; 1149 free (bus_ptr, M_TEMP); 1150 bus_ptr = new_bus_ptr; 1151 bus_ptr->size = new_size + 1; 1152 } 1153 /* 1154 * We now have the bus list, lets get to the target list. 1155 * Since most systems have only *one* lun, we do not allocate 1156 * in chunks as above, here we allow one, then in chunk sizes. 1157 * TARGET_CHUNK must be a power of two. This is to reduce 1158 * fragmentation effects on the allocations. 1159 */ 1160 # define TARGET_CHUNK 8 1161 if ((new_size = lun) != 0) { 1162 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1)); 1163 } 1164 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) { 1165 /* 1166 * Allocate a new structure? 
1167 * Since one element in structure, the +1 1168 * needed for size has been abstracted. 1169 */ 1170 if ((new_entry == FALSE) 1171 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc ( 1172 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size), 1173 M_TEMP, M_WAITOK)) 1174 == (lun2tid_t *)NULL)) { 1175 debug_asr_printf("failed to allocate target list\n"); 1176 return ((tid_t *)NULL); 1177 } 1178 bzero (target_ptr, sizeof(*target_ptr) 1179 + (sizeof(target_ptr->TID) * new_size)); 1180 target_ptr->size = new_size + 1; 1181 } else if (target_ptr->size <= new_size) { 1182 lun2tid_t * new_target_ptr; 1183 1184 /* 1185 * Reallocate a new structure? 1186 * Since one element in structure, the +1 1187 * needed for size has been abstracted. 1188 */ 1189 if ((new_entry == FALSE) 1190 || ((new_target_ptr = (lun2tid_t *)malloc ( 1191 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size), 1192 M_TEMP, M_WAITOK)) 1193 == (lun2tid_t *)NULL)) { 1194 debug_asr_printf("failed to reallocate target list\n"); 1195 return ((tid_t *)NULL); 1196 } 1197 /* 1198 * Zero and copy the whole thing, safer, simpler coding 1199 * and not really performance critical at this point. 1200 */ 1201 bzero (new_target_ptr, sizeof(*target_ptr) 1202 + (sizeof(target_ptr->TID) * new_size)); 1203 bcopy (target_ptr, new_target_ptr, 1204 sizeof(*target_ptr) 1205 + (sizeof(target_ptr->TID) * (target_ptr->size - 1))); 1206 bus_ptr->LUN[target] = new_target_ptr; 1207 free (target_ptr, M_TEMP); 1208 target_ptr = new_target_ptr; 1209 target_ptr->size = new_size + 1; 1210 } 1211 /* 1212 * Now, acquire the TID address from the LUN indexed list. 1213 */ 1214 return (&(target_ptr->TID[lun])); 1215 } /* ASR_getTidAddress */ 1216 1217 /* 1218 * Get a pre-existing TID relationship. 1219 * 1220 * If the TID was never set, return (tid_t)-1. 1221 * 1222 * should use mutex rather than spl. 
1223 */ 1224 STATIC INLINE tid_t 1225 ASR_getTid ( 1226 IN Asr_softc_t * sc, 1227 IN int bus, 1228 IN int target, 1229 IN int lun) 1230 { 1231 tid_t * tid_ptr; 1232 OUT tid_t retval; 1233 1234 crit_enter(); 1235 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE)) 1236 == (tid_t *)NULL) 1237 /* (tid_t)0 or (tid_t)-1 indicate no TID */ 1238 || (*tid_ptr == (tid_t)0)) { 1239 crit_exit(); 1240 return ((tid_t)-1); 1241 } 1242 retval = *tid_ptr; 1243 crit_exit(); 1244 return (retval); 1245 } /* ASR_getTid */ 1246 1247 /* 1248 * Set a TID relationship. 1249 * 1250 * If the TID was not set, return (tid_t)-1. 1251 * 1252 * should use mutex rather than spl. 1253 */ 1254 STATIC INLINE tid_t 1255 ASR_setTid ( 1256 INOUT Asr_softc_t * sc, 1257 IN int bus, 1258 IN int target, 1259 IN int lun, 1260 INOUT tid_t TID) 1261 { 1262 tid_t * tid_ptr; 1263 1264 if (TID != (tid_t)-1) { 1265 if (TID == 0) { 1266 return ((tid_t)-1); 1267 } 1268 crit_enter(); 1269 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE)) 1270 == (tid_t *)NULL) { 1271 crit_exit(); 1272 return ((tid_t)-1); 1273 } 1274 *tid_ptr = TID; 1275 crit_exit(); 1276 } 1277 return (TID); 1278 } /* ASR_setTid */ 1279 1280 /*-------------------------------------------------------------------------*/ 1281 /* Function ASR_rescan */ 1282 /*-------------------------------------------------------------------------*/ 1283 /* The Parameters Passed To This Function Are : */ 1284 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */ 1285 /* */ 1286 /* This Function Will rescan the adapter and resynchronize any data */ 1287 /* */ 1288 /* Return : 0 For OK, Error Code Otherwise */ 1289 /*-------------------------------------------------------------------------*/ 1290 1291 STATIC INLINE int 1292 ASR_rescan( 1293 IN Asr_softc_t * sc) 1294 { 1295 int bus; 1296 OUT int error; 1297 1298 /* 1299 * Re-acquire the LCT table and synchronize us to the adapter. 
1300 */ 1301 if ((error = ASR_acquireLct(sc)) == 0) { 1302 error = ASR_acquireHrt(sc); 1303 } 1304 1305 if (error != 0) { 1306 return error; 1307 } 1308 1309 bus = sc->ha_MaxBus; 1310 /* Reset all existing cached TID lookups */ 1311 do { 1312 int target, event = 0; 1313 1314 /* 1315 * Scan for all targets on this bus to see if they 1316 * got affected by the rescan. 1317 */ 1318 for (target = 0; target <= sc->ha_MaxId; ++target) { 1319 int lun; 1320 1321 /* Stay away from the controller ID */ 1322 if (target == sc->ha_adapter_target[bus]) { 1323 continue; 1324 } 1325 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 1326 PI2O_LCT_ENTRY Device; 1327 tid_t TID = (tid_t)-1; 1328 tid_t LastTID; 1329 1330 /* 1331 * See if the cached TID changed. Search for 1332 * the device in our new LCT. 1333 */ 1334 for (Device = sc->ha_LCT->LCTEntry; 1335 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT) 1336 + I2O_LCT_getTableSize(sc->ha_LCT)); 1337 ++Device) { 1338 if ((Device->le_type != I2O_UNKNOWN) 1339 && (Device->le_bus == bus) 1340 && (Device->le_target == target) 1341 && (Device->le_lun == lun) 1342 && (I2O_LCT_ENTRY_getUserTID(Device) 1343 == 0xFFF)) { 1344 TID = I2O_LCT_ENTRY_getLocalTID( 1345 Device); 1346 break; 1347 } 1348 } 1349 /* 1350 * Indicate to the OS that the label needs 1351 * to be recalculated, or that the specific 1352 * open device is no longer valid (Merde) 1353 * because the cached TID changed. 
1354 */ 1355 LastTID = ASR_getTid (sc, bus, target, lun); 1356 if (LastTID != TID) { 1357 struct cam_path * path; 1358 1359 if (xpt_create_path(&path, 1360 /*periph*/NULL, 1361 cam_sim_path(sc->ha_sim[bus]), 1362 target, lun) != CAM_REQ_CMP) { 1363 if (TID == (tid_t)-1) { 1364 event |= AC_LOST_DEVICE; 1365 } else { 1366 event |= AC_INQ_CHANGED 1367 | AC_GETDEV_CHANGED; 1368 } 1369 } else { 1370 if (TID == (tid_t)-1) { 1371 xpt_async( 1372 AC_LOST_DEVICE, 1373 path, NULL); 1374 } else if (LastTID == (tid_t)-1) { 1375 struct ccb_getdev ccb; 1376 1377 xpt_setup_ccb( 1378 &(ccb.ccb_h), 1379 path, /*priority*/5); 1380 xpt_async( 1381 AC_FOUND_DEVICE, 1382 path, 1383 &ccb); 1384 } else { 1385 xpt_async( 1386 AC_INQ_CHANGED, 1387 path, NULL); 1388 xpt_async( 1389 AC_GETDEV_CHANGED, 1390 path, NULL); 1391 } 1392 } 1393 } 1394 /* 1395 * We have the option of clearing the 1396 * cached TID for it to be rescanned, or to 1397 * set it now even if the device never got 1398 * accessed. We chose the later since we 1399 * currently do not use the condition that 1400 * the TID ever got cached. 1401 */ 1402 ASR_setTid (sc, bus, target, lun, TID); 1403 } 1404 } 1405 /* 1406 * The xpt layer can not handle multiple events at the 1407 * same call. 1408 */ 1409 if (event & AC_LOST_DEVICE) { 1410 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL); 1411 } 1412 if (event & AC_INQ_CHANGED) { 1413 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL); 1414 } 1415 if (event & AC_GETDEV_CHANGED) { 1416 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL); 1417 } 1418 } while (--bus >= 0); 1419 return (error); 1420 } /* ASR_rescan */ 1421 1422 /*-------------------------------------------------------------------------*/ 1423 /* Function ASR_reset */ 1424 /*-------------------------------------------------------------------------*/ 1425 /* The Parameters Passed To This Function Are : */ 1426 /* Asr_softc_t * : HBA miniport driver's adapter data storage. 
*/ 1427 /* */ 1428 /* This Function Will reset the adapter and resynchronize any data */ 1429 /* */ 1430 /* Return : None */ 1431 /*-------------------------------------------------------------------------*/ 1432 1433 STATIC INLINE int 1434 ASR_reset( 1435 IN Asr_softc_t * sc) 1436 { 1437 int retVal; 1438 1439 crit_enter(); 1440 if ((sc->ha_in_reset == HA_IN_RESET) 1441 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) { 1442 crit_exit(); 1443 return (EBUSY); 1444 } 1445 /* 1446 * Promotes HA_OPERATIONAL to HA_IN_RESET, 1447 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY. 1448 */ 1449 ++(sc->ha_in_reset); 1450 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) { 1451 debug_asr_printf ("ASR_resetIOP failed\n"); 1452 /* 1453 * We really need to take this card off-line, easier said 1454 * than make sense. Better to keep retrying for now since if a 1455 * UART cable is connected the blinkLEDs the adapter is now in 1456 * a hard state requiring action from the monitor commands to 1457 * the HBA to continue. For debugging waiting forever is a 1458 * good thing. In a production system, however, one may wish 1459 * to instead take the card off-line ... 1460 */ 1461 # if 0 && (defined(HA_OFF_LINE)) 1462 /* 1463 * Take adapter off-line. 1464 */ 1465 printf ("asr%d: Taking adapter off-line\n", 1466 sc->ha_path[0] 1467 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1468 : 0); 1469 sc->ha_in_reset = HA_OFF_LINE; 1470 crit_exit(); 1471 return (ENXIO); 1472 # else 1473 /* Wait Forever */ 1474 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0); 1475 # endif 1476 } 1477 retVal = ASR_init (sc); 1478 crit_exit(); 1479 if (retVal != 0) { 1480 debug_asr_printf ("ASR_init failed\n"); 1481 sc->ha_in_reset = HA_OFF_LINE; 1482 return (ENXIO); 1483 } 1484 if (ASR_rescan (sc) != 0) { 1485 debug_asr_printf ("ASR_rescan failed\n"); 1486 } 1487 ASR_failActiveCommands (sc); 1488 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) { 1489 printf ("asr%d: Brining adapter back on-line\n", 1490 sc->ha_path[0] 1491 ? 
cam_sim_unit(xpt_path_sim(sc->ha_path[0])) 1492 : 0); 1493 } 1494 sc->ha_in_reset = HA_OPERATIONAL; 1495 return (0); 1496 } /* ASR_reset */ 1497 1498 /* 1499 * Device timeout handler. 1500 */ 1501 STATIC void 1502 asr_timeout( 1503 INOUT void * arg) 1504 { 1505 union asr_ccb * ccb = (union asr_ccb *)arg; 1506 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1507 int s; 1508 1509 debug_asr_print_path(ccb); 1510 debug_asr_printf("timed out"); 1511 1512 /* 1513 * Check if the adapter has locked up? 1514 */ 1515 if ((s = ASR_getBlinkLedCode(sc)) != 0) { 1516 /* Reset Adapter */ 1517 printf ("asr%d: Blink LED 0x%x resetting adapter\n", 1518 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s); 1519 if (ASR_reset (sc) == ENXIO) { 1520 /* Try again later */ 1521 callout_reset(&ccb->ccb_h.timeout_ch, 1522 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb); 1523 } 1524 return; 1525 } 1526 /* 1527 * Abort does not function on the ASR card!!! Walking away from 1528 * the SCSI command is also *very* dangerous. A SCSI BUS reset is 1529 * our best bet, followed by a complete adapter reset if that fails. 1530 */ 1531 crit_enter(); 1532 /* Check if we already timed out once to raise the issue */ 1533 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) { 1534 debug_asr_printf (" AGAIN\nreinitializing adapter\n"); 1535 if (ASR_reset (sc) == ENXIO) { 1536 callout_reset(&ccb->ccb_h.timeout_ch, 1537 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb); 1538 } 1539 crit_exit(); 1540 return; 1541 } 1542 debug_asr_printf ("\nresetting bus\n"); 1543 /* If the BUS reset does not take, then an adapter reset is next! 
*/ 1544 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1545 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1546 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000, 1547 asr_timeout, ccb); 1548 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path))); 1549 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL); 1550 crit_exit(); 1551 } /* asr_timeout */ 1552 1553 /* 1554 * send a message asynchronously 1555 */ 1556 STATIC INLINE int 1557 ASR_queue( 1558 IN Asr_softc_t * sc, 1559 IN PI2O_MESSAGE_FRAME Message) 1560 { 1561 OUT U32 MessageOffset; 1562 union asr_ccb * ccb; 1563 1564 debug_asr_printf ("Host Command Dump:\n"); 1565 debug_asr_dump_message (Message); 1566 1567 ccb = (union asr_ccb *)(long) 1568 I2O_MESSAGE_FRAME_getInitiatorContext64(Message); 1569 1570 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) { 1571 bcopy (Message, sc->ha_Fvirt + MessageOffset, 1572 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2); 1573 if (ccb) { 1574 ASR_ccbAdd (sc, ccb); 1575 } 1576 /* Post the command */ 1577 sc->ha_Virt->ToFIFO = MessageOffset; 1578 } else { 1579 if (ASR_getBlinkLedCode(sc)) { 1580 /* 1581 * Unlikely we can do anything if we can't grab a 1582 * message frame :-(, but lets give it a try. 1583 */ 1584 (void)ASR_reset (sc); 1585 } 1586 } 1587 return (MessageOffset); 1588 } /* ASR_queue */ 1589 1590 1591 /* Simple Scatter Gather elements */ 1592 #define SG(SGL,Index,Flags,Buffer,Size) \ 1593 I2O_FLAGS_COUNT_setCount( \ 1594 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1595 Size); \ 1596 I2O_FLAGS_COUNT_setFlags( \ 1597 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \ 1598 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \ 1599 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \ 1600 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \ 1601 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer)) 1602 1603 /* 1604 * Retrieve Parameter Group. 1605 * Buffer must be allocated using defAlignLong macro. 
1606 */ 1607 STATIC void * 1608 ASR_getParams( 1609 IN Asr_softc_t * sc, 1610 IN tid_t TID, 1611 IN int Group, 1612 OUT void * Buffer, 1613 IN unsigned BufferSize) 1614 { 1615 struct paramGetMessage { 1616 I2O_UTIL_PARAMS_GET_MESSAGE M; 1617 char F[ 1618 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)]; 1619 struct Operations { 1620 I2O_PARAM_OPERATIONS_LIST_HEADER Header; 1621 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1]; 1622 } O; 1623 }; 1624 defAlignLong(struct paramGetMessage, Message); 1625 struct Operations * Operations_Ptr; 1626 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr; 1627 struct ParamBuffer { 1628 I2O_PARAM_RESULTS_LIST_HEADER Header; 1629 I2O_PARAM_READ_OPERATION_RESULT Read; 1630 char Info[1]; 1631 } * Buffer_Ptr; 1632 1633 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message, 1634 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1635 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1636 Operations_Ptr = (struct Operations *)((char *)Message_Ptr 1637 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) 1638 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)); 1639 bzero ((void *)Operations_Ptr, sizeof(struct Operations)); 1640 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount( 1641 &(Operations_Ptr->Header), 1); 1642 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation( 1643 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET); 1644 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount( 1645 &(Operations_Ptr->Template[0]), 0xFFFF); 1646 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber( 1647 &(Operations_Ptr->Template[0]), Group); 1648 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)), 1649 BufferSize); 1650 1651 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1652 I2O_VERSION_11 1653 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1654 / sizeof(U32)) << 4)); 1655 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame), 1656 TID); 1657 I2O_MESSAGE_FRAME_setFunction 
(&(Message_Ptr->StdMessageFrame), 1658 I2O_UTIL_PARAMS_GET); 1659 /* 1660 * Set up the buffers as scatter gather elements. 1661 */ 1662 SG(&(Message_Ptr->SGL), 0, 1663 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, 1664 Operations_Ptr, sizeof(struct Operations)); 1665 SG(&(Message_Ptr->SGL), 1, 1666 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 1667 Buffer_Ptr, BufferSize); 1668 1669 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP) 1670 && (Buffer_Ptr->Header.ResultCount)) { 1671 return ((void *)(Buffer_Ptr->Info)); 1672 } 1673 return ((void *)NULL); 1674 } /* ASR_getParams */ 1675 1676 /* 1677 * Acquire the LCT information. 1678 */ 1679 STATIC INLINE int 1680 ASR_acquireLct ( 1681 INOUT Asr_softc_t * sc) 1682 { 1683 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr; 1684 PI2O_SGE_SIMPLE_ELEMENT sg; 1685 int MessageSizeInBytes; 1686 caddr_t v; 1687 int len; 1688 I2O_LCT Table; 1689 PI2O_LCT_ENTRY Entry; 1690 1691 /* 1692 * sc value assumed valid 1693 */ 1694 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) 1695 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT); 1696 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc ( 1697 MessageSizeInBytes, M_TEMP, M_WAITOK)) 1698 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) { 1699 return (ENOMEM); 1700 } 1701 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes); 1702 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame), 1703 (I2O_VERSION_11 + 1704 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT)) 1705 / sizeof(U32)) << 4))); 1706 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame), 1707 I2O_EXEC_LCT_NOTIFY); 1708 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr, 1709 I2O_CLASS_MATCH_ANYCLASS); 1710 /* 1711 * Call the LCT table to determine the number of device entries 1712 * to reserve space for. 
1713 */ 1714 SG(&(Message_Ptr->SGL), 0, 1715 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table, 1716 sizeof(I2O_LCT)); 1717 /* 1718 * since this code is reused in several systems, code efficiency 1719 * is greater by using a shift operation rather than a divide by 1720 * sizeof(u_int32_t). 1721 */ 1722 I2O_LCT_setTableSize(&Table, 1723 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2); 1724 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1725 /* 1726 * Determine the size of the LCT table. 1727 */ 1728 if (sc->ha_LCT) { 1729 free (sc->ha_LCT, M_TEMP); 1730 } 1731 /* 1732 * malloc only generates contiguous memory when less than a 1733 * page is expected. We must break the request up into an SG list ... 1734 */ 1735 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <= 1736 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY))) 1737 || (len > (128 * 1024))) { /* Arbitrary */ 1738 free (Message_Ptr, M_TEMP); 1739 return (EINVAL); 1740 } 1741 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) 1742 == (PI2O_LCT)NULL) { 1743 free (Message_Ptr, M_TEMP); 1744 return (ENOMEM); 1745 } 1746 /* 1747 * since this code is reused in several systems, code efficiency 1748 * is greater by using a shift operation rather than a divide by 1749 * sizeof(u_int32_t). 1750 */ 1751 I2O_LCT_setTableSize(sc->ha_LCT, 1752 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2); 1753 /* 1754 * Convert the access to the LCT table into a SG list. 
1755 */ 1756 sg = Message_Ptr->SGL.u.Simple; 1757 v = (caddr_t)(sc->ha_LCT); 1758 for (;;) { 1759 int next, base, span; 1760 1761 span = 0; 1762 next = base = KVTOPHYS(v); 1763 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base); 1764 1765 /* How far can we go contiguously */ 1766 while ((len > 0) && (base == next)) { 1767 int size; 1768 1769 next = trunc_page(base) + PAGE_SIZE; 1770 size = next - base; 1771 if (size > len) { 1772 size = len; 1773 } 1774 span += size; 1775 v += size; 1776 len -= size; 1777 base = KVTOPHYS(v); 1778 } 1779 1780 /* Construct the Flags */ 1781 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span); 1782 { 1783 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT; 1784 if (len <= 0) { 1785 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT 1786 | I2O_SGL_FLAGS_LAST_ELEMENT 1787 | I2O_SGL_FLAGS_END_OF_BUFFER); 1788 } 1789 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw); 1790 } 1791 1792 if (len <= 0) { 1793 break; 1794 } 1795 1796 /* 1797 * Incrementing requires resizing of the packet. 
1798 */ 1799 ++sg; 1800 MessageSizeInBytes += sizeof(*sg); 1801 I2O_MESSAGE_FRAME_setMessageSize( 1802 &(Message_Ptr->StdMessageFrame), 1803 I2O_MESSAGE_FRAME_getMessageSize( 1804 &(Message_Ptr->StdMessageFrame)) 1805 + (sizeof(*sg) / sizeof(U32))); 1806 { 1807 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr; 1808 1809 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE) 1810 malloc (MessageSizeInBytes, M_TEMP, M_WAITOK)) 1811 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) { 1812 free (sc->ha_LCT, M_TEMP); 1813 sc->ha_LCT = (PI2O_LCT)NULL; 1814 free (Message_Ptr, M_TEMP); 1815 return (ENOMEM); 1816 } 1817 span = ((caddr_t)sg) - (caddr_t)Message_Ptr; 1818 bcopy ((caddr_t)Message_Ptr, 1819 (caddr_t)NewMessage_Ptr, span); 1820 free (Message_Ptr, M_TEMP); 1821 sg = (PI2O_SGE_SIMPLE_ELEMENT) 1822 (((caddr_t)NewMessage_Ptr) + span); 1823 Message_Ptr = NewMessage_Ptr; 1824 } 1825 } 1826 { int retval; 1827 1828 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 1829 free (Message_Ptr, M_TEMP); 1830 if (retval != CAM_REQ_CMP) { 1831 return (ENODEV); 1832 } 1833 } 1834 /* If the LCT table grew, lets truncate accesses */ 1835 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) { 1836 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table)); 1837 } 1838 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY) 1839 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 1840 ++Entry) { 1841 Entry->le_type = I2O_UNKNOWN; 1842 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) { 1843 1844 case I2O_CLASS_RANDOM_BLOCK_STORAGE: 1845 Entry->le_type = I2O_BSA; 1846 break; 1847 1848 case I2O_CLASS_SCSI_PERIPHERAL: 1849 Entry->le_type = I2O_SCSI; 1850 break; 1851 1852 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: 1853 Entry->le_type = I2O_FCA; 1854 break; 1855 1856 case I2O_CLASS_BUS_ADAPTER_PORT: 1857 Entry->le_type = I2O_PORT | I2O_SCSI; 1858 /* FALLTHRU */ 1859 case I2O_CLASS_FIBRE_CHANNEL_PORT: 1860 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) == 1861 
I2O_CLASS_FIBRE_CHANNEL_PORT) { 1862 Entry->le_type = I2O_PORT | I2O_FCA; 1863 } 1864 { struct ControllerInfo { 1865 I2O_PARAM_RESULTS_LIST_HEADER Header; 1866 I2O_PARAM_READ_OPERATION_RESULT Read; 1867 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info; 1868 }; 1869 defAlignLong(struct ControllerInfo, Buffer); 1870 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info; 1871 1872 Entry->le_bus = 0xff; 1873 Entry->le_target = 0xff; 1874 Entry->le_lun = 0xff; 1875 1876 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR) 1877 ASR_getParams(sc, 1878 I2O_LCT_ENTRY_getLocalTID(Entry), 1879 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO, 1880 Buffer, sizeof(struct ControllerInfo))) 1881 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) { 1882 continue; 1883 } 1884 Entry->le_target 1885 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID( 1886 Info); 1887 Entry->le_lun = 0; 1888 } /* FALLTHRU */ 1889 default: 1890 continue; 1891 } 1892 { struct DeviceInfo { 1893 I2O_PARAM_RESULTS_LIST_HEADER Header; 1894 I2O_PARAM_READ_OPERATION_RESULT Read; 1895 I2O_DPT_DEVICE_INFO_SCALAR Info; 1896 }; 1897 defAlignLong (struct DeviceInfo, Buffer); 1898 PI2O_DPT_DEVICE_INFO_SCALAR Info; 1899 1900 Entry->le_bus = 0xff; 1901 Entry->le_target = 0xff; 1902 Entry->le_lun = 0xff; 1903 1904 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR) 1905 ASR_getParams(sc, 1906 I2O_LCT_ENTRY_getLocalTID(Entry), 1907 I2O_DPT_DEVICE_INFO_GROUP_NO, 1908 Buffer, sizeof(struct DeviceInfo))) 1909 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) { 1910 continue; 1911 } 1912 Entry->le_type 1913 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info); 1914 Entry->le_bus 1915 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info); 1916 if ((Entry->le_bus > sc->ha_MaxBus) 1917 && (Entry->le_bus <= MAX_CHANNEL)) { 1918 sc->ha_MaxBus = Entry->le_bus; 1919 } 1920 Entry->le_target 1921 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info); 1922 Entry->le_lun 1923 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info); 1924 } 1925 } 1926 /* 1927 * A zero return value indicates success. 
1928 */ 1929 return (0); 1930 } /* ASR_acquireLct */ 1931 1932 /* 1933 * Initialize a message frame. 1934 * We assume that the CDB has already been set up, so all we do here is 1935 * generate the Scatter Gather list. 1936 */ 1937 STATIC INLINE PI2O_MESSAGE_FRAME 1938 ASR_init_message( 1939 IN union asr_ccb * ccb, 1940 OUT PI2O_MESSAGE_FRAME Message) 1941 { 1942 int next, span, base, rw; 1943 OUT PI2O_MESSAGE_FRAME Message_Ptr; 1944 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0); 1945 PI2O_SGE_SIMPLE_ELEMENT sg; 1946 caddr_t v; 1947 vm_size_t size, len; 1948 U32 MessageSize; 1949 1950 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */ 1951 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message), 1952 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))); 1953 1954 { 1955 int target = ccb->ccb_h.target_id; 1956 int lun = ccb->ccb_h.target_lun; 1957 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)); 1958 tid_t TID; 1959 1960 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) { 1961 PI2O_LCT_ENTRY Device; 1962 1963 TID = (tid_t)0; 1964 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 1965 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 1966 ++Device) { 1967 if ((Device->le_type != I2O_UNKNOWN) 1968 && (Device->le_bus == bus) 1969 && (Device->le_target == target) 1970 && (Device->le_lun == lun) 1971 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) { 1972 TID = I2O_LCT_ENTRY_getLocalTID(Device); 1973 ASR_setTid (sc, Device->le_bus, 1974 Device->le_target, Device->le_lun, 1975 TID); 1976 break; 1977 } 1978 } 1979 } 1980 if (TID == (tid_t)0) { 1981 return ((PI2O_MESSAGE_FRAME)NULL); 1982 } 1983 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID); 1984 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID( 1985 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID); 1986 } 1987 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 | 1988 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - 
sizeof(I2O_SG_ELEMENT)) 1989 / sizeof(U32)) << 4)); 1990 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr, 1991 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 1992 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)); 1993 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1); 1994 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE); 1995 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 1996 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC); 1997 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 1998 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 1999 I2O_SCB_FLAG_ENABLE_DISCONNECT 2000 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2001 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2002 /* 2003 * We do not need any (optional byteswapping) method access to 2004 * the Initiator & Transaction context field. 2005 */ 2006 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb); 2007 2008 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2009 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID); 2010 /* 2011 * copy the cdb over 2012 */ 2013 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength( 2014 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len); 2015 bcopy (&(ccb->csio.cdb_io), 2016 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len); 2017 2018 /* 2019 * Given a buffer describing a transfer, set up a scatter/gather map 2020 * in a ccb to map that SCSI transfer. 2021 */ 2022 2023 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR; 2024 2025 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags ( 2026 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2027 (ccb->csio.dxfer_len) 2028 ? ((rw) ? 
(I2O_SCB_FLAG_XFER_TO_DEVICE 2029 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2030 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2031 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER) 2032 : (I2O_SCB_FLAG_XFER_FROM_DEVICE 2033 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2034 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2035 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)) 2036 : (I2O_SCB_FLAG_ENABLE_DISCONNECT 2037 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2038 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2039 2040 /* 2041 * Given a transfer described by a `data', fill in the SG list. 2042 */ 2043 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0]; 2044 2045 len = ccb->csio.dxfer_len; 2046 v = ccb->csio.data_ptr; 2047 ASSERT (ccb->csio.dxfer_len >= 0); 2048 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr); 2049 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2050 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len); 2051 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2052 Message_Ptr)->SGL.u.Simple[SG_SIZE])) { 2053 span = 0; 2054 next = base = KVTOPHYS(v); 2055 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base); 2056 2057 /* How far can we go contiguously */ 2058 while ((len > 0) && (base == next)) { 2059 next = trunc_page(base) + PAGE_SIZE; 2060 size = next - base; 2061 if (size > len) { 2062 size = len; 2063 } 2064 span += size; 2065 v += size; 2066 len -= size; 2067 base = KVTOPHYS(v); 2068 } 2069 2070 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span); 2071 if (len == 0) { 2072 rw |= I2O_SGL_FLAGS_LAST_ELEMENT; 2073 } 2074 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), 2075 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw); 2076 ++sg; 2077 MessageSize += sizeof(*sg) / sizeof(U32); 2078 } 2079 /* We always do the request sense ... 
*/
	/*
	 * Fall back to the full sense buffer size when the CCB supplied
	 * no sense length of its own.
	 */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */

/*
 * Initialize the outbound (reply) FIFO: build an ExecOutboundInit
 * request, send it to the IOP, poll for its status reply, and (on the
 * first pass) allocate and post the reply message frames.
 * Returns the final reply status, or 0 if the message could not be sent.
 * (This routine was previously headed "Reset the adapter", which it
 * does not do.)
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	/* Message plus one trailing U32 used as the polled status word. */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32 R;
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status word (lives just past the message); the
	 * IOP writes its init progress/status there via the SG element.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): unbounded busy-wait; a wedged IOP would
		 * hang here -- confirm this is acceptable in the init path.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts (restore the mask saved by
		 * ASR_initiateCp).
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
*/
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO: post the physical address
		 * of every reply frame so the IOP can hand them back. */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */

/*
 * Set the system table: report every attached asr instance to this IOP
 * via an ExecSysTabSet message.  Returns 0/CAM status from ASR_queue_c,
 * or ENOMEM if a temporary allocation failed.
 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t * ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	/* One system table entry per attached adapter instance. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/*
	 * Room for the header SG, one SG per adapter, and the two
	 * trailing zero-length SG elements (hence the "3+").
	 */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 * Locate the start of the SG list.  The upper nibble of
	 * VersionOffset holds the payload offset in U32 units, so
	 * ((offset & 0xF0) >> 2) converts it to a byte offset.  A shift
	 * is used rather than a divide by sizeof(u_int32_t) for
	 * efficiency, since this code is reused in several systems.
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
		&(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One SG element per adapter's system table entry; only the last
	 * one carries END_OF_BUFFER. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two mandatory zero-length trailing elements close the list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	  | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */

/*
 * Fetch the Hardware Resource Table (HRT) from the IOP and use its
 * adapter IDs to assign bus numbers to the matching LCT entries,
 * updating sc->ha_MaxBus.  Returns 0 on success, ENODEV on failure.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to what our local Hrt buffer can actually hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/*
		 * Match each HRT entry (low 12 bits of AdapterID == TID)
		 * against the LCT; the high 16 bits carry the bus number.
		 */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */

/*
 * Enable the adapter (ExecSysEnable).  Returns 0 only when ASR_queue_c
 * returned 0; otherwise 1.
 */
STATIC INLINE int
ASR_enableSys (
	IN Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
	PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;

	Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_ENABLE);
	return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
} /* ASR_enableSys */

/*
 * Perform the stages necessary to initialize the adapter.
 * Returns non-zero if any stage failed.
 * NOTE(review): ASR_enableSys returns a 0/1 boolean which is compared
 * against CAM_REQ_CMP here; this only works if CAM_REQ_CMP == 1 --
 * confirm against the CAM status definitions.
 */
STATIC int
ASR_init(
	IN Asr_softc_t * sc)
{
	return ((ASR_initOutBound(sc) == 0)
	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */

/*
 * Send a Synchronize Cache command to the target device.
2328 */ 2329 STATIC INLINE void 2330 ASR_sync ( 2331 IN Asr_softc_t * sc, 2332 IN int bus, 2333 IN int target, 2334 IN int lun) 2335 { 2336 tid_t TID; 2337 2338 /* 2339 * We will not synchronize the device when there are outstanding 2340 * commands issued by the OS (this is due to a locked up device, 2341 * as the OS normally would flush all outstanding commands before 2342 * issuing a shutdown or an adapter reset). 2343 */ 2344 if ((sc != (Asr_softc_t *)NULL) 2345 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL) 2346 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1) 2347 && (TID != (tid_t)0)) { 2348 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2349 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2350 2351 bzero (Message_Ptr 2352 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2353 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2354 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2355 2356 I2O_MESSAGE_FRAME_setVersionOffset( 2357 (PI2O_MESSAGE_FRAME)Message_Ptr, 2358 I2O_VERSION_11 2359 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2360 - sizeof(I2O_SG_ELEMENT)) 2361 / sizeof(U32)) << 4)); 2362 I2O_MESSAGE_FRAME_setMessageSize( 2363 (PI2O_MESSAGE_FRAME)Message_Ptr, 2364 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2365 - sizeof(I2O_SG_ELEMENT)) 2366 / sizeof(U32)); 2367 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2368 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2369 I2O_MESSAGE_FRAME_setFunction( 2370 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2371 I2O_MESSAGE_FRAME_setTargetAddress( 2372 (PI2O_MESSAGE_FRAME)Message_Ptr, TID); 2373 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2374 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2375 I2O_SCSI_SCB_EXEC); 2376 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID); 2377 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2378 I2O_SCB_FLAG_ENABLE_DISCONNECT 2379 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2380 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2381 
I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2382 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2383 DPT_ORGANIZATION_ID); 2384 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2385 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE; 2386 Message_Ptr->CDB[1] = (lun << 5); 2387 2388 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2389 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2390 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2391 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2392 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2393 2394 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2395 2396 } 2397 } 2398 2399 STATIC INLINE void 2400 ASR_synchronize ( 2401 IN Asr_softc_t * sc) 2402 { 2403 int bus, target, lun; 2404 2405 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) { 2406 for (target = 0; target <= sc->ha_MaxId; ++target) { 2407 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) { 2408 ASR_sync(sc,bus,target,lun); 2409 } 2410 } 2411 } 2412 } 2413 2414 /* 2415 * Reset the HBA, targets and BUS. 2416 * Currently this resets *all* the SCSI busses. 2417 */ 2418 STATIC INLINE void 2419 asr_hbareset( 2420 IN Asr_softc_t * sc) 2421 { 2422 ASR_synchronize (sc); 2423 (void)ASR_reset (sc); 2424 } /* asr_hbareset */ 2425 2426 /* 2427 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP 2428 * limit and a reduction in error checking (in the pre 4.0 case). 2429 */ 2430 STATIC int 2431 asr_pci_map_mem ( 2432 IN device_t tag, 2433 IN Asr_softc_t * sc) 2434 { 2435 int rid; 2436 u_int32_t p, l, s; 2437 2438 /* 2439 * I2O specification says we must find first *memory* mapped BAR 2440 */ 2441 for (rid = PCIR_MAPS; 2442 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t)); 2443 rid += sizeof(u_int32_t)) { 2444 p = pci_read_config(tag, rid, sizeof(p)); 2445 if ((p & 1) == 0) { 2446 break; 2447 } 2448 } 2449 /* 2450 * Give up? 
2451 */ 2452 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2453 rid = PCIR_MAPS; 2454 } 2455 p = pci_read_config(tag, rid, sizeof(p)); 2456 pci_write_config(tag, rid, -1, sizeof(p)); 2457 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2458 pci_write_config(tag, rid, p, sizeof(p)); 2459 if (l > MAX_MAP) { 2460 l = MAX_MAP; 2461 } 2462 /* 2463 * The 2005S Zero Channel RAID solution is not a perfect PCI 2464 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once 2465 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to 2466 * BAR0+2MB and sets it's size to 2MB. The IOP registers are 2467 * accessible via BAR0, the messaging registers are accessible 2468 * via BAR1. If the subdevice code is 50 to 59 decimal. 2469 */ 2470 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s)); 2471 if (s != 0xA5111044) { 2472 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s)); 2473 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0) 2474 && (ADPTDOMINATOR_SUB_ID_START <= s) 2475 && (s <= ADPTDOMINATOR_SUB_ID_END)) { 2476 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */ 2477 } 2478 } 2479 p &= ~15; 2480 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2481 p, p + l, l, RF_ACTIVE); 2482 if (sc->ha_mem_res == (struct resource *)NULL) { 2483 return (0); 2484 } 2485 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res); 2486 if (sc->ha_Base == (void *)NULL) { 2487 return (0); 2488 } 2489 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res); 2490 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */ 2491 if ((rid += sizeof(u_int32_t)) 2492 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) { 2493 return (0); 2494 } 2495 p = pci_read_config(tag, rid, sizeof(p)); 2496 pci_write_config(tag, rid, -1, sizeof(p)); 2497 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15); 2498 pci_write_config(tag, rid, p, sizeof(p)); 2499 if (l > MAX_MAP) { 2500 l = MAX_MAP; 2501 } 2502 p &= ~15; 2503 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid, 2504 p, p + l, l, 
RF_ACTIVE); 2505 if (sc->ha_mes_res == (struct resource *)NULL) { 2506 return (0); 2507 } 2508 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) { 2509 return (0); 2510 } 2511 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res); 2512 } else { 2513 sc->ha_Fvirt = (U8 *)(sc->ha_Virt); 2514 } 2515 return (1); 2516 } /* asr_pci_map_mem */ 2517 2518 /* 2519 * A simplified copy of the real pci_map_int with additional 2520 * registration requirements. 2521 */ 2522 STATIC int 2523 asr_pci_map_int ( 2524 IN device_t tag, 2525 IN Asr_softc_t * sc) 2526 { 2527 int rid = 0; 2528 int error; 2529 2530 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid, 2531 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 2532 if (sc->ha_irq_res == (struct resource *)NULL) { 2533 return (0); 2534 } 2535 error = bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM, 2536 (driver_intr_t *)asr_intr, (void *)sc, 2537 &(sc->ha_intr), NULL); 2538 if (error) { 2539 return (0); 2540 } 2541 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char)); 2542 return (1); 2543 } /* asr_pci_map_int */ 2544 2545 /* 2546 * Attach the devices, and virtual devices to the driver list. 2547 */ 2548 STATIC ATTACH_RET 2549 asr_attach (ATTACH_ARGS) 2550 { 2551 Asr_softc_t * sc; 2552 struct scsi_inquiry_data * iq; 2553 ATTACH_SET(); 2554 2555 sc = malloc(sizeof(*sc), M_DEVBUF, M_INTWAIT); 2556 if (Asr_softc == (Asr_softc_t *)NULL) { 2557 /* 2558 * Fixup the OS revision as saved in the dptsig for the 2559 * engine (dptioctl.h) to pick up. 
2560 */ 2561 bcopy (osrelease, &ASR_sig.dsDescription[16], 5); 2562 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj); 2563 } 2564 /* 2565 * Initialize the software structure 2566 */ 2567 bzero (sc, sizeof(*sc)); 2568 LIST_INIT(&(sc->ha_ccb)); 2569 /* Link us into the HA list */ 2570 { 2571 Asr_softc_t **ha; 2572 2573 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next)); 2574 *(ha) = sc; 2575 } 2576 { 2577 PI2O_EXEC_STATUS_GET_REPLY status; 2578 int size; 2579 2580 /* 2581 * This is the real McCoy! 2582 */ 2583 if (!asr_pci_map_mem(tag, sc)) { 2584 printf ("asr%d: could not map memory\n", unit); 2585 ATTACH_RETURN(ENXIO); 2586 } 2587 /* Enable if not formerly enabled */ 2588 pci_write_config (tag, PCIR_COMMAND, 2589 pci_read_config (tag, PCIR_COMMAND, sizeof(char)) 2590 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char)); 2591 /* Knowledge is power, responsibility is direct */ 2592 { 2593 struct pci_devinfo { 2594 STAILQ_ENTRY(pci_devinfo) pci_links; 2595 struct resource_list resources; 2596 pcicfgregs cfg; 2597 } * dinfo = device_get_ivars(tag); 2598 sc->ha_pciBusNum = dinfo->cfg.bus; 2599 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3) 2600 | dinfo->cfg.func; 2601 } 2602 /* Check if the device is there? 
*/ 2603 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0) 2604 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc ( 2605 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK)) 2606 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) 2607 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) { 2608 printf ("asr%d: could not initialize hardware\n", unit); 2609 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */ 2610 } 2611 sc->ha_SystemTable.OrganizationID = status->OrganizationID; 2612 sc->ha_SystemTable.IOP_ID = status->IOP_ID; 2613 sc->ha_SystemTable.I2oVersion = status->I2oVersion; 2614 sc->ha_SystemTable.IopState = status->IopState; 2615 sc->ha_SystemTable.MessengerType = status->MessengerType; 2616 sc->ha_SystemTable.InboundMessageFrameSize 2617 = status->InboundMFrameSize; 2618 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow 2619 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO)); 2620 2621 if (!asr_pci_map_int(tag, (void *)sc)) { 2622 printf ("asr%d: could not map interrupt\n", unit); 2623 ATTACH_RETURN(ENXIO); 2624 } 2625 2626 /* Adjust the maximim inbound count */ 2627 if (((sc->ha_QueueSize 2628 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) 2629 > MAX_INBOUND) 2630 || (sc->ha_QueueSize == 0)) { 2631 sc->ha_QueueSize = MAX_INBOUND; 2632 } 2633 2634 /* Adjust the maximum outbound count */ 2635 if (((sc->ha_Msgs_Count 2636 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) 2637 > MAX_OUTBOUND) 2638 || (sc->ha_Msgs_Count == 0)) { 2639 sc->ha_Msgs_Count = MAX_OUTBOUND; 2640 } 2641 if (sc->ha_Msgs_Count > sc->ha_QueueSize) { 2642 sc->ha_Msgs_Count = sc->ha_QueueSize; 2643 } 2644 2645 /* Adjust the maximum SG size to adapter */ 2646 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize( 2647 status) << 2)) > MAX_INBOUND_SIZE) { 2648 size = MAX_INBOUND_SIZE; 2649 } 2650 free (status, M_TEMP); 2651 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2652 + sizeof(I2O_SG_ELEMENT)) / 
sizeof(I2O_SGE_SIMPLE_ELEMENT); 2653 } 2654 2655 /* 2656 * Only do a bus/HBA reset on the first time through. On this 2657 * first time through, we do not send a flush to the devices. 2658 */ 2659 if (ASR_init(sc) == 0) { 2660 struct BufferInfo { 2661 I2O_PARAM_RESULTS_LIST_HEADER Header; 2662 I2O_PARAM_READ_OPERATION_RESULT Read; 2663 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2664 }; 2665 defAlignLong (struct BufferInfo, Buffer); 2666 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info; 2667 # define FW_DEBUG_BLED_OFFSET 8 2668 2669 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR) 2670 ASR_getParams(sc, 0, 2671 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO, 2672 Buffer, sizeof(struct BufferInfo))) 2673 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) { 2674 sc->ha_blinkLED = sc->ha_Fvirt 2675 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info) 2676 + FW_DEBUG_BLED_OFFSET; 2677 } 2678 if (ASR_acquireLct(sc) == 0) { 2679 (void)ASR_acquireHrt(sc); 2680 } 2681 } else { 2682 printf ("asr%d: failed to initialize\n", unit); 2683 ATTACH_RETURN(ENXIO); 2684 } 2685 /* 2686 * Add in additional probe responses for more channels. We 2687 * are reusing the variable `target' for a channel loop counter. 2688 * Done here because of we need both the acquireLct and 2689 * acquireHrt data. 
2690 */ 2691 { PI2O_LCT_ENTRY Device; 2692 2693 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY) 2694 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); 2695 ++Device) { 2696 if (Device->le_type == I2O_UNKNOWN) { 2697 continue; 2698 } 2699 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) { 2700 if (Device->le_target > sc->ha_MaxId) { 2701 sc->ha_MaxId = Device->le_target; 2702 } 2703 if (Device->le_lun > sc->ha_MaxLun) { 2704 sc->ha_MaxLun = Device->le_lun; 2705 } 2706 } 2707 if (((Device->le_type & I2O_PORT) != 0) 2708 && (Device->le_bus <= MAX_CHANNEL)) { 2709 /* Do not increase MaxId for efficiency */ 2710 sc->ha_adapter_target[Device->le_bus] 2711 = Device->le_target; 2712 } 2713 } 2714 } 2715 2716 2717 /* 2718 * Print the HBA model number as inquired from the card. 2719 */ 2720 2721 printf ("asr%d:", unit); 2722 2723 if ((iq = (struct scsi_inquiry_data *)malloc ( 2724 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK)) 2725 != (struct scsi_inquiry_data *)NULL) { 2726 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message); 2727 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr; 2728 int posted = 0; 2729 2730 bzero (iq, sizeof(struct scsi_inquiry_data)); 2731 bzero (Message_Ptr 2732 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message), 2733 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2734 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)); 2735 2736 I2O_MESSAGE_FRAME_setVersionOffset( 2737 (PI2O_MESSAGE_FRAME)Message_Ptr, 2738 I2O_VERSION_11 2739 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2740 - sizeof(I2O_SG_ELEMENT)) 2741 / sizeof(U32)) << 4)); 2742 I2O_MESSAGE_FRAME_setMessageSize( 2743 (PI2O_MESSAGE_FRAME)Message_Ptr, 2744 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) 2745 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) 2746 / sizeof(U32)); 2747 I2O_MESSAGE_FRAME_setInitiatorAddress ( 2748 (PI2O_MESSAGE_FRAME)Message_Ptr, 1); 2749 I2O_MESSAGE_FRAME_setFunction( 2750 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE); 2751 
I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode ( 2752 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2753 I2O_SCSI_SCB_EXEC); 2754 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2755 I2O_SCB_FLAG_ENABLE_DISCONNECT 2756 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2757 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER); 2758 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1); 2759 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID( 2760 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, 2761 DPT_ORGANIZATION_ID); 2762 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6); 2763 Message_Ptr->CDB[0] = INQUIRY; 2764 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data); 2765 if (Message_Ptr->CDB[4] == 0) { 2766 Message_Ptr->CDB[4] = 255; 2767 } 2768 2769 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr, 2770 (I2O_SCB_FLAG_XFER_FROM_DEVICE 2771 | I2O_SCB_FLAG_ENABLE_DISCONNECT 2772 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG 2773 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)); 2774 2775 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount( 2776 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, 2777 sizeof(struct scsi_inquiry_data)); 2778 SG(&(Message_Ptr->SGL), 0, 2779 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, 2780 iq, sizeof(struct scsi_inquiry_data)); 2781 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr); 2782 2783 if (iq->vendor[0] && (iq->vendor[0] != ' ')) { 2784 printf (" "); 2785 ASR_prstring (iq->vendor, 8); 2786 ++posted; 2787 } 2788 if (iq->product[0] && (iq->product[0] != ' ')) { 2789 printf (" "); 2790 ASR_prstring (iq->product, 16); 2791 ++posted; 2792 } 2793 if (iq->revision[0] && (iq->revision[0] != ' ')) { 2794 printf (" FW Rev. "); 2795 ASR_prstring (iq->revision, 4); 2796 ++posted; 2797 } 2798 free ((caddr_t)iq, M_TEMP); 2799 if (posted) { 2800 printf (","); 2801 } 2802 } 2803 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1, 2804 (sc->ha_QueueSize > MAX_INBOUND) ? 
MAX_INBOUND : sc->ha_QueueSize);

	/*
	 * fill in the prototype cam_path.
	 */
	{
		int bus;
		union asr_ccb * ccb;

		if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
			printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
			ATTACH_RETURN(ENOMEM);
		}
		/* Register one SIM + path per channel with CAM. */
		for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
			int QueueSize = sc->ha_QueueSize;

			if (QueueSize > MAX_INBOUND) {
				QueueSize = MAX_INBOUND;
			}

			/*
			 * Construct our first channel SIM entry
			 */
			sc->ha_sim[bus] = cam_sim_alloc(
			  asr_action, asr_poll, "asr", sc,
			  unit, 1, QueueSize, NULL);
			if (sc->ha_sim[bus] == NULL)
				continue;

			if (xpt_bus_register(sc->ha_sim[bus], bus)
			  != CAM_SUCCESS) {
				cam_sim_free(sc->ha_sim[bus]);
				sc->ha_sim[bus] = NULL;
				continue;
			}

			if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
			  cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
			  CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
				xpt_bus_deregister(
				  cam_sim_path(sc->ha_sim[bus]));
				cam_sim_free(sc->ha_sim[bus]);
				sc->ha_sim[bus] = NULL;
				continue;
			}
		}
		asr_free_ccb (ccb);
	}
	/*
	 * Generate the device node information
	 */
	make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
	ATTACH_RETURN(0);
} /* asr_attach */

/*
 * CAM poll entry point: service the adapter without interrupts.
 */
STATIC void
asr_poll(
	IN struct cam_sim *sim)
{
	asr_intr(cam_sim_softc(sim));
} /* asr_poll */

/*
 * CAM action entry point: dispatch an XPT request (ccb) for this SIM.
 * Every path through the switch completes the ccb via xpt_done().
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb * ccb)
{
	struct Asr_softc * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash our softc in the ccb for the completion side. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/* Translate the ccb into an I2O message and queue it. */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	/*
	 * NOTE(review): REPORT_LUNS looks like a SCSI opcode, not an XPT
	 * function code, yet it appears as a case label in this func_code
	 * switch -- confirm whether this conditional case is intentional.
	 */
# if (defined(REPORT_LUNS))
	case REPORT_LUNS:
# endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		/* NOTE(review): target_mask is computed but never used. */
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Synthesize a head/sector geometry from the volume size. */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */


/*
 * Handle processing of current CCB as pointed to by the Status.
 */
/*
 * Interrupt service routine.  Drains the adapter's outbound (reply) FIFO,
 * completing the CCB recorded in each reply frame's initiator context.
 * Returns non-zero if at least one reply was processed (i.e. the interrupt
 * belonged to this adapter).
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

	/*
	 * Keep draining while the adapter's status register reports the
	 * mask bit; `processed' becomes 1 after the first iteration.
	 */
	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb * ccb;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * The outbound FIFO is read twice before concluding it is
		 * empty — presumably a retry for a hardware timing race;
		 * TODO(review): confirm against the adapter erratum.
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus (physical) offset into our virtual copy */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			/* The adapter failed to deliver the original frame */
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#			if (I2O_UTIL_NOP != 0)
				I2O_MESSAGE_FRAME_setFunction (
				  &(Message_Ptr->StdMessageFrame),
				  I2O_UTIL_NOP);
#			endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status onto a CAM completion status */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested transfer length minus what moved */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of the three limits */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		/*
		 * A CCB with a CAM path completes through the transport;
		 * one without a path belongs to an internal sleeper.
		 */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup ((caddr_t)ccb);
		}
	}
	return (processed);
} /* asr_intr */

#undef QueueSize /* Grrrr */
#undef SG_Size /* Grrrr */

/*
 * Meant to be included at the bottom of asr.c !!!
 */

/*
 * Included here as hard coded. Done because other necessary include
 * files utilize C++ comment structures which make them a nuisance to
 * included here just to pick up these three typedefs.
 */
typedef U32 DPT_TAG_T;
typedef U32 DPT_MSG_T;
typedef U32 DPT_RTN_T;

#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
#include "osd_unix.h"

/* The control device's unit number is its minor number */
#define asr_unit(dev) minor(dev)

/*
 * Map a control device to its adapter softc by walking the global
 * Asr_softc list until a SIM unit number matches the device's unit.
 * Returns NULL when no adapter matches.
 */
STATIC INLINE Asr_softc_t *
ASR_get_sc (
	IN dev_t dev)
{
	int unit = asr_unit(dev);
	OUT Asr_softc_t * sc = Asr_softc;

	while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
		sc = sc->ha_next;
	}
	return (sc);
} /* ASR_get_sc */

/* Non-zero while the control device is open; enforces single-open */
STATIC u_int8_t ASR_ctlr_held;
#if (!defined(UNREFERENCED_PARAMETER))
# define UNREFERENCED_PARAMETER(x) (void)(x)
#endif

/*
 * Open entry point for the control device.
 *
 * Returns ENODEV when the unit maps to no adapter, EBUSY when the
 * device is already held open, or the suser_cred() error when the
 * caller lacks privilege; 0 on success.  The crit_enter()/crit_exit()
 * pair makes the held-flag test-and-set atomic with respect to
 * concurrent openers.
 */
STATIC int
asr_open(
	IN dev_t dev,
	int32_t flags,
	int32_t ifmt,
	IN d_thread_t *td)
{
	OUT int error;
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);

	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	/* Opens arrive in process context; the credential check needs it */
	KKASSERT(td->td_proc);
	crit_enter();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
		++ASR_ctlr_held;
	}
	crit_exit();
	return (error);
} /* asr_open */

/*
 * Close entry point: unconditionally release the single-open flag.
 * All parameters are ignored.
 */
STATIC int
asr_close(
	dev_t dev,
	int flags,
	int ifmt,
	d_thread_t *td)
{
	UNREFERENCED_PARAMETER(dev);
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);
	UNREFERENCED_PARAMETER(td);

	ASR_ctlr_held = 0;
	return (0);
} /* asr_close */


/*-------------------------------------------------------------------------*/
/* Function ASR_queue_i                                                    */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/* Asr_softc_t * : HBA miniport driver's adapter data storage.
 */
/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command             */
/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure          */
/*                                                                         */
/* This Function Will Take The User Request Packet And Convert It To An    */
/* I2O MSG And Send It Off To The Adapter.                                 */
/*                                                                         */
/* Return : 0 For OK, Error Code Otherwise                                 */
/*-------------------------------------------------------------------------*/
STATIC INLINE int
ASR_queue_i(
	IN Asr_softc_t * sc,
	INOUT PI2O_MESSAGE_FRAME Packet)
{
	union asr_ccb * ccb;
	/*
	 * NOTE: `Reply' points into the CALLER'S user-space buffer (just
	 * past the inbound frame); it is only ever dereferenced through
	 * copyin()/copyout(), never directly.
	 */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
	PI2O_MESSAGE_FRAME Message_Ptr;
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
	int MessageSizeInBytes;
	int ReplySizeInBytes;
	int error;
	int s;
	/* Scatter Gather buffer list */
	struct ioctlSgList_S {
		SLIST_ENTRY(ioctlSgList_S) link;
		caddr_t UserSpace;	/* original user-space address */
		I2O_FLAGS_COUNT FlagsCount;	/* original SG flags/length */
		char KernelSpace[sizeof(long)];	/* variable-length bounce buffer */
	} * elm;
	/* Generates a `first' entry */
	SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;

	if (ASR_getBlinkLedCode(sc)) {
		debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
		  ASR_getBlinkLedCode(sc));
		return (EIO);
	}
	/* Copy in the message into a local allocation */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	 == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
		return (error);
	}
	/* Acquire information to determine type of packet */
	/* MessageSize is in 32-bit words; convert to bytes */
	MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
	/* The offset of the reply information within the user packet */
	Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
	  + MessageSizeInBytes);

	/* Check if the message is a synchronous initialization command */
	s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
	free (Message_Ptr, M_TEMP);
	switch (s) {

	case I2O_EXEC_IOP_RESET:
	{	U32 status;

		status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("resetIOP done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_STATUS_GET:
	{	I2O_EXEC_STATUS_GET_REPLY status;

		if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
		  == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
			debug_usr_cmd_printf ("getStatus failed\n");
			return (ENXIO);
		}
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("getStatus done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}

	case I2O_EXEC_OUTBOUND_INIT:
	{	U32 status;

		status = ASR_initOutBound(sc);
		ReplySizeInBytes = sizeof(status);
		debug_usr_cmd_printf ("intOutBound done\n");
		return (copyout ((caddr_t)&status, (caddr_t)Reply,
		  ReplySizeInBytes));
	}
	}

	/* Determine if the message size is valid */
	if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
	 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
		debug_usr_cmd_printf ("Packet size %d incorrect\n",
		  MessageSizeInBytes);
		return (EINVAL);
	}

	/* Re-acquire the frame, this time at its full declared length */
	if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
	  M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  MessageSizeInBytes);
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
	  MessageSizeInBytes)) != 0) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
		  MessageSizeInBytes, error);
		return (error);
	}

	/* Check the size of the reply frame, and start constructing */

	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
	 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to acquire I2O_MESSAGE_FRAME memory\n");
		return (ENOMEM);
	}
	if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
	  sizeof(I2O_MESSAGE_FRAME))) != 0) {
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame, errno=%d\n",
		  error);
		return (error);
	}
	ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
	free (Reply_Ptr, M_TEMP);
	if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
		free (Message_Ptr, M_TEMP);
		/*
		 * NOTE(review): `error' is 0 here (the copyin above
		 * succeeded), so the errno printed below is stale.
		 */
		debug_usr_cmd_printf (
		  "Failed to copy in reply frame[%d], errno=%d\n",
		  ReplySizeInBytes, error);
		return (EINVAL);
	}

	/* Allocate the kernel reply frame at the larger of the two sizes */
	if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
	  ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
	    ? ReplySizeInBytes
	    : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
	  M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
		free (Message_Ptr, M_TEMP);
		debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
		  ReplySizeInBytes);
		return (ENOMEM);
	}
	(void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
	/* Straight copies: no byteswap method access needed for contexts */
	Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
	  = Message_Ptr->InitiatorContext;
	Reply_Ptr->StdReplyFrame.TransactionContext
	  = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
	I2O_MESSAGE_FRAME_setMsgFlags(
	  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
	  I2O_MESSAGE_FRAME_getMsgFlags(
	    &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
	  | I2O_MESSAGE_FLAGS_REPLY);

	/* Check if the message is a special case command */
	switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
	case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
		if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
		  Message_Ptr) & 0xF0) >> 2)) {
			free (Message_Ptr, M_TEMP);
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply_Ptr->StdReplyFrame),
			  (ASR_setSysTab(sc) != CAM_REQ_CMP));
			I2O_MESSAGE_FRAME_setMessageSize(
			  &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
			  sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
			error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
			  ReplySizeInBytes);
			free (Reply_Ptr, M_TEMP);
			return (error);
		}
	}

	/* Deal in the general case */
	/* First allocate and optionally copy in each scatter gather element */
	SLIST_INIT(&sgList);
	if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
		PI2O_SGE_SIMPLE_ELEMENT sg;

		/*
		 * since this code is reused in several systems, code
		 * efficiency is greater by using a shift operation rather
		 * than a divide by sizeof(u_int32_t).
		 */
		sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
		  + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
		    >> 2));
		/* Walk each simple SG element of the inbound frame */
		while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
		  + MessageSizeInBytes)) {
			caddr_t v;
			int len;

			if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
				error = EINVAL;
				break;
			}
			len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
			debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
			  sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
			  + ((I2O_MESSAGE_FRAME_getVersionOffset(
				Message_Ptr) & 0xF0) >> 2)),
			  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);

			/*
			 * Bounce buffer for this element; KernelSpace is the
			 * variable-length tail of the allocation.
			 */
			if ((elm = (struct ioctlSgList_S *)malloc (
			  sizeof(*elm) - sizeof(elm->KernelSpace) + len,
			  M_TEMP, M_WAITOK))
			 == (struct ioctlSgList_S *)NULL) {
				debug_usr_cmd_printf (
				  "Failed to allocate SG[%d]\n", len);
				error = ENOMEM;
				break;
			}
			SLIST_INSERT_HEAD(&sgList, elm, link);
			elm->FlagsCount = sg->FlagsCount;
			/* The frame's "physical address" is really the user VA */
			elm->UserSpace = (caddr_t)
			  (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
			v = elm->KernelSpace;
			/* Copy in outgoing data (DIR bit could be invalid) */
			if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
			 != 0) {
				break;
			}
			/*
			 * If the buffer is not contiguous, lets
			 * break up the scatter/gather entries.
			 */
			while ((len > 0)
			 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
				int next, base, span;

				span = 0;
				next = base = KVTOPHYS(v);
				I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
				  base);

				/* How far can we go physically contiguously */
				while ((len > 0) && (base == next)) {
					int size;

					next = trunc_page(base) + PAGE_SIZE;
					size = next - base;
					if (size > len) {
						size = len;
					}
					span += size;
					v += size;
					len -= size;
					base = KVTOPHYS(v);
				}

				/* Construct the Flags */
				I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
				  span);
				{
					int flags = I2O_FLAGS_COUNT_getFlags(
					  &(elm->FlagsCount));
					/* Any remaining length? */
					if (len > 0) {
						flags &=
						  ~(I2O_SGL_FLAGS_END_OF_BUFFER
						   | I2O_SGL_FLAGS_LAST_ELEMENT);
					}
					I2O_FLAGS_COUNT_setFlags(
					  &(sg->FlagsCount), flags);
				}

				debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
				  sg - (PI2O_SGE_SIMPLE_ELEMENT)
				    ((char *)Message_Ptr
				  + ((I2O_MESSAGE_FRAME_getVersionOffset(
					Message_Ptr) & 0xF0) >> 2)),
				  I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
				  span);
				if (len <= 0) {
					break;
				}

				/*
				 * Incrementing requires resizing of the
				 * packet, and moving up the existing SG
				 * elements.
				 */
				++sg;
				MessageSizeInBytes += sizeof(*sg);
				I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
				  I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
				  + (sizeof(*sg) / sizeof(U32)));
				{
					PI2O_MESSAGE_FRAME NewMessage_Ptr;

					/*
					 * Grow the frame: copy everything up
					 * to `sg', then duplicate the element
					 * at sg-1 onward to open a slot.
					 */
					if ((NewMessage_Ptr
					  = (PI2O_MESSAGE_FRAME)
					    malloc (MessageSizeInBytes,
					     M_TEMP, M_WAITOK))
					  == (PI2O_MESSAGE_FRAME)NULL) {
						debug_usr_cmd_printf (
						  "Failed to acquire frame[%d] memory\n",
						  MessageSizeInBytes);
						error = ENOMEM;
						break;
					}
					span = ((caddr_t)sg)
					  - (caddr_t)Message_Ptr;
					bcopy ((caddr_t)Message_Ptr,
					  (caddr_t)NewMessage_Ptr, span);
					bcopy ((caddr_t)(sg-1),
					  ((caddr_t)NewMessage_Ptr) + span,
					  MessageSizeInBytes - span);
					free (Message_Ptr, M_TEMP);
					sg = (PI2O_SGE_SIMPLE_ELEMENT)
					  (((caddr_t)NewMessage_Ptr) + span);
					Message_Ptr = NewMessage_Ptr;
				}
			}
			if ((error)
			 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
			  & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
				break;
			}
			++sg;
		}
		if (error) {
			/* Unwind: release all bounce buffers collected so far */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			free (Message_Ptr, M_TEMP);
			return (error);
		}
	}

	debug_usr_cmd_printf ("Inbound: ");
	debug_usr_cmd_dump_message(Message_Ptr);

	/* Send the command */
	if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
		/* Free up in-kernel buffers */
		while ((elm = SLIST_FIRST(&sgList))
		  != (struct ioctlSgList_S *)NULL) {
			SLIST_REMOVE_HEAD(&sgList, link);
			free (elm, M_TEMP);
		}
		free (Reply_Ptr, M_TEMP);
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(
	  (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);

	(void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	free (Message_Ptr, M_TEMP);

	/*
	 * Wait for the board to report a finished instruction.
	 */
	crit_enter();
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		if (ASR_getBlinkLedCode(sc)) {
			/* Reset Adapter */
			printf ("asr%d: Blink LED 0x%x resetting adapter\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ASR_getBlinkLedCode(sc));
			if (ASR_reset (sc) == ENXIO) {
				/* Command Cleanup */
				ASR_ccbRemove(sc, ccb);
			}
			crit_exit();
			/* Free up in-kernel buffers */
			while ((elm = SLIST_FIRST(&sgList))
			  != (struct ioctlSgList_S *)NULL) {
				SLIST_REMOVE_HEAD(&sgList, link);
				free (elm, M_TEMP);
			}
			free (Reply_Ptr, M_TEMP);
			asr_free_ccb(ccb);
			return (EIO);
		}
		/* Check every second for BlinkLed */
		tsleep((caddr_t)ccb, 0, "asr", hz);
	}
	crit_exit();

	debug_usr_cmd_printf ("Outbound: ");
	debug_usr_cmd_dump_message(Reply_Ptr);

	/* Non-zero detailed status marks any non-CAM_REQ_CMP completion */
	I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
	  &(Reply_Ptr->StdReplyFrame),
	  (ccb->ccb_h.status != CAM_REQ_CMP));

	if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
		  ccb->csio.dxfer_len - ccb->csio.resid);
	}
	/* Append auto-sense data when the caller's reply frame has room */
	if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
	 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
	  - I2O_SCSI_SENSE_DATA_SZ))) {
		int size = ReplySizeInBytes
		  - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
		  - I2O_SCSI_SENSE_DATA_SZ;

		if (size > sizeof(ccb->csio.sense_data)) {
			size = sizeof(ccb->csio.sense_data);
		}
		bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
		  size);
		I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
		  Reply_Ptr, size);
	}

	/* Free up in-kernel buffers */
	while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
		/* Copy out as necessary */
		if ((error == 0)
		/* DIR bit considered `valid', error due to ignorance works */
		 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
		  & I2O_SGL_FLAGS_DIR) == 0)) {
			error = copyout ((caddr_t)(elm->KernelSpace),
			  elm->UserSpace,
			  I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
		}
		SLIST_REMOVE_HEAD(&sgList, link);
		free (elm, M_TEMP);
	}
	if (error == 0) {
		/* Copy reply frame to user space */
		error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
		  ReplySizeInBytes);
	}
	free (Reply_Ptr, M_TEMP);
	asr_free_ccb(ccb);

	return (error);
} /* ASR_queue_i */

/*----------------------------------------------------------------------*/
/* Function asr_ioctl                                                   */
/*----------------------------------------------------------------------*/
/* The parameters passed to this function are :                         */
/* dev  : Device number.                                                */
/* cmd  : Ioctl Command                                                 */
/* data : User Argument Passed In.
 */
/* flag : Mode Parameter                                                */
/* proc : Process Parameter                                             */
/*                                                                      */
/* This function is the user interface into this adapter driver         */
/*                                                                      */
/* Return : zero if OK, error code if not                               */
/*----------------------------------------------------------------------*/

/*
 * Two calling conventions coexist here: when the high 16 bits of `cmd'
 * are set, `data' is the kernel copy of the argument (bcopy directly);
 * otherwise (the "traditional" interface) `data' holds a user-space
 * pointer that must go through copyout().
 */
STATIC int
asr_ioctl(
	IN dev_t dev,
	IN u_long cmd,
	INOUT caddr_t data,
	int flag,
	struct thread *td)
{
	int i, j;
	OUT int error = 0;
	Asr_softc_t * sc = ASR_get_sc (dev);
	UNREFERENCED_PARAMETER(flag);
	UNREFERENCED_PARAMETER(td);

	/* Unknown unit or unknown command both fall through to EINVAL */
	if (sc != (Asr_softc_t *)NULL)
	switch(cmd) {

	case DPT_SIGNATURE:
#	if (dsDescription_size != 50)
	case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
#	endif
		if (cmd & 0xFFFF0000) {
			(void)bcopy ((caddr_t)(&ASR_sig), data,
			  sizeof(dpt_sig_S));
			return (0);
		}
	/* Traditional version of the ioctl interface */
	case DPT_SIGNATURE & 0x0000FFFF:
		return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data),
		  sizeof(dpt_sig_S)));

	/* Traditional version of the ioctl interface */
	case DPT_CTRLINFO & 0x0000FFFF:
	case DPT_CTRLINFO: {
		/* Controller-information record returned to the caller */
		struct {
			u_int16_t length;
			u_int16_t drvrHBAnum;
			u_int32_t baseAddr;
			u_int16_t blinkState;
			u_int8_t pciBusNum;
			u_int8_t pciDeviceNum;
			u_int16_t hbaFlags;
			u_int16_t Interrupt;
			u_int32_t reserved1;
			u_int32_t reserved2;
			u_int32_t reserved3;
		} CtlrInfo;

		bzero (&CtlrInfo, sizeof(CtlrInfo));
		CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
		CtlrInfo.drvrHBAnum = asr_unit(dev);
		CtlrInfo.baseAddr = (u_long)sc->ha_Base;
		/* -1 means "no blink code"; report it as 0 */
		i = ASR_getBlinkLedCode (sc);
		if (i == -1) {
			i = 0;
		}
		CtlrInfo.blinkState = i;
		CtlrInfo.pciBusNum = sc->ha_pciBusNum;
		CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA 0x0002
#define FLG_OSD_I2O 0x0004
		CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		CtlrInfo.Interrupt = sc->ha_irq;
		if (cmd & 0xFFFF0000) {
			bcopy (&CtlrInfo, data, sizeof(CtlrInfo));
		} else {
			error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
		}
	}	return (error);

	/* Traditional version of the ioctl interface */
	case DPT_SYSINFO & 0x0000FFFF:
	case DPT_SYSINFO: {
		sysInfo_S Info;
		char * cp;
		/* Kernel Specific ptok `hack' */
#		define ptok(a) ((char *)(a) + KERNBASE)

		bzero (&Info, sizeof(Info));

		/* Appears I am the only person in the Kernel doing this */
		/* Read legacy drive-type nibbles from CMOS register 0x12 */
		outb (0x70, 0x12);
		i = inb(0x71);
		j = i >> 4;
		if (i == 0x0f) {
			outb (0x70, 0x19);
			j = inb (0x71);
		}
		Info.drive0CMOS = j;

		j = i & 0x0f;
		if (i == 0x0f) {
			outb (0x70, 0x1a);
			j = inb (0x71);
		}
		Info.drive1CMOS = j;

		/* BIOS data area: number of fixed disks at 0x475 */
		Info.numDrives = *((char *)ptok(0x475));

		Info.processorFamily = ASR_sig.dsProcessorFamily;
		switch (cpu) {
		case CPU_386SX: case CPU_386:
			Info.processorType = PROC_386; break;
		case CPU_486SX: case CPU_486:
			Info.processorType = PROC_486; break;
		case CPU_586:
			Info.processorType = PROC_PENTIUM; break;
		case CPU_686:
			Info.processorType = PROC_SEXIUM; break;
		}
		Info.osType = OS_BSDI_UNIX;
		Info.osMajorVersion = osrelease[0] - '0';
		Info.osMinorVersion = osrelease[2] - '0';
		/* Info.osRevision = 0; */
		/* Info.osSubRevision = 0; */
		Info.busType = SI_PCI_BUS;
		Info.flags = SI_CMOS_Valid | SI_NumDrivesValid
		  | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM;

		/* Go Out And Look For I2O SmartROM */
		/* Scan the option-ROM window for the 0xAA55 signature */
		for(j = 0xC8000; j < 0xE0000; j += 2048) {
			int k;

			cp = ptok(j);
			if (*((unsigned short *)cp) != 0xAA55) {
				continue;
			}
			/* cp[2] is the ROM size in 512-byte units */
			j += (cp[2] * 512) - 2048;
			if ((*((u_long *)(cp + 6))
			  != ('S' + (' ' * 256) + (' ' * 65536L)))
			 || (*((u_long *)(cp + 10))
			  != ('I' + ('2' * 256) + ('0' * 65536L)))) {
				continue;
			}
			/* Hunt for the " v" version marker in the header */
			cp += 0x24;
			for (k = 0; k < 64; ++k) {
				if (*((unsigned short *)cp)
				  == (' ' + ('v' * 256))) {
					break;
				}
			}
			if (k < 64) {
				Info.smartROMMajorVersion
				  = *((unsigned char *)(cp += 4)) - '0';
				Info.smartROMMinorVersion
				  = *((unsigned char *)(cp += 2));
				Info.smartROMRevision
				  = *((unsigned char *)(++cp));
				Info.flags |= SI_SmartROMverValid;
				Info.flags &= ~SI_NO_SmartROM;
				break;
			}
		}
		/* Get The Conventional Memory Size From CMOS */
		outb (0x70, 0x16);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x15);
		j |= inb(0x71);
		Info.conventionalMemSize = j;

		/* Get The Extended Memory Found At Power On From CMOS */
		outb (0x70, 0x31);
		j = inb (0x71);
		j <<= 8;
		outb (0x70, 0x30);
		j |= inb(0x71);
		Info.extendedMemSize = j;
		Info.flags |= SI_MemorySizeValid;

#		if (defined(THIS_IS_BROKEN))
		/* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */
		if (Info.numDrives > 0) {
			/*
			 * Get The Pointer From Int 41 For The First
			 * Drive Parameters
			 */
			j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4)
			  + (unsigned)(*((unsigned short *)ptok(0x104+0)));
			/*
			 * It appears that SmartROM's Int41/Int46 pointers
			 * use memory that gets stepped on by the kernel
			 * loading. We no longer have access to this
			 * geometry information but try anyways (!?)
			 */
			Info.drives[0].cylinders = *((unsigned char *)ptok(j));
			++j;
			Info.drives[0].cylinders += ((int)*((unsigned char *)
			  ptok(j))) << 8;
			++j;
			Info.drives[0].heads = *((unsigned char *)ptok(j));
			j += 12;
			Info.drives[0].sectors = *((unsigned char *)ptok(j));
			Info.flags |= SI_DriveParamsValid;
			if ((Info.drives[0].cylinders == 0)
			 || (Info.drives[0].heads == 0)
			 || (Info.drives[0].sectors == 0)) {
				Info.flags &= ~SI_DriveParamsValid;
			}
			if (Info.numDrives > 1) {
				/*
				 * Get The Pointer From Int 46 For The
				 * Second Drive Parameters
				 */
				j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4)
				  + (unsigned)(*((unsigned short *)ptok(0x118+0)));
				Info.drives[1].cylinders = *((unsigned char *)
				  ptok(j));
				++j;
				Info.drives[1].cylinders += ((int)
				  *((unsigned char *)ptok(j))) << 8;
				++j;
				Info.drives[1].heads = *((unsigned char *)
				  ptok(j));
				j += 12;
				Info.drives[1].sectors = *((unsigned char *)
				  ptok(j));
				if ((Info.drives[1].cylinders == 0)
				 || (Info.drives[1].heads == 0)
				 || (Info.drives[1].sectors == 0)) {
					Info.flags &= ~SI_DriveParamsValid;
				}
			}
		}
#		endif
		/* Copy Out The Info Structure To The User */
		if (cmd & 0xFFFF0000) {
			bcopy (&Info, data, sizeof(Info));
		} else {
			error = copyout (&Info, *(caddr_t *)data, sizeof(Info));
		}
		return (error); }

	/* Get The BlinkLED State */
	case DPT_BLINKLED:
		i = ASR_getBlinkLedCode (sc);
		if (i == -1) {
			i = 0;
		}
		if (cmd & 0xFFFF0000) {
			bcopy ((caddr_t)(&i), data, sizeof(i));
		} else {
			error = copyout (&i, *(caddr_t *)data, sizeof(i));
		}
		/*
		 * NOTE(review): this `break' exits the switch and falls
		 * into the final `return (EINVAL)' below, so even a
		 * successful DPT_BLINKLED query returns EINVAL — verify
		 * whether callers rely on this before changing it.
		 */
		break;

	/* Send an I2O command */
	case I2OUSRCMD:
		return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data)));

	/* Reset and re-initialize the adapter */
	case I2ORESETCMD:
		return (ASR_reset (sc));

	/* Rescan the LCT table and resynchronize the information */
	case I2ORESCANCMD:
		return (ASR_rescan (sc));
	}
	return (EINVAL);
} /* asr_ioctl */