/*	$NetBSD: rf_driver.c,v 1.61 2002/09/17 03:54:42 oster Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
 *
 * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
 *
 ******************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.61 2002/09/17 03:54:42 oster Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_freelist.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

#ifndef RF_ACCESS_DEBUG
#define RF_ACCESS_DEBUG 0
#endif

/* rad == RF_RaidAccessDesc_t */
static RF_FreeList_t *rf_rad_freelist;
#define RF_MAX_FREE_RAD 128
#define RF_RAD_INC 16
#define RF_RAD_INITIAL 32

/* debug variables */
char    rf_panicbuf[2048];	/* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static int init_rad(RF_RaidAccessDesc_t *);
static void clean_rad(RF_RaidAccessDesc_t *);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only: avoids interleaved
					 * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
	    "raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

#define IO_BUF_ERR(bp, err) { \
	bp->b_flags |= B_ERROR; \
	bp->b_resid = bp->b_bcount; \
	bp->b_error = err; \
	biodone(bp); \
}

static int configureCount = 0;	/* number of active configurations */
static int isconfigged = 0;	/* is basic raidframe (non per-array)
				 * stuff configged */
RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
						 * stuff */
static RF_ShutdownList_t *globalShutdown;	/* non array-specific
						 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);

/* called at system boot time */
int
rf_BootRaidframe()
{
	int     rc;

	if (raidframe_booted)
		return (EBUSY);
	raidframe_booted = 1;

	rc = rf_lkmgr_mutex_init(&configureMutex);
	if (rc) {
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		RF_PANIC();
	}
	configureCount = 0;
	isconfigged = 0;
	globalShutdown = NULL;
	return (0);
}
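/*
 * For orientation, a minimal sketch (not compiled) of the expected call
 * order as driven from the kernel interface: boot once, then configure,
 * access, and shut down each array.  The wrapper function below is purely
 * illustrative and does not exist in RAIDframe; the real callers live in
 * the rf_kintf.h glue and add ioctl plumbing and error recovery.
 */
#if 0
static int
example_raidframe_lifecycle(RF_Raid_t *raidPtr, RF_Config_t *cfgPtr)
{
	int     rc;

	rc = rf_BootRaidframe();		/* once, at system boot */
	if (rc && rc != EBUSY)
		return (rc);

	rc = rf_Configure(raidPtr, cfgPtr, NULL); /* per-array configuration */
	if (rc)
		return (rc);

	/* ... rf_DoAccess() services I/O while raidPtr->valid is set ... */

	return (rf_Shutdown(raidPtr));		/* per-array teardown */
}
#endif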
/*
 * This function is really just for debugging user-level stuff: it
 * frees up all memory and other RAIDframe resources which might otherwise
 * be kept around.  This is used with systems like "sentinel" to detect
 * memory leaks.
 */
int
rf_UnbootRaidframe()
{
	int     rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	if (configureCount) {
		RF_UNLOCK_LKMGR_MUTEX(configureMutex);
		return (EBUSY);
	}
	raidframe_booted = 0;
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
	rc = rf_lkmgr_mutex_destroy(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to destroy mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_PANIC();
	}
	return (0);
}
/*
 * Called whenever an array is shut down
 */
static void
rf_UnconfigureArray()
{
	int     rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	if (--configureCount == 0) {	/* if no active configurations, shut
					 * everything down */
		isconfigged = 0;

		rc = rf_ShutdownList(&globalShutdown);
		if (rc) {
			RF_ERRORMSG1("RAIDFRAME: unable to do global shutdown, rc=%d\n", rc);
		}

		/*
		 * We must wait until now, because the AllocList module
		 * uses the DebugMem module.
		 */
#if RF_DEBUG_MEM
		if (rf_memDebug)
			rf_print_unfreed();
#endif
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
}

/*
 * Called to shut down an array.
 */
int
rf_Shutdown(raidPtr)
	RF_Raid_t *raidPtr;
{

	if (!raidPtr->valid) {
		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
		return (EINVAL);
	}
	/*
	 * wait for outstanding IOs to land
	 * As described in rf_raid.h, we use the rad_freelist lock
	 * to protect the per-array info about outstanding descs
	 * since we need to do freelist locking anyway, and this
	 * cuts down on the amount of serialization we've got going
	 * on.
	 */
	RF_FREELIST_DO_LOCK(rf_rad_freelist);
	if (raidPtr->waitShutdown) {
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		return (EBUSY);
	}
	raidPtr->waitShutdown = 1;
	while (raidPtr->nAccOutstanding) {
		RF_WAIT_COND(raidPtr->outstandingCond, RF_FREELIST_MUTEX_OF(rf_rad_freelist));
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	/* Wait for any parity re-writes to stop... */
	while (raidPtr->parity_rewrite_in_progress) {
		printf("Waiting for parity re-write to exit...\n");
		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
		    "rfprwshutdown", 0);
	}

	raidPtr->valid = 0;

	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

	rf_UnconfigureVnodes(raidPtr);

	rf_ShutdownList(&raidPtr->shutdownList);

	rf_UnconfigureArray();

	return (0);
}


#define DO_INIT_CONFIGURE(f) { \
	rc = f (&globalShutdown); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		rf_ShutdownList(&globalShutdown); \
		configureCount--; \
		RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
		return(rc); \
	} \
}

#define DO_RAID_FAIL() { \
	rf_UnconfigureVnodes(raidPtr); \
	rf_ShutdownList(&raidPtr->shutdownList); \
	rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_MUTEX(_m_) { \
	rc = rf_create_managed_mutex(&raidPtr->shutdownList, (_m_)); \
	if (rc) { \
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_COND(_c_) { \
	rc = rf_create_managed_cond(&raidPtr->shutdownList, (_c_)); \
	if (rc) { \
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}
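/*
 * For reference, each DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine) use in
 * rf_Configure() below expands to roughly:
 *
 *	{
 *		rc = rf_ConfigureEngine(&raidPtr->shutdownList, raidPtr, cfgPtr);
 *		if (rc) {
 *			RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n",
 *			    RF_STRING(rf_ConfigureEngine), rc);
 *			DO_RAID_FAIL();
 *			return (rc);
 *		}
 *	}
 *
 * i.e. every sub-module either registers its cleanup on the per-array
 * shutdown list or the whole configuration is unwound.
 */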
int
rf_Configure(raidPtr, cfgPtr, ac)
	RF_Raid_t *raidPtr;
	RF_Config_t *cfgPtr;
	RF_AutoConfig_t *ac;
{
	RF_RowCol_t row, col;
	int     i, rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rc = rf_create_managed_mutex(&globalShutdown, &rf_printf_mutex);
		if (rc) {
			rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
			rf_ShutdownList(&globalShutdown);
			return (rc);
		}
		/* initialize globals */

		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
		 * Yes, this does make debugging general to the whole
		 * system instead of being array specific.  Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		isconfigged = 1;
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* set up the cleanup list.  Do this after ConfigureDebug so that
	 * value of memDebug will be set */

	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(&raidPtr->shutdownList,
	    (void (*) (void *)) rf_FreeAllocList,
	    raidPtr->cleanupList);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		DO_RAID_FAIL();
		return (rc);
	}
	raidPtr->numRow = cfgPtr->numRow;
	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	/* XXX we don't even pretend to support more than one row in the
	 * kernel... */
	if (raidPtr->numRow != 1) {
		RF_ERRORMSG("Only one row supported in kernel.\n");
		DO_RAID_FAIL();
		return (EINVAL);
	}
	RF_CallocAndAdd(raidPtr->status, raidPtr->numRow, sizeof(RF_RowStatus_t),
	    (RF_RowStatus_t *), raidPtr->cleanupList);
	if (raidPtr->status == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	RF_CallocAndAdd(raidPtr->reconControl, raidPtr->numRow,
	    sizeof(RF_ReconCtrl_t *), (RF_ReconCtrl_t **), raidPtr->cleanupList);
	if (raidPtr->reconControl == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->numRow; i++) {
		raidPtr->status[i] = rf_rs_optimal;
		raidPtr->reconControl[i] = NULL;
	}

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	DO_RAID_COND(&raidPtr->outstandingCond);

	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
	DO_RAID_COND(&raidPtr->quiescent_cond);

	DO_RAID_COND(&raidPtr->waitForReconCond);

	DO_RAID_MUTEX(&raidPtr->recon_done_proc_mutex);

	if (ac != NULL) {
		/* We have an AutoConfig structure..  Don't do the
		   normal disk configuration... call the auto config
		   stuff */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
	 * no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);

	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			/*
			 * XXX better distribution
			 */
			raidPtr->hist_diskreq[row][col] = 0;
		}
	}

	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	   after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}
	rf_StartUserStats(raidPtr);

	raidPtr->valid = 1;

	printf("raid%d: %s\n", raidPtr->raidid,
	    raidPtr->Layout.map->configName);
	printf("raid%d: Components:", raidPtr->raidid);
	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			printf(" %s", raidPtr->Disks[row][col].devname);
			if (RF_DEAD_DISK(raidPtr->Disks[row][col].status)) {
				printf("[**FAILED**]");
			}
		}
	}
	printf("\n");
	printf("raid%d: Total Sectors: %lu (%lu MB)\n",
	    raidPtr->raidid,
	    (unsigned long) raidPtr->totalSectors,
	    (unsigned long) (raidPtr->totalSectors / 1024 *
		(1 << raidPtr->logBytesPerSector) / 1024));

	return (0);
}

static int
init_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	int     rc;

	rc = rf_mutex_init(&desc->mutex);
	if (rc) {
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&desc->cond);
	if (rc) {
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc);
		rf_mutex_destroy(&desc->mutex);
		return (rc);
	}
	return (0);
}

static void
clean_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	rf_mutex_destroy(&desc->mutex);
	rf_cond_destroy(&desc->cond);
}

static void
rf_ShutdownRDFreeList(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_rad_freelist, next, (RF_RaidAccessDesc_t *), clean_rad);
}

static int
rf_ConfigureRDFreeList(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_rad_freelist, RF_MAX_FREE_RAD,
	    RF_RAD_INC, sizeof(RF_RaidAccessDesc_t));
	if (rf_rad_freelist == NULL) {
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		rf_ShutdownRDFreeList(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_rad_freelist, RF_RAD_INITIAL, next,
	    (RF_RaidAccessDesc_t *), init_rad);
	return (0);
}

RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AccessState_t * states)
{
	RF_RaidAccessDesc_t *desc;

	RF_FREELIST_GET_INIT_NOUNLOCK(rf_rad_freelist, desc, next, (RF_RaidAccessDesc_t *), init_rad);
	if (raidPtr->waitShutdown) {
		/*
		 * Actually, we're shutting the array down.  Free the desc
		 * and return NULL.
		 */
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		RF_FREELIST_FREE_CLEAN(rf_rad_freelist, desc, next, clean_rad);
		return (NULL);
	}
	raidPtr->nAccOutstanding++;
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	desc->raidPtr = (void *) raidPtr;
	desc->type = type;
	desc->raidAddress = raidAddress;
	desc->numBlocks = numBlocks;
	desc->bufPtr = bufPtr;
	desc->bp = bp;
	desc->paramDAG = NULL;
	desc->paramASM = NULL;
	desc->flags = flags;
	desc->states = states;
	desc->state = 0;

	desc->status = 0;
	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
	desc->callbackFunc = NULL;
	desc->callbackArg = NULL;
	desc->next = NULL;
	desc->head = desc;
	desc->cleanupList = NULL;
	rf_MakeAllocList(desc->cleanupList);
	return (desc);
}

void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t * desc)
{
	RF_Raid_t *raidPtr = desc->raidPtr;

	RF_ASSERT(desc);

	rf_FreeAllocList(desc->cleanupList);
	RF_FREELIST_FREE_CLEAN_NOUNLOCK(rf_rad_freelist, desc, next, clean_rad);
	raidPtr->nAccOutstanding--;
	if (raidPtr->waitShutdown) {
		RF_SIGNAL_COND(raidPtr->outstandingCond);
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
}
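/*
 * Note that rf_AllocRaidAccDesc() and rf_FreeRaidAccDesc() also maintain
 * raidPtr->nAccOutstanding under the rad_freelist lock; rf_Shutdown()
 * above sleeps on outstandingCond until that count drains, which is why
 * the free path signals the condition once waitShutdown has been set.
 */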
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG can not be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 ********************************************************************/
int
rf_DoAccess(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    int async_flag,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp_in,
    RF_RaidAccessFlags_t flags)
/*
type should be read or write
async_flag should be RF_TRUE or RF_FALSE
bp_in is a buf pointer.  void * to facilitate ignoring it outside the kernel
*/
{
	RF_RaidAccessDesc_t *desc;
	caddr_t lbufPtr = bufPtr;
	struct buf *bp = (struct buf *) bp_in;

	raidAddress += rf_raidSectorOffset;

#if RF_ACCESS_DEBUG
	if (rf_accessDebug) {

		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
		    raidPtr->logBytesPerSector,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
		    (int) numBlocks,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
		    (long) bufPtr);
	}
#endif
	if (raidAddress + numBlocks > raidPtr->totalSectors) {

		printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
		    (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);

		IO_BUF_ERR(bp, ENOSPC);
		return (ENOSPC);
	}
	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);

	if (desc == NULL) {
		return (ENOMEM);
	}
	RF_ETIMER_START(desc->tracerec.tot_timer);

	desc->async_flag = async_flag;

	rf_ContinueRaidAccess(desc);

	return (0);
}
#if 0
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(raidPtr, row, col)
	RF_Raid_t *raidPtr;
	int     row;
	int     col;
{
	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		printf("Can't set reconfigured mode in dedicated-spare array\n");
		RF_PANIC();
	}
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[row][col].status = rf_ds_dist_spared;
	raidPtr->status[row] = rf_rs_reconfigured;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	/* install spare table only if declustering + distributed sparing
	 * architecture. */
	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
		rf_InstallSpareTable(raidPtr, row, col);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	return (0);
}
#endif

int
rf_FailDisk(
    RF_Raid_t * raidPtr,
    int frow,
    int fcol,
    int initRecon)
{
	printf("raid%d: Failing disk r%d c%d\n", raidPtr->raidid, frow, fcol);
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[frow][fcol].status = rf_ds_failed;
	raidPtr->status[frow] = rf_rs_degraded;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	/* Close the component, so that it's not "locked" if someone
	   else wants to use it! */

	rf_close_component(raidPtr, raidPtr->raid_cinfo[frow][fcol].ci_vp,
	    raidPtr->Disks[frow][fcol].auto_configured);
	raidPtr->raid_cinfo[frow][fcol].ci_vp = NULL;

	/* Need to mark the component as not being auto_configured
	   (in case it was previously). */

	raidPtr->Disks[frow][fcol].auto_configured = 0;

	if (initRecon)
		rf_ReconstructFailedDisk(raidPtr, frow, fcol);
	return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(raidPtr, reconDesc)
	RF_Raid_t *raidPtr;
	RF_RaidReconDesc_t *reconDesc;
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug) {
		printf("raid%d: Signalling quiescence lock\n",
		    raidPtr->raidid);
	}
#endif
	raidPtr->access_suspend_release = 1;

	if (raidPtr->waiting_for_quiescence) {
		SIGNAL_QUIESCENT_COND(raidPtr);
	}
}
/* suspends all new requests to the array.  No effect on accesses that are in flight. */
int
rf_SuspendNewRequestsAndWait(raidPtr)
	RF_Raid_t *raidPtr;
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
#endif
	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended++;
	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

	if (raidPtr->waiting_for_quiescence) {
		raidPtr->access_suspend_release = 0;
		while (!raidPtr->access_suspend_release) {
			printf("raid%d: Suspending: Waiting for Quiescence\n",
			    raidPtr->raidid);
			WAIT_FOR_QUIESCENCE(raidPtr);
			raidPtr->waiting_for_quiescence = 0;
		}
	}
	printf("raid%d: Quiescence reached..\n", raidPtr->raidid);

	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
	return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_CallbackDesc_t *t, *cb;

#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("Resuming new reqs\n");
#endif

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended--;
	if (raidPtr->accesses_suspended == 0)
		cb = raidPtr->quiesce_wait_list;
	else
		cb = NULL;
	raidPtr->quiesce_wait_list = NULL;
	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

	while (cb) {
		t = cb;
		cb = cb->next;
		(t->callbackFunc) (t->callbackArg);
		rf_FreeCallbackDesc(t);
	}
}
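/*
 * A minimal sketch (not compiled) of how the suspend/resume pair above is
 * intended to be used: bracket work that needs a quiet array, e.g. the
 * reconstruction code.  Any additional bookkeeping a real caller performs
 * is omitted here.
 */
#if 0
	(void) rf_SuspendNewRequestsAndWait(raidPtr);	/* quiesce the array */
	/* ... perform work that requires no accesses in flight ... */
	rf_ResumeNewRequests(raidPtr);			/* let new I/O proceed */
#endif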
/*****************************************************************************************
 *
 * debug routines
 *
 ****************************************************************************************/

static void
set_debug_option(name, val)
	char   *name;
	long    val;
{
	RF_DebugName_t *p;

	for (p = rf_debugNames; p->name; p++) {
		if (!strcmp(p->name, name)) {
			*(p->ptr) = val;
			printf("[Set debug variable %s to %ld]\n", name, val);
			return;
		}
	}
	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(cfgPtr)
	RF_Config_t *cfgPtr;
{
	char   *val_p, *name_p, *white_p;
	long    val;
	int     i;

	rf_ResetDebugOptions();
	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
		white_p = rf_find_white(name_p);	/* skip to start of 2nd
							 * word */
		val_p = rf_find_non_white(white_p);
		if (*val_p == '0' && *(val_p + 1) == 'x')
			val = rf_htoi(val_p + 2);
		else
			val = rf_atoi(val_p);
		*white_p = '\0';
		set_debug_option(name_p, val);
	}
}
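/*
 * Each cfgPtr->debugVars[] entry parsed above is expected to look like
 * "<name> <value>", e.g. "accessDebug 1" or "memDebug 0x1" (hex values
 * take a 0x prefix); valid names are whatever the rf_debugNames table
 * provides.
 */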
/* performance monitoring stuff */

#define TIMEVAL_TO_US(t) (((long) t.tv_sec) * 1000000L + (long) t.tv_usec)

#if !defined(_KERNEL) && !defined(SIMULATE)

/*
 * Throughput stats currently only used in user-level RAIDframe
 */

static int
rf_InitThroughputStats(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     rc;

	/* these used by user-level raidframe only */
	rc = rf_create_managed_mutex(listp, &raidPtr->throughputstats.mutex);
	if (rc) {
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (rc);
	}
	raidPtr->throughputstats.sum_io_us = 0;
	raidPtr->throughputstats.num_ios = 0;
	raidPtr->throughputstats.num_out_ios = 0;
	return (0);
}

void
rf_StartThroughputStats(RF_Raid_t * raidPtr)
{
	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_ios++;
	raidPtr->throughputstats.num_out_ios++;
	if (raidPtr->throughputstats.num_out_ios == 1)
		RF_GETTIME(raidPtr->throughputstats.start);
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_StopThroughputStats(RF_Raid_t * raidPtr)
{
	struct timeval diff;

	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_out_ios--;
	if (raidPtr->throughputstats.num_out_ios == 0) {
		RF_GETTIME(raidPtr->throughputstats.stop);
		RF_TIMEVAL_DIFF(&raidPtr->throughputstats.start, &raidPtr->throughputstats.stop, &diff);
		raidPtr->throughputstats.sum_io_us += TIMEVAL_TO_US(diff);
	}
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_PrintThroughputStats(RF_Raid_t * raidPtr)
{
	RF_ASSERT(raidPtr->throughputstats.num_out_ios == 0);
	if (raidPtr->throughputstats.sum_io_us != 0) {
		printf("[Throughput: %8.2f IOs/second]\n", raidPtr->throughputstats.num_ios
		    / (raidPtr->throughputstats.sum_io_us / 1000000.0));
	}
}
#endif				/* !KERNEL && !SIMULATE */

void
rf_StartUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.start);
	raidPtr->userstats.sum_io_us = 0;
	raidPtr->userstats.num_ios = 0;
	raidPtr->userstats.num_sect_moved = 0;
}

void
rf_StopUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.stop);
}

void
rf_UpdateUserStats(raidPtr, rt, numsect)
	RF_Raid_t *raidPtr;
	int     rt;		/* resp time in us */
	int     numsect;	/* number of sectors for this access */
{
	raidPtr->userstats.sum_io_us += rt;
	raidPtr->userstats.num_ios++;
	raidPtr->userstats.num_sect_moved += numsect;
}

void
rf_PrintUserStats(RF_Raid_t * raidPtr)
{
	long    elapsed_us, mbs, mbs_frac;
	struct timeval diff;

	RF_TIMEVAL_DIFF(&raidPtr->userstats.start, &raidPtr->userstats.stop, &diff);
	elapsed_us = TIMEVAL_TO_US(diff);

	/* 2000 sectors per megabyte, 1000000 microseconds per second */
	if (elapsed_us)
		mbs = (raidPtr->userstats.num_sect_moved / 2000) / (elapsed_us / 1000000);
	else
		mbs = 0;

	/* this computes only the first digit of the fractional mb/s moved */
	if (elapsed_us) {
		mbs_frac = ((raidPtr->userstats.num_sect_moved / 200) / (elapsed_us / 1000000))
		    - (mbs * 10);
	} else {
		mbs_frac = 0;
	}

	printf("Number of I/Os: %ld\n", raidPtr->userstats.num_ios);
	printf("Elapsed time (us): %ld\n", elapsed_us);
	printf("User I/Os per second: %ld\n", RF_DB0_CHECK(raidPtr->userstats.num_ios, (elapsed_us / 1000000)));
	printf("Average user response time: %ld us\n", RF_DB0_CHECK(raidPtr->userstats.sum_io_us, raidPtr->userstats.num_ios));
	printf("Total sectors moved: %ld\n", raidPtr->userstats.num_sect_moved);
	printf("Average access size (sect): %ld\n", RF_DB0_CHECK(raidPtr->userstats.num_sect_moved, raidPtr->userstats.num_ios));
	printf("Achieved data rate: %ld.%ld MB/sec\n", mbs, mbs_frac);
}


void
rf_print_panic_message(line, file)
	int     line;
	char   *file;
{
	sprintf(rf_panicbuf, "raidframe error at line %d file %s",
	    line, file);
}

void
rf_print_assert_panic_message(line, file, condition)
	int     line;
	char   *file;
	char   *condition;
{
	sprintf(rf_panicbuf,
	    "raidframe error at line %d file %s (failed asserting %s)\n",
	    line, file, condition);
}

void
rf_print_unable_to_init_mutex(file, line, rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
	    file, line, rc);
}

void
rf_print_unable_to_init_cond(file, line, rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
	    file, line, rc);
}

void
rf_print_unable_to_add_shutdown(file, line, rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
	    file, line, rc);
}