/*	$NetBSD: rf_driver.c,v 1.41 2001/07/27 03:30:07 oster Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
 *
 * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
 *
 ******************************************************************************/


#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_configure.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_freelist.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_memchunk.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_debugprint.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

/* rad == RF_RaidAccessDesc_t */
static RF_FreeList_t *rf_rad_freelist;
#define RF_MAX_FREE_RAD 128
#define RF_RAD_INC 16
#define RF_RAD_INITIAL 32

/* debug variables */
char rf_panicbuf[2048];		/* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static int init_rad(RF_RaidAccessDesc_t *);
static void clean_rad(RF_RaidAccessDesc_t *);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only: avoids interleaved
					 * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
		"raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

#define IO_BUF_ERR(bp, err) { \
	bp->b_flags |= B_ERROR; \
	bp->b_resid = bp->b_bcount; \
	bp->b_error = err; \
	biodone(bp); \
}

static int configureCount = 0;		/* number of active configurations */
static int isconfigged = 0;		/* is basic raidframe (non per-array)
					 * stuff configged */
RF_DECLARE_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
					 * stuff */
static RF_ShutdownList_t *globalShutdown;	/* non array-specific
						 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);

/* called at system boot time */
int
rf_BootRaidframe()
{
	int rc;

	if (raidframe_booted)
		return (EBUSY);
	raidframe_booted = 1;

	rc = rf_mutex_init(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_PANIC();
	}
	configureCount = 0;
	isconfigged = 0;
	globalShutdown = NULL;
	return (0);
}
/*
 * This function is really just for debugging user-level stuff: it
 * frees up all memory, other RAIDframe resources which might otherwise
 * be kept around. This is used with systems like "sentinel" to detect
 * memory leaks.
 */
int
rf_UnbootRaidframe()
{
	int rc;

	RF_LOCK_MUTEX(configureMutex);
	if (configureCount) {
		RF_UNLOCK_MUTEX(configureMutex);
		return (EBUSY);
	}
	raidframe_booted = 0;
	RF_UNLOCK_MUTEX(configureMutex);
	rc = rf_mutex_destroy(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to destroy mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		RF_PANIC();
	}
	return (0);
}
/*
 * Called whenever an array is shut down
 */
static void
rf_UnconfigureArray()
{
	int rc;

	RF_LOCK_MUTEX(configureMutex);
	if (--configureCount == 0) {	/* if no active configurations, shut
					 * everything down */
		isconfigged = 0;

		rc = rf_ShutdownList(&globalShutdown);
		if (rc) {
			RF_ERRORMSG1("RAIDFRAME: unable to do global shutdown, rc=%d\n", rc);
		}

		/*
		 * We must wait until now, because the AllocList module
		 * uses the DebugMem module.
		 */
		if (rf_memDebug)
			rf_print_unfreed();
	}
	RF_UNLOCK_MUTEX(configureMutex);
}

/*
 * Called to shut down an array.
 */
int
rf_Shutdown(raidPtr)
	RF_Raid_t *raidPtr;
{

	if (!raidPtr->valid) {
		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
		return (EINVAL);
	}
	/*
	 * wait for outstanding IOs to land
	 * As described in rf_raid.h, we use the rad_freelist lock
	 * to protect the per-array info about outstanding descs
	 * since we need to do freelist locking anyway, and this
	 * cuts down on the amount of serialization we've got going
	 * on.
	 */
	RF_FREELIST_DO_LOCK(rf_rad_freelist);
	if (raidPtr->waitShutdown) {
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		return (EBUSY);
	}
	raidPtr->waitShutdown = 1;
	while (raidPtr->nAccOutstanding) {
		RF_WAIT_COND(raidPtr->outstandingCond, RF_FREELIST_MUTEX_OF(rf_rad_freelist));
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	/* Wait for any parity re-writes to stop... */
	while (raidPtr->parity_rewrite_in_progress) {
		printf("Waiting for parity re-write to exit...\n");
		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
		       "rfprwshutdown", 0);
	}

	raidPtr->valid = 0;

	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

	rf_UnconfigureVnodes(raidPtr);

	rf_ShutdownList(&raidPtr->shutdownList);

	rf_UnconfigureArray();

	return (0);
}


#define DO_INIT_CONFIGURE(f) { \
	rc = f (&globalShutdown); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		rf_ShutdownList(&globalShutdown); \
		configureCount--; \
		RF_UNLOCK_MUTEX(configureMutex); \
		return(rc); \
	} \
}

#define DO_RAID_FAIL() { \
	rf_UnconfigureVnodes(raidPtr); \
	rf_ShutdownList(&raidPtr->shutdownList); \
	rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_MUTEX(_m_) { \
	rc = rf_create_managed_mutex(&raidPtr->shutdownList, (_m_)); \
	if (rc) { \
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", \
		    __FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_COND(_c_) { \
	rc = rf_create_managed_cond(&raidPtr->shutdownList, (_c_)); \
	if (rc) { \
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", \
		    __FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

int
rf_Configure(raidPtr, cfgPtr, ac)
	RF_Raid_t *raidPtr;
	RF_Config_t *cfgPtr;
	RF_AutoConfig_t *ac;
{
	RF_RowCol_t row, col;
	int i, rc;

	/* XXX This check can probably be removed now, since
	   RAIDFRAME_CONFIGURE now checks to make sure that the
	   RAID set is not already valid
	*/
	if (raidPtr->valid) {
		RF_ERRORMSG("RAIDframe configuration not shut down.  Aborting configure.\n");
		return (EINVAL);
	}
	RF_LOCK_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rc = rf_create_managed_mutex(&globalShutdown, &rf_printf_mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
			    __LINE__, rc);
			rf_ShutdownList(&globalShutdown);
			return (rc);
		}
		/* initialize globals */
		printf("RAIDFRAME: protectedSectors is %ld\n",
		       rf_protectedSectors);

		rf_clear_debug_print_buffer();

		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
		 * Yes, this does make debugging general to the whole
		 * system instead of being array specific. Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureMemChunk);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureDebugPrint);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		isconfigged = 1;
	}
	RF_UNLOCK_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* set up the cleanup list.  Do this after ConfigureDebug so that
	 * value of memDebug will be set */

	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(&raidPtr->shutdownList,
			       (void (*) (void *)) rf_FreeAllocList,
			       raidPtr->cleanupList);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		DO_RAID_FAIL();
		return (rc);
	}
	raidPtr->numRow = cfgPtr->numRow;
	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	/* XXX we don't even pretend to support more than one row in the
	 * kernel... */
	if (raidPtr->numRow != 1) {
		RF_ERRORMSG("Only one row supported in kernel.\n");
		DO_RAID_FAIL();
		return (EINVAL);
	}
	RF_CallocAndAdd(raidPtr->status, raidPtr->numRow, sizeof(RF_RowStatus_t),
	    (RF_RowStatus_t *), raidPtr->cleanupList);
	if (raidPtr->status == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	RF_CallocAndAdd(raidPtr->reconControl, raidPtr->numRow,
	    sizeof(RF_ReconCtrl_t *), (RF_ReconCtrl_t **), raidPtr->cleanupList);
	if (raidPtr->reconControl == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->numRow; i++) {
		raidPtr->status[i] = rf_rs_optimal;
		raidPtr->reconControl[i] = NULL;
	}

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	DO_RAID_COND(&raidPtr->outstandingCond);

	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
	DO_RAID_COND(&raidPtr->quiescent_cond);

	DO_RAID_COND(&raidPtr->waitForReconCond);

	DO_RAID_MUTEX(&raidPtr->recon_done_proc_mutex);

	if (ac!=NULL) {
		/* We have an AutoConfig structure..  Don't do the
		   normal disk configuration... call the auto config
		   stuff */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
	 * no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);

	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			/*
			 * XXX better distribution
			 */
			raidPtr->hist_diskreq[row][col] = 0;
		}
	}

	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	   after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}
	rf_StartUserStats(raidPtr);

	raidPtr->valid = 1;
	return (0);
}

static int
init_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	int rc;

	rc = rf_mutex_init(&desc->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&desc->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_mutex_destroy(&desc->mutex);
		return (rc);
	}
	return (0);
}

static void
clean_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	rf_mutex_destroy(&desc->mutex);
	rf_cond_destroy(&desc->cond);
}

static void
rf_ShutdownRDFreeList(ignored)
	void *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_rad_freelist, next, (RF_RaidAccessDesc_t *), clean_rad);
}

static int
rf_ConfigureRDFreeList(listp)
	RF_ShutdownList_t **listp;
{
	int rc;

	RF_FREELIST_CREATE(rf_rad_freelist, RF_MAX_FREE_RAD,
	    RF_RAD_INC, sizeof(RF_RaidAccessDesc_t));
	if (rf_rad_freelist == NULL) {
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		rf_ShutdownRDFreeList(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_rad_freelist, RF_RAD_INITIAL, next,
	    (RF_RaidAccessDesc_t *), init_rad);
	return (0);
}

RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AccessState_t * states)
{
	RF_RaidAccessDesc_t *desc;

	RF_FREELIST_GET_INIT_NOUNLOCK(rf_rad_freelist, desc, next, (RF_RaidAccessDesc_t *), init_rad);
	if (raidPtr->waitShutdown) {
		/*
		 * Actually, we're shutting the array down. Free the desc
		 * and return NULL.
		 */
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		RF_FREELIST_FREE_CLEAN(rf_rad_freelist, desc, next, clean_rad);
		return (NULL);
	}
	raidPtr->nAccOutstanding++;
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	desc->raidPtr = (void *) raidPtr;
	desc->type = type;
	desc->raidAddress = raidAddress;
	desc->numBlocks = numBlocks;
	desc->bufPtr = bufPtr;
	desc->bp = bp;
	desc->paramDAG = NULL;
	desc->paramASM = NULL;
	desc->flags = flags;
	desc->states = states;
	desc->state = 0;

	desc->status = 0;
	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
	desc->callbackFunc = NULL;
	desc->callbackArg = NULL;
	desc->next = NULL;
	desc->head = desc;
	desc->numPending = 0;
	desc->cleanupList = NULL;
	rf_MakeAllocList(desc->cleanupList);
	return (desc);
}

void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t * desc)
{
	RF_Raid_t *raidPtr = desc->raidPtr;

	RF_ASSERT(desc);

	rf_FreeAllocList(desc->cleanupList);
	RF_FREELIST_FREE_CLEAN_NOUNLOCK(rf_rad_freelist, desc, next, clean_rad);
	raidPtr->nAccOutstanding--;
	if (raidPtr->waitShutdown) {
		RF_SIGNAL_COND(raidPtr->outstandingCond);
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
}
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG can not be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 ********************************************************************/
int
rf_DoAccess(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    int async_flag,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp_in,
    RF_RaidAccessFlags_t flags)
/*
type should be read or write
async_flag should be RF_TRUE or RF_FALSE
bp_in is a buf pointer.  void * to facilitate ignoring it outside the kernel
*/
{
	RF_RaidAccessDesc_t *desc;
	caddr_t lbufPtr = bufPtr;
	struct buf *bp = (struct buf *) bp_in;

	raidAddress += rf_raidSectorOffset;

	if (!raidPtr->valid) {
		RF_ERRORMSG("RAIDframe driver not successfully configured.  Rejecting access.\n");
		IO_BUF_ERR(bp, EINVAL);
		return (EINVAL);
	}

	if (rf_accessDebug) {

		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
		    raidPtr->logBytesPerSector,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
		    (int) numBlocks,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
		    (long) bufPtr);
	}
	if (raidAddress + numBlocks > raidPtr->totalSectors) {

		printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
		    (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);

		IO_BUF_ERR(bp, ENOSPC);
		return (ENOSPC);
	}
	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);

	if (desc == NULL) {
		return (ENOMEM);
	}
	RF_ETIMER_START(desc->tracerec.tot_timer);

	desc->async_flag = async_flag;

	rf_ContinueRaidAccess(desc);

	return (0);
}
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(raidPtr, row, col)
	RF_Raid_t *raidPtr;
	int row;
	int col;
{
	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		printf("Can't set reconfigured mode in dedicated-spare array\n");
		RF_PANIC();
	}
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[row][col].status = rf_ds_dist_spared;
	raidPtr->status[row] = rf_rs_reconfigured;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	/* install spare table only if declustering + distributed sparing
	 * architecture. */
	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
		rf_InstallSpareTable(raidPtr, row, col);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	return (0);
}

extern int fail_row, fail_col, fail_time;
extern int delayed_recon;

int
rf_FailDisk(
    RF_Raid_t * raidPtr,
    int frow,
    int fcol,
    int initRecon)
{
	printf("raid%d: Failing disk r%d c%d\n", raidPtr->raidid, frow, fcol);
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[frow][fcol].status = rf_ds_failed;
	raidPtr->status[frow] = rf_rs_degraded;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	if (initRecon)
		rf_ReconstructFailedDisk(raidPtr, frow, fcol);
	return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(raidPtr, reconDesc)
	RF_Raid_t *raidPtr;
	RF_RaidReconDesc_t *reconDesc;
{
	if (rf_quiesceDebug) {
		printf("raid%d: Signalling quiescence lock\n",
		    raidPtr->raidid);
	}
	raidPtr->access_suspend_release = 1;

	if (raidPtr->waiting_for_quiescence) {
		SIGNAL_QUIESCENT_COND(raidPtr);
	}
}
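/*
 * Typical pairing of the suspend/resume routines that follow (a sketch
 * only; the actual call sites are assumed to be in the reconstruction
 * and copyback code, which is not shown here):
 *
 *	rf_SuspendNewRequestsAndWait(raidPtr);
 *	... work that requires a quiescent array ...
 *	rf_ResumeNewRequests(raidPtr);
 */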
/* suspends all new requests to the array.  No effect on accesses that are in flight.  */
int
rf_SuspendNewRequestsAndWait(raidPtr)
	RF_Raid_t *raidPtr;
{
	if (rf_quiesceDebug)
		printf("Suspending new reqs\n");

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended++;
	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

	if (raidPtr->waiting_for_quiescence) {
		raidPtr->access_suspend_release = 0;
		while (!raidPtr->access_suspend_release) {
			printf("Suspending: Waiting for Quiescence\n");
			WAIT_FOR_QUIESCENCE(raidPtr);
			raidPtr->waiting_for_quiescence = 0;
		}
	}
	printf("Quiescence reached..\n");

	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
	return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_CallbackDesc_t *t, *cb;

	if (rf_quiesceDebug)
		printf("Resuming new reqs\n");

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended--;
	if (raidPtr->accesses_suspended == 0)
		cb = raidPtr->quiesce_wait_list;
	else
		cb = NULL;
	raidPtr->quiesce_wait_list = NULL;
	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

	while (cb) {
		t = cb;
		cb = cb->next;
		(t->callbackFunc) (t->callbackArg);
		rf_FreeCallbackDesc(t);
	}
}
/*****************************************************************************************
 *
 * debug routines
 *
 ****************************************************************************************/

static void
set_debug_option(name, val)
	char *name;
	long val;
{
	RF_DebugName_t *p;

	for (p = rf_debugNames; p->name; p++) {
		if (!strcmp(p->name, name)) {
			*(p->ptr) = val;
			printf("[Set debug variable %s to %ld]\n", name, val);
			return;
		}
	}
	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(cfgPtr)
	RF_Config_t *cfgPtr;
{
	char *val_p, *name_p, *white_p;
	long val;
	int i;

	rf_ResetDebugOptions();
	for (i = 0; cfgPtr->debugVars[i][0] && i < RF_MAXDBGV; i++) {
		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
		white_p = rf_find_white(name_p);	/* skip to start of 2nd
							 * word */
		val_p = rf_find_non_white(white_p);
		if (*val_p == '0' && *(val_p + 1) == 'x')
			val = rf_htoi(val_p + 2);
		else
			val = rf_atoi(val_p);
		*white_p = '\0';
		set_debug_option(name_p, val);
	}
}
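/*
 * For reference, a sketch of the debugVars format parsed by
 * rf_ConfigureDebug() above: each entry is a whitespace-separated
 * "name value" pair, where a value beginning with "0x" is taken as hex
 * (rf_htoi) and anything else as decimal (rf_atoi).  The names are
 * assumed to be those listed in rf_debugNames, e.g.
 *
 *	"accessDebug 1"
 *	"memDebug 0x1"
 */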
/* performance monitoring stuff */

#define TIMEVAL_TO_US(t) (((long) t.tv_sec) * 1000000L + (long) t.tv_usec)

#if !defined(_KERNEL) && !defined(SIMULATE)

/*
 * Throughput stats currently only used in user-level RAIDframe
 */

static int
rf_InitThroughputStats(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int rc;

	/* these used by user-level raidframe only */
	rc = rf_create_managed_mutex(listp, &raidPtr->throughputstats.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", __FILE__,
		    __LINE__, rc);
		return (rc);
	}
	raidPtr->throughputstats.sum_io_us = 0;
	raidPtr->throughputstats.num_ios = 0;
	raidPtr->throughputstats.num_out_ios = 0;
	return (0);
}

void
rf_StartThroughputStats(RF_Raid_t * raidPtr)
{
	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_ios++;
	raidPtr->throughputstats.num_out_ios++;
	if (raidPtr->throughputstats.num_out_ios == 1)
		RF_GETTIME(raidPtr->throughputstats.start);
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_StopThroughputStats(RF_Raid_t * raidPtr)
{
	struct timeval diff;

	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_out_ios--;
	if (raidPtr->throughputstats.num_out_ios == 0) {
		RF_GETTIME(raidPtr->throughputstats.stop);
		RF_TIMEVAL_DIFF(&raidPtr->throughputstats.start, &raidPtr->throughputstats.stop, &diff);
		raidPtr->throughputstats.sum_io_us += TIMEVAL_TO_US(diff);
	}
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_PrintThroughputStats(RF_Raid_t * raidPtr)
{
	RF_ASSERT(raidPtr->throughputstats.num_out_ios == 0);
	if (raidPtr->throughputstats.sum_io_us != 0) {
		printf("[Throughput: %8.2f IOs/second]\n", raidPtr->throughputstats.num_ios
		    / (raidPtr->throughputstats.sum_io_us / 1000000.0));
	}
}
#endif				/* !KERNEL && !SIMULATE */

void
rf_StartUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.start);
	raidPtr->userstats.sum_io_us = 0;
	raidPtr->userstats.num_ios = 0;
	raidPtr->userstats.num_sect_moved = 0;
}

void
rf_StopUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.stop);
}

void
rf_UpdateUserStats(raidPtr, rt, numsect)
	RF_Raid_t *raidPtr;
	int rt;			/* resp time in us */
	int numsect;		/* number of sectors for this access */
{
	raidPtr->userstats.sum_io_us += rt;
	raidPtr->userstats.num_ios++;
	raidPtr->userstats.num_sect_moved += numsect;
}

void
rf_PrintUserStats(RF_Raid_t * raidPtr)
{
	long elapsed_us, mbs, mbs_frac;
	struct timeval diff;

	RF_TIMEVAL_DIFF(&raidPtr->userstats.start, &raidPtr->userstats.stop, &diff);
	elapsed_us = TIMEVAL_TO_US(diff);

	/* 2000 sectors per megabyte, 1000000 microseconds per second */
	if (elapsed_us)
		mbs = (raidPtr->userstats.num_sect_moved / 2000) / (elapsed_us / 1000000);
	else
		mbs = 0;

	/* this computes only the first digit of the fractional mb/s moved */
	if (elapsed_us) {
		mbs_frac = ((raidPtr->userstats.num_sect_moved / 200) / (elapsed_us / 1000000))
		    - (mbs * 10);
	} else {
		mbs_frac = 0;
	}

	printf("Number of I/Os: %ld\n", raidPtr->userstats.num_ios);
	printf("Elapsed time (us): %ld\n", elapsed_us);
	printf("User I/Os per second: %ld\n", RF_DB0_CHECK(raidPtr->userstats.num_ios, (elapsed_us / 1000000)));
	printf("Average user response time: %ld us\n", RF_DB0_CHECK(raidPtr->userstats.sum_io_us, raidPtr->userstats.num_ios));
	printf("Total sectors moved: %ld\n", raidPtr->userstats.num_sect_moved);
	printf("Average access size (sect): %ld\n", RF_DB0_CHECK(raidPtr->userstats.num_sect_moved, raidPtr->userstats.num_ios));
	printf("Achieved data rate: %ld.%ld MB/sec\n", mbs, mbs_frac);
}


void
rf_print_panic_message(line,file)
	int line;
	char *file;
{
	sprintf(rf_panicbuf,"raidframe error at line %d file %s",
		line, file);
}

void
rf_print_assert_panic_message(line,file,condition)
	int line;
	char *file;
	char *condition;
{
	sprintf(rf_panicbuf,
		"raidframe error at line %d file %s (failed asserting %s)\n",
		line, file, condition);
}
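/*
 * For reference, the rough life cycle of the entry points in this file,
 * as a sketch only (the kernel interface declared in rf_kintf.h is
 * assumed to be the actual caller):
 *
 *	rf_BootRaidframe();			once, at system boot
 *	rf_Configure(raidPtr, cfgPtr, ac);	per array, at configuration time
 *	rf_DoAccess(raidPtr, type, ...);	per I/O request
 *	rf_Shutdown(raidPtr);			per array, at unconfigure time
 *	rf_UnbootRaidframe();			user-level/debugging use only
 */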