/*	$NetBSD: rf_driver.c,v 1.71 2003/06/23 11:02:00 martin Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
 *
 * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
 *
 ******************************************************************************/


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_driver.c,v 1.71 2003/06/23 11:02:00 martin Exp $");

#include "opt_raid_diagnostic.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_freelist.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

#ifndef RF_ACCESS_DEBUG
#define RF_ACCESS_DEBUG 0
#endif

/* rad == RF_RaidAccessDesc_t */
static RF_FreeList_t *rf_rad_freelist;
#define RF_MAX_FREE_RAD 128
#define RF_RAD_INC 16
#define RF_RAD_INITIAL 32

/* debug variables */
char    rf_panicbuf[2048];	/* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static int init_rad(RF_RaidAccessDesc_t *);
static void clean_rad(RF_RaidAccessDesc_t *);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only: avoids interleaved
					 * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
		"raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

#define IO_BUF_ERR(bp, err) { \
	bp->b_flags |= B_ERROR; \
	bp->b_resid = bp->b_bcount; \
	bp->b_error = err; \
	biodone(bp); \
}

static int configureCount = 0;	/* number of active configurations */
static int isconfigged = 0;	/* is basic raidframe (non per-array)
				 * stuff configged */
RF_DECLARE_LKMGR_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
						 * stuff */
static RF_ShutdownList_t *globalShutdown;	/* non array-specific
						 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);

/* called at system boot time */
int
rf_BootRaidframe()
{
	int     rc;

	if (raidframe_booted)
		return (EBUSY);
	raidframe_booted = 1;

	rc = rf_lkmgr_mutex_init(&configureMutex);
	if (rc) {
		rf_print_unable_to_init_mutex( __FILE__, __LINE__, rc);
		RF_PANIC();
	}
	configureCount = 0;
	isconfigged = 0;
	globalShutdown = NULL;
	return (0);
}

/*
 * Called whenever an array is shutdown
 */
static void
rf_UnconfigureArray()
{
	int     rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	if (--configureCount == 0) {
		/* if no active configurations, shut everything down */
		isconfigged = 0;

		rc = rf_ShutdownList(&globalShutdown);
		if (rc) {
			RF_ERRORMSG1("RAIDFRAME: unable to do global shutdown, rc=%d\n", rc);
		}

		/*
		 * We must wait until now, because the AllocList module
		 * uses the DebugMem module.
		 */
#if RF_DEBUG_MEM
		if (rf_memDebug)
			rf_print_unfreed();
#endif
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);
}

/*
 * Called to shut down an array.
 */
int
rf_Shutdown(raidPtr)
	RF_Raid_t *raidPtr;
{

	if (!raidPtr->valid) {
		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
		return (EINVAL);
	}
	/*
	 * wait for outstanding IOs to land
	 * As described in rf_raid.h, we use the rad_freelist lock
	 * to protect the per-array info about outstanding descs
	 * since we need to do freelist locking anyway, and this
	 * cuts down on the amount of serialization we've got going
	 * on.
	 */
	RF_FREELIST_DO_LOCK(rf_rad_freelist);
	if (raidPtr->waitShutdown) {
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		return (EBUSY);
	}
	raidPtr->waitShutdown = 1;
	while (raidPtr->nAccOutstanding) {
		RF_WAIT_COND(raidPtr->outstandingCond, RF_FREELIST_MUTEX_OF(rf_rad_freelist));
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	/* Wait for any parity re-writes to stop... */
	while (raidPtr->parity_rewrite_in_progress) {
		printf("Waiting for parity re-write to exit...\n");
		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
		       "rfprwshutdown", 0);
	}

	raidPtr->valid = 0;

	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

	rf_UnconfigureVnodes(raidPtr);

	rf_ShutdownList(&raidPtr->shutdownList);

	rf_UnconfigureArray();

	return (0);
}


#define DO_INIT_CONFIGURE(f) { \
	rc = f (&globalShutdown); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		rf_ShutdownList(&globalShutdown); \
		configureCount--; \
		RF_UNLOCK_LKMGR_MUTEX(configureMutex); \
		return(rc); \
	} \
}

#define DO_RAID_FAIL() { \
	rf_UnconfigureVnodes(raidPtr); \
	rf_ShutdownList(&raidPtr->shutdownList); \
	rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_MUTEX(_m_) { \
	rc = rf_create_managed_mutex(&raidPtr->shutdownList, (_m_)); \
	if (rc) { \
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_COND(_c_) { \
	rc = rf_create_managed_cond(&raidPtr->shutdownList, (_c_)); \
	if (rc) { \
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

int
rf_Configure(raidPtr, cfgPtr, ac)
	RF_Raid_t *raidPtr;
	RF_Config_t *cfgPtr;
	RF_AutoConfig_t *ac;
{
	RF_RowCol_t row, col;
	int     i, rc;

	RF_LOCK_LKMGR_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rc = rf_create_managed_mutex(&globalShutdown, &rf_printf_mutex);
		if (rc) {
			rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
			rf_ShutdownList(&globalShutdown);
			return (rc);
		}
		/* initialize globals */
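		/*
		 * One-time global (non per-array) setup.  Each
		 * DO_INIT_CONFIGURE(f) below expands (see the macro
		 * definition above) to a call f(&globalShutdown), so each
		 * sub-module can register its teardown routine on the
		 * global shutdown list; if any step fails, the macro runs
		 * that list, backs out configureCount, releases the
		 * configure lock and returns the error.
		 */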

		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
		 * Yes, this does make debugging general to the whole
		 * system instead of being array specific.  Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		isconfigged = 1;
	}
	RF_UNLOCK_LKMGR_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* set up the cleanup list.  Do this after ConfigureDebug so that
	 * value of memDebug will be set */

	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(&raidPtr->shutdownList,
			       (void (*) (void *)) rf_FreeAllocList,
			       raidPtr->cleanupList);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		DO_RAID_FAIL();
		return (rc);
	}
	raidPtr->numRow = cfgPtr->numRow;
	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	/* XXX we don't even pretend to support more than one row in the
	 * kernel... */
	if (raidPtr->numRow != 1) {
		RF_ERRORMSG("Only one row supported in kernel.\n");
		DO_RAID_FAIL();
		return (EINVAL);
	}
	RF_CallocAndAdd(raidPtr->status, raidPtr->numRow, sizeof(RF_RowStatus_t),
			(RF_RowStatus_t *), raidPtr->cleanupList);
	if (raidPtr->status == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	RF_CallocAndAdd(raidPtr->reconControl, raidPtr->numRow,
			sizeof(RF_ReconCtrl_t *), (RF_ReconCtrl_t **), raidPtr->cleanupList);
	if (raidPtr->reconControl == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->numRow; i++) {
		raidPtr->status[i] = rf_rs_optimal;
		raidPtr->reconControl[i] = NULL;
	}

	TAILQ_INIT(&(raidPtr->iodone));
	simple_lock_init(&(raidPtr->iodone_lock));

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	DO_RAID_COND(&raidPtr->outstandingCond);

	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
	DO_RAID_COND(&raidPtr->quiescent_cond);

	DO_RAID_COND(&raidPtr->waitForReconCond);

	DO_RAID_MUTEX(&raidPtr->recon_done_proc_mutex);

	if (ac!=NULL) {
		/* We have an AutoConfig structure..  Don't do the
		   normal disk configuration... call the auto config
		   stuff */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure dev
	 * no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);

	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			/*
			 * XXX better distribution
			 */
			raidPtr->hist_diskreq[row][col] = 0;
		}
	}

	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->adding_hot_spare = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	   after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}
	rf_StartUserStats(raidPtr);

	raidPtr->valid = 1;

	printf("raid%d: %s\n", raidPtr->raidid,
	       raidPtr->Layout.map->configName);
	printf("raid%d: Components:", raidPtr->raidid);
	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			printf(" %s", raidPtr->Disks[row][col].devname);
			if (RF_DEAD_DISK(raidPtr->Disks[row][col].status)) {
				printf("[**FAILED**]");
			}
		}
	}
	printf("\n");
	printf("raid%d: Total Sectors: %lu (%lu MB)\n",
	       raidPtr->raidid,
	       (unsigned long) raidPtr->totalSectors,
	       (unsigned long) (raidPtr->totalSectors / 1024 *
				(1 << raidPtr->logBytesPerSector) / 1024));

	return (0);
}

static int
init_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	int     rc;

	rc = rf_mutex_init(&desc->mutex);
	if (rc) {
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&desc->cond);
	if (rc) {
		rf_print_unable_to_init_cond(__FILE__, __LINE__, rc);
		rf_mutex_destroy(&desc->mutex);
		return (rc);
	}
	return (0);
}

static void
clean_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	rf_mutex_destroy(&desc->mutex);
	rf_cond_destroy(&desc->cond);
}

static void
rf_ShutdownRDFreeList(ignored)
	void   *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_rad_freelist, next, (RF_RaidAccessDesc_t *), clean_rad);
}

static int
rf_ConfigureRDFreeList(listp)
	RF_ShutdownList_t **listp;
{
	int     rc;

	RF_FREELIST_CREATE(rf_rad_freelist, RF_MAX_FREE_RAD,
			   RF_RAD_INC, sizeof(RF_RaidAccessDesc_t));
	if (rf_rad_freelist == NULL) {
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	if (rc) {
		rf_print_unable_to_add_shutdown(__FILE__, __LINE__, rc);
		rf_ShutdownRDFreeList(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_rad_freelist, RF_RAD_INITIAL, next,
			       (RF_RaidAccessDesc_t *), init_rad);
	return (0);
}

RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AccessState_t * states)
{
	RF_RaidAccessDesc_t *desc;

	RF_FREELIST_GET_INIT_NOUNLOCK(rf_rad_freelist, desc, next, (RF_RaidAccessDesc_t *), init_rad);
	if (raidPtr->waitShutdown) {
		/*
		 * Actually, we're shutting the array down.  Free the desc
		 * and return NULL.
		 */

		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		RF_FREELIST_FREE_CLEAN(rf_rad_freelist, desc, next, clean_rad);
		return (NULL);
	}
	raidPtr->nAccOutstanding++;
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	desc->raidPtr = (void *) raidPtr;
	desc->type = type;
	desc->raidAddress = raidAddress;
	desc->numBlocks = numBlocks;
	desc->bufPtr = bufPtr;
	desc->bp = bp;
	desc->paramDAG = NULL;
	desc->paramASM = NULL;
	desc->flags = flags;
	desc->states = states;
	desc->state = 0;

	desc->status = 0;
	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
	desc->callbackFunc = NULL;
	desc->callbackArg = NULL;
	desc->next = NULL;
	desc->head = desc;
	desc->cleanupList = NULL;
	rf_MakeAllocList(desc->cleanupList);
	return (desc);
}

void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t * desc)
{
	RF_Raid_t *raidPtr = desc->raidPtr;

	RF_ASSERT(desc);

	rf_FreeAllocList(desc->cleanupList);
	RF_FREELIST_FREE_CLEAN_NOUNLOCK(rf_rad_freelist, desc, next, clean_rad);
	raidPtr->nAccOutstanding--;
	if (raidPtr->waitShutdown) {
		RF_SIGNAL_COND(raidPtr->outstandingCond);
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
}
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG can not be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 ********************************************************************/
int
rf_DoAccess(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    int async_flag,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp_in,
    RF_RaidAccessFlags_t flags)
/*
type should be read or write
async_flag should be RF_TRUE or RF_FALSE
bp_in is a buf pointer.  void * to facilitate ignoring it outside the kernel
*/
{
	RF_RaidAccessDesc_t *desc;
	caddr_t lbufPtr = bufPtr;
	struct buf *bp = (struct buf *) bp_in;

	raidAddress += rf_raidSectorOffset;

#if RF_ACCESS_DEBUG
	if (rf_accessDebug) {

		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
		       raidPtr->logBytesPerSector,
		       (int) rf_RaidAddressToByte(raidPtr, numBlocks));
		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n", raidPtr->raidid,
		       (type == RF_IO_TYPE_READ) ? "READ" : "WRITE", (int) raidAddress,
		       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
		       (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
		       (int) numBlocks,
		       (int) rf_RaidAddressToByte(raidPtr, numBlocks),
		       (long) bufPtr);
	}
#endif
	if (raidAddress + numBlocks > raidPtr->totalSectors) {

		printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
		       (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);

		IO_BUF_ERR(bp, ENOSPC);
		return (ENOSPC);
	}
	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);

	if (desc == NULL) {
		return (ENOMEM);
	}
	RF_ETIMER_START(desc->tracerec.tot_timer);

	desc->async_flag = async_flag;

	rf_ContinueRaidAccess(desc);

	return (0);
}
#if 0
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(raidPtr, row, col)
	RF_Raid_t *raidPtr;
	int     row;
	int     col;
{
	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		printf("Can't set reconfigured mode in dedicated-spare array\n");
		RF_PANIC();
	}
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[row][col].status = rf_ds_dist_spared;
	raidPtr->status[row] = rf_rs_reconfigured;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	/* install spare table only if declustering + distributed sparing
	 * architecture. */
	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
		rf_InstallSpareTable(raidPtr, row, col);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	return (0);
}
#endif

int
rf_FailDisk(
    RF_Raid_t * raidPtr,
    int frow,
    int fcol,
    int initRecon)
{
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->Disks[frow][fcol].status != rf_ds_failed) {
		/* must be failing something that is valid, or else it's
		   already marked as failed (in which case we don't
		   want to mark it failed again!) */
		raidPtr->numFailures++;
		raidPtr->Disks[frow][fcol].status = rf_ds_failed;
		raidPtr->status[frow] = rf_rs_degraded;
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);

	/* Close the component, so that it's not "locked" if someone
	   else wants to use it! */

	rf_close_component(raidPtr, raidPtr->raid_cinfo[frow][fcol].ci_vp,
	    raidPtr->Disks[frow][fcol].auto_configured);

	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->raid_cinfo[frow][fcol].ci_vp = NULL;

	/* Need to mark the component as not being auto_configured
	   (in case it was previously). */

	raidPtr->Disks[frow][fcol].auto_configured = 0;
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	if (initRecon)
		rf_ReconstructFailedDisk(raidPtr, frow, fcol);
	return (0);
}
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(raidPtr)
	RF_Raid_t *raidPtr;
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug) {
		printf("raid%d: Signalling quiescence lock\n",
		       raidPtr->raidid);
	}
#endif
	raidPtr->access_suspend_release = 1;

	if (raidPtr->waiting_for_quiescence) {
		SIGNAL_QUIESCENT_COND(raidPtr);
	}
}
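/*
 * Quiescence protocol, as implemented by the routines around this comment:
 * a thread that needs the array quiet calls rf_SuspendNewRequestsAndWait(),
 * which bumps accesses_suspended and, if accesses are still in flight,
 * sleeps until rf_SignalQuiescenceLock() is invoked (elsewhere, once the
 * outstanding accesses have drained) to set access_suspend_release and wake
 * it.  rf_ResumeNewRequests() undoes the suspension and, when the count
 * drops to zero, fires any callbacks queued on quiesce_wait_list.  A caller
 * pairs the two calls:
 *
 *	rf_SuspendNewRequestsAndWait(raidPtr);
 *	... work that requires a quiet array ...
 *	rf_ResumeNewRequests(raidPtr);
 *
 * The actual call sites live in other RAIDframe modules (e.g. the
 * reconstruction code), not in this file.
 */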
/* suspends all new requests to the array.  No effect on accesses that are in flight.  */
int
rf_SuspendNewRequestsAndWait(raidPtr)
	RF_Raid_t *raidPtr;
{
#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("raid%d: Suspending new reqs\n", raidPtr->raidid);
#endif
	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended++;
	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

	if (raidPtr->waiting_for_quiescence) {
		raidPtr->access_suspend_release = 0;
		while (!raidPtr->access_suspend_release) {
			printf("raid%d: Suspending: Waiting for Quiescence\n",
			       raidPtr->raidid);
			WAIT_FOR_QUIESCENCE(raidPtr);
			raidPtr->waiting_for_quiescence = 0;
		}
	}
	printf("raid%d: Quiescence reached..\n", raidPtr->raidid);

	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
	return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_CallbackDesc_t *t, *cb;

#if RF_DEBUG_QUIESCE
	if (rf_quiesceDebug)
		printf("Resuming new reqs\n");
#endif

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended--;
	if (raidPtr->accesses_suspended == 0)
		cb = raidPtr->quiesce_wait_list;
	else
		cb = NULL;
	raidPtr->quiesce_wait_list = NULL;
	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

	while (cb) {
		t = cb;
		cb = cb->next;
		(t->callbackFunc) (t->callbackArg);
		rf_FreeCallbackDesc(t);
	}
}
/*****************************************************************************************
 *
 * debug routines
 *
 ****************************************************************************************/

static void
set_debug_option(name, val)
	char   *name;
	long    val;
{
	RF_DebugName_t *p;

	for (p = rf_debugNames; p->name; p++) {
		if (!strcmp(p->name, name)) {
			*(p->ptr) = val;
			printf("[Set debug variable %s to %ld]\n", name, val);
			return;
		}
	}
	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(cfgPtr)
	RF_Config_t *cfgPtr;
{
	char   *val_p, *name_p, *white_p;
	long    val;
	int     i;

	rf_ResetDebugOptions();
	/* check the array bound before touching the entry */
	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
		white_p = rf_find_white(name_p);	/* skip to start of 2nd
							 * word */
		val_p = rf_find_non_white(white_p);
		if (*val_p == '0' && *(val_p + 1) == 'x')
			val = rf_htoi(val_p + 2);
		else
			val = rf_atoi(val_p);
		*white_p = '\0';
		set_debug_option(name_p, val);
	}
}
/* performance monitoring stuff */

#define TIMEVAL_TO_US(t) (((long) t.tv_sec) * 1000000L + (long) t.tv_usec)

#if !defined(_KERNEL) && !defined(SIMULATE)

/*
 * Throughput stats currently only used in user-level RAIDframe
 */

static int
rf_InitThroughputStats(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int     rc;

	/* these used by user-level raidframe only */
	rc = rf_create_managed_mutex(listp, &raidPtr->throughputstats.mutex);
	if (rc) {
		rf_print_unable_to_init_mutex(__FILE__, __LINE__, rc);
		return (rc);
	}
	raidPtr->throughputstats.sum_io_us = 0;
	raidPtr->throughputstats.num_ios = 0;
	raidPtr->throughputstats.num_out_ios = 0;
	return (0);
}

void
rf_StartThroughputStats(RF_Raid_t * raidPtr)
{
	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_ios++;
	raidPtr->throughputstats.num_out_ios++;
	if (raidPtr->throughputstats.num_out_ios == 1)
		RF_GETTIME(raidPtr->throughputstats.start);
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_StopThroughputStats(RF_Raid_t * raidPtr)
{
	struct timeval diff;

	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_out_ios--;
	if (raidPtr->throughputstats.num_out_ios == 0) {
		RF_GETTIME(raidPtr->throughputstats.stop);
		RF_TIMEVAL_DIFF(&raidPtr->throughputstats.start, &raidPtr->throughputstats.stop, &diff);
		raidPtr->throughputstats.sum_io_us += TIMEVAL_TO_US(diff);
	}
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_PrintThroughputStats(RF_Raid_t * raidPtr)
{
	RF_ASSERT(raidPtr->throughputstats.num_out_ios == 0);
	if (raidPtr->throughputstats.sum_io_us != 0) {
		printf("[Throughput: %8.2f IOs/second]\n", raidPtr->throughputstats.num_ios
		       / (raidPtr->throughputstats.sum_io_us / 1000000.0));
	}
}
#endif				/* !KERNEL && !SIMULATE */

void
rf_StartUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.start);
	raidPtr->userstats.sum_io_us = 0;
	raidPtr->userstats.num_ios = 0;
	raidPtr->userstats.num_sect_moved = 0;
}

void
rf_StopUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.stop);
}

void
rf_UpdateUserStats(raidPtr, rt, numsect)
	RF_Raid_t *raidPtr;
	int     rt;		/* resp time in us */
	int     numsect;	/* number of sectors for this access */
{
	raidPtr->userstats.sum_io_us += rt;
	raidPtr->userstats.num_ios++;
	raidPtr->userstats.num_sect_moved += numsect;
}

void
rf_PrintUserStats(RF_Raid_t * raidPtr)
{
	long    elapsed_us, mbs, mbs_frac;
	struct timeval diff;

	RF_TIMEVAL_DIFF(&raidPtr->userstats.start,
			&raidPtr->userstats.stop, &diff);
	elapsed_us = TIMEVAL_TO_US(diff);

	/* 2000 sectors per megabyte, 1000000 microseconds per second */
	if (elapsed_us)
		mbs = (raidPtr->userstats.num_sect_moved / 2000) /
		    (elapsed_us / 1000000);
	else
		mbs = 0;

	/* this computes only the first digit of the fractional mb/s moved */
	if (elapsed_us) {
		mbs_frac = ((raidPtr->userstats.num_sect_moved / 200) /
			    (elapsed_us / 1000000)) - (mbs * 10);
	} else {
		mbs_frac = 0;
	}

	printf("raid%d: Number of I/Os: %ld\n",
	       raidPtr->raidid, raidPtr->userstats.num_ios);
	printf("raid%d: Elapsed time (us): %ld\n",
	       raidPtr->raidid, elapsed_us);
	printf("raid%d: User I/Os per second: %ld\n",
	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.num_ios,
					     (elapsed_us / 1000000)));
	printf("raid%d: Average user response time: %ld us\n",
	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.sum_io_us,
					     raidPtr->userstats.num_ios));
	printf("raid%d: Total sectors moved: %ld\n",
	       raidPtr->raidid, raidPtr->userstats.num_sect_moved);
	printf("raid%d: Average access size (sect): %ld\n",
	       raidPtr->raidid, RF_DB0_CHECK(raidPtr->userstats.num_sect_moved,
					     raidPtr->userstats.num_ios));
	printf("raid%d: Achieved data rate: %ld.%ld MB/sec\n",
	       raidPtr->raidid, mbs, mbs_frac);
}


void
rf_print_panic_message(line,file)
	int     line;
	char   *file;
{
	sprintf(rf_panicbuf,"raidframe error at line %d file %s",
		line, file);
}

#ifdef RAID_DIAGNOSTIC
void
rf_print_assert_panic_message(line,file,condition)
	int     line;
	char   *file;
	char   *condition;
{
	sprintf(rf_panicbuf,
		"raidframe error at line %d file %s (failed asserting %s)\n",
		line, file, condition);
}
#endif

void
rf_print_unable_to_init_mutex(file,line,rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		     file, line, rc);
}

void
rf_print_unable_to_init_cond(file,line,rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		     file, line, rc);
}

void
rf_print_unable_to_add_shutdown(file,line,rc)
	char   *file;
	int     line;
	int     rc;
{
	RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		     file, line, rc);
}
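
/*
 * Summary of the external entry points above, in the order a kernel
 * front-end (see rf_kintf.h) would normally use them:
 *
 *	rf_BootRaidframe();			once, at system boot
 *	rf_Configure(raidPtr, cfgPtr, ac);	per array, at configure time
 *	rf_DoAccess(raidPtr, type, ...);	per I/O request
 *	rf_Shutdown(raidPtr);			per array, at unconfigure time
 *
 * This is only an outline; the authoritative sequencing lives in the
 * block-device front-end, not in this file.
 */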