/* $NetBSD: rf_paritymap.c,v 1.6 2011/03/01 22:51:14 riz Exp $ */

/*-
 * Copyright (c) 2009 Jed Davis.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_paritymap.c,v 1.6 2011/03/01 22:51:14 riz Exp $");

#include <sys/param.h>
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/types.h>

#include <dev/raidframe/rf_paritymap.h>
#include <dev/raidframe/rf_stripelocks.h>
#include <dev/raidframe/rf_layout.h>
#include <dev/raidframe/rf_raid.h>
#include <dev/raidframe/rf_parityscan.h>
#include <dev/raidframe/rf_kintf.h>

/* Important parameters: */
#define REGION_MINSIZE	(25ULL << 20)
#define DFL_TICKMS	40000
#define DFL_COOLDOWN	8	/* 7-8 intervals of 40s = 5min +/- 20s */

/* Internal-use flag bits. */
#define TICKING 1
#define TICKED 2

/* Prototypes! */
static void rf_paritymap_write_locked(struct rf_paritymap *);
static void rf_paritymap_tick(void *);
static u_int rf_paritymap_nreg(RF_Raid_t *);

/* Extract the current status of the parity map. */
void
rf_paritymap_status(struct rf_paritymap *pm, struct rf_pmstat *ps)
{
	memset(ps, 0, sizeof(*ps));
	if (pm == NULL)
		ps->enabled = 0;
	else {
		ps->enabled = 1;
		ps->region_size = pm->region_size;
		mutex_enter(&pm->lock);
		memcpy(&ps->params, &pm->params, sizeof(ps->params));
		memcpy(ps->dirty, pm->disk_now, sizeof(ps->dirty));
		memcpy(&ps->ctrs, &pm->ctrs, sizeof(ps->ctrs));
		mutex_exit(&pm->lock);
	}
}

/*
 * Test whether parity in a given sector is suspected of being inconsistent
 * on disk (assuming that any pending I/O to it is allowed to complete).
 * This may be of interest to future work on parity scrubbing.
 */
int
rf_paritymap_test(struct rf_paritymap *pm, daddr_t sector)
{
	unsigned region = sector / pm->region_size;
	int retval;

	mutex_enter(&pm->lock);
	retval = isset(pm->disk_boot->bits, region) ? 1 : 0;
	mutex_exit(&pm->lock);
	return retval;
}
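/*
 * Usage sketch (illustrative; not copied from any actual caller): the
 * I/O path is expected to bracket each write to the array with the
 * begin/end pair below, so that the affected regions are marked dirty
 * on disk before any data lands and begin cooling once it completes:
 *
 *	rf_paritymap_begin(pm, offset, nsectors);
 *	... submit the write and wait for it to finish ...
 *	rf_paritymap_end(pm, offset, nsectors);
 */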
/* To be called before a write to the RAID is submitted. */
void
rf_paritymap_begin(struct rf_paritymap *pm, daddr_t offset, daddr_t size)
{
	unsigned i, b, e;

	b = offset / pm->region_size;
	e = (offset + size - 1) / pm->region_size;

	for (i = b; i <= e; i++)
		rf_paritymap_begin_region(pm, i);
}

/* To be called after a write to the RAID completes. */
void
rf_paritymap_end(struct rf_paritymap *pm, daddr_t offset, daddr_t size)
{
	unsigned i, b, e;

	b = offset / pm->region_size;
	e = (offset + size - 1) / pm->region_size;

	for (i = b; i <= e; i++)
		rf_paritymap_end_region(pm, i);
}

void
rf_paritymap_begin_region(struct rf_paritymap *pm, unsigned region)
{
	int needs_write;

	KASSERT(region < RF_PARITYMAP_NREG);
	pm->ctrs.nwrite++;

	/* If it was being kept warm, deal with that. */
	mutex_enter(&pm->lock);
	if (pm->current->state[region] < 0)
		pm->current->state[region] = 0;

	/* This shouldn't happen unless RAIDOUTSTANDING is set too high. */
	KASSERT(pm->current->state[region] < 127);
	pm->current->state[region]++;

	needs_write = isclr(pm->disk_now->bits, region);

	if (needs_write) {
		KASSERT(pm->current->state[region] == 1);
		rf_paritymap_write_locked(pm);
	}

	mutex_exit(&pm->lock);
}

void
rf_paritymap_end_region(struct rf_paritymap *pm, unsigned region)
{
	KASSERT(region < RF_PARITYMAP_NREG);

	mutex_enter(&pm->lock);
	KASSERT(pm->current->state[region] > 0);
	--pm->current->state[region];

	if (pm->current->state[region] <= 0) {
		pm->current->state[region] = -pm->params.cooldown;
		KASSERT(pm->current->state[region] <= 0);
		mutex_enter(&pm->lk_flags);
		if (!(pm->flags & TICKING)) {
			pm->flags |= TICKING;
			mutex_exit(&pm->lk_flags);
			callout_schedule(&pm->ticker,
			    mstohz(pm->params.tickms));
		} else
			mutex_exit(&pm->lk_flags);
	}
	mutex_exit(&pm->lock);
}

/*
 * Updates the parity map to account for any changes in current activity
 * and/or an ongoing parity scan, then writes it to disk with appropriate
 * synchronization.
 */
void
rf_paritymap_write(struct rf_paritymap *pm)
{
	mutex_enter(&pm->lock);
	rf_paritymap_write_locked(pm);
	mutex_exit(&pm->lock);
}

/* As above, but to be used when pm->lock is already held. */
static void
rf_paritymap_write_locked(struct rf_paritymap *pm)
{
	char w, w0;
	int i, j, setting, clearing;

	setting = clearing = 0;
	for (i = 0; i < RF_PARITYMAP_NBYTE; i++) {
		w0 = pm->disk_now->bits[i];
		w = pm->disk_boot->bits[i];

		for (j = 0; j < NBBY; j++)
			if (pm->current->state[i * NBBY + j] != 0)
				w |= 1 << j;

		if (w & ~w0)
			setting = 1;
		if (w0 & ~w)
			clearing = 1;

		pm->disk_now->bits[i] = w;
	}
	pm->ctrs.ncachesync += setting + clearing;
	pm->ctrs.nclearing += clearing;

	/*
	 * If bits are being set in the parity map, then a sync is
	 * required afterwards, so that the regions are marked dirty
	 * on disk before any writes to them take place.  If bits are
	 * being cleared, then a sync is required before the write, so
	 * that any writes to those regions are processed before the
	 * region is marked clean.  (Synchronization is somewhat
	 * overkill; a write ordering barrier would suffice, but we
	 * currently have no way to express that directly.)
	 */
	if (clearing)
		rf_sync_component_caches(pm->raid);
	rf_paritymap_kern_write(pm->raid, pm->disk_now);
	if (setting)
		rf_sync_component_caches(pm->raid);
}
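/*
 * Worked example of the ordering above (with made-up region numbers):
 * suppose region 5 has just gone dirty and region 9 has just finished
 * cooling.  "clearing" is set, so the component caches are synced
 * first, ensuring all data writes to region 9 are stable before the
 * map stops claiming it dirty; then the updated map is written; then,
 * because "setting" is also set, a second sync ensures region 5 is
 * recorded as dirty on stable storage before any of its data writes
 * are allowed to reach the disk.
 */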
/* Mark all parity as being in need of rewrite. */
void
rf_paritymap_invalidate(struct rf_paritymap *pm)
{
	mutex_enter(&pm->lock);
	memset(pm->disk_boot, ~(unsigned char)0,
	    sizeof(struct rf_paritymap_ondisk));
	mutex_exit(&pm->lock);
}

/* Mark all parity as being correct. */
void
rf_paritymap_forceclean(struct rf_paritymap *pm)
{
	mutex_enter(&pm->lock);
	memset(pm->disk_boot, (unsigned char)0,
	    sizeof(struct rf_paritymap_ondisk));
	mutex_exit(&pm->lock);
}

/*
 * The cooldown callout routine just defers its work to a thread; it can't do
 * the parity map write itself as it would block, and although mutex-induced
 * blocking is permitted it seems wise to avoid tying up the softint.
 */
static void
rf_paritymap_tick(void *arg)
{
	struct rf_paritymap *pm = arg;

	mutex_enter(&pm->lk_flags);
	pm->flags |= TICKED;
	mutex_exit(&pm->lk_flags);
	wakeup(&(pm->raid->iodone)); /* XXX */
}

/*
 * This is where the parity cooling work (and rearming the callout if needed)
 * is done; the raidio thread calls it when woken up, as by the above.
 */
void
rf_paritymap_checkwork(struct rf_paritymap *pm)
{
	int i, zerop, progressp;

	mutex_enter(&pm->lk_flags);
	if (pm->flags & TICKED) {
		zerop = progressp = 0;

		pm->flags &= ~TICKED;
		mutex_exit(&pm->lk_flags);

		mutex_enter(&pm->lock);
		for (i = 0; i < RF_PARITYMAP_NREG; i++) {
			if (pm->current->state[i] < 0) {
				progressp = 1;
				pm->current->state[i]++;
				if (pm->current->state[i] == 0)
					zerop = 1;
			}
		}

		if (progressp)
			callout_schedule(&pm->ticker,
			    mstohz(pm->params.tickms));
		else {
			mutex_enter(&pm->lk_flags);
			pm->flags &= ~TICKING;
			mutex_exit(&pm->lk_flags);
		}

		if (zerop)
			rf_paritymap_write_locked(pm);
		mutex_exit(&pm->lock);
	} else
		mutex_exit(&pm->lk_flags);
}
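/*
 * Summary of the per-region state[] encoding used above (derived from
 * the code rather than any separate specification):
 *
 *	> 0	that many writes are currently outstanding in the region
 *	  0	the region is idle (and may be marked clean on disk)
 *	< 0	the region is cooling down; -state ticks remain before
 *		its dirty bit can be cleared
 *
 * With the defaults (DFL_COOLDOWN = 8 ticks of DFL_TICKMS = 40000 ms),
 * a region therefore stays dirty for roughly five minutes after its
 * last write completes.
 */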
/*
 * Set parity map parameters; used both to alter parameters on the fly and to
 * establish their initial values.  Note that setting a parameter to 0 means
 * to leave the previous setting unchanged, and that if this is done for the
 * initial setting of "regions", then a default value will be computed based
 * on the RAID component size.
 */
int
rf_paritymap_set_params(struct rf_paritymap *pm,
    const struct rf_pmparams *params, int todisk)
{
	int cooldown, tickms;
	u_int regions;
	RF_RowCol_t col;
	RF_ComponentLabel_t *clabel;
	RF_Raid_t *raidPtr;

	cooldown = params->cooldown != 0
	    ? params->cooldown : pm->params.cooldown;
	tickms = params->tickms != 0
	    ? params->tickms : pm->params.tickms;
	regions = params->regions != 0
	    ? params->regions : pm->params.regions;

	if (cooldown < 1 || cooldown > 128) {
		printf("raid%d: cooldown %d out of range\n", pm->raid->raidid,
		    cooldown);
		return (-1);
	}
	if (tickms < 10) {
		printf("raid%d: tick time %dms out of range\n",
		    pm->raid->raidid, tickms);
		return (-1);
	}
	if (regions == 0) {
		regions = rf_paritymap_nreg(pm->raid);
	} else if (regions > RF_PARITYMAP_NREG) {
		printf("raid%d: region count %u too large (more than %u)\n",
		    pm->raid->raidid, regions, RF_PARITYMAP_NREG);
		return (-1);
	}

	/* XXX any currently warm parity will be used with the new tickms! */
	pm->params.cooldown = cooldown;
	pm->params.tickms = tickms;
	/* Apply the initial region count, but do not change it after that. */
	if (pm->params.regions == 0)
		pm->params.regions = regions;

	/* So that the newly set parameters can be tested: */
	pm->ctrs.nwrite = pm->ctrs.ncachesync = pm->ctrs.nclearing = 0;

	if (todisk) {
		raidPtr = pm->raid;
		for (col = 0; col < raidPtr->numCol; col++) {
			if (RF_DEAD_DISK(raidPtr->Disks[col].status))
				continue;

			clabel = raidget_component_label(raidPtr, col);
			clabel->parity_map_ntick = cooldown;
			clabel->parity_map_tickms = tickms;
			clabel->parity_map_regions = regions;

			/* Don't touch the disk if it's been spared */
			if (clabel->status == rf_ds_spared)
				continue;

			raidflush_component_label(raidPtr, col);
		}

		/* handle the spares too... */
		for (col = 0; col < raidPtr->numSpare; col++) {
			if (raidPtr->Disks[raidPtr->numCol+col].status == rf_ds_used_spare) {
				clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
				clabel->parity_map_ntick = cooldown;
				clabel->parity_map_tickms = tickms;
				clabel->parity_map_regions = regions;
				raidflush_component_label(raidPtr, raidPtr->numCol+col);
			}
		}
	}
	return 0;
}

/*
 * The number of regions may not be as many as can fit into the map, because
 * when regions are too small, the overhead of setting parity map bits
 * becomes significant in comparison to the actual I/O, while the
 * corresponding gains in parity verification time become negligible.  Thus,
 * a minimum region size (defined above) is imposed.
 *
 * Note that, if the number of regions is less than the maximum, then some of
 * the regions will be "fictional", corresponding to no actual disk; some
 * parts of the code may process them as normal, but they can not ever be
 * written to.
 */
static u_int
rf_paritymap_nreg(RF_Raid_t *raid)
{
	daddr_t bytes_per_disk, nreg;

	bytes_per_disk = raid->sectorsPerDisk << raid->logBytesPerSector;
	nreg = bytes_per_disk / REGION_MINSIZE;
	if (nreg > RF_PARITYMAP_NREG)
		nreg = RF_PARITYMAP_NREG;
	if (nreg < 1)
		nreg = 1;

	return (u_int)nreg;
}
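/*
 * Worked example (hypothetical component size): a 100 GiB component
 * gives bytes_per_disk / REGION_MINSIZE = (100 << 30) / (25 << 20)
 * = 4096 regions, subject to the RF_PARITYMAP_NREG clamp above; a
 * component smaller than 25 MiB would still get one region.
 */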
/*
 * Initialize a parity map given specific parameters.  This neither reads nor
 * writes the parity map config in the component labels; for that, see below.
 */
int
rf_paritymap_init(struct rf_paritymap *pm, RF_Raid_t *raid,
    const struct rf_pmparams *params)
{
	daddr_t rstripes;
	struct rf_pmparams safe;

	pm->raid = raid;
	pm->params.regions = 0;
	if (0 != rf_paritymap_set_params(pm, params, 0)) {
		/*
		 * If the parameters are out-of-range, then bring the
		 * parity map up with something reasonable, so that
		 * the admin can at least go and fix it (or ignore it
		 * entirely).
		 */
		safe.cooldown = DFL_COOLDOWN;
		safe.tickms = DFL_TICKMS;
		safe.regions = 0;

		if (0 != rf_paritymap_set_params(pm, &safe, 0))
			return (-1);
	}

	rstripes = howmany(raid->Layout.numStripe, pm->params.regions);
	pm->region_size = rstripes * raid->Layout.dataSectorsPerStripe;

	callout_init(&pm->ticker, CALLOUT_MPSAFE);
	callout_setfunc(&pm->ticker, rf_paritymap_tick, pm);
	pm->flags = 0;

	pm->disk_boot = kmem_alloc(sizeof(struct rf_paritymap_ondisk),
	    KM_SLEEP);
	pm->disk_now = kmem_alloc(sizeof(struct rf_paritymap_ondisk),
	    KM_SLEEP);
	pm->current = kmem_zalloc(sizeof(struct rf_paritymap_current),
	    KM_SLEEP);

	rf_paritymap_kern_read(pm->raid, pm->disk_boot);
	memcpy(pm->disk_now, pm->disk_boot, sizeof(*pm->disk_now));

	mutex_init(&pm->lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&pm->lk_flags, MUTEX_DEFAULT, IPL_SOFTCLOCK);

	return 0;
}

/*
 * Destroys a parity map; unless "force" is set, also cleans parity for any
 * regions which were still in cooldown (but are not dirty on disk).
 */
void
rf_paritymap_destroy(struct rf_paritymap *pm, int force)
{
	int i;

	callout_halt(&pm->ticker, NULL); /* XXX stop? halt? */
	callout_destroy(&pm->ticker);

	if (!force) {
		for (i = 0; i < RF_PARITYMAP_NREG; i++) {
			/* XXX check for > 0 ? */
			if (pm->current->state[i] < 0)
				pm->current->state[i] = 0;
		}

		rf_paritymap_write_locked(pm);
	}

	mutex_destroy(&pm->lock);
	mutex_destroy(&pm->lk_flags);

	kmem_free(pm->disk_boot, sizeof(struct rf_paritymap_ondisk));
	kmem_free(pm->disk_now, sizeof(struct rf_paritymap_ondisk));
	kmem_free(pm->current, sizeof(struct rf_paritymap_current));
}
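/*
 * A note on the three copies of the map allocated in rf_paritymap_init()
 * and freed above (a summary of how this file uses them, not a separate
 * specification): disk_boot is the map as read at configuration time,
 * accumulating the regions that still need a parity rewrite; disk_now
 * mirrors what is currently written on disk; current holds the in-core
 * per-region activity/cooldown counters.  rf_paritymap_write_locked()
 * recomputes disk_now as disk_boot OR'd with every region active in
 * current.
 */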
/*
 * Rewrite parity, taking parity map into account; this is the equivalent of
 * the old rf_RewriteParity, and is likewise to be called from a suitable
 * thread and shouldn't have multiple copies running in parallel and so on.
 *
 * Note that the fictional regions are "cleaned" in one shot, so that very
 * small RAIDs (useful for testing) will not experience potentially severe
 * regressions in rewrite time.
 */
int
rf_paritymap_rewrite(struct rf_paritymap *pm)
{
	int i, ret_val = 0;
	daddr_t reg_b, reg_e;

	/* Process only the actual regions. */
	for (i = 0; i < pm->params.regions; i++) {
		mutex_enter(&pm->lock);
		if (isset(pm->disk_boot->bits, i)) {
			mutex_exit(&pm->lock);

			reg_b = i * pm->region_size;
			reg_e = reg_b + pm->region_size;
			if (reg_e > pm->raid->totalSectors)
				reg_e = pm->raid->totalSectors;

			if (rf_RewriteParityRange(pm->raid, reg_b,
				reg_e - reg_b)) {
				ret_val = 1;
				if (pm->raid->waitShutdown)
					return ret_val;
			} else {
				mutex_enter(&pm->lock);
				clrbit(pm->disk_boot->bits, i);
				rf_paritymap_write_locked(pm);
				mutex_exit(&pm->lock);
			}
		} else {
			mutex_exit(&pm->lock);
		}
	}

	/* Now, clear the fictional regions, if any. */
	rf_paritymap_forceclean(pm);
	rf_paritymap_write(pm);

	return ret_val;
}

/*
 * How to merge the on-disk parity maps when reading them in from the
 * various components; returns whether they differ.  In the case that
 * they do differ, sets *dst to the union of *dst and *src.
 *
 * In theory, it should be safe to take the intersection (or just pick
 * a single component arbitrarily), but the paranoid approach costs
 * little.
 *
 * Appropriate locking, if any, is the responsibility of the caller.
 */
int
rf_paritymap_merge(struct rf_paritymap_ondisk *dst,
    struct rf_paritymap_ondisk *src)
{
	int i, discrep = 0;

	for (i = 0; i < RF_PARITYMAP_NBYTE; i++) {
		if (dst->bits[i] != src->bits[i])
			discrep = 1;
		dst->bits[i] |= src->bits[i];
	}

	return discrep;
}
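/*
 * Worked example for rf_paritymap_merge() above (made-up byte values):
 * if one component's map byte is 0x03 and another's is 0x05, the merged
 * byte becomes 0x07 and the discrepancy flag is raised, so the union of
 * all components' dirty regions gets rewritten rather than trusting any
 * single component's view.
 */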
/*
 * Detach a parity map from its RAID.  This is not meant to be applied except
 * when unconfiguring the RAID after all I/O has been resolved, as otherwise
 * an out-of-date parity map could be treated as current.
 */
void
rf_paritymap_detach(RF_Raid_t *raidPtr)
{
	if (raidPtr->parity_map == NULL)
		return;

	simple_lock(&(raidPtr->iodone_lock));
	struct rf_paritymap *pm = raidPtr->parity_map;
	raidPtr->parity_map = NULL;
	simple_unlock(&(raidPtr->iodone_lock));
	/* XXXjld is that enough locking?  Or too much? */
	rf_paritymap_destroy(pm, 0);
	kmem_free(pm, sizeof(*pm));
}

/*
 * Is this RAID set ineligible for parity-map use due to not actually
 * having any parity?  (If so, rf_paritymap_attach is a no-op, but
 * rf_paritymap_{get,set}_disable will still pointlessly act on the
 * component labels.)
 */
int
rf_paritymap_ineligible(RF_Raid_t *raidPtr)
{
	return raidPtr->Layout.map->faultsTolerated == 0;
}

/*
 * Attach a parity map to a RAID set if appropriate.  Includes
 * configure-time processing of parity-map fields of component label.
 */
void
rf_paritymap_attach(RF_Raid_t *raidPtr, int force)
{
	RF_RowCol_t col;
	int pm_use, pm_zap;
	int g_tickms, g_ntick, g_regions;
	int good;
	RF_ComponentLabel_t *clabel;
	u_int flags, regions;
	struct rf_pmparams params;

	if (rf_paritymap_ineligible(raidPtr)) {
		/* There isn't any parity. */
		return;
	}

	pm_use = 1;
	pm_zap = 0;
	g_tickms = DFL_TICKMS;
	g_ntick = DFL_COOLDOWN;
	g_regions = 0;

	/*
	 * Collect opinions on the set config.  If this is the initial
	 * config (raidctl -C), treat all labels as invalid, since
	 * there may be random data present.
	 */
	if (!force) {
		for (col = 0; col < raidPtr->numCol; col++) {
			if (RF_DEAD_DISK(raidPtr->Disks[col].status))
				continue;
			clabel = raidget_component_label(raidPtr, col);
			flags = clabel->parity_map_flags;
			/* Check for use by non-parity-map kernel. */
			if (clabel->parity_map_modcount
			    != clabel->mod_counter) {
				flags &= ~RF_PMLABEL_WASUSED;
			}

			if (flags & RF_PMLABEL_VALID) {
				g_tickms = clabel->parity_map_tickms;
				g_ntick = clabel->parity_map_ntick;
				regions = clabel->parity_map_regions;
				if (g_regions == 0)
					g_regions = regions;
				else if (g_regions != regions) {
					pm_zap = 1; /* important! */
				}

				if (flags & RF_PMLABEL_DISABLE) {
					pm_use = 0;
				}
				if (!(flags & RF_PMLABEL_WASUSED)) {
					pm_zap = 1;
				}
			} else {
				pm_zap = 1;
			}
		}
	} else {
		pm_zap = 1;
	}

	/* Finally, create and attach the parity map. */
	if (pm_use) {
		params.cooldown = g_ntick;
		params.tickms = g_tickms;
		params.regions = g_regions;

		raidPtr->parity_map = kmem_alloc(sizeof(struct rf_paritymap),
		    KM_SLEEP);
		if (0 != rf_paritymap_init(raidPtr->parity_map, raidPtr,
			&params)) {
			/* It failed; do without. */
			kmem_free(raidPtr->parity_map,
			    sizeof(struct rf_paritymap));
			raidPtr->parity_map = NULL;
			return;
		}

		if (g_regions == 0)
			/* Pick up the autoconfigured region count. */
			g_regions = raidPtr->parity_map->params.regions;

		if (pm_zap) {
			good = raidPtr->parity_good && !force;

			if (good)
				rf_paritymap_forceclean(raidPtr->parity_map);
			else
				rf_paritymap_invalidate(raidPtr->parity_map);
			/* This needs to be on disk before WASUSED is set. */
			rf_paritymap_write(raidPtr->parity_map);
		}
	}

	/* Alter labels in-core to reflect the current view of things. */
	for (col = 0; col < raidPtr->numCol; col++) {
		if (RF_DEAD_DISK(raidPtr->Disks[col].status))
			continue;
		clabel = raidget_component_label(raidPtr, col);

		if (pm_use)
			flags = RF_PMLABEL_VALID | RF_PMLABEL_WASUSED;
		else
			flags = RF_PMLABEL_VALID | RF_PMLABEL_DISABLE;

		clabel->parity_map_flags = flags;
		clabel->parity_map_tickms = g_tickms;
		clabel->parity_map_ntick = g_ntick;
		clabel->parity_map_regions = g_regions;
		raidflush_component_label(raidPtr, col);
	}
	/*
	 * Note that we're just in 'attach' here, and there won't
	 * be any spare disks at this point.
	 */
}

/*
 * For initializing the parity-map fields of a component label, both on
 * initial creation and on reconstruct/copyback/etc.
 */
void
rf_paritymap_init_label(struct rf_paritymap *pm, RF_ComponentLabel_t *clabel)
{
	if (pm != NULL) {
		clabel->parity_map_flags =
		    RF_PMLABEL_VALID | RF_PMLABEL_WASUSED;
		clabel->parity_map_tickms = pm->params.tickms;
		clabel->parity_map_ntick = pm->params.cooldown;
		/*
		 * XXXjld: If the number of regions is changed on disk, and
		 * then a new component is labeled before the next configure,
		 * then it will get the old value and they will conflict on
		 * the next boot (and the default will be used instead).
		 */
		clabel->parity_map_regions = pm->params.regions;
	} else {
		/*
		 * XXXjld: if the map is disabled, and all the components are
		 * replaced without an intervening unconfigure/reconfigure,
		 * then it will become enabled on the next unconfig/reconfig.
		 */
	}
}
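/*
 * The disable flag handled below is persistent policy rather than
 * current state: it lives in the component labels (including active
 * spares) and takes effect the next time the set is configured, which
 * is how userland requests to disable the map (e.g. via raidctl) become
 * visible here.
 */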
/* Will the parity map be disabled next time? */
int
rf_paritymap_get_disable(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	RF_RowCol_t col;
	int dis;

	dis = 0;
	for (col = 0; col < raidPtr->numCol; col++) {
		if (RF_DEAD_DISK(raidPtr->Disks[col].status))
			continue;
		clabel = raidget_component_label(raidPtr, col);
		if (clabel->parity_map_flags & RF_PMLABEL_DISABLE)
			dis = 1;
	}
	for (col = 0; col < raidPtr->numSpare; col++) {
		if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
			continue;
		clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
		if (clabel->parity_map_flags & RF_PMLABEL_DISABLE)
			dis = 1;
	}

	return dis;
}

/* Set whether the parity map will be disabled next time. */
void
rf_paritymap_set_disable(RF_Raid_t *raidPtr, int dis)
{
	RF_ComponentLabel_t *clabel;
	RF_RowCol_t col;

	for (col = 0; col < raidPtr->numCol; col++) {
		if (RF_DEAD_DISK(raidPtr->Disks[col].status))
			continue;
		clabel = raidget_component_label(raidPtr, col);
		if (dis)
			clabel->parity_map_flags |= RF_PMLABEL_DISABLE;
		else
			clabel->parity_map_flags &= ~RF_PMLABEL_DISABLE;
		raidflush_component_label(raidPtr, col);
	}

	/* update any used spares as well */
	for (col = 0; col < raidPtr->numSpare; col++) {
		if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
			continue;

		clabel = raidget_component_label(raidPtr, raidPtr->numCol+col);
		if (dis)
			clabel->parity_map_flags |= RF_PMLABEL_DISABLE;
		else
			clabel->parity_map_flags &= ~RF_PMLABEL_DISABLE;
		raidflush_component_label(raidPtr, raidPtr->numCol+col);
	}
}