Lines Matching defs:raidPtr

183 static int raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp);
184 static int rf_get_component_caches(RF_Raid_t *raidPtr, int *);
261 void *raidPtr;
299 static void rf_RewriteParityThread(RF_Raid_t *raidPtr);
847 RF_Raid_t *raidPtr;
851 raidPtr = &rs->sc_r;
854 if (raidPtr->Layout.numDataCol != 1 ||
855 raidPtr->Layout.numParityCol != 1)
873 for (c = 0; c < raidPtr->numCol; c++) {
874 if (raidPtr->Disks[c].status == rf_ds_optimal) {
887 for (c = 0; c < raidPtr->numSpare; c++) {
888 sparecol = raidPtr->numCol + c;
890 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
893 for (j = 0; j < raidPtr->numCol; j++) {
894 if (raidPtr->Disks[j].spareCol == sparecol) {
934 bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev);
940 error = (*bdev->d_dump)(raidPtr->Disks[dumpto].dev,
941 blkno, va, nblk * raidPtr->bytesPerSector);
1060 raid_wakeup(RF_Raid_t *raidPtr)
1062 rf_lock_mutex2(raidPtr->iodone_lock);
1063 rf_signal_cond2(raidPtr->iodone_cv);
1064 rf_unlock_mutex2(raidPtr->iodone_lock);
1073 RF_Raid_t *raidPtr;
1085 raidPtr = &rs->sc_r;
1092 raid_wakeup(raidPtr);
1106 RF_Raid_t *raidPtr;
1108 raidPtr = &rs->sc_r;
1109 if (!raidPtr->valid) {
1117 return raiddoaccess(raidPtr, bp);
1121 raiddone(RF_Raid_t *raidPtr, struct buf *bp)
1126 rs = raidPtr->softc;
1131 rf_lock_mutex2(raidPtr->mutex);
1132 raidPtr->openings++;
1133 rf_unlock_mutex2(raidPtr->mutex);
1136 raid_wakeup(raidPtr);
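
The matches at lines 1375, 2069, 2112, and 1132 show openings acting as a counting semaphore that caps in-flight accesses at RAIDOUTSTANDING: raiddoaccess() takes a slot before dispatching and raiddone() returns it before raid_wakeup() prods the queue. A minimal sketch of the acquire side under that reading, with the hypothetical name slot_acquire and only the rf_*_mutex2 wrappers already visible in this listing:

	/* Sketch only: try to take one of the RAIDOUTSTANDING I/O slots. */
	static int
	slot_acquire(RF_Raid_t *raidPtr)
	{
		rf_lock_mutex2(raidPtr->mutex);
		if (raidPtr->openings == 0) {
			/* All slots busy; retry after raiddone() wakes us. */
			rf_unlock_mutex2(raidPtr->mutex);
			return EBUSY;
		}
		raidPtr->openings--;
		rf_unlock_mutex2(raidPtr->mutex);
		return 0;
	}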
1177 RF_Raid_t *raidPtr;
1180 raidPtr = &rs->sc_r;
1183 raidPtr->recon_in_progress != 0 ||
1184 raidPtr->parity_rewrite_in_progress != 0)
1192 if ((error = rf_Shutdown(raidPtr)) != 0)
1211 rf_fail_disk(RF_Raid_t *raidPtr, struct rf_recon_req *rr)
1215 if (raidPtr->Layout.map->faultsTolerated == 0) {
1220 if (rr->col < 0 || rr->col >= raidPtr->numCol) {
1225 rf_lock_mutex2(raidPtr->mutex);
1226 if (raidPtr->status == rf_rs_reconstructing) {
1227 raidPtr->abortRecon[rr->col] = 1;
1229 if ((raidPtr->Disks[rr->col].status == rf_ds_optimal) &&
1230 (raidPtr->numFailures > 0)) {
1235 if (raidPtr->Disks[rr->col].status == rf_ds_spared) {
1236 int spareCol = raidPtr->Disks[rr->col].spareCol;
1238 if (spareCol < raidPtr->numCol ||
1239 spareCol >= raidPtr->numCol + raidPtr->numSpare)
1246 raidPtr->Disks[spareCol].status = rf_ds_failed;
1249 rf_unlock_mutex2(raidPtr->mutex);
1258 rrint->raidPtr = raidPtr;
1260 return RF_CREATE_THREAD(raidPtr->recon_thread, rf_ReconThread,
1263 rf_unlock_mutex2(raidPtr->mutex);
1326 RF_Raid_t *raidPtr = &rs->sc_r;
1366 memset(raidPtr, 0, sizeof(*raidPtr));
1367 raidPtr->softc = rs;
1368 raidPtr->raidid = rs->sc_unit;
1370 retcode = rf_Configure(raidPtr, k_cfg, NULL);
1375 raidPtr->openings = RAIDOUTSTANDING;
1378 raid_wakeup(raidPtr);
1379 rf_markalldirty(raidPtr);
1400 rf_set_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
1409 int raidid = raidPtr->raidid;
1422 if ((column < 0) || (column >= raidPtr->numCol)) {
1430 memcpy(raidget_component_label(raidPtr, column),
1432 raidflush_component_label(raidPtr, column);
1438 rf_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
1447 raidPtr->serial_number = clabel->serial_number;
1449 for (int column = 0; column < raidPtr->numCol; column++) {
1450 RF_RaidDisk_t *diskPtr = &raidPtr->Disks[column];
1454 raidPtr, column);
1457 raid_init_component_label(raidPtr, ci_label);
1458 ci_label->serial_number = raidPtr->serial_number;
1463 raidflush_component_label(raidPtr, column);
1471 rf_rebuild_in_place(RF_Raid_t *raidPtr, RF_SingleComponent_t *componentPtr)
1474 if (raidPtr->Layout.map->faultsTolerated == 0) {
1479 if (raidPtr->recon_in_progress == 1) {
1489 if ((column < 0) || (column >= raidPtr->numCol)) {
1493 rf_lock_mutex2(raidPtr->mutex);
1494 if ((raidPtr->Disks[column].status == rf_ds_optimal) &&
1495 (raidPtr->numFailures > 0)) {
1501 raidPtr->raidid);
1503 raidPtr->raidid, column);
1504 rf_unlock_mutex2(raidPtr->mutex);
1508 if (raidPtr->Disks[column].status == rf_ds_reconstructing) {
1510 raidPtr->raidid);
1513 raidPtr->raidid, column);
1515 rf_unlock_mutex2(raidPtr->mutex);
1519 if (raidPtr->Disks[column].status == rf_ds_spared) {
1520 rf_unlock_mutex2(raidPtr->mutex);
1524 rf_unlock_mutex2(raidPtr->mutex);
1532 rrint->raidPtr = raidPtr;
1534 return RF_CREATE_THREAD(raidPtr->recon_thread,
1539 rf_check_recon_status(RF_Raid_t *raidPtr, int *data)
1545 if (raidPtr->Layout.map->faultsTolerated == 0 ||
1546 raidPtr->status != rf_rs_reconstructing) {
1550 if (raidPtr->reconControl->numRUsTotal == 0) {
1554 *data = (raidPtr->reconControl->numRUsComplete * 100
1555 / raidPtr->reconControl->numRUsTotal);
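
With the division-by-zero guard at line 1550, rf_check_recon_status() reports whole-percent progress: for example, numRUsComplete = 512 of numRUsTotal = 2048 stores 25 in *data.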
1579 RF_Raid_t *raidPtr;
1592 raidPtr = &rs->sc_r;
1624 raidPtr->recon_in_progress != 0 ||
1625 raidPtr->parity_rewrite_in_progress != 0)
1637 return rf_get_component_label(raidPtr, data);
1641 return rf_set_component_label(raidPtr, data);
1645 return rf_init_component_label(raidPtr, data);
1648 d = rf_set_autoconfig(raidPtr, *(int *) data);
1650 raidPtr->raidid, d);
1655 d = rf_set_rootpartition(raidPtr, *(int *) data);
1657 raidPtr->raidid, d);
1664 if (raidPtr->Layout.map->faultsTolerated == 0) {
1666 raidPtr->parity_good = RF_RAID_CLEAN;
1670 if (raidPtr->parity_rewrite_in_progress == 1) {
1675 return RF_CREATE_THREAD(raidPtr->parity_rewrite_thread,
1676 rf_RewriteParityThread, raidPtr, "raid_parity");
1680 return rf_add_hot_spare(raidPtr, &component);
1685 return rf_delete_component(raidPtr, &component);
1689 return rf_remove_component(raidPtr, &component);
1693 return rf_incorporate_hot_spare(raidPtr, &component);
1696 return rf_rebuild_in_place(raidPtr, data);
1703 retcode = rf_get_info(raidPtr, d_cfg);
1711 *(int *) data = raidPtr->parity_good;
1715 if (rf_paritymap_ineligible(raidPtr))
1717 rf_paritymap_status(raidPtr->parity_map, data);
1721 if (rf_paritymap_ineligible(raidPtr))
1723 if (raidPtr->parity_map == NULL)
1725 if (rf_paritymap_set_params(raidPtr->parity_map, data, 1) != 0)
1730 if (rf_paritymap_ineligible(raidPtr))
1732 *(int *) data = rf_paritymap_get_disable(raidPtr);
1736 if (rf_paritymap_ineligible(raidPtr))
1738 rf_paritymap_set_disable(raidPtr, *(int *)data);
1746 memset(&raidPtr->acc_totals, 0, sizeof(raidPtr->acc_totals));
1751 *totals = raidPtr->acc_totals;
1755 raidPtr->keep_acc_totals = *(int *)data;
1759 *(int *) data = raidPtr->totalSectors;
1763 return rf_fail_disk(raidPtr, data);
1771 return rf_check_recon_status(raidPtr, data);
1774 rf_check_recon_status_ext(raidPtr, data);
1778 if (raidPtr->Layout.map->faultsTolerated == 0) {
1784 if (raidPtr->parity_rewrite_in_progress == 1) {
1786 raidPtr->parity_rewrite_stripes_done /
1787 raidPtr->Layout.numStripe;
1794 rf_check_parityrewrite_status_ext(raidPtr, data);
1802 rf_check_copyback_status_ext(raidPtr, data);
1806 for (column = 0; column < raidPtr->numCol; column++)
1807 if (raidPtr->Disks[column].status != rf_ds_optimal)
1810 for (column = 0; column < raidPtr->numCol; column++) {
1811 clabel = raidget_component_label(raidPtr, column);
1813 raidflush_component_label(raidPtr, column);
1857 retcode = rf_SetSpareTable(raidPtr, *(void **) data);
1904 if (!raidPtr->valid)
1913 retcode = rf_get_component_caches(raidPtr, (int *)data);
1917 retcode = rf_sync_component_caches(raidPtr, *(int *)data);
1940 RF_Raid_t *raidPtr = &rs->sc_r;
1943 unit = raidPtr->raidid;
1958 raidPtr->raidid);
1975 rs->sc_size = raidPtr->totalSectors;
1980 rf_set_geometry(rs, raidPtr);
1993 * the entries in the queues should be tagged with the raidPtr
2034 raidstart(RF_Raid_t *raidPtr)
2039 rs = raidPtr->softc;
2042 rf_lock_mutex2(raidPtr->mutex);
2043 if (raidPtr->numNewFailures > 0) {
2044 rf_unlock_mutex2(raidPtr->mutex);
2045 rf_update_component_labels(raidPtr,
2047 rf_lock_mutex2(raidPtr->mutex);
2048 raidPtr->numNewFailures--;
2050 rf_unlock_mutex2(raidPtr->mutex);
2053 printf("raid%d: raidstart not ready\n", raidPtr->raidid);
2061 raiddoaccess(RF_Raid_t *raidPtr, struct buf *bp)
2068 rf_lock_mutex2(raidPtr->mutex);
2069 if (raidPtr->openings == 0) {
2070 rf_unlock_mutex2(raidPtr->mutex);
2073 rf_unlock_mutex2(raidPtr->mutex);
2087 num_blocks = bp->b_bcount >> raidPtr->logBytesPerSector;
2088 pb = (bp->b_bcount & raidPtr->sectorMask) ? 1 : 0;
2095 if ((sum > raidPtr->totalSectors) || (sum < raid_addr)
2104 if (bp->b_bcount & raidPtr->sectorMask) {
2111 rf_lock_mutex2(raidPtr->mutex);
2112 raidPtr->openings--;
2113 rf_unlock_mutex2(raidPtr->mutex);
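
Because sector sizes are powers of two, lines 2087-2088 reduce the buffer's byte count to sectors with a shift and detect stragglers with a mask (sectorMask is bytesPerSector - 1; the partial-sector case is caught at line 2104). A worked example, assuming 512-byte sectors so that logBytesPerSector == 9 and sectorMask == 0x1ff:

	/* Sketch: 66048 bytes is exactly 129 sectors. */
	num_blocks = 66048 >> 9;	/* 129 */
	pb = (66048 & 0x1ff) ? 1 : 0;	/* 0: request is sector-aligned */
	/* One extra byte leaves a partial sector behind: */
	pb = (66049 & 0x1ff) ? 1 : 0;	/* 1: rejected by the check at 2104 */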
2118 rc = rf_DoAccess(raidPtr, (bp->b_flags & B_READ) ?
2163 queue->raidPtr->logBytesPerSector);
2176 req->type, queue->raidPtr->raidid,
2181 queue->raidPtr->logBytesPerSector),
2182 (int) queue->raidPtr->logBytesPerSector));
2217 rf_lock_mutex2(queue->raidPtr->iodone_lock);
2239 if (((queue->raidPtr->Disks[queue->col].status ==
2241 (queue->raidPtr->Disks[queue->col].status ==
2243 (queue->raidPtr->numFailures <
2244 queue->raidPtr->Layout.map->faultsTolerated)) {
2246 queue->raidPtr->raidid,
2248 queue->raidPtr->Disks[queue->col].devname);
2249 queue->raidPtr->Disks[queue->col].status =
2251 queue->raidPtr->status = rf_rs_degraded;
2252 queue->raidPtr->numFailures++;
2253 queue->raidPtr->numNewFailures++;
2264 TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);
2267 rf_signal_cond2(queue->raidPtr->iodone_cv);
2269 rf_unlock_mutex2(queue->raidPtr->iodone_lock);
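
Lines 2264-2269 are the producer half of a standard condition-variable queue: a completed request is appended to raidPtr->iodone under iodone_lock and iodone_cv is signalled (raid_wakeup() at lines 1060-1064 drives the same lock/cv pair). A minimal sketch of the consumer side, assuming the RF_DiskQueueData_t request type and a cv-wait wrapper rf_wait_cond2() in the same family as rf_signal_cond2():

	/* Sketch only: block until a completed request is queued, take it. */
	RF_DiskQueueData_t *req;

	rf_lock_mutex2(raidPtr->iodone_lock);
	while ((req = TAILQ_FIRST(&raidPtr->iodone)) == NULL)
		rf_wait_cond2(raidPtr->iodone_cv, raidPtr->iodone_lock);
	TAILQ_REMOVE(&raidPtr->iodone, req, iodone_entries);
	rf_unlock_mutex2(raidPtr->iodone_lock);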
2366 rf_parity_map_offset(RF_Raid_t *raidPtr)
2370 KASSERT(raidPtr->bytesPerSector);
2371 if (raidPtr->bytesPerSector > RF_COMPONENT_INFO_SIZE)
2372 map_offset = raidPtr->bytesPerSector;
2381 rf_parity_map_size(RF_Raid_t *raidPtr)
2385 if (raidPtr->bytesPerSector > RF_PARITY_MAP_SIZE)
2386 map_size = raidPtr->bytesPerSector;
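
rf_parity_map_offset() and rf_parity_map_size() (lines 2366-2386) pad the on-disk parity map region to a whole sector whenever bytesPerSector exceeds the compiled-in sizes, so reads and writes of the map stay sector-aligned. Assuming the unshown else branches fall back to the constants, each if/else pair reads as a max(), here with the MAX() macro from <sys/param.h>:

	/* Sketch: the effect of the size selection above. */
	map_offset = MAX(raidPtr->bytesPerSector, RF_COMPONENT_INFO_SIZE);
	map_size   = MAX(raidPtr->bytesPerSector, RF_PARITY_MAP_SIZE);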
2394 raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
2398 clabel = raidget_component_label(raidPtr, col);
2400 raidflush_component_label(raidPtr, col);
2406 raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
2410 clabel = raidget_component_label(raidPtr, col);
2412 raidflush_component_label(raidPtr, col);
2417 raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2419 KASSERT(raidPtr->bytesPerSector);
2421 return raidread_component_label(raidPtr->bytesPerSector,
2422 raidPtr->Disks[col].dev,
2423 raidPtr->raid_cinfo[col].ci_vp,
2424 &raidPtr->raid_cinfo[col].ci_label);
2428 raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2430 return &raidPtr->raid_cinfo[col].ci_label;
2434 raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
2438 label = &raidPtr->raid_cinfo[col].ci_label;
2439 label->mod_counter = raidPtr->mod_counter;
2443 return raidwrite_component_label(raidPtr->bytesPerSector,
2444 raidPtr->Disks[col].dev,
2445 raidPtr->raid_cinfo[col].ci_vp, label);
2590 rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2594 for (c = 0; c < raidPtr->numCol; c++) {
2596 if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2599 raidwrite_component_area(raidPtr->Disks[c].dev,
2600 raidPtr->raid_cinfo[c].ci_vp, map,
2602 rf_parity_map_offset(raidPtr),
2603 rf_parity_map_size(raidPtr));
2608 rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
2614 for (c = 0; c < raidPtr->numCol; c++) {
2616 if (RF_DEAD_DISK(raidPtr->Disks[c].status))
2618 raidread_component_area(raidPtr->Disks[c].dev,
2619 raidPtr->raid_cinfo[c].ci_vp, &tmp,
2621 rf_parity_map_offset(raidPtr),
2622 rf_parity_map_size(raidPtr));
2633 rf_markalldirty(RF_Raid_t *raidPtr)
2641 raidPtr->mod_counter++;
2642 for (c = 0; c < raidPtr->numCol; c++) {
2645 if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
2646 clabel = raidget_component_label(raidPtr, c);
2652 raidmarkdirty(raidPtr, c);
2657 for (c = 0; c < raidPtr->numSpare; c++) {
2658 sparecol = raidPtr->numCol + c;
2660 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2670 for (j = 0; j < raidPtr->numCol; j++) {
2671 if (raidPtr->Disks[j].spareCol == sparecol) {
2677 clabel = raidget_component_label(raidPtr, sparecol);
2680 raid_init_component_label(raidPtr, clabel);
2688 raidmarkdirty(raidPtr, sparecol);
2695 rf_update_component_labels(RF_Raid_t *raidPtr, int final)
2702 struct raid_softc *rs = raidPtr->softc;
2709 raidPtr->mod_counter++;
2711 for (c = 0; c < raidPtr->numCol; c++) {
2712 if (raidPtr->Disks[c].status == rf_ds_optimal) {
2713 clabel = raidget_component_label(raidPtr, c);
2719 clabel->last_unit = raidPtr->raidid;
2721 raidflush_component_label(raidPtr, c);
2723 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2724 raidmarkclean(raidPtr, c);
2731 for (c = 0; c < raidPtr->numSpare; c++) {
2732 sparecol = raidPtr->numCol + c;
2735 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
2745 for (j = 0; j < raidPtr->numCol; j++) {
2746 if (raidPtr->Disks[j].spareCol == sparecol) {
2753 clabel = raidget_component_label(raidPtr, sparecol);
2756 raid_init_component_label(raidPtr, clabel);
2761 clabel->last_unit = raidPtr->raidid;
2763 raidflush_component_label(raidPtr, sparecol);
2765 if (raidPtr->parity_good == RF_RAID_CLEAN) {
2766 raidmarkclean(raidPtr, sparecol);
2774 rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
2791 rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
2800 for (c = 0; c < raidPtr->numCol; c++) {
2801 vp = raidPtr->raid_cinfo[c].ci_vp;
2802 acd = raidPtr->Disks[c].auto_configured;
2803 rf_close_component(raidPtr, vp, acd);
2804 raidPtr->raid_cinfo[c].ci_vp = NULL;
2805 raidPtr->Disks[c].auto_configured = 0;
2808 for (r = 0; r < raidPtr->numSpare; r++) {
2809 vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
2810 acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
2811 rf_close_component(raidPtr, vp, acd);
2812 raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
2813 raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
2822 RF_Raid_t *raidPtr;
2825 raidPtr = (RF_Raid_t *) req->raidPtr;
2826 raidPtr->recon_in_progress = 1;
2829 raidPtr->forceRecon = 1;
2832 rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
2836 raidPtr->forceRecon = 0;
2841 raidPtr->recon_in_progress = 0;
2849 rf_RewriteParityThread(RF_Raid_t *raidPtr)
2854 raidPtr->parity_rewrite_stripes_done = 0;
2855 raidPtr->parity_rewrite_in_progress = 1;
2857 retcode = rf_RewriteParity(raidPtr);
2861 raidPtr->raidid, retcode);
2866 raidPtr->parity_good = RF_RAID_CLEAN;
2868 raidPtr->parity_rewrite_in_progress = 0;
2871 if (raidPtr->waitShutdown) {
2872 rf_lock_mutex2(raidPtr->rad_lock);
2873 cv_broadcast(&raidPtr->parity_rewrite_cv);
2874 rf_unlock_mutex2(raidPtr->rad_lock);
2885 RF_Raid_t *raidPtr;
2888 raidPtr = req->raidPtr;
2889 raidPtr->recon_in_progress = 1;
2892 raidPtr->forceRecon = 1;
2895 rf_ReconstructInPlace(raidPtr, req->col);
2898 raidPtr->forceRecon = 0;
2902 raidPtr->recon_in_progress = 0;
3495 RF_Raid_t *raidPtr)
3528 rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
3534 raidPtr->autoconfigure = new_value;
3536 for (column = 0; column < raidPtr->numCol; column++) {
3537 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3538 clabel = raidget_component_label(raidPtr, column);
3540 raidflush_component_label(raidPtr, column);
3543 for (column = 0; column < raidPtr->numSpare; column++) {
3544 sparecol = raidPtr->numCol + column;
3546 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3547 clabel = raidget_component_label(raidPtr, sparecol);
3549 raidflush_component_label(raidPtr, sparecol);
3556 rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
3562 raidPtr->root_partition = new_value;
3563 for (column = 0; column < raidPtr->numCol; column++) {
3564 if (raidPtr->Disks[column].status == rf_ds_optimal) {
3565 clabel = raidget_component_label(raidPtr, column);
3567 raidflush_component_label(raidPtr, column);
3570 for (column = 0; column < raidPtr->numSpare; column++) {
3571 sparecol = raidPtr->numCol + column;
3573 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3574 clabel = raidget_component_label(raidPtr, sparecol);
3576 raidflush_component_label(raidPtr, sparecol);
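
rf_set_autoconfig() and rf_set_rootpartition() above repeat an idiom that also appears in rf_markalldirty() and rf_update_component_labels(): walk every optimal column plus every used spare, patch the in-core label, and flush it to disk. A hypothetical helper capturing that shape, sketch only, assuming the RF_DiskStatus_t type used for Disks[].status:

	/* Sketch only: apply `patch' to each live component label, flush. */
	static void
	label_foreach(RF_Raid_t *raidPtr,
	    void (*patch)(RF_ComponentLabel_t *, void *), void *arg)
	{
		int c;

		for (c = 0; c < raidPtr->numCol + raidPtr->numSpare; c++) {
			RF_DiskStatus_t s = raidPtr->Disks[c].status;

			/* Columns must be optimal, spares in active use. */
			if (c < raidPtr->numCol ?
			    s != rf_ds_optimal : s != rf_ds_used_spare)
				continue;
			patch(raidget_component_label(raidPtr, c), arg);
			raidflush_component_label(raidPtr, c);
		}
	}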
3623 raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
3628 clabel->serial_number = raidPtr->serial_number;
3629 clabel->mod_counter = raidPtr->mod_counter;
3632 clabel->num_columns = raidPtr->numCol;
3636 clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
3637 clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
3638 clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;
3640 clabel->blockSize = raidPtr->bytesPerSector;
3641 rf_component_label_set_numblocks(clabel, raidPtr->sectorsPerDisk);
3644 clabel->parityConfig = raidPtr->Layout.map->parityConfig;
3645 clabel->maxOutstanding = raidPtr->maxOutstanding;
3646 clabel->autoconfigure = raidPtr->autoconfigure;
3647 clabel->root_partition = raidPtr->root_partition;
3648 clabel->last_unit = raidPtr->raidid;
3649 clabel->config_order = raidPtr->config_order;
3652 rf_paritymap_init_label(raidPtr->parity_map, clabel);
3659 RF_Raid_t *raidPtr;
3687 raidPtr = &sc->sc_r;
3690 raidPtr->softc = sc;
3691 raidPtr->raidid = raidID;
3692 raidPtr->openings = RAIDOUTSTANDING;
3695 rf_create_configuration(cset->ac, config, raidPtr);
3698 if (rf_Configure(raidPtr, config, cset->ac) == 0) {
3701 rf_markalldirty(raidPtr);
3702 raidPtr->autoconfigure = 1; /* XXX do this here? */
3713 raidPtr->root_partition = cset->rootable;
3729 rf_pool_init(RF_Raid_t *raidPtr, char *w_chan, struct pool *p, size_t size, const char *pool_name,
3734 snprintf(w_chan, RF_MAX_POOLNAMELEN, "raid%d_%s", raidPtr->raidid, pool_name);
3743 * rf_buf_queue_check(RF_Raid_t *raidPtr) -- looks into the buffer queue
3750 rf_buf_queue_check(RF_Raid_t *raidPtr)
3755 rs = raidPtr->softc;
3761 if (dk_strategy_pending(dksc) && raidPtr->openings > 0) {
3826 rf_set_geometry(struct raid_softc *rs, RF_Raid_t *raidPtr)
3833 dg->dg_secperunit = raidPtr->totalSectors;
3834 dg->dg_secsize = raidPtr->bytesPerSector;
3835 dg->dg_nsectors = raidPtr->Layout.dataSectorsPerStripe;
3836 dg->dg_ntracks = 4 * raidPtr->numCol;
3848 rf_get_component_caches(RF_Raid_t *raidPtr, int *data)
3854 for (c = 0; c < raidPtr->numCol + raidPtr->numSpare; c++) {
3859 if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
3860 error = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp,
3865 raidPtr->raidid,
3866 raidPtr->Disks[c].devname);
3891 rf_sync_component_cache(RF_Raid_t *raidPtr, int c, int force)
3895 e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp, DIOCCACHESYNC,
3900 raidPtr->raidid, i, raidPtr->Disks[c].devname, e);
3906 rf_sync_component_caches(RF_Raid_t *raidPtr, int force)
3911 for (c = 0; c < raidPtr->numCol; c++) {
3912 if (raidPtr->Disks[c].status == rf_ds_optimal) {
3913 int e = rf_sync_component_cache(raidPtr, c, force);
3919 for (c = 0; c < raidPtr->numSpare; c++) {
3920 int sparecol = raidPtr->numCol + c;
3923 if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
3924 int e = rf_sync_component_cache(raidPtr, sparecol,
3935 rf_check_recon_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
3940 if (raidPtr->status != rf_rs_reconstructing) {
3944 info->total = raidPtr->reconControl->numRUsTotal;
3945 info->completed = raidPtr->reconControl->numRUsComplete;
3952 rf_check_parityrewrite_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
3957 if (raidPtr->parity_rewrite_in_progress == 1) {
3958 info->total = raidPtr->Layout.numStripe;
3959 info->completed = raidPtr->parity_rewrite_stripes_done;
3969 rf_check_copyback_status_ext(RF_Raid_t *raidPtr, RF_ProgressInfo_t *info)
3980 rf_get_info(RF_Raid_t *raidPtr, RF_DeviceConfig_t *config)
3984 if (!raidPtr->valid)
3986 config->cols = raidPtr->numCol;
3987 config->ndevs = raidPtr->numCol;
3990 config->nspares = raidPtr->numSpare;
3993 config->maxqdepth = raidPtr->maxQueueDepth;
3996 config->devs[d] = raidPtr->Disks[j];
4000 config->spares[i] = raidPtr->Disks[raidPtr->numCol + i];
4010 rf_get_component_label(RF_Raid_t *raidPtr, void *data)
4016 if ((column < 0) || (column >= raidPtr->numCol + raidPtr->numSpare))
4018 raid_clabel = raidget_component_label(raidPtr, column);