Lines Matching defs:sd
(Every matched line below is from OpenBSD's sys/dev/softraid.c; the leading number is the line's position in that file. `sd` is almost always the struct sr_discipline pointer, except near the end of the listing where it is a struct sd_softc, the sd(4) disk softc.)
223 sr_meta_attach(struct sr_discipline *sd, int chunk_no, int force)
225 struct sr_softc *sc = sd->sd_sc;
233 sd->sd_meta = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF,
235 if (!sd->sd_meta) {
240 if (sd->sd_meta_type != SR_META_F_NATIVE) {
242 sd->sd_meta_foreign = malloc(smd[sd->sd_meta_type].smd_size,
244 if (!sd->sd_meta_foreign) {
253 cl = &sd->sd_vol.sv_chunk_list;
254 sd->sd_vol.sv_chunks = mallocarray(chunk_no, sizeof(struct sr_chunk *),
260 sd->sd_vol.sv_chunks[i++] = ch_entry;
263 if (smd[sd->sd_meta_type].smd_attach(sd, force))
269 ch_entry = sd->sd_vol.sv_chunks[i];
284 sd->sd_vol.sv_chunks[i++] = ch_entry;
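sr_meta_attach (lines 223-284 above) allocates the in-core metadata buffer and an overflow-checked array of chunk pointers, then stores each chunk from the volume's chunk list into a slot of that array. The mallocarray(9) call is the interesting part: it refuses multiplications that would wrap. A minimal userland sketch of that check, assuming nothing beyond libc (the name xmallocarray is illustrative):

    #include <stdint.h>
    #include <stdlib.h>
    #include <errno.h>

    /*
     * Overflow-checked array allocation, mirroring what
     * mallocarray(9) does for sd->sd_vol.sv_chunks above.
     */
    void *
    xmallocarray(size_t nmemb, size_t size)
    {
        if (size != 0 && nmemb > SIZE_MAX / size) {
            errno = ENOMEM;          /* nmemb * size would wrap */
            return NULL;
        }
        return malloc(nmemb * size);
    }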
292 sr_meta_probe(struct sr_discipline *sd, dev_t *dt, int no_chunk)
294 struct sr_softc *sc = sd->sd_sc;
307 cl = &sd->sd_vol.sv_chunk_list;
477 sr_meta_rw(struct sr_discipline *sd, dev_t dev, void *md, long flags)
482 DEVNAME(sd->sd_sc), dev, md, flags);
486 DEVNAME(sd->sd_sc));
490 rv = sr_rw(sd->sd_sc, dev, md, SR_META_SIZE * DEV_BSIZE,
498 sr_meta_clear(struct sr_discipline *sd)
500 struct sr_softc *sc = sd->sd_sc;
501 struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list;
508 if (sd->sd_meta_type != SR_META_F_NATIVE) {
515 if (sr_meta_native_write(sd, ch_entry->src_dev_mm, m, NULL)) {
525 bzero(sd->sd_meta, SR_META_SIZE * DEV_BSIZE);
534 sr_meta_init(struct sr_discipline *sd, int level, int no_chunk)
536 struct sr_softc *sc = sd->sd_sc;
537 struct sr_metadata *sm = sd->sd_meta;
538 struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list;
553 sm->ssdi.ssd_vol_flags = sd->sd_meta_flags;
591 sd->sd_vol.sv_chunk_minsz = min_chunk_sz;
592 sd->sd_vol.sv_chunk_maxsz = max_chunk_sz;
596 sr_meta_init_complete(struct sr_discipline *sd)
599 struct sr_softc *sc = sd->sd_sc;
601 struct sr_metadata *sm = sd->sd_meta;
608 "SR %s", sd->sd_name);
614 sr_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
623 struct sr_discipline *sd = xsd;
628 if (sr_meta_save(sd, SR_META_DIRTY))
629 printf("%s: save metadata failed\n", DEVNAME(sd->sd_sc));
631 sd->sd_must_flush = 0;
636 sr_meta_save(struct sr_discipline *sd, u_int32_t flags)
638 struct sr_softc *sc = sd->sd_sc;
639 struct sr_metadata *sm = sd->sd_meta, *m;
649 DEVNAME(sc), sd->sd_meta->ssd_devname);
657 s = &smd[sd->sd_meta_type];
674 src = sd->sd_vol.sv_chunks[i];
681 SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link) {
694 src = sd->sd_vol.sv_chunks[i];
718 if (s->smd_write(sd, src->src_dev_mm, m, NULL /* XXX */)) {
729 if (sd->sd_scsi_sync) {
732 wu.swu_dis = sd;
733 sd->sd_scsi_sync(&wu);
742 sr_meta_read(struct sr_discipline *sd)
744 struct sr_softc *sc = sd->sd_sc;
745 struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list;
756 s = &smd[sd->sd_meta_type];
757 if (sd->sd_meta_type != SR_META_F_NATIVE)
769 } else if (s->smd_read(sd, ch_entry->src_dev_mm, sm, fm)) {
786 if (sr_meta_validate(sd, ch_entry->src_dev_mm, sm, fm)) {
795 sr_meta_opt_load(sc, sm, &sd->sd_meta_opt);
796 memcpy(sd->sd_meta, sm, sizeof(*sd->sd_meta));
902 sr_meta_validate(struct sr_discipline *sd, dev_t dev, struct sr_metadata *sm,
905 struct sr_softc *sc = sd->sd_sc;
918 s = &smd[sd->sd_meta_type];
919 if (sd->sd_meta_type != SR_META_F_NATIVE)
920 if (s->smd_validate(sd, sm, fm)) {
1074 /* create fake sd to use utility functions */
1192 /* Only check sd(4) and wd(4) devices. */
1193 if (strncmp(dk->dk_name, "sd", 2) &&
1509 struct sr_discipline *sd;
1526 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
1527 SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link) {
1605 sr_meta_native_attach(struct sr_discipline *sd, int force)
1607 struct sr_softc *sc = sd->sd_sc;
1608 struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list;
1630 if (sr_meta_native_read(sd, ch_entry->src_dev_mm, md, NULL)) {
1674 if (sr_meta_native_read(sd, ch_entry->src_dev_mm, md,
1678 sd->sd_vol.sv_chunks[d]->src_meta.scm_status =
1695 sr_meta_native_read(struct sr_discipline *sd, dev_t dev,
1699 struct sr_softc *sc = sd->sd_sc;
1704 return (sr_meta_rw(sd, dev, md, B_READ));
1708 sr_meta_native_write(struct sr_discipline *sd, dev_t dev,
1712 struct sr_softc *sc = sd->sd_sc;
1717 return (sr_meta_rw(sd, dev, md, B_WRITE));
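sr_meta_native_read and sr_meta_native_write (lines 1695-1717) are thin direction-specific wrappers around sr_meta_rw, which takes the B_READ/B_WRITE flag and performs the actual SR_META_SIZE * DEV_BSIZE transfer. A userland sketch of the same wrapper pattern, using pread/pwrite as a stand-in for kernel buffer I/O (all names here are illustrative):

    #include <sys/types.h>
    #include <unistd.h>

    #define MD_READ  0
    #define MD_WRITE 1

    /* One flag-driven transfer routine... */
    static ssize_t
    md_rw(int fd, void *buf, size_t len, off_t off, int dir)
    {
        return (dir == MD_WRITE) ?
            pwrite(fd, buf, len, off) : pread(fd, buf, len, off);
    }

    /* ...and two thin entry points, cf. sr_meta_native_read/write. */
    ssize_t
    md_read(int fd, void *buf, size_t len, off_t off)
    {
        return md_rw(fd, buf, len, off, MD_READ);
    }

    ssize_t
    md_write(int fd, void *buf, size_t len, off_t off)
    {
        return md_rw(fd, buf, len, off, MD_WRITE);
    }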
1721 sr_hotplug_register(struct sr_discipline *sd, void *func)
1726 DEVNAME(sd->sd_sc), func);
1736 mhe->sh_sd = sd;
1741 sr_hotplug_unregister(struct sr_discipline *sd, void *func)
1746 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, func);
1892 sr_ccb_alloc(struct sr_discipline *sd)
1897 if (!sd)
1900 DNPRINTF(SR_D_CCB, "%s: sr_ccb_alloc\n", DEVNAME(sd->sd_sc));
1902 if (sd->sd_ccb)
1905 sd->sd_ccb = mallocarray(sd->sd_max_wu,
1906 sd->sd_max_ccb_per_wu * sizeof(struct sr_ccb),
1908 TAILQ_INIT(&sd->sd_ccb_freeq);
1909 for (i = 0; i < sd->sd_max_wu * sd->sd_max_ccb_per_wu; i++) {
1910 ccb = &sd->sd_ccb[i];
1911 ccb->ccb_dis = sd;
1916 DEVNAME(sd->sd_sc), sd->sd_max_wu * sd->sd_max_ccb_per_wu);
1922 sr_ccb_free(struct sr_discipline *sd)
1926 if (!sd)
1929 DNPRINTF(SR_D_CCB, "%s: sr_ccb_free %p\n", DEVNAME(sd->sd_sc), sd);
1931 while ((ccb = TAILQ_FIRST(&sd->sd_ccb_freeq)) != NULL)
1932 TAILQ_REMOVE(&sd->sd_ccb_freeq, ccb, ccb_link);
1934 free(sd->sd_ccb, M_DEVBUF, sd->sd_max_wu * sd->sd_max_ccb_per_wu *
1939 sr_ccb_get(struct sr_discipline *sd)
1946 ccb = TAILQ_FIRST(&sd->sd_ccb_freeq);
1948 TAILQ_REMOVE(&sd->sd_ccb_freeq, ccb, ccb_link);
1954 DNPRINTF(SR_D_CCB, "%s: sr_ccb_get: %p\n", DEVNAME(sd->sd_sc),
1963 struct sr_discipline *sd = ccb->ccb_dis;
1966 DNPRINTF(SR_D_CCB, "%s: sr_ccb_put: %p\n", DEVNAME(sd->sd_sc),
1976 TAILQ_INSERT_TAIL(&sd->sd_ccb_freeq, ccb, ccb_link);
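The CCB routines (lines 1892-1976) implement a classic preallocated freelist: sr_ccb_alloc carves one contiguous array of sd_max_wu * sd_max_ccb_per_wu CCBs and threads every element onto sd_ccb_freeq; sr_ccb_get pops the head and sr_ccb_put pushes completed CCBs back on the tail, so no allocation happens on the I/O path. A minimal userland sketch of the pattern, assuming <sys/queue.h> TAILQs and omitting the locking the kernel needs ("struct ccb" here is a stand-in, not softraid's struct sr_ccb):

    #include <sys/queue.h>
    #include <stdlib.h>

    struct ccb {
        TAILQ_ENTRY(ccb) link;
        /* per-I/O state would live here */
    };
    TAILQ_HEAD(ccb_list, ccb);

    static struct ccb_list freeq = TAILQ_HEAD_INITIALIZER(freeq);
    static struct ccb *pool;

    int
    ccb_pool_init(size_t n)
    {
        size_t i;

        pool = calloc(n, sizeof(*pool));    /* one contiguous array */
        if (pool == NULL)
            return -1;
        for (i = 0; i < n; i++)             /* thread each slot on the freelist */
            TAILQ_INSERT_TAIL(&freeq, &pool[i], link);
        return 0;
    }

    struct ccb *
    ccb_get(void)
    {
        struct ccb *c = TAILQ_FIRST(&freeq);

        if (c != NULL)
            TAILQ_REMOVE(&freeq, c, link);  /* pop from the head */
        return c;                           /* NULL: pool exhausted */
    }

    void
    ccb_put(struct ccb *c)
    {
        TAILQ_INSERT_TAIL(&freeq, c, link); /* recycle, never free(3) */
    }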
1982 sr_ccb_rw(struct sr_discipline *sd, int chunk, daddr_t blkno,
1985 struct sr_chunk *sc = sd->sd_vol.sv_chunks[chunk];
1989 ccb = sr_ccb_get(sd);
2002 ccb->ccb_buf.b_blkno = blkno + sd->sd_meta->ssd_data_blkno;
2008 ccb->ccb_buf.b_iodone = sd->sd_scsi_intr;
2022 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name,
2034 struct sr_discipline *sd = wu->swu_dis;
2035 struct sr_softc *sc = sd->sd_sc;
2039 DEVNAME(sc), sd->sd_meta->ssd_devname, sd->sd_name,
2052 if (ISSET(sd->sd_capabilities, SR_CAP_REDUNDANT))
2053 sd->sd_set_chunk_state(sd, ccb->ccb_target,
2057 DEVNAME(sc), sd->sd_meta->ssd_devname,
2058 ccb->ccb_buf.b_error, sd->sd_name,
2071 sr_wu_alloc(struct sr_discipline *sd)
2076 DNPRINTF(SR_D_WU, "%s: sr_wu_alloc %p %d\n", DEVNAME(sd->sd_sc),
2077 sd, sd->sd_max_wu);
2079 no_wu = sd->sd_max_wu;
2080 sd->sd_wu_pending = no_wu;
2082 mtx_init(&sd->sd_wu_mtx, IPL_BIO);
2083 TAILQ_INIT(&sd->sd_wu);
2084 TAILQ_INIT(&sd->sd_wu_freeq);
2085 TAILQ_INIT(&sd->sd_wu_pendq);
2086 TAILQ_INIT(&sd->sd_wu_defq);
2089 wu = malloc(sd->sd_wu_size, M_DEVBUF, M_WAITOK | M_ZERO);
2090 TAILQ_INSERT_TAIL(&sd->sd_wu, wu, swu_next);
2092 wu->swu_dis = sd;
2094 sr_wu_put(sd, wu);
2101 sr_wu_free(struct sr_discipline *sd)
2105 DNPRINTF(SR_D_WU, "%s: sr_wu_free %p\n", DEVNAME(sd->sd_sc), sd);
2107 while ((wu = TAILQ_FIRST(&sd->sd_wu_freeq)) != NULL)
2108 TAILQ_REMOVE(&sd->sd_wu_freeq, wu, swu_link);
2109 while ((wu = TAILQ_FIRST(&sd->sd_wu_pendq)) != NULL)
2110 TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
2111 while ((wu = TAILQ_FIRST(&sd->sd_wu_defq)) != NULL)
2112 TAILQ_REMOVE(&sd->sd_wu_defq, wu, swu_link);
2114 while ((wu = TAILQ_FIRST(&sd->sd_wu)) != NULL) {
2115 TAILQ_REMOVE(&sd->sd_wu, wu, swu_next);
2116 free(wu, M_DEVBUF, sd->sd_wu_size);
2123 struct sr_discipline *sd = (struct sr_discipline *)xsd;
2126 mtx_enter(&sd->sd_wu_mtx);
2127 wu = TAILQ_FIRST(&sd->sd_wu_freeq);
2129 TAILQ_REMOVE(&sd->sd_wu_freeq, wu, swu_link);
2130 sd->sd_wu_pending++;
2132 mtx_leave(&sd->sd_wu_mtx);
2134 DNPRINTF(SR_D_WU, "%s: sr_wu_get: %p\n", DEVNAME(sd->sd_sc), wu);
2142 struct sr_discipline *sd = (struct sr_discipline *)xsd;
2145 DNPRINTF(SR_D_WU, "%s: sr_wu_put: %p\n", DEVNAME(sd->sd_sc), wu);
2148 sr_wu_init(sd, wu);
2150 mtx_enter(&sd->sd_wu_mtx);
2151 TAILQ_INSERT_TAIL(&sd->sd_wu_freeq, wu, swu_link);
2152 sd->sd_wu_pending--;
2153 mtx_leave(&sd->sd_wu_mtx);
2157 sr_wu_init(struct sr_discipline *sd, struct sr_workunit *wu)
2163 panic("%s: sr_wu_init got active wu", DEVNAME(sd->sd_sc));
2177 struct sr_discipline *sd = wu->swu_dis;
2183 DEVNAME(sd->sd_sc));
2210 struct sr_discipline *sd = wu->swu_dis;
2213 DEVNAME(sd->sd_sc), wu->swu_io_count, wu->swu_ios_complete,
2219 task_add(sd->sd_taskq, &wu->swu_task);
2226 struct sr_discipline *sd = wu->swu_dis;
2247 if (sd->sd_scsi_wu_done) {
2248 if (sd->sd_scsi_wu_done(wu) == SR_WU_RESTART)
2253 TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link)
2258 DEVNAME(sd->sd_sc), wu);
2259 TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
2277 if (sd->sd_scsi_done)
2278 sd->sd_scsi_done(wu);
2280 sr_scsi_wu_put(sd, wu);
2282 sr_scsi_done(sd, xs);
2289 sr_scsi_wu_get(struct sr_discipline *sd, int flags)
2291 return scsi_io_get(&sd->sd_iopool, flags);
2295 sr_scsi_wu_put(struct sr_discipline *sd, struct sr_workunit *wu)
2297 scsi_io_put(&sd->sd_iopool, wu);
2299 if (sd->sd_sync && sd->sd_wu_pending == 0)
2300 wakeup(sd);
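sr_scsi_wu_get/sr_scsi_wu_put (lines 2289-2300) do not manage work units directly; they go through the scsi_iopool registered at line 3495 with scsi_iopool_init(&sd->sd_iopool, sd, sr_wu_get, sr_wu_put). The SCSI midlayer only ever sees an opaque cookie plus get/put callbacks. A simplified model of that indirection (illustrative types only; the real iopool machinery lives in the kernel's SCSI midlayer):

    struct iopool {
        void *cookie;                      /* e.g. the discipline, sd */
        void *(*io_get)(void *);
        void  (*io_put)(void *, void *);
    };

    void
    iopool_init(struct iopool *p, void *cookie,
        void *(*get)(void *), void (*put)(void *, void *))
    {
        p->cookie = cookie;
        p->io_get = get;
        p->io_put = put;
    }

    void *
    io_get(struct iopool *p)
    {
        return p->io_get(p->cookie);       /* cf. sr_wu_get(sd) */
    }

    void
    io_put(struct iopool *p, void *io)
    {
        p->io_put(p->cookie, io);          /* cf. sr_wu_put(sd, wu) */
    }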
2304 sr_scsi_done(struct sr_discipline *sd, struct scsi_xfer *xs)
2306 DNPRINTF(SR_D_DIS, "%s: sr_scsi_done: xs %p\n", DEVNAME(sd->sd_sc), xs);
2313 if (sd->sd_sync && sd->sd_wu_pending == 0)
2314 wakeup(sd);
2323 struct sr_discipline *sd;
2328 sd = sc->sc_targets[link->target];
2329 if (sd == NULL)
2332 if (sd->sd_deleted) {
2334 DEVNAME(sc), sd->sd_meta->ssd_devname);
2340 sr_wu_init(sd, wu);
2353 if (sd->sd_scsi_rw(wu))
2360 if (sd->sd_scsi_sync(wu))
2367 if (sd->sd_scsi_tur(wu))
2374 if (sd->sd_scsi_start_stop(wu))
2381 if (sd->sd_scsi_inquiry(wu))
2389 if (sd->sd_scsi_read_cap(wu))
2396 if (sd->sd_scsi_req_sense(wu))
2409 if (sd->sd_scsi_sense.error_code) {
2411 memcpy(&xs->sense, &sd->sd_scsi_sense, sizeof(xs->sense));
2412 bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense));
2417 sr_scsi_done(sd, xs);
2424 struct sr_discipline *sd;
2428 sd = sc->sc_targets[link->target];
2429 if (sd == NULL)
2432 link->pool = &sd->sd_iopool;
2433 if (sd->sd_openings)
2434 link->openings = sd->sd_openings(sd);
2436 link->openings = sd->sd_max_wu;
2445 struct sr_discipline *sd;
2447 sd = sc->sc_targets[link->target];
2448 if (sd == NULL)
2452 DEVNAME(sc), sd->sd_meta->ssd_devname, cmd);
2456 return (sr_bio_handler(sc, sd, cmd, (struct bio *)addr));
2477 sr_bio_handler(struct sr_softc *sc, struct sr_discipline *sd, u_long cmd,
2527 rv = sr_ioctl_deleteraid(sc, sd, (struct bioc_deleteraid *)bio);
2532 rv = sr_ioctl_discipline(sc, sd, (struct bioc_discipline *)bio);
2537 rv = sr_ioctl_installboot(sc, sd,
2561 struct sr_discipline *sd;
2564 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
2566 disk += sd->sd_meta->ssdi.ssd_chunk_no;
2580 struct sr_discipline *sd;
2583 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
2588 bv->bv_status = sd->sd_vol_status;
2589 bv->bv_size = sd->sd_meta->ssdi.ssd_size << DEV_BSHIFT;
2590 bv->bv_level = sd->sd_meta->ssdi.ssd_level;
2591 bv->bv_nodisk = sd->sd_meta->ssdi.ssd_chunk_no;
2594 if (sd->sd_meta->ssdi.ssd_level == 'C' &&
2595 sd->mds.mdd_crypto.key_disk != NULL)
2597 else if (sd->sd_meta->ssdi.ssd_level == 0x1C &&
2598 sd->mds.mdd_raid1c.sr1c_crypto.key_disk != NULL)
2602 bv->bv_percent = sr_rebuild_percent(sd);
2604 strlcpy(bv->bv_dev, sd->sd_meta->ssd_devname,
2606 strlcpy(bv->bv_vendor, sd->sd_meta->ssdi.ssd_vendor,
2637 struct sr_discipline *sd;
2644 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
2649 if (bd->bd_diskid < sd->sd_meta->ssdi.ssd_chunk_no)
2650 src = sd->sd_vol.sv_chunks[bd->bd_diskid];
2652 else if (bd->bd_diskid == sd->sd_meta->ssdi.ssd_chunk_no &&
2653 sd->sd_meta->ssdi.ssd_level == 'C' &&
2654 sd->mds.mdd_crypto.key_disk != NULL)
2655 src = sd->mds.mdd_crypto.key_disk;
2656 else if (bd->bd_diskid == sd->sd_meta->ssdi.ssd_chunk_no &&
2657 sd->sd_meta->ssdi.ssd_level == 0x1C &&
2658 sd->mds.mdd_raid1c.sr1c_crypto.key_disk != NULL)
2659 src = sd->mds.mdd_raid1c.sr1c_crypto.key_disk;
2702 struct sr_discipline *sd;
2714 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
2719 if (sd == NULL)
2726 cl = &sd->sd_vol.sv_chunk_list;
2740 sd->sd_set_chunk_state(sd, c, BIOC_SDOFFLINE);
2742 if (sr_meta_save(sd, SR_META_DIRTY)) {
2744 sd->sd_meta->ssd_devname);
2754 rv = sr_rebuild_init(sd, (dev_t)bs->bs_other_id, 0);
2768 struct sr_discipline *sd;
2778 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
2779 for (i = 0; i < sd->sd_meta->ssdi.ssd_chunk_no; i++) {
2780 chunk = sd->sd_vol.sv_chunks[i];
2797 struct sr_discipline *sd = NULL;
2915 sd = malloc(sizeof(struct sr_discipline), M_DEVBUF, M_WAITOK | M_ZERO);
2916 sd->sd_sc = sc;
2917 sd->sd_meta = sm;
2918 sd->sd_meta_type = SR_META_F_NATIVE;
2919 sd->sd_vol_status = BIOC_SVONLINE;
2920 strlcpy(sd->sd_name, "HOTSPARE", sizeof(sd->sd_name));
2921 SLIST_INIT(&sd->sd_meta_opt);
2924 sd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
2926 sd->sd_vol.sv_chunks[0] = hotspare;
2927 SLIST_INIT(&sd->sd_vol.sv_chunk_list);
2928 SLIST_INSERT_HEAD(&sd->sd_vol.sv_chunk_list, hotspare, src_link);
2931 if (sr_meta_save(sd, SR_META_DIRTY)) {
2958 if (sd)
2959 free(sd->sd_vol.sv_chunks, M_DEVBUF,
2960 sizeof(sd->sd_vol.sv_chunks));
2961 free(sd, M_DEVBUF, sizeof(*sd));
2974 struct sr_discipline *sd = xsd;
2975 sr_hotspare_rebuild(sd);
2979 sr_hotspare_rebuild(struct sr_discipline *sd)
2981 struct sr_softc *sc = sd->sd_sc;
2993 for (cid = 0; cid < sd->sd_meta->ssdi.ssd_chunk_no; cid++) {
2994 if (sd->sd_vol.sv_chunks[cid]->src_meta.scm_status ==
2996 chunk = sd->sd_vol.sv_chunks[cid];
3002 DEVNAME(sc), sd->sd_meta->ssd_devname);
3011 hotspare->src_secsize <= sd->sd_meta->ssdi.ssd_secsize)
3018 sd->sd_meta->ssd_devname, hotspare->src_devname);
3029 TAILQ_FOREACH(wu, &sd->sd_wu_pendq, swu_link) {
3035 TAILQ_FOREACH(wu, &sd->sd_wu_defq, swu_link) {
3044 tsleep_nsec(sd, PRIBIO, "sr_hotspare",
3065 if (sr_rebuild_init(sd, hotspare->src_dev_mm, 1) == 0) {
3081 sr_rebuild_init(struct sr_discipline *sd, dev_t dev, int hotspare)
3083 struct sr_softc *sc = sd->sd_sc;
3098 if (!(sd->sd_capabilities & SR_CAP_REBUILD)) {
3104 if (sd->sd_vol_status == BIOC_SVREBUILD) {
3108 if (sd->sd_vol_status != BIOC_SVDEGRADED) {
3114 for (cid = 0; cid < sd->sd_meta->ssdi.ssd_chunk_no; cid++) {
3115 if (sd->sd_vol.sv_chunks[cid]->src_meta.scm_status ==
3117 chunk = sd->sd_vol.sv_chunks[cid];
3128 for (i = 0; i < sd->sd_meta->ssdi.ssd_chunk_no; i++) {
3129 if (sd->sd_vol.sv_chunks[i]->src_meta.scm_status ==
3131 meta = &sd->sd_vol.sv_chunks[i]->src_meta;
3171 if (size <= sd->sd_meta->ssd_data_blkno) {
3176 size -= sd->sd_meta->ssd_data_blkno;
3189 if (label.d_secsize > sd->sd_meta->ssdi.ssd_secsize) {
3191 "required", devname, sd->sd_meta->ssdi.ssd_secsize);
3204 sd->sd_meta->ssd_rebuild = 0;
3215 meta->scmi.scm_volid = sd->sd_meta->ssdi.ssd_volid;
3221 memcpy(&meta->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
3226 sd->sd_set_chunk_state(sd, cid, BIOC_SDREBUILD);
3228 if (sr_meta_save(sd, SR_META_DIRTY)) {
3235 sd->sd_meta->ssd_devname, devname);
3237 sd->sd_reb_abort = 0;
3238 kthread_create_deferred(sr_rebuild_start, sd);
3251 sr_rebuild_percent(struct sr_discipline *sd)
3255 sz = sd->sd_meta->ssdi.ssd_size;
3256 rb = sd->sd_meta->ssd_rebuild;
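sr_rebuild_percent (lines 3251-3256) derives progress from two metadata fields: ssd_rebuild, the block offset the rebuild has reached, and ssdi.ssd_size, the volume size in blocks. A hedged sketch of the arithmetic, with a divide-by-zero guard added for safety (the exact rounding in the kernel may differ):

    #include <stdint.h>

    int
    rebuild_percent(int64_t rb, int64_t sz)
    {
        if (sz <= 0 || rb <= 0)
            return 0;                 /* nothing rebuilt yet */
        if (rb >= sz)
            return 100;               /* clamp at completion */
        return (int)((rb * 100) / sz);
    }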
3265 sr_roam_chunks(struct sr_discipline *sd)
3267 struct sr_softc *sc = sd->sd_sc;
3273 SLIST_FOREACH(chunk, &sd->sd_vol.sv_chunk_list, src_link) {
3289 sr_meta_save(sd, SR_META_DIRTY);
3298 struct sr_discipline *sd = NULL;
3322 sd = malloc(sizeof(struct sr_discipline), M_DEVBUF, M_WAITOK | M_ZERO);
3323 sd->sd_sc = sc;
3324 SLIST_INIT(&sd->sd_meta_opt);
3325 sd->sd_taskq = taskq_create("srdis", 1, IPL_BIO, 0);
3326 if (sd->sd_taskq == NULL) {
3330 if (sr_discipline_init(sd, bc->bc_level)) {
3336 cl = &sd->sd_vol.sv_chunk_list;
3348 sd->sd_meta_type = sr_meta_probe(sd, dt, no_chunk);
3349 if (sd->sd_meta_type == SR_META_F_INVALID) {
3354 if (sr_meta_attach(sd, no_chunk, bc->bc_flags & BIOC_SCFORCE))
3360 if (sr_meta_read(sd))
3361 if (sr_already_assembled(sd)) {
3363 &sd->sd_meta->ssdi.ssd_uuid);
3370 if (sr_meta_clear(sd)) {
3376 no_meta = sr_meta_read(sd);
3387 sr_meta_init(sd, bc->bc_level, no_chunk);
3388 sd->sd_vol_status = BIOC_SVONLINE;
3389 sd->sd_meta_flags = bc->bc_flags & BIOC_SCNOAUTOASSEMBLE;
3390 if (sd->sd_create) {
3391 if ((i = sd->sd_create(sd, bc, no_chunk,
3392 sd->sd_vol.sv_chunk_minsz))) {
3397 sr_meta_init_complete(sd);
3401 DEVNAME(sc), sd->sd_meta->ssdi.ssd_size);
3404 if ((sd->sd_capabilities & SR_CAP_NON_COERCED) == 0 &&
3405 sd->sd_vol.sv_chunk_minsz != sd->sd_vol.sv_chunk_maxsz)
3408 sd->sd_vol.sv_chunk_maxsz -
3409 sd->sd_vol.sv_chunk_minsz);
3415 sd->sd_meta->ssdi.ssd_chunk_no > no_chunk) {
3417 sd->sd_meta->ssd_devname);
3418 } else if (sd->sd_meta->ssdi.ssd_chunk_no != no_chunk) {
3425 if (sd->sd_meta->ssdi.ssd_level != bc->bc_level) {
3431 if (sr_already_assembled(sd)) {
3432 uuid = sr_uuid_format(&sd->sd_meta->ssdi.ssd_uuid);
3438 if (user == 0 && sd->sd_meta_flags & BIOC_SCNOAUTOASSEMBLE) {
3446 sd->sd_meta->ssd_devname);
3448 if (sd->sd_meta->ssd_meta_flags & SR_META_DIRTY)
3450 sd->sd_meta->ssd_devname);
3452 SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link)
3453 if (sd->sd_meta_opt_handler == NULL ||
3454 sd->sd_meta_opt_handler(sd, omi->omi_som) != 0)
3455 sr_meta_opt_handler(sd, omi->omi_som);
3457 if (sd->sd_assemble) {
3458 if ((i = sd->sd_assemble(sd, bc, no_chunk, data))) {
3470 TAILQ_INSERT_TAIL(&sc->sc_dis_list, sd, sd_link);
3473 if ((rv = sd->sd_alloc_resources(sd)))
3477 if ((sd->sd_capabilities & SR_CAP_AUTO_ASSEMBLE) &&
3479 (sd->sd_meta->ssdi.ssd_vol_flags & BIOC_SCNOAUTOASSEMBLE)) {
3480 sd->sd_meta->ssdi.ssd_vol_flags &= ~BIOC_SCNOAUTOASSEMBLE;
3481 sd->sd_meta->ssdi.ssd_vol_flags |=
3485 if (sd->sd_capabilities & SR_CAP_SYSTEM_DISK) {
3487 sd->sd_set_vol_state(sd);
3488 if (sd->sd_vol_status == BIOC_SVOFFLINE) {
3490 "online", sd->sd_meta->ssd_devname);
3495 scsi_iopool_init(&sd->sd_iopool, sd, sr_wu_get, sr_wu_put);
3514 sd->sd_meta->ssd_devname);
3519 bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense));
3522 sd->sd_target = target;
3523 sc->sc_targets[target] = sd;
3527 sd->sd_target = 0;
3537 DEVNAME(sc), dev->dv_xname, sd->sd_target);
3540 for (i = 0, vol = -1; i <= sd->sd_target; i++)
3546 if (sd->sd_meta->ssd_devname[0] != '\0' &&
3547 strncmp(sd->sd_meta->ssd_devname, dev->dv_xname,
3551 sd->sd_meta->ssd_devname);
3554 sd->sd_meta->ssdi.ssd_volid = vol;
3555 strlcpy(sd->sd_meta->ssd_devname, dev->dv_xname,
3556 sizeof(sd->sd_meta->ssd_devname));
3559 sd->sd_name, sd->sd_meta->ssd_devname);
3562 sr_roam_chunks(sd);
3565 if (sr_sensors_create(sd))
3572 strlcpy(sd->sd_meta->ssd_devname, ch_entry->src_devname,
3573 sizeof(sd->sd_meta->ssd_devname));
3575 if (sd->sd_start_discipline(sd))
3580 rv = sr_meta_save(sd, SR_META_DIRTY);
3582 if (sd->sd_vol_status == BIOC_SVREBUILD)
3583 kthread_create_deferred(sr_rebuild_start, sd);
3585 sd->sd_ready = 1;
3594 sr_discipline_shutdown(sd, 0, 0);
3603 sr_ioctl_deleteraid(struct sr_softc *sc, struct sr_discipline *sd,
3611 if (sd == NULL && (sd = sr_find_discipline(sc, bd->bd_dev)) == NULL) {
3620 if (bcmp(&sr_bootuuid, &sd->sd_meta->ssdi.ssd_uuid,
3626 sd->sd_deleted = 1;
3627 sd->sd_meta->ssdi.ssd_vol_flags = BIOC_SCNOAUTOASSEMBLE;
3628 sr_discipline_shutdown(sd, 1, 0);
3636 sr_ioctl_discipline(struct sr_softc *sc, struct sr_discipline *sd,
3646 if (sd == NULL && (sd = sr_find_discipline(sc, bd->bd_dev)) == NULL) {
3651 if (sd->sd_ioctl_handler)
3652 rv = sd->sd_ioctl_handler(sd, bd);
3659 sr_ioctl_installboot(struct sr_softc *sc, struct sr_discipline *sd,
3675 if (sd == NULL && (sd = sr_find_discipline(sc, bb->bb_dev)) == NULL) {
3691 if (sd->sd_meta->ssd_data_blkno < (SR_BOOT_OFFSET + SR_BOOT_SIZE)) {
3708 secsize = sd->sd_meta->ssdi.ssd_secsize;
3723 SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link)
3733 SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
3734 sd->sd_meta->ssdi.ssd_opt_no++;
3747 for (i = 0; i < sd->sd_meta->ssdi.ssd_chunk_no; i++) {
3749 chunk = sd->sd_vol.sv_chunks[i];
3784 sd->sd_meta->ssdi.ssd_vol_flags |= BIOC_SCBOOTABLE;
3785 if (sr_meta_save(sd, SR_META_DIRTY)) {
3831 sr_discipline_free(struct sr_discipline *sd)
3838 if (!sd)
3841 sc = sd->sd_sc;
3845 sd->sd_meta ? sd->sd_meta->ssd_devname : "nodev");
3846 if (sd->sd_free_resources)
3847 sd->sd_free_resources(sd);
3848 free(sd->sd_vol.sv_chunks, M_DEVBUF, 0);
3849 free(sd->sd_meta, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
3850 free(sd->sd_meta_foreign, M_DEVBUF, smd[sd->sd_meta_type].smd_size);
3852 som = &sd->sd_meta_opt;
3859 if (sd->sd_target != 0) {
3860 KASSERT(sc->sc_targets[sd->sd_target] == sd);
3861 sc->sc_targets[sd->sd_target] = NULL;
3865 if (sdtmp1 == sd)
3869 TAILQ_REMOVE(&sc->sc_dis_list, sd, sd_link);
3871 explicit_bzero(sd, sizeof *sd);
3872 free(sd, M_DEVBUF, sizeof(*sd));
3876 sr_discipline_shutdown(struct sr_discipline *sd, int meta_save, int dying)
3881 if (!sd)
3883 sc = sd->sd_sc;
3886 sd->sd_meta ? sd->sd_meta->ssd_devname : "nodev");
3889 if (sd->sd_reb_active) {
3890 sd->sd_reb_abort = 1;
3891 while (sd->sd_reb_active)
3892 tsleep_nsec(sd, PWAIT, "sr_shutdown", MSEC_TO_NSEC(1));
3896 sr_meta_save(sd, 0);
3900 sd->sd_ready = 0;
3903 wakeup(sd);
3904 while (sd->sd_sync || sd->sd_must_flush) {
3905 ret = tsleep_nsec(&sd->sd_sync, MAXPRI, "sr_down",
3911 sd->sd_ready = 1;
3917 sr_sensors_delete(sd);
3920 if (sd->sd_target != 0)
3921 scsi_detach_lun(sc->sc_scsibus, sd->sd_target, 0,
3924 sr_chunks_unwind(sc, &sd->sd_vol.sv_chunk_list);
3926 if (sd->sd_taskq)
3927 taskq_destroy(sd->sd_taskq);
3929 sr_discipline_free(sd);
3935 sr_discipline_init(struct sr_discipline *sd, int level)
3940 sd->sd_alloc_resources = sr_alloc_resources;
3941 sd->sd_assemble = NULL;
3942 sd->sd_create = NULL;
3943 sd->sd_free_resources = sr_free_resources;
3944 sd->sd_ioctl_handler = NULL;
3945 sd->sd_openings = NULL;
3946 sd->sd_meta_opt_handler = NULL;
3947 sd->sd_rebuild = sr_rebuild;
3948 sd->sd_scsi_inquiry = sr_raid_inquiry;
3949 sd->sd_scsi_read_cap = sr_raid_read_cap;
3950 sd->sd_scsi_tur = sr_raid_tur;
3951 sd->sd_scsi_req_sense = sr_raid_request_sense;
3952 sd->sd_scsi_start_stop = sr_raid_start_stop;
3953 sd->sd_scsi_sync = sr_raid_sync;
3954 sd->sd_scsi_rw = NULL;
3955 sd->sd_scsi_intr = sr_raid_intr;
3956 sd->sd_scsi_wu_done = NULL;
3957 sd->sd_scsi_done = NULL;
3958 sd->sd_set_chunk_state = sr_set_chunk_state;
3959 sd->sd_set_vol_state = sr_set_vol_state;
3960 sd->sd_start_discipline = NULL;
3962 task_set(&sd->sd_meta_save_task, sr_meta_save_callback, sd);
3963 task_set(&sd->sd_hotspare_rebuild_task, sr_hotspare_rebuild_callback,
3964 sd);
3966 sd->sd_wu_size = sizeof(struct sr_workunit);
3969 sr_raid0_discipline_init(sd);
3972 sr_raid1_discipline_init(sd);
3975 sr_raid5_discipline_init(sd);
3978 sr_raid6_discipline_init(sd);
3982 sr_crypto_discipline_init(sd);
3985 sr_raid1c_discipline_init(sd);
3989 sr_concat_discipline_init(sd);
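The block of assignments in sr_discipline_init (lines 3940-3989) is a hand-rolled C vtable: generic defaults (sr_raid_inquiry, sr_raid_sync, ...) are installed first, then the per-level *_discipline_init overrides whichever hooks that RAID level implements, e.g. sd_scsi_rw. A minimal sketch of the idiom, with two hooks and one level, purely illustrative:

    struct discipline {
        int (*scsi_rw)(struct discipline *);
        int (*scsi_sync)(struct discipline *);
    };

    static int
    generic_sync(struct discipline *d)
    {
        return 0;                     /* default: nothing extra to flush */
    }

    static int
    raid1_rw(struct discipline *d)
    {
        return 0;                     /* level-specific I/O path */
    }

    static void
    discipline_init(struct discipline *d, int level)
    {
        d->scsi_rw = NULL;            /* must be supplied per level */
        d->scsi_sync = generic_sync;  /* sane default */

        switch (level) {
        case 1:
            d->scsi_rw = raid1_rw;    /* cf. sr_raid1_discipline_init */
            break;
        }
    }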
4003 struct sr_discipline *sd = wu->swu_dis;
4008 DNPRINTF(SR_D_DIS, "%s: sr_raid_inquiry\n", DEVNAME(sd->sd_sc));
4023 strlcpy(inq.vendor, sd->sd_meta->ssdi.ssd_vendor,
4025 strlcpy(inq.product, sd->sd_meta->ssdi.ssd_product,
4027 strlcpy(inq.revision, sd->sd_meta->ssdi.ssd_revision,
4037 struct sr_discipline *sd = wu->swu_dis;
4045 DNPRINTF(SR_D_DIS, "%s: sr_raid_read_cap\n", DEVNAME(sd->sd_sc));
4047 secsize = sd->sd_meta->ssdi.ssd_secsize;
4049 addr = ((sd->sd_meta->ssdi.ssd_size * DEV_BSIZE) / secsize) - 1;
4073 struct sr_discipline *sd = wu->swu_dis;
4075 DNPRINTF(SR_D_DIS, "%s: sr_raid_tur\n", DEVNAME(sd->sd_sc));
4077 if (sd->sd_vol_status == BIOC_SVOFFLINE) {
4078 sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT;
4079 sd->sd_scsi_sense.flags = SKEY_NOT_READY;
4080 sd->sd_scsi_sense.add_sense_code = 0x04;
4081 sd->sd_scsi_sense.add_sense_code_qual = 0x11;
4082 sd->sd_scsi_sense.extra_len = 4;
4084 } else if (sd->sd_vol_status == BIOC_SVINVALID) {
4085 sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT;
4086 sd->sd_scsi_sense.flags = SKEY_HARDWARE_ERROR;
4087 sd->sd_scsi_sense.add_sense_code = 0x05;
4088 sd->sd_scsi_sense.add_sense_code_qual = 0x00;
4089 sd->sd_scsi_sense.extra_len = 4;
4099 struct sr_discipline *sd = wu->swu_dis;
4103 DEVNAME(sd->sd_sc));
4106 memcpy(&xs->sense, &sd->sd_scsi_sense, sizeof(xs->sense));
4109 bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense));
4136 struct sr_discipline *sd = wu->swu_dis;
4139 DNPRINTF(SR_D_DIS, "%s: sr_raid_sync\n", DEVNAME(sd->sd_sc));
4145 sd->sd_sync = 1;
4146 while (sd->sd_wu_pending > ios) {
4147 ret = tsleep_nsec(sd, PRIBIO, "sr_sync", SEC_TO_NSEC(15));
4150 DEVNAME(sd->sd_sc));
4155 sd->sd_sync = 0;
4158 wakeup(&sd->sd_sync);
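sr_raid_sync (lines 4136-4158) drains outstanding work units: it raises sd_sync, sleeps on the discipline until sd_wu_pending falls to the expected level (with a 15-second timeout), then clears the flag and wakes anyone blocked on &sd->sd_sync. A userland analogue of that tsleep_nsec/wakeup handshake using a pthread condition variable (the mapping and all names are illustrative):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int wu_pending;

    int
    drain_pending(void)
    {
        struct timespec ts;
        int rv = 0;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 15;                    /* cf. SEC_TO_NSEC(15) */

        pthread_mutex_lock(&mtx);
        while (wu_pending > 0) {
            /* Sleep until a completion signals, cf. tsleep_nsec(). */
            if (pthread_cond_timedwait(&cv, &mtx, &ts) != 0) {
                rv = -1;    /* timed out, cf. EWOULDBLOCK from tsleep_nsec */
                break;
            }
        }
        pthread_mutex_unlock(&mtx);
        return rv;
    }

    void
    wu_done(void)
    {
        pthread_mutex_lock(&mtx);
        wu_pending--;
        pthread_cond_broadcast(&cv);        /* cf. wakeup(sd) */
        pthread_mutex_unlock(&mtx);
    }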
4169 struct sr_discipline *sd = wu->swu_dis;
4175 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name, bp, xs);
4186 struct sr_discipline *sd = wu->swu_dis;
4214 TAILQ_FOREACH_REVERSE(wup, &sd->sd_wu_pendq, sr_wu_list, swu_link) {
4226 TAILQ_INSERT_TAIL(&sd->sd_wu_defq, wu, swu_link);
4227 sd->sd_wu_collisions++;
4241 struct sr_discipline *sd = wu->swu_dis;
4249 TAILQ_REMOVE(&sd->sd_wu_defq, wu, swu_link);
4254 TAILQ_INSERT_TAIL(&sd->sd_wu_pendq, wu, swu_link);
4258 panic("%s: sr_startwu_callback", DEVNAME(sd->sd_sc));
4270 struct sr_discipline *sd = wu->swu_dis;
4282 if (sd->sd_scsi_rw(wup))
4290 sr_alloc_resources(struct sr_discipline *sd)
4292 if (sr_wu_alloc(sd)) {
4293 sr_error(sd->sd_sc, "unable to allocate work units");
4296 if (sr_ccb_alloc(sd)) {
4297 sr_error(sd->sd_sc, "unable to allocate ccbs");
4305 sr_free_resources(struct sr_discipline *sd)
4307 sr_wu_free(sd);
4308 sr_ccb_free(sd);
4312 sr_set_chunk_state(struct sr_discipline *sd, int c, int new_state)
4317 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
4318 sd->sd_vol.sv_chunks[c]->src_meta.scmi.scm_devname, c, new_state);
4322 old_state = sd->sd_vol.sv_chunks[c]->src_meta.scm_status;
4343 DEVNAME(sd->sd_sc),
4344 sd->sd_meta->ssd_devname,
4345 sd->sd_vol.sv_chunks[c]->src_meta.scmi.scm_devname,
4350 sd->sd_vol.sv_chunks[c]->src_meta.scm_status = new_state;
4351 sd->sd_set_vol_state(sd);
4353 sd->sd_must_flush = 1;
4354 task_add(systq, &sd->sd_meta_save_task);
4360 sr_set_vol_state(struct sr_discipline *sd)
4364 int old_state = sd->sd_vol_status;
4368 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4370 nd = sd->sd_meta->ssdi.ssd_chunk_no;
4376 s = sd->sd_vol.sv_chunks[i]->src_meta.scm_status;
4379 DEVNAME(sd->sd_sc),
4380 sd->sd_meta->ssd_devname,
4381 sd->sd_vol.sv_chunks[i]->src_meta.scmi.scm_devname);
4391 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
4409 DEVNAME(sd->sd_sc),
4410 sd->sd_meta->ssd_devname,
4415 sd->sd_vol_status = new_state;
4419 sr_block_get(struct sr_discipline *sd, long length)
4425 sr_block_put(struct sr_discipline *sd, void *ptr, int length)
4497 sr_already_assembled(struct sr_discipline *sd)
4499 struct sr_softc *sc = sd->sd_sc;
4503 if (!bcmp(&sd->sd_meta->ssdi.ssd_uuid,
4505 sizeof(sd->sd_meta->ssdi.ssd_uuid)))
4537 struct sr_discipline *sd, *nsd;
4543 TAILQ_FOREACH_REVERSE_SAFE(sd, &sc->sc_dis_list,
4545 sr_discipline_shutdown(sd, 1, -1);
4552 struct sr_discipline *sd;
4567 while ((sd = TAILQ_LAST(&sc->sc_dis_list, sr_discipline_list)) != NULL)
4568 sr_discipline_shutdown(sd, 1, dying);
4574 struct sr_discipline *sd = wu->swu_dis;
4578 DNPRINTF(SR_D_DIS, "%s: %s 0x%02x\n", DEVNAME(sd->sd_sc), func,
4581 if (sd->sd_meta->ssd_data_blkno == 0)
4584 if (sd->sd_vol_status == BIOC_SVOFFLINE) {
4586 DEVNAME(sd->sd_sc), func);
4592 DEVNAME(sd->sd_sc), func, sd->sd_meta->ssd_devname);
4604 DEVNAME(sd->sd_sc), func, sd->sd_meta->ssd_devname);
4608 *blkno *= (sd->sd_meta->ssdi.ssd_secsize / DEV_BSIZE);
4613 if (wu->swu_blk_end > sd->sd_meta->ssdi.ssd_size) {
4616 DEVNAME(sd->sd_sc), func, (long long)wu->swu_blk_start,
4619 sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT |
4621 sd->sd_scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
4622 sd->sd_scsi_sense.add_sense_code = 0x21;
4623 sd->sd_scsi_sense.add_sense_code_qual = 0x00;
4624 sd->sd_scsi_sense.extra_len = 4;
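When sr_validate_io (lines 4574-4624) rejects an I/O that runs past ssdi.ssd_size, it fills in fixed-format SCSI sense data: sense key ILLEGAL REQUEST with ASC/ASCQ 0x21/0x00, the standard "LOGICAL BLOCK ADDRESS OUT OF RANGE" code. A sketch with a simplified stand-in for the kernel's struct scsi_sense_data (field layout abbreviated for illustration):

    #include <stdint.h>

    struct sense {
        uint8_t error_code;   /* 0x70: current error, fixed format */
        uint8_t flags;        /* sense key in the low nibble */
        uint8_t asc;          /* additional sense code */
        uint8_t ascq;         /* additional sense code qualifier */
        uint8_t extra_len;
    };

    void
    set_lba_out_of_range(struct sense *s)
    {
        s->error_code = 0x70; /* cf. SSD_ERRCODE_CURRENT */
        s->flags = 0x05;      /* cf. SKEY_ILLEGAL_REQUEST */
        s->asc = 0x21;        /* LBA out of range */
        s->ascq = 0x00;
        s->extra_len = 4;
    }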
4636 struct sr_discipline *sd = arg;
4637 struct sr_softc *sc = sd->sd_sc;
4640 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4642 if (kthread_create(sr_rebuild_thread, sd, &sd->sd_background_proc,
4651 struct sr_discipline *sd = arg;
4654 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4656 sd->sd_reb_active = 1;
4657 sd->sd_rebuild(sd);
4658 sd->sd_reb_active = 0;
4664 sr_rebuild(struct sr_discipline *sd)
4666 struct sr_softc *sc = sd->sd_sc;
4675 whole_blk = sd->sd_meta->ssdi.ssd_size / SR_REBUILD_IO_SIZE;
4676 partial_blk = sd->sd_meta->ssdi.ssd_size % SR_REBUILD_IO_SIZE;
4678 restart = sd->sd_meta->ssd_rebuild / SR_REBUILD_IO_SIZE;
4694 percent = sr_rebuild_percent(sd);
4696 DEVNAME(sc), sd->sd_meta->ssd_devname, percent);
4711 wu_r = sr_scsi_wu_get(sd, 0);
4712 wu_w = sr_scsi_wu_get(sd, 0);
4715 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, wu_r, wu_w);
4731 if (sd->sd_scsi_rw(wu_r)) {
4751 if (sd->sd_scsi_rw(wu_w)) {
4764 TAILQ_INSERT_TAIL(&sd->sd_wu_defq, wu_w, swu_link);
4768 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, wu_r);
4783 sr_scsi_wu_put(sd, wu_r);
4784 sr_scsi_wu_put(sd, wu_w);
4786 sd->sd_meta->ssd_rebuild = lba;
4790 percent = sr_rebuild_percent(sd);
4792 if (sr_meta_save(sd, SR_META_DIRTY))
4794 DEVNAME(sc), sd->sd_meta->ssd_devname);
4798 if (sd->sd_reb_abort)
4803 sd->sd_meta->ssd_rebuild = 0;
4804 for (c = 0; c < sd->sd_meta->ssdi.ssd_chunk_no; c++) {
4805 if (sd->sd_vol.sv_chunks[c]->src_meta.scm_status ==
4807 sd->sd_set_chunk_state(sd, c, BIOC_SDONLINE);
4813 if (sr_meta_save(sd, SR_META_DIRTY))
4815 DEVNAME(sc), sd->sd_meta->ssd_devname);
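sr_rebuild (lines 4664-4815) is the whole-volume copy loop: restart from the checkpoint in ssd_rebuild, issue paired read/write work units of SR_REBUILD_IO_SIZE, persist progress with sr_meta_save so an interrupted rebuild can resume, and bail out if sd_reb_abort is raised. A skeleton of the same loop in userland terms (pread/pwrite stand in for the work units; REB_IO_SIZE and save_progress are illustrative, not softraid names):

    #include <sys/types.h>
    #include <unistd.h>

    #define REB_IO_SIZE (128 * 1024)

    extern void save_progress(off_t resume_off);  /* assumed checkpointer */

    int
    rebuild(int src_fd, int dst_fd, off_t size, off_t resume,
        volatile int *stop)
    {
        static char buf[REB_IO_SIZE];
        off_t off;
        ssize_t n;
        int io = 0;

        for (off = resume; off < size; off += n) {
            n = pread(src_fd, buf, sizeof(buf), off); /* read good copy */
            if (n <= 0)
                return -1;
            if (pwrite(dst_fd, buf, n, off) != n)     /* write target */
                return -1;
            if (++io % 1024 == 0)
                save_progress(off + n);   /* cf. ssd_rebuild = lba */
            if (*stop)
                return -1;                /* cf. sd->sd_reb_abort */
        }
        save_progress(0);                 /* done: clear the checkpoint */
        return 0;
    }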
4823 struct sr_discipline *sd;
4825 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)
4826 if (!strncmp(sd->sd_meta->ssd_devname, devname,
4827 sizeof(sd->sd_meta->ssd_devname)))
4829 return sd;
4834 sr_sensors_create(struct sr_discipline *sd)
4836 struct sr_softc *sc = sd->sd_sc;
4840 DEVNAME(sc), sd->sd_meta->ssd_devname);
4842 sd->sd_vol.sv_sensor.type = SENSOR_DRIVE;
4843 sd->sd_vol.sv_sensor.status = SENSOR_S_UNKNOWN;
4844 strlcpy(sd->sd_vol.sv_sensor.desc, sd->sd_meta->ssd_devname,
4845 sizeof(sd->sd_vol.sv_sensor.desc));
4847 sensor_attach(&sc->sc_sensordev, &sd->sd_vol.sv_sensor);
4848 sd->sd_vol.sv_sensor_attached = 1;
4863 sr_sensors_delete(struct sr_discipline *sd)
4865 struct sr_softc *sc = sd->sd_sc;
4867 DNPRINTF(SR_D_STATE, "%s: sr_sensors_delete\n", DEVNAME(sd->sd_sc));
4869 if (sd->sd_vol.sv_sensor_attached) {
4870 sensor_detach(&sd->sd_sc->sc_sensordev, &sd->sd_vol.sv_sensor);
4871 sd->sd_vol.sv_sensor_attached = 0;
4877 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)
4878 if (sd->sd_vol.sv_sensor_attached)
4891 struct sr_discipline *sd;
4895 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
4896 sv = &sd->sd_vol;
4898 switch (sd->sd_vol_status) {
4935 struct sr_discipline *sd;
4942 TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
4944 sd->sd_meta->ssd_devname,
4945 sd->sd_wu_pending,
4946 sd->sd_wu_collisions);
5079 struct sd_softc *sd;
5098 sd = (struct sd_softc *)dv;
5105 my->srd = sc->sc_targets[sd->sc_link->target];