Lines matching defs:raid_bdev (each entry is the source line number followed by the matched line)
67 struct raid_bdev *raid_bdev;
158 struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(ch);
161 for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
162 struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[i];
176 static void raid_bdev_deconfigure(struct raid_bdev *raid_bdev,
199 struct raid_bdev *raid_bdev = process->raid_bdev;
221 raid_ch_processed->base_channel = calloc(raid_bdev->num_base_bdevs,
227 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
251 * io_device - pointer to raid bdev io device represented by raid_bdev
260 struct raid_bdev *raid_bdev = io_device;
267 assert(raid_bdev != NULL);
268 assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
270 raid_ch->base_channel = calloc(raid_bdev->num_base_bdevs, sizeof(struct spdk_io_channel *));
276 for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
284 if (raid_bdev->base_bdev_info[i].is_configured == false ||
285 raid_bdev->base_bdev_info[i].is_process_target == true) {
289 raid_bdev->base_bdev_info[i].desc);
296 if (raid_bdev->module->get_io_channel) {
297 raid_ch->module_channel = raid_bdev->module->get_io_channel(raid_bdev);
304 if (raid_bdev->process != NULL) {
305 ret = raid_bdev_ch_process_setup(raid_ch, raid_bdev->process);
316 for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
333 * io_device - pointer to raid bdev io device represented by raid_bdev
341 struct raid_bdev *raid_bdev = io_device;
354 for (i = 0; i < raid_bdev->num_base_bdevs; i++) {
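The matches between 251 and 354 come from the per-channel create/destroy callbacks behind the raid_bdev io_device (registered at 1737, obtained at 1065). A minimal sketch of that SPDK pattern, assuming only spdk/thread.h from an SPDK build tree; the example_* names are illustrative stand-ins, not the module's own types:

/* Sketch of the io_device / per-channel context pattern used above.
 * SPDK allocates a context of the registered size for every channel and
 * hands it to the callbacks, as raid_bdev_create_cb/destroy_cb receive theirs. */
#include <errno.h>
#include <stdlib.h>
#include "spdk/thread.h"

struct example_device {                 /* stand-in for struct raid_bdev */
	int num_members;
};

struct example_channel {                /* stand-in for the raid channel context */
	struct spdk_io_channel **members;
};

static int
example_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct example_device *dev = io_device;
	struct example_channel *ch = ctx_buf;

	ch->members = calloc(dev->num_members, sizeof(*ch->members));
	return ch->members ? 0 : -ENOMEM;
}

static void
example_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct example_channel *ch = ctx_buf;

	free(ch->members);
}

static void
example_register(struct example_device *dev)
{
	spdk_io_device_register(dev, example_ch_create_cb, example_ch_destroy_cb,
				sizeof(struct example_channel), "example_device");
}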
368 * raid_bdev_cleanup is used to cleanup raid_bdev related data
371 * raid_bdev - pointer to raid_bdev
376 raid_bdev_cleanup(struct raid_bdev *raid_bdev)
381 raid_bdev, raid_bdev->bdev.name, raid_bdev_state_to_str(raid_bdev->state));
382 assert(raid_bdev->state != RAID_BDEV_STATE_ONLINE);
385 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
390 TAILQ_REMOVE(&g_raid_bdev_list, raid_bdev, global_link);
394 raid_bdev_free(struct raid_bdev *raid_bdev)
396 raid_bdev_free_superblock(raid_bdev);
397 free(raid_bdev->base_bdev_info);
398 free(raid_bdev->bdev.name);
399 free(raid_bdev);
403 raid_bdev_cleanup_and_free(struct raid_bdev *raid_bdev)
405 raid_bdev_cleanup(raid_bdev);
406 raid_bdev_free(raid_bdev);
412 struct raid_bdev *raid_bdev = base_info->raid_bdev;
415 assert(raid_bdev->num_base_bdevs_discovered);
416 raid_bdev->num_base_bdevs_discovered--;
432 struct raid_bdev *raid_bdev = base_info->raid_bdev;
439 if (raid_bdev->state != RAID_BDEV_STATE_CONFIGURING) {
465 struct raid_bdev *raid_bdev = io_device;
467 if (raid_bdev->num_base_bdevs_discovered == 0) {
468 /* Free raid_bdev when there are no base bdevs left */
470 raid_bdev_cleanup(raid_bdev);
471 spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
472 raid_bdev_free(raid_bdev);
474 spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
479 raid_bdev_module_stop_done(struct raid_bdev *raid_bdev)
481 if (raid_bdev->state != RAID_BDEV_STATE_CONFIGURING) {
482 spdk_io_device_unregister(raid_bdev, raid_bdev_io_device_unregister_cb);
489 struct raid_bdev *raid_bdev = ctxt;
494 assert(raid_bdev->process == NULL);
496 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
507 raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
510 if (raid_bdev->module->stop != NULL) {
511 if (raid_bdev->module->stop(raid_bdev) == false) {
516 raid_bdev_module_stop_done(raid_bdev);
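Lines 465 to 516 trace the teardown order: the destruct callback flips the state to OFFLINE and calls module->stop() if present, raid_bdev_module_stop_done() then unregisters the io_device (unless the bdev never left CONFIGURING), and the unregister callback calls spdk_bdev_destruct_done(), freeing the raid_bdev only once no base bdevs remain discovered. A condensed paraphrase of that call chain, assuming the same SPDK bdev_module API; it is not a drop-in replacement for the module's functions:

static void
example_io_device_unregister_cb(void *io_device)
{
	struct raid_bdev *raid_bdev = io_device;

	if (raid_bdev->num_base_bdevs_discovered == 0) {
		/* no base bdevs left: tear everything down (lines 467-472) */
		raid_bdev_cleanup(raid_bdev);
		spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
		raid_bdev_free(raid_bdev);
	} else {
		spdk_bdev_destruct_done(&raid_bdev->bdev, 0);
	}
}

static void
example_destruct(struct raid_bdev *raid_bdev)
{
	raid_bdev->state = RAID_BDEV_STATE_OFFLINE;

	/* module->stop() returning false appears to mean the stop completes
	 * asynchronously, so the done call below is deferred (lines 510-516). */
	if (raid_bdev->module->stop != NULL && raid_bdev->module->stop(raid_bdev) == false) {
		return;
	}
	raid_bdev_module_stop_done(raid_bdev);   /* unregisters the io_device (line 482) */
}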
639 raid_io->raid_bdev->module->submit_rw_request(raid_io);
764 struct raid_bdev *raid_bdev;
770 raid_bdev = raid_io->raid_bdev;
773 raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
776 for (i = raid_io->base_bdev_io_submitted; i < raid_bdev->num_base_bdevs; i++) {
777 base_info = &raid_bdev->base_bdev_info[i];
804 struct raid_bdev *raid_bdev = raid_io->raid_bdev;
805 size_t iov_offset = split_offset * raid_bdev->bdev.blocklen;
815 raid_io->md_buf += (split_offset * raid_bdev->bdev.md_len);
869 raid_io->raid_bdev->module->submit_rw_request(raid_io);
907 struct raid_bdev *raid_bdev = spdk_io_channel_get_io_device(ch);
918 raid_io->raid_bdev = raid_bdev;
931 * raid bdev function table. This is used to submit the io on raid_bdev to below
966 if (raid_io->raid_bdev->process != NULL) {
971 raid_io->raid_bdev->module->submit_null_payload_request(raid_io);
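Lines 764 to 777 show the fan-out bookkeeping used when a request has to reach every base bdev: base_bdev_io_remaining is primed with num_base_bdevs, and the loop resumes from base_bdev_io_submitted so a queue-full retry does not resubmit children that already went out. A self-contained toy version of just those counters; the struct and the fake submit function are made up, and only the bookkeeping mirrors the listing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fanout_io {
	uint8_t num_base_bdevs;
	uint8_t base_bdev_io_submitted;
	uint8_t base_bdev_io_remaining;
	bool    parent_done;
};

/* Pretend child submission; return false to simulate a queue-full retry. */
static bool submit_child(uint8_t idx) { (void)idx; return true; }

static void
child_done(struct fanout_io *io)
{
	/* each child completion drops the counter; the last one completes the parent */
	if (--io->base_bdev_io_remaining == 0) {
		io->parent_done = true;
	}
}

static void
fanout_submit(struct fanout_io *io)
{
	if (io->base_bdev_io_submitted == 0) {
		io->base_bdev_io_remaining = io->num_base_bdevs;   /* cf. line 773 */
	}
	for (uint8_t i = io->base_bdev_io_submitted; i < io->num_base_bdevs; i++) {
		if (!submit_child(i)) {
			return;   /* retry later; base_bdev_io_submitted marks the resume point */
		}
		io->base_bdev_io_submitted++;
	}
}

int
main(void)
{
	struct fanout_io io = { .num_base_bdevs = 4 };

	fanout_submit(&io);
	for (uint8_t i = 0; i < io.num_base_bdevs; i++) {
		child_done(&io);
	}
	printf("parent complete: %s\n", io.parent_done ? "yes" : "no");
	return 0;
}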
988 * raid_bdev - pointer to raid bdev context
995 _raid_bdev_io_type_supported(struct raid_bdev *raid_bdev, enum spdk_bdev_io_type io_type)
1001 if (raid_bdev->module->submit_null_payload_request == NULL) {
1006 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1056 * ctxt - pointer to raid_bdev
1063 struct raid_bdev *raid_bdev = ctxt;
1065 return spdk_get_io_channel(raid_bdev);
1069 raid_bdev_write_info_json(struct raid_bdev *raid_bdev, struct spdk_json_write_ctx *w)
1073 assert(raid_bdev != NULL);
1076 spdk_json_write_named_uuid(w, "uuid", &raid_bdev->bdev.uuid);
1077 spdk_json_write_named_uint32(w, "strip_size_kb", raid_bdev->strip_size_kb);
1078 spdk_json_write_named_string(w, "state", raid_bdev_state_to_str(raid_bdev->state));
1079 spdk_json_write_named_string(w, "raid_level", raid_bdev_level_to_str(raid_bdev->level));
1080 spdk_json_write_named_bool(w, "superblock", raid_bdev->superblock_enabled);
1081 spdk_json_write_named_uint32(w, "num_base_bdevs", raid_bdev->num_base_bdevs);
1082 spdk_json_write_named_uint32(w, "num_base_bdevs_discovered", raid_bdev->num_base_bdevs_discovered);
1084 raid_bdev->num_base_bdevs_operational);
1085 if (raid_bdev->process) {
1086 struct raid_bdev_process *process = raid_bdev->process;
1095 spdk_json_write_named_uint32(w, "percent", offset * 100.0 / raid_bdev->bdev.blockcnt);
1101 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
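Lines 1069 to 1101 dump the raid bdev's state as named JSON members. A minimal callback in the same style, assuming spdk/json.h; example_dev and its fields are placeholders rather than the module's struct:

#include <stdbool.h>
#include <stdint.h>
#include "spdk/json.h"

struct example_dev {
	uint32_t num_members;
	bool     online;
};

/* Writes named members into the JSON object the caller has open, the same
 * way raid_bdev_write_info_json() emits state, raid_level and num_base_bdevs. */
static void
example_write_info_json(const struct example_dev *dev, struct spdk_json_write_ctx *w)
{
	spdk_json_write_named_uint32(w, "num_members", dev->num_members);
	spdk_json_write_named_bool(w, "online", dev->online);
	spdk_json_write_named_string(w, "state", dev->online ? "online" : "configuring");
}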
1122 * ctx - pointer to raid_bdev
1131 struct raid_bdev *raid_bdev = ctx;
1137 raid_bdev_write_info_json(raid_bdev, w);
1155 struct raid_bdev *raid_bdev = bdev->ctxt;
1160 if (raid_bdev->superblock_enabled) {
1171 spdk_json_write_named_uuid(w, "uuid", &raid_bdev->bdev.uuid);
1172 if (raid_bdev->strip_size_kb != 0) {
1173 spdk_json_write_named_uint32(w, "strip_size_kb", raid_bdev->strip_size_kb);
1175 spdk_json_write_named_string(w, "raid_level", raid_bdev_level_to_str(raid_bdev->level));
1178 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1197 struct raid_bdev *raid_bdev = ctx;
1201 if (raid_bdev->module->memory_domains_supported == false) {
1206 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1221 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1247 struct raid_bdev *
1250 struct raid_bdev *raid_bdev;
1252 TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
1253 if (strcmp(raid_bdev->bdev.name, name) == 0) {
1254 return raid_bdev;
1261 static struct raid_bdev *
1264 struct raid_bdev *raid_bdev;
1266 TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
1267 if (spdk_uuid_compare(&raid_bdev->bdev.uuid, uuid) == 0) {
1268 return raid_bdev;
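The lookups at 1247 to 1268 walk the global g_raid_bdev_list with TAILQ_FOREACH and match on bdev.name or bdev.uuid. A self-contained sketch of the same list walk using sys/queue.h; the list and element types here are stand-ins:

#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

struct entry {
	char name[32];
	TAILQ_ENTRY(entry) link;
};

TAILQ_HEAD(entry_list, entry);

/* Same shape as raid_bdev_find_by_name(): walk the tailq, strcmp the name,
 * return the first match or NULL. */
static struct entry *
find_by_name(struct entry_list *list, const char *name)
{
	struct entry *e;

	TAILQ_FOREACH(e, list, link) {
		if (strcmp(e->name, name) == 0) {
			return e;
		}
	}
	return NULL;
}

int
main(void)
{
	struct entry_list list = TAILQ_HEAD_INITIALIZER(list);
	struct entry a = { .name = "raid0" };

	TAILQ_INSERT_TAIL(&list, &a, link);
	printf("%s\n", find_by_name(&list, "raid0") ? "found" : "missing");
	return 0;
}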
1384 struct raid_bdev *raid_bdev;
1389 TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
1390 if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
1391 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1411 struct raid_bdev *raid_bdev, *tmp;
1415 TAILQ_FOREACH_SAFE(raid_bdev, &g_raid_bdev_list, global_link, tmp) {
1416 raid_bdev_cleanup_and_free(raid_bdev);
1491 struct raid_bdev **raid_bdev_out)
1493 struct raid_bdev *raid_bdev;
1561 raid_bdev = calloc(1, sizeof(*raid_bdev));
1562 if (!raid_bdev) {
1567 raid_bdev->module = module;
1568 raid_bdev->num_base_bdevs = num_base_bdevs;
1569 raid_bdev->base_bdev_info = calloc(raid_bdev->num_base_bdevs,
1571 if (!raid_bdev->base_bdev_info) {
1573 raid_bdev_free(raid_bdev);
1577 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1578 base_info->raid_bdev = raid_bdev;
1584 raid_bdev->strip_size = 0;
1585 raid_bdev->strip_size_kb = strip_size;
1586 raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
1587 raid_bdev->level = level;
1588 raid_bdev->min_base_bdevs_operational = min_operational;
1589 raid_bdev->superblock_enabled = superblock_enabled;
1591 raid_bdev_gen = &raid_bdev->bdev;
1596 raid_bdev_free(raid_bdev);
1601 raid_bdev_gen->ctxt = raid_bdev;
1607 TAILQ_INSERT_TAIL(&g_raid_bdev_list, raid_bdev, global_link);
1609 *raid_bdev_out = raid_bdev;
1632 struct raid_bdev **raid_bdev_out)
1634 struct raid_bdev *raid_bdev;
1640 &raid_bdev);
1647 spdk_uuid_generate(&raid_bdev->bdev.uuid);
1650 raid_bdev->num_base_bdevs_operational = num_base_bdevs;
1652 *raid_bdev_out = raid_bdev;
1660 struct raid_bdev *raid_bdev = ctx;
1662 spdk_bdev_close(raid_bdev->self_desc);
1663 raid_bdev->self_desc = NULL;
1697 struct raid_bdev *raid_bdev = process->raid_bdev;
1705 rc = raid_bdev_process_add_finish_action(process, raid_bdev_unregistering_cont, raid_bdev);
1708 raid_bdev->bdev.name, spdk_strerror(-rc));
1715 struct raid_bdev *raid_bdev = event_ctx;
1718 if (raid_bdev->process != NULL) {
1719 spdk_thread_send_msg(raid_bdev->process->thread, raid_bdev_unregistering_stop_process,
1720 raid_bdev->process);
1722 raid_bdev_unregistering_cont(raid_bdev);
1728 raid_bdev_configure_cont(struct raid_bdev *raid_bdev)
1730 struct spdk_bdev *raid_bdev_gen = &raid_bdev->bdev;
1733 raid_bdev->state = RAID_BDEV_STATE_ONLINE;
1734 SPDK_DEBUGLOG(bdev_raid, "io device register %p\n", raid_bdev);
1737 spdk_io_device_register(raid_bdev, raid_bdev_create_cb, raid_bdev_destroy_cb,
1755 rc = spdk_bdev_open_ext(raid_bdev_gen->name, false, raid_bdev_event_cb, raid_bdev,
1756 &raid_bdev->self_desc);
1765 SPDK_DEBUGLOG(bdev_raid, "raid bdev is created with name %s, raid_bdev %p\n",
1766 raid_bdev_gen->name, raid_bdev);
1769 if (raid_bdev->module->stop != NULL) {
1770 raid_bdev->module->stop(raid_bdev);
1772 spdk_io_device_unregister(raid_bdev, NULL);
1773 raid_bdev->state = RAID_BDEV_STATE_CONFIGURING;
1776 if (raid_bdev->configure_cb != NULL) {
1777 raid_bdev->configure_cb(raid_bdev->configure_cb_ctx, rc);
1778 raid_bdev->configure_cb = NULL;
1783 raid_bdev_configure_write_sb_cb(int status, struct raid_bdev *raid_bdev, void *ctx)
1786 raid_bdev_configure_cont(raid_bdev);
1789 raid_bdev->bdev.name, spdk_strerror(-status));
1790 if (raid_bdev->module->stop != NULL) {
1791 raid_bdev->module->stop(raid_bdev);
1793 if (raid_bdev->configure_cb != NULL) {
1794 raid_bdev->configure_cb(raid_bdev->configure_cb_ctx, status);
1795 raid_bdev->configure_cb = NULL;
1806 * raid_bdev - pointer to raid bdev
1812 raid_bdev_configure(struct raid_bdev *raid_bdev, raid_bdev_configure_cb cb, void *cb_ctx)
1814 uint32_t data_block_size = spdk_bdev_get_data_block_size(&raid_bdev->bdev);
1817 assert(raid_bdev->state == RAID_BDEV_STATE_CONFIGURING);
1818 assert(raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs_operational);
1819 assert(raid_bdev->bdev.blocklen > 0);
1824 raid_bdev->strip_size = (raid_bdev->strip_size_kb * 1024) / data_block_size;
1825 if (raid_bdev->strip_size == 0 && raid_bdev->level != RAID1) {
1829 raid_bdev->strip_size_shift = spdk_u32log2(raid_bdev->strip_size);
1831 rc = raid_bdev->module->start(raid_bdev);
1837 assert(raid_bdev->configure_cb == NULL);
1838 raid_bdev->configure_cb = cb;
1839 raid_bdev->configure_cb_ctx = cb_ctx;
1841 if (raid_bdev->superblock_enabled) {
1842 if (raid_bdev->sb == NULL) {
1843 rc = raid_bdev_alloc_superblock(raid_bdev, data_block_size);
1845 raid_bdev_init_superblock(raid_bdev);
1848 assert(spdk_uuid_compare(&raid_bdev->sb->uuid, &raid_bdev->bdev.uuid) == 0);
1849 if (raid_bdev->sb->block_size != data_block_size) {
1853 if (raid_bdev->sb->raid_size != raid_bdev->bdev.blockcnt) {
1860 raid_bdev->configure_cb = NULL;
1861 if (raid_bdev->module->stop != NULL) {
1862 raid_bdev->module->stop(raid_bdev);
1867 raid_bdev_write_superblock(raid_bdev, raid_bdev_configure_write_sb_cb, NULL);
1869 raid_bdev_configure_cont(raid_bdev);
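Lines 1824 to 1829 above derive the per-strip block count from strip_size_kb and the data block size, then cache its log2. For example, with strip_size_kb = 64 and a 512-byte data block, strip_size = 64 * 1024 / 512 = 128 blocks and strip_size_shift = 7. A self-contained check of that arithmetic, with spdk_u32log2 replaced by a plain loop so the sketch needs no SPDK headers:

#include <stdint.h>
#include <stdio.h>

static uint32_t
u32log2(uint32_t x)                          /* stand-in for spdk_u32log2() */
{
	uint32_t shift = 0;

	while (x > 1) {
		x >>= 1;
		shift++;
	}
	return shift;
}

int
main(void)
{
	uint32_t strip_size_kb = 64;         /* example create parameter */
	uint32_t data_block_size = 512;      /* example data block size */
	uint32_t strip_size = (strip_size_kb * 1024) / data_block_size;   /* cf. line 1824 */

	printf("strip_size = %u blocks, strip_size_shift = %u\n",
	       strip_size, u32log2(strip_size));                          /* 128 blocks, shift 7 */
	return 0;
}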
1881 * raid_bdev - pointer to raid bdev
1888 raid_bdev_deconfigure(struct raid_bdev *raid_bdev, raid_bdev_destruct_cb cb_fn,
1891 if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
1898 raid_bdev->state = RAID_BDEV_STATE_OFFLINE;
1901 spdk_bdev_unregister(&raid_bdev->bdev, cb_fn, cb_arg);
1915 struct raid_bdev *raid_bdev;
1918 TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
1919 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
1933 struct raid_bdev *raid_bdev = base_info->raid_bdev;
1939 raid_bdev->num_base_bdevs_operational--;
1940 if (raid_bdev->num_base_bdevs_operational < raid_bdev->min_base_bdevs_operational) {
1942 raid_bdev_deconfigure(raid_bdev, base_info->remove_cb, base_info->remove_cb_ctx);
1956 struct raid_bdev *raid_bdev = base_info->raid_bdev;
1960 raid_bdev->bdev.name, spdk_strerror(-status));
1992 struct raid_bdev *raid_bdev = base_info->raid_bdev;
1996 spdk_bdev_unquiesce(&raid_bdev->bdev, &g_raid_if, raid_bdev_remove_base_bdev_on_unquiesced,
2005 spdk_for_each_channel(base_info->raid_bdev, raid_bdev_channel_remove_base_bdev, base_info,
2010 raid_bdev_remove_base_bdev_write_sb_cb(int status, struct raid_bdev *raid_bdev, void *ctx)
2016 raid_bdev->bdev.name, spdk_strerror(-status));
2028 struct raid_bdev *raid_bdev = base_info->raid_bdev;
2032 raid_bdev->bdev.name, spdk_strerror(-status));
2037 if (raid_bdev->sb) {
2038 struct raid_bdev_superblock *sb = raid_bdev->sb;
2053 raid_bdev_write_superblock(raid_bdev, raid_bdev_remove_base_bdev_write_sb_cb, base_info);
2067 return spdk_bdev_quiesce(&base_info->raid_bdev->bdev, &g_raid_if,
2109 ctx->num_base_bdevs_operational > process->raid_bdev->min_base_bdevs_operational) {
2147 * because the process thread should not access raid_bdev's properties. Particularly,
2148 * raid_bdev->process may be cleared by the time the message is handled, but ctx->process
2154 * raid_bdev->num_base_bdevs_operational can't be used here because it is decremented
2157 RAID_FOR_EACH_BASE_BDEV(process->raid_bdev, base_info) {
2172 struct raid_bdev *raid_bdev = base_info->raid_bdev;
2186 if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
2196 if (raid_bdev->num_base_bdevs_discovered == 0 &&
2197 raid_bdev->state == RAID_BDEV_STATE_OFFLINE) {
2199 raid_bdev_cleanup_and_free(raid_bdev);
2204 } else if (raid_bdev->min_base_bdevs_operational == raid_bdev->num_base_bdevs) {
2206 raid_bdev->num_base_bdevs_operational--;
2207 raid_bdev_deconfigure(raid_bdev, cb_fn, cb_ctx);
2212 if (raid_bdev->process != NULL) {
2213 ret = raid_bdev_process_base_bdev_remove(raid_bdev->process, base_info);
2244 /* Find the raid_bdev which has claimed this base_bdev */
2277 raid_bdev_base_bdev_slot(base_info), base_info->name, base_info->raid_bdev->bdev.name);
2292 raid_bdev_resize_write_sb_cb(int status, struct raid_bdev *raid_bdev, void *ctx)
2296 raid_bdev->bdev.name, spdk_strerror(-status));
2304 * If yes, call module handler to resize the raid_bdev if implemented.
2313 struct raid_bdev *raid_bdev;
2321 /* Find the raid_bdev which has claimed this base_bdev */
2323 SPDK_ERRLOG("raid_bdev whose base_bdev '%s' not found\n", base_bdev->name);
2326 raid_bdev = base_info->raid_bdev;
2335 if (!raid_bdev->module->resize) {
2339 blockcnt_old = raid_bdev->bdev.blockcnt;
2340 if (raid_bdev->module->resize(raid_bdev) == false) {
2345 raid_bdev->bdev.name, blockcnt_old, raid_bdev->bdev.blockcnt);
2347 if (raid_bdev->superblock_enabled) {
2348 struct raid_bdev_superblock *sb = raid_bdev->sb;
2354 if (sb_base_bdev->slot < raid_bdev->num_base_bdevs) {
2355 base_info = &raid_bdev->base_bdev_info[sb_base_bdev->slot];
2359 sb->raid_size = raid_bdev->bdev.blockcnt;
2360 raid_bdev_write_superblock(raid_bdev, raid_bdev_resize_write_sb_cb, NULL);
2402 * raid_bdev - pointer to raid bdev
2407 raid_bdev_delete(struct raid_bdev *raid_bdev, raid_bdev_destruct_cb cb_fn, void *cb_arg)
2411 SPDK_DEBUGLOG(bdev_raid, "delete raid bdev: %s\n", raid_bdev->bdev.name);
2413 if (raid_bdev->destroy_started) {
2415 raid_bdev->bdev.name);
2422 raid_bdev->destroy_started = true;
2424 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
2427 if (raid_bdev->state != RAID_BDEV_STATE_ONLINE) {
2436 if (raid_bdev->num_base_bdevs_discovered == 0) {
2438 raid_bdev_cleanup_and_free(raid_bdev);
2443 raid_bdev_deconfigure(raid_bdev, cb_fn, cb_arg);
2448 raid_bdev_process_finish_write_sb_cb(int status, struct raid_bdev *raid_bdev, void *ctx)
2452 raid_bdev->bdev.name, spdk_strerror(-status));
2459 struct raid_bdev *raid_bdev = ctx;
2460 struct raid_bdev_superblock *sb = raid_bdev->sb;
2469 sb_base_bdev->slot < raid_bdev->num_base_bdevs) {
2470 base_info = &raid_bdev->base_bdev_info[sb_base_bdev->slot];
2479 raid_bdev_write_superblock(raid_bdev, raid_bdev_process_finish_write_sb_cb, NULL);
2542 rc = spdk_bdev_unquiesce(&process->raid_bdev->bdev, &g_raid_if,
2553 struct raid_bdev *raid_bdev = process->raid_bdev;
2564 raid_bdev->bdev.name);
2565 if (raid_bdev->superblock_enabled) {
2568 raid_bdev);
2573 raid_bdev->bdev.name,
2612 struct raid_bdev *raid_bdev = process->raid_bdev;
2619 raid_bdev->process = NULL;
2622 spdk_for_each_channel(process->raid_bdev, raid_bdev_channel_process_finish, process,
2632 rc = spdk_bdev_quiesce(&process->raid_bdev->bdev, &g_raid_if,
2695 rc = spdk_bdev_unquiesce_range(&process->raid_bdev->bdev, &g_raid_if,
2744 spdk_for_each_channel(process->raid_bdev, raid_bdev_process_channel_update, process,
2753 struct raid_bdev *raid_bdev = process->raid_bdev;
2767 process_req->iov.iov_len = num_blocks * raid_bdev->bdev.blocklen;
2769 ret = raid_bdev->module->submit_process_request(process_req, process->raid_ch);
2773 raid_bdev->bdev.name, spdk_strerror(-ret));
2788 struct raid_bdev *raid_bdev = process->raid_bdev;
2790 const uint64_t offset_end = spdk_min(offset + process->max_window_size, raid_bdev->bdev.blockcnt);
2834 struct raid_bdev *raid_bdev = process->raid_bdev;
2842 process->qos.bytes_available -= process->window_size * raid_bdev->bdev.blocklen;
2851 struct raid_bdev *raid_bdev = process->raid_bdev;
2865 rc = spdk_bdev_quiesce_range(&raid_bdev->bdev, &g_raid_if,
2888 struct raid_bdev *raid_bdev = process->raid_bdev;
2899 if (process->window_offset == raid_bdev->bdev.blockcnt) {
2900 SPDK_DEBUGLOG(bdev_raid, "process completed on %s\n", raid_bdev->bdev.name);
2905 process->max_window_size = spdk_min(raid_bdev->bdev.blockcnt - process->window_offset,
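The process matches from 2788 to 2905 advance a sliding window over the raid bdev: each pass clamps the window to what remains of bdev.blockcnt, and the process finishes when window_offset reaches the end. A self-contained sketch of that clamping, with spdk_min replaced by a local helper and example sizes:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }  /* spdk_min stand-in */

int
main(void)
{
	uint64_t blockcnt = 1000;            /* example raid_bdev->bdev.blockcnt */
	uint64_t max_window = 256;           /* example process->max_window_size */
	uint64_t offset = 0;                 /* process->window_offset */

	while (offset < blockcnt) {
		/* cf. line 2905: never let the window run past the end of the device */
		uint64_t window = min_u64(blockcnt - offset, max_window);

		printf("process window [%" PRIu64 ", %" PRIu64 ")\n", offset, offset + window);
		offset += window;
	}
	/* cf. line 2899: offset == blockcnt means the process is complete */
	return 0;
}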
2914 struct raid_bdev *raid_bdev = process->raid_bdev;
2919 ch = spdk_get_io_channel(raid_bdev);
2936 raid_bdev_process_to_str(process->type), raid_bdev->bdev.name);
2967 struct raid_bdev *raid_bdev = process->raid_bdev;
2973 raid_bdev->num_base_bdevs_operational <= raid_bdev->min_base_bdevs_operational)) {
2980 raid_bdev_process_to_str(process->type), raid_bdev->bdev.name,
2986 raid_bdev->bdev.name, raid_bdev_process_to_str(process->type));
2991 raid_bdev_process_to_str(process->type), raid_bdev->bdev.name);
2995 raid_bdev->process = process;
3001 spdk_for_each_channel(process->raid_bdev, raid_bdev_channel_abort_start_process, process,
3021 struct raid_bdev *raid_bdev = process->raid_bdev;
3023 assert(raid_bdev->module->submit_process_request != NULL);
3025 spdk_for_each_channel(raid_bdev, raid_bdev_channel_start_process, process,
3040 struct raid_bdev *raid_bdev = process->raid_bdev;
3049 process_req->iov.iov_len = process->max_window_size * raid_bdev->bdev.blocklen;
3055 if (spdk_bdev_is_md_separate(&raid_bdev->bdev)) {
3056 process_req->md_buf = spdk_dma_malloc(process->max_window_size * raid_bdev->bdev.md_len, 4096, 0);
3080 raid_bdev_process_alloc(struct raid_bdev *raid_bdev, enum raid_process_type type,
3092 process->raid_bdev = raid_bdev;
3096 spdk_bdev_get_data_block_size(&raid_bdev->bdev)),
3097 raid_bdev->bdev.write_unit_size);
3130 process = raid_bdev_process_alloc(target->raid_bdev, RAID_PROCESS_REBUILD, target);
3159 struct raid_bdev *raid_bdev = base_info->raid_bdev;
3163 if (raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs_operational &&
3166 assert(raid_bdev->process == NULL);
3167 assert(raid_bdev->state == RAID_BDEV_STATE_ONLINE);
3170 spdk_for_each_channel(raid_bdev, raid_bdev_ch_sync, base_info, _raid_bdev_configure_base_bdev_cont);
3176 raid_bdev->num_base_bdevs_discovered++;
3177 assert(raid_bdev->num_base_bdevs_discovered <= raid_bdev->num_base_bdevs);
3178 assert(raid_bdev->num_base_bdevs_operational <= raid_bdev->num_base_bdevs);
3179 assert(raid_bdev->num_base_bdevs_operational >= raid_bdev->min_base_bdevs_operational);
3189 if (raid_bdev->num_base_bdevs_discovered == raid_bdev->num_base_bdevs_operational) {
3190 rc = raid_bdev_configure(raid_bdev, configure_cb, base_info->configure_cb_ctx);
3197 raid_bdev->num_base_bdevs_operational++;
3226 if (spdk_uuid_compare(&base_info->raid_bdev->bdev.uuid, &sb->uuid) == 0) {
3257 struct raid_bdev *raid_bdev = base_info->raid_bdev;
3339 if (raid_bdev->superblock_enabled) {
3378 if (!raid_bdev->module->dif_supported && spdk_bdev_get_dif_type(bdev) != SPDK_DIF_DISABLE) {
3390 if (raid_bdev->bdev.blocklen == 0) {
3391 raid_bdev->bdev.blocklen = bdev->blocklen;
3392 raid_bdev->bdev.md_len = spdk_bdev_get_md_size(bdev);
3393 raid_bdev->bdev.md_interleave = spdk_bdev_is_md_interleaved(bdev);
3394 raid_bdev->bdev.dif_type = spdk_bdev_get_dif_type(bdev);
3395 raid_bdev->bdev.dif_check_flags = bdev->dif_check_flags;
3396 raid_bdev->bdev.dif_is_head_of_md = spdk_bdev_is_dif_head_of_md(bdev);
3397 raid_bdev->bdev.dif_pi_format = bdev->dif_pi_format;
3399 if (raid_bdev->bdev.blocklen != bdev->blocklen) {
3401 raid_bdev->bdev.name, raid_bdev->bdev.blocklen, bdev->name, bdev->blocklen);
3406 if (raid_bdev->bdev.md_len != spdk_bdev_get_md_size(bdev) ||
3407 raid_bdev->bdev.md_interleave != spdk_bdev_is_md_interleaved(bdev) ||
3408 raid_bdev->bdev.dif_type != spdk_bdev_get_dif_type(bdev) ||
3409 raid_bdev->bdev.dif_check_flags != bdev->dif_check_flags ||
3410 raid_bdev->bdev.dif_is_head_of_md != spdk_bdev_is_dif_head_of_md(bdev) ||
3411 raid_bdev->bdev.dif_pi_format != bdev->dif_pi_format) {
3413 raid_bdev->bdev.name, bdev->name);
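Lines 3390 to 3413 show the first configured base bdev seeding the raid bdev's block and metadata geometry, with every later base bdev required to match it. A stripped-down version of that check using placeholder types; only the seed-then-compare logic mirrors the listing:

#include <stdbool.h>
#include <stdint.h>

struct geom {
	uint32_t blocklen;
	uint32_t md_len;
	bool     md_interleave;
};

/* The first member seeds the array geometry (blocklen == 0 means "unset");
 * subsequent members must match it exactly, as in the checks at 3399-3413. */
static bool
accept_member(struct geom *array, const struct geom *member)
{
	if (array->blocklen == 0) {
		*array = *member;
		return true;
	}
	return array->blocklen == member->blocklen &&
	       array->md_len == member->md_len &&
	       array->md_interleave == member->md_interleave;
}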
3443 raid_bdev_add_base_bdev(struct raid_bdev *raid_bdev, const char *name,
3452 if (raid_bdev->process != NULL) {
3454 raid_bdev->bdev.name);
3458 if (raid_bdev->state == RAID_BDEV_STATE_CONFIGURING) {
3462 RAID_FOR_EACH_BASE_BDEV(raid_bdev, iter) {
3472 if (base_info == NULL || raid_bdev->state == RAID_BDEV_STATE_ONLINE) {
3473 RAID_FOR_EACH_BASE_BDEV(raid_bdev, iter) {
3483 raid_bdev->bdev.name, name);
3489 if (raid_bdev->state == RAID_BDEV_STATE_ONLINE) {
3500 if (rc != 0 && (rc != -ENODEV || raid_bdev->state != RAID_BDEV_STATE_CONFIGURING)) {
3510 raid_bdev_create_from_sb(const struct raid_bdev_superblock *sb, struct raid_bdev **raid_bdev_out)
3512 struct raid_bdev *raid_bdev;
3517 sb->level, true, &sb->uuid, &raid_bdev);
3522 rc = raid_bdev_alloc_superblock(raid_bdev, sb->block_size);
3524 raid_bdev_free(raid_bdev);
3529 memcpy(raid_bdev->sb, sb, sb->length);
3533 struct raid_base_bdev_info *base_info = &raid_bdev->base_bdev_info[sb_base_bdev->slot];
3537 raid_bdev->num_base_bdevs_operational++;
3544 *raid_bdev_out = raid_bdev;
3551 struct raid_bdev *raid_bdev;
3554 TAILQ_FOREACH(raid_bdev, &g_raid_bdev_list, global_link) {
3555 if (raid_bdev->state != RAID_BDEV_STATE_CONFIGURING || raid_bdev->sb != NULL) {
3558 RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
3613 struct raid_bdev *raid_bdev;
3621 raid_bdev = raid_bdev_find_by_uuid(&ctx->raid_bdev_uuid);
3622 if (raid_bdev == NULL) {
3627 for (base_info = &raid_bdev->base_bdev_info[ctx->current_base_bdev_idx];
3628 base_info < &raid_bdev->base_bdev_info[raid_bdev->num_base_bdevs];
3657 struct raid_bdev *raid_bdev;
3675 raid_bdev = raid_bdev_find_by_uuid(&sb->uuid);
3677 if (raid_bdev) {
3678 if (raid_bdev->sb == NULL) {
3684 if (sb->seq_number > raid_bdev->sb->seq_number) {
3687 bdev->name, sb->seq_number, raid_bdev->bdev.name, raid_bdev->sb->seq_number);
3689 if (raid_bdev->state != RAID_BDEV_STATE_CONFIGURING) {
3691 raid_bdev->bdev.name, bdev->name);
3697 raid_bdev_delete(raid_bdev, NULL, NULL);
3698 raid_bdev = NULL;
3699 } else if (sb->seq_number < raid_bdev->sb->seq_number) {
3702 bdev->name, sb->seq_number, raid_bdev->bdev.name, raid_bdev->sb->seq_number);
3704 sb = raid_bdev->sb;
3724 if (!raid_bdev) {
3733 rc = raid_bdev_create_from_sb(sb, &raid_bdev);
3750 if (raid_bdev->state == RAID_BDEV_STATE_ONLINE) {
3751 assert(sb_base_bdev->slot < raid_bdev->num_base_bdevs);
3752 base_info = &raid_bdev->base_bdev_info[sb_base_bdev->slot];
3758 SPDK_NOTICELOG("Re-adding bdev %s to raid bdev %s.\n", bdev->name, raid_bdev->bdev.name);
3762 bdev->name, raid_bdev->bdev.name, spdk_strerror(-rc));
3769 bdev->name, raid_bdev->bdev.name);
3775 RAID_FOR_EACH_BASE_BDEV(raid_bdev, iter) {
3784 bdev->name, raid_bdev->bdev.name);
3797 bdev->name, raid_bdev->bdev.name, spdk_strerror(-rc));