Lines Matching defs:ublk

18 #include "spdk/ublk.h"
24 #define UBLK_CTRL_DEV "/dev/ublk-control"
41 #define UBLK_DEBUGLOG(ublk, format, ...) \
42 SPDK_DEBUGLOG(ublk, "ublk%d: " format, ublk->ublk_id, ##__VA_ARGS__)
57 static int ublk_set_params(struct spdk_ublk_dev *ublk);
58 static int ublk_start_dev(struct spdk_ublk_dev *ublk, bool is_recovering);
59 static void ublk_free_dev(struct spdk_ublk_dev *ublk);
61 static int ublk_close_dev(struct spdk_ublk_dev *ublk);
62 static int ublk_ctrl_start_recovery(struct spdk_ublk_dev *ublk);
64 static int ublk_ctrl_cmd_submit(struct spdk_ublk_dev *ublk, uint32_t cmd_op);
290 ublk_ctrl_cmd_error(struct spdk_ublk_dev *ublk, int32_t res)
294 SPDK_ERRLOG("ctrlr cmd %s failed, %s\n", ublk_op_name[ublk->current_cmd_op], spdk_strerror(-res));
295 if (ublk->ctrl_cb) {
296 ublk->ctrl_cb(ublk->cb_arg, res);
297 ublk->ctrl_cb = NULL;
300 switch (ublk->current_cmd_op) {
305 ublk_delete_dev(ublk);
308 ublk_close_dev(ublk);
311 ublk_free_dev(ublk);
317 SPDK_ERRLOG("No match cmd operation,cmd_op = %d\n", ublk->current_cmd_op);
325 struct spdk_ublk_dev *ublk = arg;
328 spdk_poller_unregister(&ublk->retry_poller);
330 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_GET_DEV_INFO);
332 ublk_delete_dev(ublk);
333 if (ublk->ctrl_cb) {
334 ublk->ctrl_cb(ublk->cb_arg, rc);
335 ublk->ctrl_cb = NULL;
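The retry at line 395 uses SPDK's poller facility: a function registered with SPDK_POLLER_REGISTER runs on the owning thread at a fixed period until unregistered. A minimal sketch of the same one-shot retry pattern (struct my_dev and my_resubmit are hypothetical stand-ins, not names from this file):

#include "spdk/thread.h"

static int
my_retry_fn(void *arg)
{
	struct my_dev *dev = arg;	/* hypothetical device type */

	spdk_poller_unregister(&dev->retry_poller);	/* one-shot: stop recurring */
	my_resubmit(dev);				/* retry the failed command */
	return SPDK_POLLER_BUSY;
}

/* third argument is the period in microseconds; 1000000 == 1 s, as at line 395 */
dev->retry_poller = SPDK_POLLER_REGISTER(my_retry_fn, dev, 1000000);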
345 struct spdk_ublk_dev *ublk;
348 ublk = (struct spdk_ublk_dev *)cqe->user_data;
349 UBLK_DEBUGLOG(ublk, "ctrl cmd %s completed\n", ublk_op_name[ublk->current_cmd_op]);
350 ublk->ctrl_ops_in_progress--;
353 ublk_ctrl_cmd_error(ublk, cqe->res);
357 switch (ublk->current_cmd_op) {
359 rc = ublk_set_params(ublk);
361 ublk_delete_dev(ublk);
366 rc = ublk_start_dev(ublk, false);
368 ublk_delete_dev(ublk);
378 if (ublk->ctrl_cb) {
379 ublk->ctrl_cb(ublk->cb_arg, 0);
380 ublk->ctrl_cb = NULL;
382 ublk_free_dev(ublk);
385 if (ublk->ublk_id != ublk->dev_info.dev_id) {
386 SPDK_ERRLOG("Invalid ublk ID\n");
391 UBLK_DEBUGLOG(ublk, "device state %u\n", ublk->dev_info.state);
393 if ((ublk->dev_info.state != UBLK_S_DEV_QUIESCED) && (ublk->retry_count < 3)) {
394 ublk->retry_count++;
395 ublk->retry_poller = SPDK_POLLER_REGISTER(_ublk_get_device_state_retry, ublk, 1000000);
399 rc = ublk_ctrl_start_recovery(ublk);
401 ublk_delete_dev(ublk);
406 rc = ublk_start_dev(ublk, true);
408 ublk_delete_dev(ublk);
413 SPDK_NOTICELOG("Ublk %u recover done successfully\n", ublk->ublk_id);
414 ublk->is_recovering = false;
418 SPDK_ERRLOG("No match cmd operation,cmd_op = %d\n", ublk->current_cmd_op);
425 if (ublk->ctrl_cb) {
426 ublk->ctrl_cb(ublk->cb_arg, rc);
427 ublk->ctrl_cb = NULL;
462 ublk_ctrl_cmd_submit(struct spdk_ublk_dev *ublk, uint32_t cmd_op)
464 uint32_t dev_id = ublk->ublk_id;
469 UBLK_DEBUGLOG(ublk, "ctrl cmd %s\n", ublk_op_name[cmd_op]);
484 ublk->current_cmd_op = cmd_op;
489 cmd->addr = (__u64)(uintptr_t)&ublk->dev_info;
490 cmd->len = sizeof(ublk->dev_info);
493 cmd->addr = (__u64)(uintptr_t)&ublk->dev_params;
494 cmd->len = sizeof(ublk->dev_params);
513 io_uring_sqe_set_data(sqe, ublk);
522 ublk->ctrl_ops_in_progress++;
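Control commands reach the kernel as IORING_OP_URING_CMD submissions on a ring opened against UBLK_CTRL_DEV; the payload (struct ublksrv_ctrl_cmd from <linux/ublk_cmd.h>) is carried in the extended SQE, which requires a ring created with IORING_SETUP_SQE128. A hedged sketch of the submit path at lines 462-522 (ctrl_ring, ctrl_fd, and cookie are illustrative names; the exact field handling in this file is truncated out of the listing):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <liburing.h>
#include <linux/ublk_cmd.h>

static int
submit_ctrl_cmd(struct io_uring *ctrl_ring, int ctrl_fd, uint32_t cmd_op,
		uint32_t dev_id, void *payload, uint16_t payload_len, void *cookie)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ctrl_ring);
	struct ublksrv_ctrl_cmd *cmd;

	if (sqe == NULL) {
		return -ENOMEM;
	}
	memset(sqe, 0, 2 * sizeof(*sqe));	/* SQE128 slots are 128 bytes */
	sqe->fd = ctrl_fd;			/* fd of /dev/ublk-control */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->cmd_op = cmd_op;			/* e.g. UBLK_CMD_ADD_DEV */

	cmd = (struct ublksrv_ctrl_cmd *)sqe->cmd;
	cmd->dev_id = dev_id;
	cmd->queue_id = (uint16_t)-1;		/* control cmds are not per-queue */
	cmd->addr = (__u64)(uintptr_t)payload;	/* e.g. &ublk->dev_info, line 489 */
	cmd->len = payload_len;

	io_uring_sqe_set_data(sqe, cookie);	/* recovered from the CQE, line 348 */
	return io_uring_submit(ctrl_ring);
}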
608 /* We need to set SQPOLL for kernels 6.1 and earlier, since they would not defer ublk ctrl
610 * All the commands sent via the control uring for a ublk device are executed one by one, so use
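On kernels up to 6.1 the control ring therefore wants SQPOLL, moving command execution into the kernel's SQ thread; together with the 128-byte SQEs needed for uring_cmd payloads, setup plausibly looks like this sketch (UBLK_CTRL_RING_DEPTH and the version check are illustrative, not from this file):

struct io_uring_params p = {0};

p.flags = IORING_SETUP_SQE128;		/* uring_cmd payload lives in the SQE */
if (kernel_version_le_6_1) {		/* hypothetical version probe */
	p.flags |= IORING_SETUP_SQPOLL;
}
rc = io_uring_queue_init_params(UBLK_CTRL_RING_DEPTH, &ctrl_ring, &p);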
674 /* Bind ublk spdk_thread to current CPU core in order to avoid thread context switch
675 * during uring processing as required by ublk kernel.
681 rc = spdk_iobuf_channel_init(&poll_group->iobuf_ch, "ublk",
739 spdk_iobuf_register_module("ublk");
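Data buffers for the I/O path come from SPDK's iobuf pools: the module registers itself once at line 739, and each poll group then opens a per-thread channel (line 681, whose cache-size arguments are truncated here). A minimal sketch with illustrative cache sizes:

/* iobuf API lives in spdk/thread.h in older releases, spdk/iobuf.h in newer ones */
spdk_iobuf_register_module("ublk");	/* once, at target creation */

/* on each poll group's own thread; cache sizes below are illustrative */
rc = spdk_iobuf_channel_init(&poll_group->iobuf_ch, "ublk",
			     128 /* small-buffer cache */, 32 /* large-buffer cache */);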
766 SPDK_DEBUGLOG(ublk, "\n");
807 ublk_close_dev(struct spdk_ublk_dev *ublk)
812 if (ublk->is_closing) {
815 ublk->is_closing = true;
817 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_STOP_DEV);
819 SPDK_ERRLOG("stop dev %d failed\n", ublk->ublk_id);
827 struct spdk_ublk_dev *ublk, *ublk_tmp;
829 TAILQ_FOREACH_SAFE(ublk, &g_ublk_devs, tailq, ublk_tmp) {
830 ublk_close_dev(ublk);
835 SPDK_DEBUGLOG(ublk, "finish shutdown\n");
886 struct spdk_ublk_dev *ublk;
888 /* check whether a ublk dev with this ID has already been registered. */
889 TAILQ_FOREACH(ublk, &g_ublk_devs, tailq) {
890 if (ublk->ublk_id == ublk_id) {
891 return ublk;
899 ublk_dev_get_id(struct spdk_ublk_dev *ublk)
901 return ublk->ublk_id;
915 ublk_dev_get_queue_depth(struct spdk_ublk_dev *ublk)
917 return ublk->queue_depth;
921 ublk_dev_get_num_queues(struct spdk_ublk_dev *ublk)
923 return ublk->num_queues;
927 ublk_dev_get_bdev_name(struct spdk_ublk_dev *ublk)
929 return spdk_bdev_get_name(ublk->bdev);
935 struct spdk_ublk_dev *ublk;
950 TAILQ_FOREACH(ublk, &g_ublk_devs, tailq) {
956 spdk_json_write_named_string(w, "bdev_name", ublk_dev_get_bdev_name(ublk));
957 spdk_json_write_named_uint32(w, "ublk_id", ublk->ublk_id);
958 spdk_json_write_named_uint32(w, "num_queues", ublk->num_queues);
959 spdk_json_write_named_uint32(w, "queue_depth", ublk->queue_depth);
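Each device is written out as a replayable RPC call, so a saved JSON config recreates it through ublk_start_disk on load. A plausible shape of the surrounding writer calls (the method/params framing is assumed from SPDK's usual save-config style; only the four named fields are visible above):

spdk_json_write_object_begin(w);
spdk_json_write_named_string(w, "method", "ublk_start_disk");
spdk_json_write_named_object_begin(w, "params");
spdk_json_write_named_string(w, "bdev_name", ublk_dev_get_bdev_name(ublk));
spdk_json_write_named_uint32(w, "ublk_id", ublk->ublk_id);
spdk_json_write_named_uint32(w, "num_queues", ublk->num_queues);
spdk_json_write_named_uint32(w, "queue_depth", ublk->queue_depth);
spdk_json_write_object_end(w);		/* params */
spdk_json_write_object_end(w);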
969 ublk_dev_list_register(struct spdk_ublk_dev *ublk)
971 UBLK_DEBUGLOG(ublk, "add to tailq\n");
972 TAILQ_INSERT_TAIL(&g_ublk_devs, ublk, tailq);
977 ublk_dev_list_unregister(struct spdk_ublk_dev *ublk)
980 * ublk device may be stopped before it is registered.
984 if (ublk_dev_find_by_id(ublk->ublk_id)) {
985 UBLK_DEBUGLOG(ublk, "remove from tailq\n");
986 TAILQ_REMOVE(&g_ublk_devs, ublk, tailq);
992 UBLK_DEBUGLOG(ublk, "not found in tailq\n");
999 struct spdk_ublk_dev *ublk = arg;
1004 for (q_idx = 0; q_idx < ublk->num_queues; q_idx++) {
1005 ublk_dev_queue_fini(&ublk->queues[q_idx]);
1008 if (ublk->cdev_fd >= 0) {
1009 close(ublk->cdev_fd);
1012 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_DEL_DEV);
1014 SPDK_ERRLOG("delete dev %d failed\n", ublk->ublk_id);
1021 struct spdk_ublk_dev *ublk = arg;
1023 if (ublk->ctrl_ops_in_progress > 0) {
1024 if (ublk->retry_count-- > 0) {
1029 spdk_poller_unregister(&ublk->retry_poller);
1030 ublk_delete_dev(ublk);
1037 struct spdk_ublk_dev *ublk = arg;
1041 ublk->queues_closed += 1;
1042 SPDK_DEBUGLOG(ublk_io, "ublkb%u closed queues %u\n", ublk->ublk_id, ublk->queues_closed);
1044 if (ublk->queues_closed < ublk->num_queues) {
1048 if (ublk->ctrl_ops_in_progress > 0) {
1049 assert(ublk->retry_poller == NULL);
1050 ublk->retry_count = UBLK_STOP_BUSY_WAITING_MS * 1000ULL / UBLK_BUSY_POLLING_INTERVAL_US;
1051 ublk->retry_poller = SPDK_POLLER_REGISTER(_ublk_close_dev_retry, ublk,
1054 ublk_delete_dev(ublk);
1061 struct spdk_ublk_dev *ublk = q->dev;
1075 spdk_thread_send_msg(spdk_thread_get_app_thread(), ublk_try_close_dev, ublk);
1081 struct spdk_ublk_dev *ublk;
1085 ublk = ublk_dev_find_by_id(ublk_id);
1086 if (ublk == NULL) {
1087 SPDK_ERRLOG("no ublk dev with ublk_id=%u\n", ublk_id);
1090 if (ublk->is_closing) {
1091 SPDK_WARNLOG("ublk %d is closing\n", ublk->ublk_id);
1094 if (ublk->ctrl_cb) {
1095 SPDK_WARNLOG("ublk %d is busy with RPC call\n", ublk->ublk_id);
1099 ublk->ctrl_cb = ctrl_cb;
1100 ublk->cb_arg = cb_arg;
1101 return ublk_close_dev(ublk);
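Callers of ublk_stop_disk get at most one completion through ctrl_cb, and the function rejects devices that are already closing or still busy with another RPC (lines 1090-1095). A hedged usage sketch, assuming the callback signature (void *cb_arg, int rc) implied by the invocations at lines 296, 334, and 426:

static void
stop_done(void *cb_arg, int rc)
{
	if (rc != 0) {
		SPDK_ERRLOG("ublk stop failed: %s\n", spdk_strerror(-rc));
	}
}

rc = ublk_stop_disk(ublk_id, stop_done, NULL);
if (rc != 0) {
	/* synchronous failure, e.g. unknown ID or device already closing */
}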
1248 struct spdk_ublk_dev *ublk = q->dev;
1258 offset_blocks = iod->start_sector >> ublk->sector_per_block_shift;
1259 num_blocks = iod->nr_sectors >> ublk->sector_per_block_shift;
1274 rc = spdk_bdev_flush_blocks(desc, ch, 0, spdk_bdev_get_num_blocks(ublk->bdev), ublk_io_done, io);
1288 SPDK_INFOLOG(ublk, "No memory, queueing io.\n");
1291 SPDK_ERRLOG("ublk io failed in ublk_queue_io, rc=%d, ublk_op=%u\n", rc, ublk_op);
1416 /* Note: for READ io, ublk will always copy the data out of
1484 SPDK_ERRLOG("ublk received error io: res %d qid %d tag %u cmd_op %u\n",
1541 ublk_bdev_hot_remove(struct spdk_ublk_dev *ublk)
1543 ublk_close_dev(ublk);
1582 struct spdk_ublk_dev *ublk = q->dev;
1589 MAP_SHARED | MAP_POPULATE, ublk->cdev_fd, off);
1610 rc = io_uring_register_files(&q->ring, &ublk->cdev_fd, 1);
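Per-queue setup mmaps the kernel's read-only I/O descriptor array out of the ublk char device and registers cdev_fd with the queue ring so data-path SQEs can reference it as a fixed file. A hedged sketch of those two steps (the offset formula follows the ublk UAPI convention in <linux/ublk_cmd.h>; the depth constant and q_id/q_depth/io_cmd_buf naming are assumptions):

#include <errno.h>
#include <sys/mman.h>
#include <linux/ublk_cmd.h>

off_t off = UBLKSRV_CMD_BUF_OFFSET +
	    q->q_id * (UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc));

q->io_cmd_buf = mmap(NULL, q->q_depth * sizeof(struct ublksrv_io_desc),
		     PROT_READ, MAP_SHARED | MAP_POPULATE, ublk->cdev_fd, off);
if (q->io_cmd_buf == MAP_FAILED) {
	return -errno;
}

/* register cdev_fd as fixed file 0; SQEs then set IOSQE_FIXED_FILE */
rc = io_uring_register_files(&q->ring, &ublk->cdev_fd, 1);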
1656 /* Initialize and submit all io commands to ublk driver */
1677 ublk_set_params(struct spdk_ublk_dev *ublk)
1681 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_SET_PARAMS);
1683 SPDK_ERRLOG("UBLK can't set params for dev %d, rc %s\n", ublk->ublk_id, spdk_strerror(-rc));
1690 ublk_dev_info_init(struct spdk_ublk_dev *ublk)
1693 .queue_depth = ublk->queue_depth,
1694 .nr_hw_queues = ublk->num_queues,
1695 .dev_id = ublk->ublk_id,
1712 ublk->dev_info = uinfo;
1715 /* Set ublk device parameters based on bdev */
1717 ublk_info_param_init(struct spdk_ublk_dev *ublk)
1719 struct spdk_bdev *bdev = ublk->bdev;
1756 ublk->dev_params = uparams;
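UBLK_CMD_ADD_DEV carries the queue topology in dev_info, while UBLK_CMD_SET_PARAMS (line 1681) ships bdev geometry in struct ublk_params. A hedged sketch of the basic-parameters mapping (fields from <linux/ublk_cmd.h>; the max_sectors bound is illustrative, and this file's exact attribute choices are truncated out of the listing):

struct ublk_params uparams = {
	.len = sizeof(struct ublk_params),
	.types = UBLK_PARAM_TYPE_BASIC,
	.basic = {
		/* e.g. 4096-byte blocks => shift of 12 */
		.logical_bs_shift = spdk_u32log2(spdk_bdev_get_data_block_size(bdev)),
		/* capacity in 512-byte Linux sectors */
		.dev_sectors = spdk_bdev_get_num_blocks(bdev) << ublk->sector_per_block_shift,
		/* per-I/O cap in 512-byte sectors; illustrative bound of 1 MiB */
		.max_sectors = (1024 * 1024) >> LINUX_SECTOR_SHIFT,
	},
};
ublk->dev_params = uparams;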
1762 struct spdk_ublk_dev *ublk = arg;
1764 ublk_free_dev(ublk);
1782 ublk_free_dev(struct spdk_ublk_dev *ublk)
1787 for (q_idx = 0; q_idx < ublk->num_queues; q_idx++) {
1788 q = &ublk->queues[q_idx];
1811 * continue with releasing resources for the rest of the ublk device.
1813 if (ublk->bdev_desc) {
1814 spdk_bdev_close(ublk->bdev_desc);
1815 ublk->bdev_desc = NULL;
1818 ublk_dev_list_unregister(ublk);
1819 SPDK_NOTICELOG("ublk dev %d stopped\n", ublk->ublk_id);
1821 free(ublk);
1825 ublk_ios_init(struct spdk_ublk_dev *ublk)
1831 for (i = 0; i < ublk->num_queues; i++) {
1832 q = &ublk->queues[i];
1836 q->dev = ublk;
1838 q->q_depth = ublk->queue_depth;
1853 for (i = 0; i < ublk->num_queues; i++) {
1863 struct spdk_ublk_dev *ublk = arg;
1865 ublk->online_num_queues++;
1866 if (ublk->is_recovering && (ublk->online_num_queues == ublk->num_queues)) {
1867 ublk_ctrl_cmd_submit(ublk, UBLK_CMD_END_USER_RECOVERY);
1875 struct spdk_ublk_dev *ublk = q->dev;
1879 q->bdev_ch = spdk_bdev_get_io_channel(ublk->bdev_desc);
1884 spdk_thread_send_msg(spdk_thread_get_app_thread(), ublk_queue_recovery_done, ublk);
1895 struct spdk_ublk_dev *ublk = NULL;
1901 SPDK_ERRLOG("NO ublk target exist\n");
1905 ublk = ublk_dev_find_by_id(ublk_id);
1906 if (ublk != NULL) {
1907 SPDK_DEBUGLOG(ublk, "ublk id %d is in use.\n", ublk_id);
1912 SPDK_DEBUGLOG(ublk, "Reached maximum number of supported devices: %u\n", g_ublks_max);
1916 ublk = calloc(1, sizeof(*ublk));
1917 if (ublk == NULL) {
1920 ublk->ctrl_cb = ctrl_cb;
1921 ublk->cb_arg = cb_arg;
1922 ublk->cdev_fd = -1;
1923 ublk->ublk_id = ublk_id;
1924 UBLK_DEBUGLOG(ublk, "bdev %s num_queues %d queue_depth %d\n",
1927 rc = spdk_bdev_open_ext(bdev_name, true, ublk_bdev_event_cb, ublk, &ublk->bdev_desc);
1930 free(ublk);
1934 bdev = spdk_bdev_desc_get_bdev(ublk->bdev_desc);
1935 ublk->bdev = bdev;
1936 sector_per_block = spdk_bdev_get_data_block_size(ublk->bdev) >> LINUX_SECTOR_SHIFT;
1937 ublk->sector_per_block_shift = spdk_u32log2(sector_per_block);
1939 ublk->queues_closed = 0;
1940 ublk->num_queues = num_queues;
1941 ublk->queue_depth = queue_depth;
1942 if (ublk->queue_depth > UBLK_DEV_MAX_QUEUE_DEPTH) {
1944 ublk->queue_depth, ublk->ublk_id, UBLK_DEV_MAX_QUEUE_DEPTH);
1945 ublk->queue_depth = UBLK_DEV_MAX_QUEUE_DEPTH;
1947 if (ublk->num_queues > UBLK_DEV_MAX_QUEUES) {
1949 ublk->num_queues, ublk->ublk_id, UBLK_DEV_MAX_QUEUES);
1950 ublk->num_queues = UBLK_DEV_MAX_QUEUES;
1952 for (i = 0; i < ublk->num_queues; i++) {
1953 ublk->queues[i].ring.ring_fd = -1;
1956 ublk_dev_info_init(ublk);
1957 ublk_info_param_init(ublk);
1958 rc = ublk_ios_init(ublk);
1960 spdk_bdev_close(ublk->bdev_desc);
1961 free(ublk);
1965 SPDK_INFOLOG(ublk, "Enabling kernel access to bdev %s via ublk %d\n",
1969 ublk_dev_list_register(ublk);
1970 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_ADD_DEV);
1972 SPDK_ERRLOG("UBLK can't add dev %d, rc %s\n", ublk->ublk_id, spdk_strerror(-rc));
1973 ublk_free_dev(ublk);
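From the caller's perspective the whole sequence is one call: open the bdev, clamp the queue shape, register the device, and submit UBLK_CMD_ADD_DEV; the result arrives through ctrl_cb after the control chain ADD_DEV -> SET_PARAMS -> START_DEV (lines 357-368) completes. A hedged usage sketch, with the signature assumed from the parameters visible above:

static void
start_done(void *cb_arg, int rc)
{
	SPDK_NOTICELOG("ublk start completed: %s\n", spdk_strerror(-rc));
}

/* expose bdev "Malloc0" as ublk device 1 with 2 queues of depth 128 */
rc = ublk_start_disk("Malloc0", 1, 2, 128, start_done, NULL);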
1980 ublk_start_dev(struct spdk_ublk_dev *ublk, bool is_recovering)
1987 snprintf(buf, sizeof(buf), "%s%d", UBLK_BLK_CDEV, ublk->ublk_id);
1988 ublk->cdev_fd = open(buf, O_RDWR);
1989 if (ublk->cdev_fd < 0) {
1990 rc = ublk->cdev_fd;
1995 for (q_id = 0; q_id < ublk->num_queues; q_id++) {
1996 rc = ublk_dev_queue_init(&ublk->queues[q_id]);
2003 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_START_DEV);
2005 SPDK_ERRLOG("start dev %d failed, rc %s\n", ublk->ublk_id,
2012 for (q_id = 0; q_id < ublk->num_queues; q_id++) {
2013 ublk->queues[q_id].poll_group = &g_ublk_tgt.poll_groups[g_next_ublk_poll_group];
2015 spdk_thread_send_msg(ublk_thread, ublk_queue_run, &ublk->queues[q_id]);
2026 ublk_ctrl_start_recovery(struct spdk_ublk_dev *ublk)
2031 ublk->num_queues = ublk->dev_info.nr_hw_queues;
2032 ublk->queue_depth = ublk->dev_info.queue_depth;
2033 ublk->dev_info.ublksrv_pid = getpid();
2035 SPDK_DEBUGLOG(ublk, "Recovering ublk %d, num queues %u, queue depth %u, flags 0x%llx\n",
2036 ublk->ublk_id,
2037 ublk->num_queues, ublk->queue_depth, ublk->dev_info.flags);
2039 for (i = 0; i < ublk->num_queues; i++) {
2040 ublk->queues[i].ring.ring_fd = -1;
2043 ublk_info_param_init(ublk);
2044 rc = ublk_ios_init(ublk);
2049 ublk->is_recovering = true;
2050 return ublk_ctrl_cmd_submit(ublk, UBLK_CMD_START_USER_RECOVERY);
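Pulling the scattered recovery lines together, the handshake with the kernel plausibly proceeds in this order (a hedged outline, reconstructed only from the lines above):

/*
 * GET_DEV_INFO          -> poll until state == UBLK_S_DEV_QUIESCED,
 *                          retrying up to 3 times (lines 393-395)
 * START_USER_RECOVERY   -> kernel detaches the stale daemon (line 2050)
 * reopen the char device, re-init queues and io commands (line 2044)
 * END_USER_RECOVERY     -> sent once online_num_queues == num_queues
 *                          (lines 1866-1867); I/O then resumes
 */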
2059 struct spdk_ublk_dev *ublk = NULL;
2065 SPDK_ERRLOG("NO ublk target exist\n");
2074 ublk = ublk_dev_find_by_id(ublk_id);
2075 if (ublk != NULL) {
2076 SPDK_DEBUGLOG(ublk, "ublk id %d is in use.\n", ublk_id);
2081 SPDK_DEBUGLOG(ublk, "Reached maximum number of supported devices: %u\n", g_ublks_max);
2085 ublk = calloc(1, sizeof(*ublk));
2086 if (ublk == NULL) {
2089 ublk->ctrl_cb = ctrl_cb;
2090 ublk->cb_arg = cb_arg;
2091 ublk->cdev_fd = -1;
2092 ublk->ublk_id = ublk_id;
2094 rc = spdk_bdev_open_ext(bdev_name, true, ublk_bdev_event_cb, ublk, &ublk->bdev_desc);
2097 free(ublk);
2101 bdev = spdk_bdev_desc_get_bdev(ublk->bdev_desc);
2102 ublk->bdev = bdev;
2103 sector_per_block = spdk_bdev_get_data_block_size(ublk->bdev) >> LINUX_SECTOR_SHIFT;
2104 ublk->sector_per_block_shift = spdk_u32log2(sector_per_block);
2106 SPDK_NOTICELOG("Recovering ublk %d with bdev %s\n", ublk->ublk_id, bdev_name);
2108 ublk_dev_list_register(ublk);
2109 rc = ublk_ctrl_cmd_submit(ublk, UBLK_CMD_GET_DEV_INFO);
2111 ublk_free_dev(ublk);
2117 SPDK_LOG_REGISTER_COMPONENT(ublk)