Lines matching defs:nbdev — occurrences of the nbdev symbol in SPDK's NVMe bdev module (the leading number on each line is its line number in the source file)

60 #define BDEV_STRING(nbdev) (nbdev->disk.name)
62 #define NVME_BDEV_ERRLOG(nbdev, format, ...) \
63 SPDK_ERRLOG("[%s] " format, BDEV_STRING(nbdev), ##__VA_ARGS__);
65 #define NVME_BDEV_WARNLOG(nbdev, format, ...) \
66 SPDK_WARNLOG("[%s] " format, BDEV_STRING(nbdev), ##__VA_ARGS__);
68 #define NVME_BDEV_NOTICELOG(nbdev, format, ...) \
69 SPDK_NOTICELOG("[%s] " format, BDEV_STRING(nbdev), ##__VA_ARGS__);
71 #define NVME_BDEV_INFOLOG(nbdev, format, ...) \
72 SPDK_INFOLOG(bdev_nvme, "[%s] " format, BDEV_STRING(nbdev), ##__VA_ARGS__);
251 static void bdev_nvme_reset_io(struct nvme_bdev *nbdev, struct nvme_bdev_io *bio);
353 struct nvme_bdev *nbdev;
356 TAILQ_FOREACH(nbdev, &nbdev_ctrlr->bdevs, tailq) {
357 if (nbdev->nsid == nsid) {
363 return nbdev;
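
Lines 353-363 are the per-controller bdev lookup by namespace ID. A completed form, with the function name and surrounding control flow assumed from context (module-internal types come from the NVMe bdev module's header):

    static struct nvme_bdev *
    nvme_bdev_ctrlr_get_bdev(struct nvme_bdev_ctrlr *nbdev_ctrlr, uint32_t nsid)
    {
            struct nvme_bdev *nbdev;

            /* Walk the controller's bdev list for a matching namespace ID. */
            TAILQ_FOREACH(nbdev, &nbdev_ctrlr->bdevs, tailq) {
                    if (nbdev->nsid == nsid) {
                            return nbdev;
                    }
            }

            return NULL;
    }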
521 struct nvme_bdev *nbdev = spdk_io_channel_iter_get_io_device(i);
526 iter->fn(iter, nbdev, nbdev_ch, iter->ctx);
533 struct nvme_bdev *nbdev = spdk_io_channel_iter_get_io_device(i);
536 iter->cpl(nbdev, iter->ctx, status);
542 nvme_bdev_for_each_channel(struct nvme_bdev *nbdev,
548 assert(nbdev != NULL && fn != NULL);
561 spdk_for_each_channel(nbdev, nvme_bdev_each_channel_msg, iter,
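
Lines 521-561 implement a typed iterator over the bdev's channels on top of spdk_for_each_channel(): fn runs once per live channel, on the thread that owns that channel, and cpl runs once afterwards on the originating thread with the final status. A hedged caller sketch; the callback names are hypothetical, and nvme_bdev_for_each_channel_continue is assumed to be the module's wrapper around spdk_for_each_channel_continue (it takes the iterator rather than nbdev, which is why it does not appear in this listing):

    static void
    _visit_channel(struct nvme_bdev_channel_iter *i, struct nvme_bdev *nbdev,
                   struct nvme_bdev_channel *nbdev_ch, void *ctx)
    {
            /* Per-channel work; executes on the channel's owning thread. */
            nvme_bdev_for_each_channel_continue(i, 0);
    }

    static void
    _visit_done(struct nvme_bdev *nbdev, void *ctx, int status)
    {
            /* Runs once, back on the caller's thread, after every channel. */
    }

    nvme_bdev_for_each_channel(nbdev, _visit_channel, NULL, _visit_done);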
892 struct nvme_bdev *nbdev;
894 nbdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(nbdev_ch));
897 pthread_mutex_lock(&nbdev->mutex);
898 if (nbdev->ref != 0 && io_path->nvme_ns->stat != NULL && io_path->stat != NULL) {
901 pthread_mutex_unlock(&nbdev->mutex);
939 struct nvme_bdev *nbdev = io_device;
946 pthread_mutex_lock(&nbdev->mutex);
948 nbdev_ch->mp_policy = nbdev->mp_policy;
949 nbdev_ch->mp_selector = nbdev->mp_selector;
950 nbdev_ch->rr_min_io = nbdev->rr_min_io;
952 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
955 pthread_mutex_unlock(&nbdev->mutex);
961 pthread_mutex_unlock(&nbdev->mutex);
1387 struct nvme_bdev *nbdev;
1392 nbdev = bdev_io->bdev->ctxt;
1394 if (nbdev->err_stat == NULL) {
1401 pthread_mutex_lock(&nbdev->mutex);
1403 nbdev->err_stat->status_type[sct]++;
1409 nbdev->err_stat->status[sct][sc]++;
1415 pthread_mutex_unlock(&nbdev->mutex);
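
Lines 1387-1415 count each failed completion twice under nbdev->mutex: once per NVMe status code type (sct) and once per (sct, sc) pair. A plausible shape for struct nvme_error_stat, assuming the 3-bit status-code-type and 8-bit status-code field widths from the NVMe spec (the real definition lives in the module's header):

    struct nvme_error_stat {
            uint32_t status_type[8];        /* indexed by sct */
            uint32_t status[8][256];        /* indexed by [sct][sc] */
    };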
1893 struct nvme_bdev *nbdev = io_device;
1895 pthread_mutex_destroy(&nbdev->mutex);
1896 free(nbdev->disk.name);
1897 free(nbdev->err_stat);
1898 free(nbdev);
1904 struct nvme_bdev *nbdev = ctx;
1907 SPDK_DTRACE_PROBE2(bdev_nvme_destruct, nbdev->nbdev_ctrlr->name, nbdev->nsid);
1909 pthread_mutex_lock(&nbdev->mutex);
1911 TAILQ_FOREACH_SAFE(nvme_ns, &nbdev->nvme_ns_list, tailq, tmp_nvme_ns) {
1928 pthread_mutex_unlock(&nbdev->mutex);
1931 TAILQ_REMOVE(&nbdev->nbdev_ctrlr->bdevs, nbdev, tailq);
1934 spdk_io_device_unregister(nbdev, nvme_bdev_free);
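
Lines 1893-1934 give the teardown order: detach the namespaces, unlink the bdev from its controller's list, then hand nvme_bdev_free to spdk_io_device_unregister(), which defers the callback until every thread has put its channel. A sketch of that tail (helper name hypothetical):

    static void
    nvme_bdev_unregister_dev(struct nvme_bdev *nbdev)
    {
            TAILQ_REMOVE(&nbdev->nbdev_ctrlr->bdevs, nbdev, tailq);   /* line 1931 */

            /* nvme_bdev_free (lines 1893-1898) runs only after the last
             * channel on nbdev is released, so it cannot race channel users. */
            spdk_io_device_unregister(nbdev, nvme_bdev_free);
    }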
2976 bdev_nvme_unfreeze_bdev_channel_done(struct nvme_bdev *nbdev, void *ctx, int status)
2987 NVME_BDEV_INFOLOG(nbdev, "reset_io %p completed, status:%d\n", bio, io_status);
2994 struct nvme_bdev *nbdev,
3007 struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
3010 nvme_bdev_for_each_channel(nbdev,
3045 struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
3047 NVME_BDEV_INFOLOG(nbdev, "continue reset_io %p, rc:%d\n", bio, rc);
3063 struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
3091 NVME_BDEV_INFOLOG(nbdev, "reset_io %p started resetting ctrlr [%s, %u].\n",
3096 NVME_BDEV_INFOLOG(nbdev, "reset_io %p was queued to ctrlr [%s, %u].\n",
3099 NVME_BDEV_INFOLOG(nbdev, "reset_io %p could not reset ctrlr [%s, %u], rc:%d\n",
3107 bdev_nvme_freeze_bdev_channel_done(struct nvme_bdev *nbdev, void *ctx, int status)
3137 struct nvme_bdev *nbdev,
3146 bdev_nvme_reset_io(struct nvme_bdev *nbdev, struct nvme_bdev_io *bio)
3148 NVME_BDEV_INFOLOG(nbdev, "reset_io %p started.\n", bio);
3150 nvme_bdev_for_each_channel(nbdev,
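
Lines 2976-3150 form the reset state machine: freeze every bdev channel, reset each underlying controller in turn (logging started/queued/failed per controller), then unfreeze the channels and complete the reset I/O with the final status. From a bdev consumer's side this is driven by the generic reset API; a hedged example, assuming an already-open descriptor desc and its I/O channel ch:

    #include "spdk/bdev.h"

    static void
    reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
    {
            /* success reflects the io_status logged at line 2987. */
            spdk_bdev_free_io(bdev_io);
    }

    /* On an NVMe bdev, this ultimately lands in bdev_nvme_reset_io(). */
    rc = spdk_bdev_reset(desc, ch, reset_done, NULL);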
3469 struct nvme_bdev *nbdev = ctx;
3475 nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
3834 struct nvme_bdev *nbdev = ctx;
3836 return spdk_get_io_channel(nbdev);
3842 struct nvme_bdev *nbdev = ctx;
3845 if (!nbdev || nbdev->disk.module != &nvme_if) {
3849 nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
3880 struct nvme_bdev *nbdev = ctx;
3885 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
4103 nvme_bdev_get_mp_policy_str(struct nvme_bdev *nbdev)
4105 switch (nbdev->mp_policy) {
4117 nvme_bdev_get_mp_selector_str(struct nvme_bdev *nbdev)
4119 switch (nbdev->mp_selector) {
4133 struct nvme_bdev *nbdev = ctx;
4136 pthread_mutex_lock(&nbdev->mutex);
4138 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
4142 spdk_json_write_named_string(w, "mp_policy", nvme_bdev_get_mp_policy_str(nbdev));
4143 if (nbdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE) {
4144 spdk_json_write_named_string(w, "selector", nvme_bdev_get_mp_selector_str(nbdev));
4145 if (nbdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN) {
4146 spdk_json_write_named_uint32(w, "rr_min_io", nbdev->rr_min_io);
4149 pthread_mutex_unlock(&nbdev->mutex);
4191 struct nvme_bdev *nbdev = ctx;
4193 if (nbdev->err_stat != NULL) {
4194 memset(nbdev->err_stat, 0, sizeof(struct nvme_error_stat));
4213 struct nvme_bdev *nbdev = ctx;
4219 if (nbdev->err_stat == NULL) {
4227 if (nbdev->err_stat->status_type[sct] == 0) {
4236 spdk_json_write_named_uint32(w, status_json, nbdev->err_stat->status_type[sct]);
4244 if (nbdev->err_stat->status[sct][sc] == 0) {
4253 spdk_json_write_named_uint32(w, status_json, nbdev->err_stat->status[sct][sc]);
4264 struct nvme_bdev *nbdev = ctx;
4280 nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
4592 struct nvme_bdev *nbdev;
4595 nbdev = calloc(1, sizeof(*nbdev));
4596 if (!nbdev) {
4597 SPDK_ERRLOG("nbdev calloc() failed\n");
4602 nbdev->err_stat = calloc(1, sizeof(struct nvme_error_stat));
4603 if (!nbdev->err_stat) {
4605 free(nbdev);
4610 rc = pthread_mutex_init(&nbdev->mutex, NULL);
4612 free(nbdev->err_stat);
4613 free(nbdev);
4617 nbdev->ref = 1;
4618 nbdev->mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
4619 nbdev->mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN;
4620 nbdev->rr_min_io = UINT32_MAX;
4621 TAILQ_INIT(&nbdev->nvme_ns_list);
4623 return nbdev;
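
Lines 4592-4623 show the allocator with unwind-in-reverse error handling and the multipath defaults: one creator reference, active-passive policy, round-robin selector, and rr_min_io pinned to UINT32_MAX. Condensed into one readable function (only the fields visible in this listing are set; the full struct layout is assumed):

    static struct nvme_bdev *
    nvme_bdev_alloc(void)
    {
            struct nvme_bdev *nbdev;

            nbdev = calloc(1, sizeof(*nbdev));
            if (!nbdev) {
                    SPDK_ERRLOG("nbdev calloc() failed\n");
                    return NULL;
            }

            nbdev->err_stat = calloc(1, sizeof(struct nvme_error_stat));
            if (!nbdev->err_stat) {
                    free(nbdev);
                    return NULL;
            }

            if (pthread_mutex_init(&nbdev->mutex, NULL) != 0) {
                    free(nbdev->err_stat);
                    free(nbdev);
                    return NULL;
            }

            nbdev->ref = 1;         /* creator's reference */
            nbdev->mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
            nbdev->mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN;
            nbdev->rr_min_io = UINT32_MAX;
            TAILQ_INIT(&nbdev->nvme_ns_list);

            return nbdev;
    }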
4629 struct nvme_bdev *nbdev;
4633 nbdev = nvme_bdev_alloc();
4634 if (nbdev == NULL) {
4639 nbdev->opal = nvme_ctrlr->opal_dev != NULL;
4641 rc = nbdev_create(&nbdev->disk, nbdev_ctrlr->name, nvme_ctrlr->ctrlr,
4642 nvme_ns->ns, &nvme_ctrlr->opts, nbdev);
4645 nvme_bdev_free(nbdev);
4649 spdk_io_device_register(nbdev,
4653 nbdev->disk.name);
4655 nvme_ns->bdev = nbdev;
4656 nbdev->nsid = nvme_ns->id;
4657 TAILQ_INSERT_TAIL(&nbdev->nvme_ns_list, nvme_ns, tailq);
4661 nbdev->nbdev_ctrlr = nbdev_ctrlr;
4662 TAILQ_INSERT_TAIL(&nbdev_ctrlr->bdevs, nbdev, tailq);
4664 rc = spdk_bdev_register(&nbdev->disk);
4667 spdk_io_device_unregister(nbdev, NULL);
4670 TAILQ_REMOVE(&nbdev_ctrlr->bdevs, nbdev, tailq);
4674 nvme_bdev_free(nbdev);
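
Lines 4629-4674 fix the registration order: allocate, build the disk, register the io_device, link the namespace and the controller, and only then spdk_bdev_register(); on failure everything is undone in reverse. The unwind implied by lines 4664-4674, condensed (steps between the listed lines, such as unlinking the namespace, are elided here):

    rc = spdk_bdev_register(&nbdev->disk);
    if (rc != 0) {
            spdk_io_device_unregister(nbdev, NULL);                 /* line 4667 */
            TAILQ_REMOVE(&nbdev_ctrlr->bdevs, nbdev, tailq);        /* line 4670 */
            nvme_bdev_free(nbdev);                                  /* line 4674 */
            return rc;
    }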
4856 struct nvme_bdev *nbdev,
4872 struct nvme_bdev *nbdev,
4887 bdev_nvme_add_io_path_failed(struct nvme_bdev *nbdev, void *ctx, int status)
4895 bdev_nvme_add_io_path_done(struct nvme_bdev *nbdev, void *ctx, int status)
4903 nvme_bdev_for_each_channel(nbdev,
4911 nvme_bdev_add_ns(struct nvme_bdev *nbdev, struct nvme_ns *nvme_ns)
4922 pthread_mutex_lock(&nbdev->mutex);
4924 tmp_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);
4928 pthread_mutex_unlock(&nbdev->mutex);
4933 nbdev->ref++;
4934 TAILQ_INSERT_TAIL(&nbdev->nvme_ns_list, nvme_ns, tailq);
4935 nvme_ns->bdev = nbdev;
4937 pthread_mutex_unlock(&nbdev->mutex);
4940 nvme_bdev_for_each_channel(nbdev,
5005 bdev_nvme_delete_io_path_done(struct nvme_bdev *nbdev, void *ctx, int status)
5015 struct nvme_bdev *nbdev;
5025 nbdev = nvme_ns->bdev;
5026 if (nbdev != NULL) {
5027 pthread_mutex_lock(&nbdev->mutex);
5029 assert(nbdev->ref > 0);
5030 nbdev->ref--;
5031 if (nbdev->ref == 0) {
5032 pthread_mutex_unlock(&nbdev->mutex);
5034 spdk_bdev_unregister(&nbdev->disk, NULL, NULL);
5040 TAILQ_REMOVE(&nbdev->nvme_ns_list, nvme_ns, tailq);
5046 pthread_mutex_unlock(&nbdev->mutex);
5051 nvme_bdev_for_each_channel(nbdev,
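
Lines 4911-4940 and 5005-5051 together define the reference-count protocol: nvme_bdev_add_ns() takes one nbdev->ref per extra namespace path, and the delete path drops it, unregistering the whole bdev when the count reaches zero. Note the lock ordering: the mutex is released before spdk_bdev_unregister() is called. A condensed sketch of the drop side (helper name hypothetical):

    static void
    nvme_bdev_drop_ns(struct nvme_bdev *nbdev, struct nvme_ns *nvme_ns)
    {
            pthread_mutex_lock(&nbdev->mutex);

            assert(nbdev->ref > 0);
            nbdev->ref--;

            if (nbdev->ref == 0) {
                    /* Last path gone: unregister outside the lock. */
                    pthread_mutex_unlock(&nbdev->mutex);
                    spdk_bdev_unregister(&nbdev->disk, NULL, NULL);
                    return;
            }

            TAILQ_REMOVE(&nbdev->nvme_ns_list, nvme_ns, tailq);
            pthread_mutex_unlock(&nbdev->mutex);
            /* Each channel then drops its io_path for this namespace,
             * via nvme_bdev_for_each_channel (line 5051). */
    }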
5069 struct nvme_bdev *nbdev;
5097 nbdev = nvme_ns->bdev;
5098 assert(nbdev != NULL);
5099 if (nbdev->disk.blockcnt != num_sectors) {
5103 nbdev->disk.name,
5104 nbdev->disk.blockcnt,
5106 rc = spdk_bdev_notify_blockcnt_change(&nbdev->disk, num_sectors);
5110 nbdev->disk.name, rc);
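
Lines 5069-5110 push a namespace resize up to the bdev layer with spdk_bdev_notify_blockcnt_change(). Consumers observe it as a resize event on descriptors opened with spdk_bdev_open_ext(); a minimal consumer sketch (the bdev name "Nvme0n1" is illustrative):

    #include <inttypes.h>
    #include <stdio.h>
    #include "spdk/bdev.h"

    static void
    bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev, void *ctx)
    {
            if (type == SPDK_BDEV_EVENT_RESIZE) {
                    /* blockcnt already reflects the new namespace size here. */
                    printf("%s resized to %" PRIu64 " blocks\n",
                           spdk_bdev_get_name(bdev), spdk_bdev_get_num_blocks(bdev));
            }
    }

    struct spdk_bdev_desc *desc;
    int rc = spdk_bdev_open_ext("Nvme0n1", false, bdev_event_cb, NULL, &desc);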
5325 bdev_nvme_set_preferred_path_done(struct nvme_bdev *nbdev, void *_ctx, int status)
5342 struct nvme_bdev *nbdev,
5377 bdev_nvme_set_preferred_ns(struct nvme_bdev *nbdev, uint16_t cntlid)
5383 TAILQ_FOREACH(nvme_ns, &nbdev->nvme_ns_list, tailq) {
5393 TAILQ_REMOVE(&nbdev->nvme_ns_list, nvme_ns, tailq);
5394 TAILQ_INSERT_HEAD(&nbdev->nvme_ns_list, nvme_ns, tailq);
5413 struct nvme_bdev *nbdev;
5442 nbdev = SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk);
5444 pthread_mutex_lock(&nbdev->mutex);
5446 ctx->nvme_ns = bdev_nvme_set_preferred_ns(nbdev, cntlid);
5448 pthread_mutex_unlock(&nbdev->mutex);
5455 pthread_mutex_unlock(&nbdev->mutex);
5457 nvme_bdev_for_each_channel(nbdev,
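
Lines 5377-5394 implement set-preferred as a move-to-front: the head of nvme_ns_list is the preferred path, which is why readers such as lines 3475 and 4280 simply take TAILQ_FIRST(). The idiom, wrapped in a hypothetical helper:

    static struct nvme_ns *
    promote_ns(struct nvme_bdev *nbdev, struct nvme_ns *nvme_ns)
    {
            TAILQ_REMOVE(&nbdev->nvme_ns_list, nvme_ns, tailq);
            TAILQ_INSERT_HEAD(&nbdev->nvme_ns_list, nvme_ns, tailq);

            return TAILQ_FIRST(&nbdev->nvme_ns_list);       /* == nvme_ns */
    }

The change then has to be replayed into every channel's cached io_path list, hence the nvme_bdev_for_each_channel() call at line 5457.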
5478 bdev_nvme_set_multipath_policy_done(struct nvme_bdev *nbdev, void *_ctx, int status)
5495 struct nvme_bdev *nbdev,
5498 nbdev_ch->mp_policy = nbdev->mp_policy;
5499 nbdev_ch->mp_selector = nbdev->mp_selector;
5500 nbdev_ch->rr_min_io = nbdev->rr_min_io;
5513 struct nvme_bdev *nbdev;
5566 nbdev = SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk);
5568 pthread_mutex_lock(&nbdev->mutex);
5569 nbdev->mp_policy = policy;
5570 nbdev->mp_selector = selector;
5571 nbdev->rr_min_io = rr_min_io;
5572 pthread_mutex_unlock(&nbdev->mutex);
5574 nvme_bdev_for_each_channel(nbdev,
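
Lines 5495-5500 and 5566-5574 show the update-then-propagate pattern also used at channel creation (lines 948-950): the authoritative policy lives in nbdev, guarded by its mutex, and each channel keeps a lock-free cached copy that is refreshed by iterating the channels. The per-channel callback, reconstructed (name hypothetical, continue-helper assumed as in the iterator sketch above):

    static void
    _update_channel_policy(struct nvme_bdev_channel_iter *i, struct nvme_bdev *nbdev,
                           struct nvme_bdev_channel *nbdev_ch, void *ctx)
    {
            /* Refresh this channel's cached copy of the bdev-wide policy. */
            nbdev_ch->mp_policy = nbdev->mp_policy;
            nbdev_ch->mp_selector = nbdev->mp_selector;
            nbdev_ch->rr_min_io = nbdev->rr_min_io;

            nvme_bdev_for_each_channel_continue(i, 0);
    }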
9084 struct nvme_bdev *nbdev;
9091 nbdev = SPDK_CONTAINEROF(bdev, struct nvme_bdev, disk);
9092 nvme_ns = TAILQ_FIRST(&nbdev->nvme_ns_list);