Lines matching defs:nbdev_ch

210 static void _bdev_nvme_submit_request(struct nvme_bdev_channel *nbdev_ch,
239 static void bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch,
249 static void bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch,
523 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
526 iter->fn(iter, nbdev, nbdev_ch, iter->ctx);
784 bdev_nvme_clear_current_io_path(struct nvme_bdev_channel *nbdev_ch)
786 nbdev_ch->current_io_path = NULL;
787 nbdev_ch->rr_counter = 0;
791 _bdev_nvme_get_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_ns)
795 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
836 _bdev_nvme_add_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_ns *nvme_ns)
865 io_path->nbdev_ch = nbdev_ch;
866 STAILQ_INSERT_TAIL(&nbdev_ch->io_path_list, io_path, stailq);
868 bdev_nvme_clear_current_io_path(nbdev_ch);
874 bdev_nvme_clear_retry_io_path(struct nvme_bdev_channel *nbdev_ch,
879 TAILQ_FOREACH(bio, &nbdev_ch->retry_io_list, retry_link) {
887 _bdev_nvme_delete_io_path(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *io_path)
894 nbdev = spdk_io_channel_get_io_device(spdk_io_channel_from_ctx(nbdev_ch));
903 bdev_nvme_clear_current_io_path(nbdev_ch);
904 bdev_nvme_clear_retry_io_path(nbdev_ch, io_path);
906 STAILQ_REMOVE(&nbdev_ch->io_path_list, io_path, nvme_io_path, stailq);
907 io_path->nbdev_ch = NULL;
926 _bdev_nvme_delete_io_paths(struct nvme_bdev_channel *nbdev_ch)
930 STAILQ_FOREACH_SAFE(io_path, &nbdev_ch->io_path_list, stailq, tmp_io_path) {
931 _bdev_nvme_delete_io_path(nbdev_ch, io_path);
938 struct nvme_bdev_channel *nbdev_ch = ctx_buf;
943 STAILQ_INIT(&nbdev_ch->io_path_list);
944 TAILQ_INIT(&nbdev_ch->retry_io_list);
948 nbdev_ch->mp_policy = nbdev->mp_policy;
949 nbdev_ch->mp_selector = nbdev->mp_selector;
950 nbdev_ch->rr_min_io = nbdev->rr_min_io;
953 rc = _bdev_nvme_add_io_path(nbdev_ch, nvme_ns);
957 _bdev_nvme_delete_io_paths(nbdev_ch);
982 static void bdev_nvme_abort_retry_ios(struct nvme_bdev_channel *nbdev_ch);
987 struct nvme_bdev_channel *nbdev_ch = ctx_buf;
989 bdev_nvme_abort_retry_ios(nbdev_ch);
990 _bdev_nvme_delete_io_paths(nbdev_ch);
1131 nvme_io_path_get_next(struct nvme_bdev_channel *nbdev_ch, struct nvme_io_path *prev_path)
1142 return STAILQ_FIRST(&nbdev_ch->io_path_list);
1146 _bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
1150 start = nvme_io_path_get_next(nbdev_ch, nbdev_ch->current_io_path);
1157 nbdev_ch->current_io_path = io_path;
1169 io_path = nvme_io_path_get_next(nbdev_ch, io_path);
1172 if (nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE) {
1176 nbdev_ch->current_io_path = non_optimized;
1183 _bdev_nvme_find_io_path_min_qd(struct nvme_bdev_channel *nbdev_ch)
1190 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
1228 bdev_nvme_find_io_path(struct nvme_bdev_channel *nbdev_ch)
1230 if (spdk_likely(nbdev_ch->current_io_path != NULL)) {
1231 if (nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE) {
1232 return nbdev_ch->current_io_path;
1233 } else if (nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN) {
1234 if (++nbdev_ch->rr_counter < nbdev_ch->rr_min_io) {
1235 return nbdev_ch->current_io_path;
1237 nbdev_ch->rr_counter = 0;
1241 if (nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE ||
1242 nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN) {
1243 return _bdev_nvme_find_io_path(nbdev_ch);
1245 return _bdev_nvme_find_io_path_min_qd(nbdev_ch);
1261 any_io_path_may_become_available(struct nvme_bdev_channel *nbdev_ch)
1265 if (nbdev_ch->resetting) {
1269 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
1284 bdev_nvme_retry_io(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_io *bdev_io)
1290 _bdev_nvme_submit_request(nbdev_ch, bdev_io);
1292 ch = spdk_io_channel_from_ctx(nbdev_ch);
1300 struct nvme_bdev_channel *nbdev_ch = arg;
1306 TAILQ_FOREACH_SAFE(bio, &nbdev_ch->retry_io_list, retry_link, tmp_bio) {
1311 TAILQ_REMOVE(&nbdev_ch->retry_io_list, bio, retry_link);
1313 bdev_nvme_retry_io(nbdev_ch, spdk_bdev_io_from_ctx(bio));
1316 spdk_poller_unregister(&nbdev_ch->retry_io_poller);
1318 bio = TAILQ_FIRST(&nbdev_ch->retry_io_list);
1322 nbdev_ch->retry_io_poller = SPDK_POLLER_REGISTER(bdev_nvme_retry_ios, nbdev_ch,
1330 bdev_nvme_queue_retry_io(struct nvme_bdev_channel *nbdev_ch,
1337 TAILQ_FOREACH_REVERSE(tmp_bio, &nbdev_ch->retry_io_list, retry_io_head, retry_link) {
1339 TAILQ_INSERT_AFTER(&nbdev_ch->retry_io_list, tmp_bio, bio,
1346 TAILQ_INSERT_HEAD(&nbdev_ch->retry_io_list, bio, retry_link);
1348 spdk_poller_unregister(&nbdev_ch->retry_io_poller);
1350 nbdev_ch->retry_io_poller = SPDK_POLLER_REGISTER(bdev_nvme_retry_ios, nbdev_ch,
1355 bdev_nvme_abort_retry_ios(struct nvme_bdev_channel *nbdev_ch)
1359 TAILQ_FOREACH_SAFE(bio, &nbdev_ch->retry_io_list, retry_link, tmp_bio) {
1360 TAILQ_REMOVE(&nbdev_ch->retry_io_list, bio, retry_link);
1364 spdk_poller_unregister(&nbdev_ch->retry_io_poller);
1368 bdev_nvme_abort_retry_io(struct nvme_bdev_channel *nbdev_ch,
1373 TAILQ_FOREACH(bio, &nbdev_ch->retry_io_list, retry_link) {
1375 TAILQ_REMOVE(&nbdev_ch->retry_io_list, bio, retry_link);
1514 struct nvme_bdev_channel *nbdev_ch,
1525 bdev_nvme_clear_current_io_path(nbdev_ch);
1532 if (!any_io_path_may_become_available(nbdev_ch)) {
1556 struct nvme_bdev_channel *nbdev_ch;
1582 nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
1584 if (bdev_nvme_check_retry_io(bio, cpl, nbdev_ch, &delay_ms)) {
1585 bdev_nvme_queue_retry_io(nbdev_ch, bio, delay_ms);
1600 struct nvme_bdev_channel *nbdev_ch;
1614 nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
1616 bdev_nvme_clear_current_io_path(nbdev_ch);
1619 if (any_io_path_may_become_available(nbdev_ch)) {
1620 bdev_nvme_queue_retry_io(nbdev_ch, bio, 1000ULL);
1686 if (io_path->nbdev_ch == NULL) {
1689 bdev_nvme_clear_current_io_path(io_path->nbdev_ch);
2995 struct nvme_bdev_channel *nbdev_ch, void *ctx)
2997 bdev_nvme_abort_retry_ios(nbdev_ch);
2998 nbdev_ch->resetting = false;
3111 struct nvme_bdev_channel *nbdev_ch;
3115 nbdev_ch = spdk_io_channel_get_ctx(spdk_bdev_io_get_io_channel(bdev_io));
3123 io_path = STAILQ_FIRST(&nbdev_ch->io_path_list);
3138 struct nvme_bdev_channel *nbdev_ch, void *ctx)
3140 nbdev_ch->resetting = true;
3266 _bdev_nvme_submit_request(struct nvme_bdev_channel *nbdev_ch, struct spdk_bdev_io *bdev_io)
3368 bdev_nvme_admin_passthru(nbdev_ch,
3401 bdev_nvme_abort(nbdev_ch,
3425 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
3438 nbdev_io->io_path = bdev_nvme_find_io_path(nbdev_ch);
3450 _bdev_nvme_submit_request(nbdev_ch, bdev_io);
4163 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
4168 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
4857 struct nvme_bdev_channel *nbdev_ch, void *ctx)
4862 rc = _bdev_nvme_add_io_path(nbdev_ch, nvme_ns);
4873 struct nvme_bdev_channel *nbdev_ch, void *ctx)
4878 io_path = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns);
4880 _bdev_nvme_delete_io_path(nbdev_ch, io_path);
5343 struct nvme_bdev_channel *nbdev_ch, void *_ctx)
5349 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
5358 STAILQ_REMOVE_AFTER(&nbdev_ch->io_path_list, prev, stailq);
5359 STAILQ_INSERT_HEAD(&nbdev_ch->io_path_list, io_path, stailq);
5362 /* We can set io_path to nbdev_ch->current_io_path directly here.
5364 * just clear nbdev_ch->current_io_path and let find_io_path()
5368 * already at the head, clear nbdev_ch->current_io_path.
5370 bdev_nvme_clear_current_io_path(nbdev_ch);
5496 struct nvme_bdev_channel *nbdev_ch, void *ctx)
5498 nbdev_ch->mp_policy = nbdev->mp_policy;
5499 nbdev_ch->mp_selector = nbdev->mp_selector;
5500 nbdev_ch->rr_min_io = nbdev->rr_min_io;
5501 bdev_nvme_clear_current_io_path(nbdev_ch);
8654 bdev_nvme_admin_passthru(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
8663 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
8784 bdev_nvme_abort(struct nvme_bdev_channel *nbdev_ch, struct nvme_bdev_io *bio,
8790 rc = bdev_nvme_abort_retry_io(nbdev_ch, bio_to_abort);
8803 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
9101 const struct nvme_bdev_channel *nbdev_ch;
9108 nbdev_ch = io_path->nbdev_ch;
9109 if (nbdev_ch == NULL) {
9111 } else if (nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE) {
9114 STAILQ_FOREACH(optimized_io_path, &nbdev_ch->io_path_list, stailq) {
9124 if (nbdev_ch->current_io_path) {
9125 current = (io_path == nbdev_ch->current_io_path);
9133 STAILQ_FOREACH(first_path, &nbdev_ch->io_path_list, stailq) {
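
The matches around source lines 1131-1245 (nvme_io_path_get_next, _bdev_nvme_find_io_path, _bdev_nvme_find_io_path_min_qd, bdev_nvme_find_io_path) form the per-channel path-selection policy: active-passive reuses the cached current_io_path, the active-active round-robin selector keeps a path for rr_min_io submissions before rotating, and the queue-depth selector picks the least-loaded available path. The sketch below is a minimal, self-contained illustration of that dispatch, assuming invented names (mp_channel, mp_path, mp_find_path and helpers); it is not the SPDK implementation, only the pattern the listed functions follow.

/* Hypothetical sketch of the multipath selection dispatch; all names here
 * (mp_channel, mp_path, mp_find_path, ...) are invented for illustration
 * and this is not the SPDK code itself. */
#include <sys/queue.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

enum mp_policy   { MP_ACTIVE_PASSIVE, MP_ACTIVE_ACTIVE };
enum mp_selector { MP_SEL_ROUND_ROBIN, MP_SEL_QUEUE_DEPTH };

struct mp_path {
    bool available;               /* controller up, namespace usable */
    bool optimized;               /* e.g. ANA optimized state */
    uint32_t qd;                  /* outstanding I/Os on this path */
    STAILQ_ENTRY(mp_path) link;
};

struct mp_channel {
    STAILQ_HEAD(, mp_path) paths;
    struct mp_path *current;      /* cached result of the last scan */
    enum mp_policy policy;
    enum mp_selector selector;
    uint32_t rr_counter;
    uint32_t rr_min_io;           /* I/Os per path before rotating */
};

/* Next path after prev, wrapping to the head of the list. */
static struct mp_path *
mp_next(struct mp_channel *ch, struct mp_path *prev)
{
    if (prev != NULL && STAILQ_NEXT(prev, link) != NULL) {
        return STAILQ_NEXT(prev, link);
    }
    return STAILQ_FIRST(&ch->paths);
}

/* One full rotation starting after the cached path, preferring optimized
 * paths and falling back to any available one; the chosen path is cached. */
static struct mp_path *
mp_scan_paths(struct mp_channel *ch)
{
    struct mp_path *start, *p, *non_optimized = NULL;

    start = mp_next(ch, ch->current);
    for (p = start; p != NULL;) {
        if (p->available) {
            if (p->optimized) {
                ch->current = p;
                return p;
            }
            if (non_optimized == NULL) {
                non_optimized = p;
            }
        }
        p = mp_next(ch, p);
        if (p == start) {
            break;                /* wrapped around the whole list */
        }
    }
    ch->current = non_optimized;
    return non_optimized;
}

/* Least outstanding I/Os wins; no caching, every call rescans. */
static struct mp_path *
mp_scan_min_qd(struct mp_channel *ch)
{
    struct mp_path *p, *best = NULL;

    STAILQ_FOREACH(p, &ch->paths, link) {
        if (p->available && (best == NULL || p->qd < best->qd)) {
            best = p;
        }
    }
    return best;
}

/* Policy dispatch: reuse the cached path when allowed, otherwise rescan. */
static struct mp_path *
mp_find_path(struct mp_channel *ch)
{
    if (ch->current != NULL) {
        if (ch->policy == MP_ACTIVE_PASSIVE) {
            return ch->current;
        }
        if (ch->selector == MP_SEL_ROUND_ROBIN) {
            if (++ch->rr_counter < ch->rr_min_io) {
                return ch->current;
            }
            ch->rr_counter = 0;   /* interval reached: rotate */
        }
    }

    if (ch->policy == MP_ACTIVE_PASSIVE || ch->selector == MP_SEL_ROUND_ROBIN) {
        return mp_scan_paths(ch);
    }
    return mp_scan_min_qd(ch);
}

In this sketch the queue-depth branch skips the cache on purpose: it re-evaluates load on every call, while active-passive and round-robin amortize the scan by caching, mirroring how the listed bdev_nvme_find_io_path consults current_io_path only for those two modes.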
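
The matches around source lines 1284-1375 (bdev_nvme_retry_io, bdev_nvme_retry_ios, bdev_nvme_queue_retry_io, bdev_nvme_abort_retry_ios, bdev_nvme_abort_retry_io) are the channel's retry machinery: a failed I/O is queued on retry_io_list ordered by its retry deadline, which is why bdev_nvme_queue_retry_io scans with TAILQ_FOREACH_REVERSE from the tail, and a poller resubmits whatever has come due. Below is a standalone sketch of that ordering scheme with invented names (retry_io, retry_queue_insert, retry_queue_expire); it deliberately omits the SPDK-specific parts visible in the listing, such as unregistering and re-registering the spdk_poller for the new head entry.

/* Hypothetical sketch of a delay-sorted retry queue; retry_io,
 * retry_queue_insert and retry_queue_expire are invented names,
 * not the SPDK implementation. */
#include <sys/queue.h>
#include <stdint.h>
#include <stddef.h>

struct retry_io {
    uint64_t expires;             /* tick at which the retry is due */
    TAILQ_ENTRY(retry_io) link;
};

TAILQ_HEAD(retry_head, retry_io);

/* Keep the queue ordered by deadline. New entries usually expire last,
 * so scanning from the tail finds the insertion point quickly. */
static void
retry_queue_insert(struct retry_head *head, struct retry_io *io,
                   uint64_t now, uint64_t delay_ticks)
{
    struct retry_io *it;

    io->expires = now + delay_ticks;

    TAILQ_FOREACH_REVERSE(it, head, retry_head, link) {
        if (it->expires <= io->expires) {
            TAILQ_INSERT_AFTER(head, it, io, link);
            return;
        }
    }
    TAILQ_INSERT_HEAD(head, io, link);
}

/* Poller body: resubmit everything that has come due. Because the queue
 * is sorted, processing stops at the first entry still in the future;
 * the caller would then re-arm its timer for that entry's deadline. */
static void
retry_queue_expire(struct retry_head *head, uint64_t now,
                   void (*resubmit)(struct retry_io *io))
{
    struct retry_io *it;

    while ((it = TAILQ_FIRST(head)) != NULL && it->expires <= now) {
        TAILQ_REMOVE(head, it, link);
        resubmit(it);
    }
}

Aborting a queued retry (bdev_nvme_abort_retry_io in the listing) is then just a linear search of the same list followed by TAILQ_REMOVE, and channel teardown (bdev_nvme_abort_retry_ios) drains the list the same way before the poller is unregistered.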