Lines Matching defs:ctrlr

58 int nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr);
78 nvmf_ctrlr_stop_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
80 if (!ctrlr) {
85 if (ctrlr->keep_alive_poller == NULL) {
90 spdk_poller_unregister(&ctrlr->keep_alive_poller);
94 nvmf_ctrlr_stop_association_timer(struct spdk_nvmf_ctrlr *ctrlr)
96 if (!ctrlr) {
102 if (ctrlr->association_timer == NULL) {
107 spdk_poller_unregister(&ctrlr->association_timer);
114 SPDK_DEBUGLOG(nvmf, "ctrlr disconnect qpairs completed successfully\n");
116 SPDK_ERRLOG("Failed to disconnect ctrlr qpairs\n");
124 struct spdk_nvmf_ctrlr *ctrlr;
129 ctrlr = spdk_io_channel_iter_get_ctx(i);
134 if (qpair->ctrlr == ctrlr && (include_admin || !nvmf_qpair_is_admin_queue(qpair))) {
167 struct spdk_nvmf_ctrlr *ctrlr = ctx;
169 if (ctrlr->in_destruct) {
170 nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
174 SPDK_DEBUGLOG(nvmf, "Polling ctrlr keep alive timeout\n");
177 keep_alive_timeout_tick = ctrlr->last_keep_alive_tick +
178 ctrlr->feat.keep_alive_timer.bits.kato * spdk_get_ticks_hz() / UINT64_C(1000);
181 ctrlr->hostnqn, ctrlr->subsys->subnqn);
183 if (ctrlr->vcprop.csts.bits.cfs == 0) {
184 nvmf_ctrlr_set_fatal_status(ctrlr);
188 * destroy ctrlr, break the host to controller association
189 * disconnect qpairs with qpair->ctrlr == ctrlr
191 spdk_for_each_channel(ctrlr->subsys->tgt,
193 ctrlr,
203 nvmf_ctrlr_start_keep_alive_timer(struct spdk_nvmf_ctrlr *ctrlr)
205 if (!ctrlr) {
211 if (ctrlr->feat.keep_alive_timer.bits.kato != 0) {
213 ctrlr->last_keep_alive_tick = spdk_get_ticks();
216 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
217 ctrlr->feat.keep_alive_timer.bits.kato * 1000);
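Note on the arithmetic above: the KATO feature value is in milliseconds, while SPDK poller periods are in microseconds, hence the * 1000. A minimal sketch of the same registration pattern, with hypothetical my_ctx/my_poll names standing in for the controller and its poll function:

    #include "spdk/env.h"
    #include "spdk/thread.h"

    struct my_ctx {
            struct spdk_poller *poller;
            uint64_t last_tick;
    };

    static int
    my_poll(void *arg)
    {
            struct my_ctx *ctx = arg;
            /* compare spdk_get_ticks() against a deadline of
             * last_tick + kato_ms * spdk_get_ticks_hz() / 1000,
             * as the keep-alive poll function above does */
            return SPDK_POLLER_BUSY;
    }

    static void
    my_start(struct my_ctx *ctx, uint32_t kato_ms)
    {
            ctx->last_tick = spdk_get_ticks();
            /* poller period is in microseconds */
            ctx->poller = SPDK_POLLER_REGISTER(my_poll, ctx, kato_ms * 1000);
    }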
222 nvmf_qpair_set_ctrlr(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_ctrlr *ctrlr)
224 if (qpair->ctrlr != NULL) {
226 assert(qpair->ctrlr == ctrlr);
230 qpair->ctrlr = ctrlr;
232 spdk_nvmf_subsystem_get_nqn(ctrlr->subsys));
242 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
250 if (nvmf_subsystem_host_auth_required(ctrlr->subsys, ctrlr->hostnqn)) {
266 SPDK_DEBUGLOG(nvmf, "connect capsule response: cntlid = 0x%04x\n", ctrlr->cntlid);
270 rsp->status_code_specific.success.cntlid = ctrlr->cntlid;
276 struct spdk_nvmf_ctrlr *ctrlr,
281 if (!ctrlr->admin_qpair) {
286 qpair->ctrlr = NULL;
291 assert(ctrlr->admin_qpair->group->thread == spdk_get_thread());
293 if (spdk_bit_array_get(ctrlr->qpair_mask, qpair->qid)) {
296 qpair->qid, ctrlr->cntlid);
300 qpair->ctrlr = NULL;
304 ctrlr->cntlid, qpair->qid, DUPLICATE_QID_RETRY_US);
306 /* Set qpair->ctrlr here so that we'll have it when the poller expires. */
307 nvmf_qpair_set_ctrlr(qpair, ctrlr);
316 SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_qpair, qpair, qpair->qid, ctrlr->subsys->subnqn,
317 ctrlr->hostnqn);
318 nvmf_qpair_set_ctrlr(qpair, ctrlr);
319 spdk_bit_array_set(ctrlr->qpair_mask, qpair->qid);
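nvmf_ctrlr_add_qpair tracks live QIDs in the qpair_mask bit array: a set bit at line 293 means the QID is already taken and the connect is retried. A condensed sketch of that guard, using the spdk_bit_array API from spdk/bit_array.h:

    #include <errno.h>
    #include "spdk/bit_array.h"

    /* Sketch: claim a QID exactly once per controller. */
    static int
    claim_qid(struct spdk_bit_array *qpair_mask, uint16_t qid)
    {
            if (spdk_bit_array_get(qpair_mask, qid)) {
                    return -EEXIST; /* duplicate QID; caller retries */
            }
            return spdk_bit_array_set(qpair_mask, qid);
    }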
330 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
334 nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
343 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
345 ctrlr->admin_qpair = qpair;
346 ctrlr->association_timeout = qpair->transport->opts.association_timeout;
347 nvmf_ctrlr_start_keep_alive_timer(ctrlr);
348 nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
357 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
359 if (nvmf_subsystem_add_ctrlr(ctrlr->subsys, ctrlr)) {
361 spdk_bit_array_free(&ctrlr->qpair_mask);
362 free(ctrlr);
363 qpair->ctrlr = NULL;
369 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_admin_qpair, req);
421 nvmf_ctrlr_init_visible_ns(struct spdk_nvmf_ctrlr *ctrlr)
423 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
428 if (ns->always_visible || nvmf_ns_find_host(ns, ctrlr->hostnqn) != NULL) {
429 nvmf_ctrlr_ns_set_visible(ctrlr, ns->nsid, true);
440 struct spdk_nvmf_ctrlr *ctrlr;
445 ctrlr = calloc(1, sizeof(*ctrlr));
446 if (ctrlr == NULL) {
452 ctrlr->dynamic_ctrlr = true;
454 ctrlr->cntlid = connect_data->cntlid;
457 SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_create, ctrlr, subsystem->subnqn,
460 STAILQ_INIT(&ctrlr->async_events);
461 TAILQ_INIT(&ctrlr->log_head);
462 ctrlr->subsys = subsystem;
463 ctrlr->thread = req->qpair->group->thread;
464 ctrlr->disconnect_in_progress = false;
466 ctrlr->qpair_mask = spdk_bit_array_create(transport->opts.max_qpairs_per_ctrlr);
467 if (!ctrlr->qpair_mask) {
472 nvmf_ctrlr_cdata_init(transport, subsystem, &ctrlr->cdata);
478 if (ctrlr->cdata.kas) {
479 ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(connect_cmd->kato,
484 ctrlr->feat.async_event_configuration.bits.ns_attr_notice = 1;
485 if (ctrlr->subsys->flags.ana_reporting) {
486 ctrlr->feat.async_event_configuration.bits.ana_change_notice = 1;
488 ctrlr->feat.volatile_write_cache.bits.wce = 1;
490 ctrlr->feat.interrupt_vector_configuration.bits.cd = 1;
492 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
516 if (ctrlr->feat.keep_alive_timer.bits.kato == 0) {
517 ctrlr->feat.keep_alive_timer.bits.kato = NVMF_DISC_KATO_IN_MS;
518 ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 0;
520 ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice = 1;
525 ctrlr->feat.number_of_queues.bits.ncqr = transport->opts.max_qpairs_per_ctrlr - 1 -
527 ctrlr->feat.number_of_queues.bits.nsqr = transport->opts.max_qpairs_per_ctrlr - 1 -
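The continuations of lines 525/527 (elided by the match filter) presumably subtract once more, since NCQR/NSQR are 0's-based values. A worked example, assuming max_qpairs_per_ctrlr == 128:

    /* 128 qpairs total, one reserved for the admin queue:
     *   io_qpairs = 128 - 1 = 127
     * NCQR/NSQR are 0's-based, so 127 queues are reported as 126. */
    uint16_t io_qpairs = transport->opts.max_qpairs_per_ctrlr - 1;
    ctrlr->feat.number_of_queues.bits.ncqr = io_qpairs - 1;
    ctrlr->feat.number_of_queues.bits.nsqr = io_qpairs - 1;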
530 spdk_uuid_copy(&ctrlr->hostid, (struct spdk_uuid *)connect_data->hostid);
531 memcpy(ctrlr->hostnqn, connect_data->hostnqn, SPDK_NVMF_NQN_MAX_LEN);
533 ctrlr->visible_ns = spdk_bit_array_create(subsystem->max_nsid);
534 if (!ctrlr->visible_ns) {
538 nvmf_ctrlr_init_visible_ns(ctrlr);
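visible_ns is a second bit array, one bit per namespace, consulted by helpers such as nvmf_ctrlr_ns_is_visible (line 3223). A sketch of the likely lookup; the nsid - 1 indexing is an assumption about the helper's internals:

    /* Sketch: per-controller namespace visibility check. */
    static inline bool
    ctrlr_ns_visible(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
    {
            return spdk_bit_array_get(ctrlr->visible_ns, nsid - 1);
    }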
540 ctrlr->vcprop.cap.raw = 0;
541 ctrlr->vcprop.cap.bits.cqr = 1; /* NVMe-oF specification required */
542 ctrlr->vcprop.cap.bits.mqes = transport->opts.max_queue_depth -
544 ctrlr->vcprop.cap.bits.ams = 0; /* optional arb mechanisms */
546 ctrlr->vcprop.cap.bits.to = NVMF_CTRLR_RESET_SHN_TIMEOUT_IN_MS / 500;
547 ctrlr->vcprop.cap.bits.dstrd = 0; /* fixed to 0 for NVMe-oF */
550 ctrlr->vcprop.cap.bits.css =
553 ctrlr->vcprop.cap.bits.css = SPDK_NVME_CAP_CSS_NVM; /* NVM command set */
556 ctrlr->vcprop.cap.bits.mpsmin = 0; /* 2 ^ (12 + mpsmin) == 4k */
557 ctrlr->vcprop.cap.bits.mpsmax = 0; /* 2 ^ (12 + mpsmax) == 4k */
560 ctrlr->vcprop.vs.bits.mjr = 1;
561 ctrlr->vcprop.vs.bits.mnr = 3;
562 ctrlr->vcprop.vs.bits.ter = 0;
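For reference on the raw encodings above: CAP.TO counts 500 ms units, CAP.MPSMIN/MPSMAX encode page sizes as 2^(12+n), and VS with mjr=1/mnr=3/ter=0 advertises NVMe 1.3.0. A short decoding sketch using the spdk/nvme_spec.h register layouts:

    union spdk_nvme_cap_register cap = ctrlr->vcprop.cap;
    uint64_t to_ms        = (uint64_t)cap.bits.to * 500;  /* timeout in ms */
    uint32_t min_page_len = 1u << (12 + cap.bits.mpsmin); /* 4096 when mpsmin == 0 */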
564 ctrlr->vcprop.cc.raw = 0;
565 ctrlr->vcprop.cc.bits.en = 0; /* Init controller disabled */
567 ctrlr->vcprop.cc.bits.css =
571 ctrlr->vcprop.csts.raw = 0;
572 ctrlr->vcprop.csts.bits.rdy = 0; /* Init controller as not ready */
574 SPDK_DEBUGLOG(nvmf, "cap 0x%" PRIx64 "\n", ctrlr->vcprop.cap.raw);
575 SPDK_DEBUGLOG(nvmf, "vs 0x%x\n", ctrlr->vcprop.vs.raw);
576 SPDK_DEBUGLOG(nvmf, "cc 0x%x\n", ctrlr->vcprop.cc.raw);
577 SPDK_DEBUGLOG(nvmf, "csts 0x%x\n", ctrlr->vcprop.csts.raw);
579 ctrlr->dif_insert_or_strip = transport->opts.dif_insert_or_strip;
581 if (ctrlr->subsys->subtype == SPDK_NVMF_SUBTYPE_NVME) {
587 ctrlr->listener = nvmf_subsystem_find_listener(ctrlr->subsys, &listen_trid);
588 if (!ctrlr->listener) {
594 nvmf_qpair_set_ctrlr(req->qpair, ctrlr);
597 return ctrlr;
599 spdk_bit_array_free(&ctrlr->visible_ns);
601 spdk_bit_array_free(&ctrlr->qpair_mask);
603 free(ctrlr);
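The error path above (lines 599-603) unwinds in reverse order of acquisition. A generic sketch of that goto-unwind pattern, with illustrative label names rather than the file's actual ones:

    ctrlr->qpair_mask = spdk_bit_array_create(num_qpairs);
    if (!ctrlr->qpair_mask) {
            goto err_free_ctrlr;
    }
    ctrlr->visible_ns = spdk_bit_array_create(max_nsid);
    if (!ctrlr->visible_ns) {
            goto err_free_mask;
    }
    return ctrlr;
    err_free_mask:
            spdk_bit_array_free(&ctrlr->qpair_mask);
    err_free_ctrlr:
            free(ctrlr);
            return NULL;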
610 struct spdk_nvmf_ctrlr *ctrlr = ctx;
614 SPDK_DTRACE_PROBE3_TICKS(nvmf_ctrlr_destruct, ctrlr, ctrlr->subsys->subnqn,
615 spdk_thread_get_id(ctrlr->thread));
617 assert(spdk_get_thread() == ctrlr->thread);
618 assert(ctrlr->in_destruct);
620 SPDK_DEBUGLOG(nvmf, "Destroy ctrlr 0x%hx\n", ctrlr->cntlid);
621 if (ctrlr->disconnect_in_progress) {
622 SPDK_ERRLOG("freeing ctrlr with disconnect in progress\n");
623 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
627 nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
628 nvmf_ctrlr_stop_association_timer(ctrlr);
629 spdk_bit_array_free(&ctrlr->qpair_mask);
631 TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
632 TAILQ_REMOVE(&ctrlr->log_head, log, link);
635 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
636 STAILQ_REMOVE(&ctrlr->async_events, event, spdk_nvmf_async_event_completion, link);
639 spdk_bit_array_free(&ctrlr->visible_ns);
640 free(ctrlr);
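Teardown walks both lists with the _SAFE iterators, which permit unlinking the current element mid-walk. The per-entry free() is elided by the match filter; a sketch of the full loop, assuming each entry is heap-allocated:

    TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
            TAILQ_REMOVE(&ctrlr->log_head, log, link);
            free(log); /* assumption: entries are malloc'd */
    }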
644 nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
646 nvmf_subsystem_remove_ctrlr(ctrlr->subsys, ctrlr);
648 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_destruct, ctrlr);
657 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
658 struct spdk_nvmf_qpair *admin_qpair = ctrlr->admin_qpair;
663 SPDK_DTRACE_PROBE4_TICKS(nvmf_ctrlr_add_io_qpair, ctrlr, req->qpair, req->qpair->qid,
664 spdk_thread_get_id(ctrlr->thread));
666 /* Unit test will check qpair->ctrlr after calling spdk_nvmf_ctrlr_connect.
669 qpair->ctrlr = NULL;
672 if (ctrlr->in_destruct) {
673 SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
678 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
684 if (!ctrlr->vcprop.cc.bits.en) {
685 SPDK_ERRLOG("Got I/O connect before ctrlr was enabled\n");
690 if (1u << ctrlr->vcprop.cc.bits.iosqes != sizeof(struct spdk_nvme_cmd)) {
692 ctrlr->vcprop.cc.bits.iosqes);
697 if (1u << ctrlr->vcprop.cc.bits.iocqes != sizeof(struct spdk_nvme_cpl)) {
699 ctrlr->vcprop.cc.bits.iocqes);
720 /* check if we would exceed ctrlr connection limit */
721 if (qpair->qid >= spdk_bit_array_capacity(ctrlr->qpair_mask)) {
723 qpair->qid, spdk_bit_array_capacity(ctrlr->qpair_mask) - 1);
729 nvmf_ctrlr_add_qpair(qpair, ctrlr, req);
741 struct spdk_nvmf_ctrlr *ctrlr;
762 ctrlr = nvmf_subsystem_get_ctrlr(subsystem, data->cntlid);
763 if (ctrlr == NULL) {
771 if (ctrlr->in_destruct) {
772 SPDK_ERRLOG("Got I/O connect while ctrlr was being destroyed.\n");
788 if (listener != ctrlr->listener) {
796 admin_qpair = ctrlr->admin_qpair;
814 qpair->ctrlr = ctrlr;
852 struct spdk_nvmf_ctrlr *ctrlr;
935 /* Establish a new ctrlr */
936 ctrlr = nvmf_ctrlr_create(subsystem, req, cmd, data);
937 if (!ctrlr) {
958 assert(req->qpair->ctrlr == NULL);
1072 struct spdk_nvmf_ctrlr *ctrlr = ctx;
1075 nvmf_ctrlr_stop_association_timer(ctrlr);
1077 if (ctrlr->in_destruct) {
1081 ctrlr->subsys->subnqn);
1083 if (ctrlr->admin_qpair) {
1084 rc = spdk_nvmf_qpair_disconnect(ctrlr->admin_qpair);
1086 SPDK_ERRLOG("Failed to disconnect ctrlr admin qpair\n");
1097 struct spdk_nvmf_ctrlr *ctrlr = ctx;
1101 if (ctrlr->cc_timer) {
1102 spdk_poller_unregister(&ctrlr->cc_timer);
1105 count = spdk_bit_array_count_set(ctrlr->qpair_mask);
1106 SPDK_DEBUGLOG(nvmf, "ctrlr %p active queue count %u\n", ctrlr, count);
1109 if (now < ctrlr->cc_timeout_tsc) {
1111 ctrlr->cc_timer = SPDK_POLLER_REGISTER(_nvmf_ctrlr_cc_reset_shn_done, ctrlr, 100 * 1000);
1115 SPDK_WARNLOG("IO timeout, ctrlr %p is in fatal status\n", ctrlr);
1116 nvmf_ctrlr_set_fatal_status(ctrlr);
1120 spdk_poller_unregister(&ctrlr->cc_timeout_timer);
1122 if (ctrlr->disconnect_is_shn) {
1123 ctrlr->vcprop.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
1124 ctrlr->disconnect_is_shn = false;
1127 ctrlr->vcprop.cc.raw = 0;
1128 ctrlr->vcprop.csts.raw = 0;
1133 if (ctrlr->association_timer) {
1135 nvmf_ctrlr_stop_association_timer(ctrlr);
1137 if (ctrlr->association_timeout) {
1138 ctrlr->association_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_association_remove, ctrlr,
1139 ctrlr->association_timeout * 1000);
1141 ctrlr->disconnect_in_progress = false;
1148 struct spdk_nvmf_ctrlr *ctrlr = spdk_io_channel_iter_get_ctx(i);
1151 SPDK_ERRLOG("Failed to disconnect ctrlr I/O qpairs\n");
1155 _nvmf_ctrlr_cc_reset_shn_done((void *)ctrlr);
1170 struct spdk_nvmf_ctrlr *ctrlr = ctx;
1175 spdk_poller_unregister(&ctrlr->cc_timeout_timer);
1176 SPDK_DEBUGLOG(nvmf, "Ctrlr %p reset or shutdown timeout\n", ctrlr);
1178 if (!ctrlr->admin_qpair) {
1179 SPDK_NOTICELOG("Ctrlr %p admin qpair disconnected\n", ctrlr);
1183 group = ctrlr->admin_qpair->group;
1186 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
1187 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
1191 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[ns->opts.nsid - 1];
1192 SPDK_NOTICELOG("Ctrlr %p resetting NSID %u\n", ctrlr, ns->opts.nsid);
1200 spdk_nvmf_ctrlr_get_regs(struct spdk_nvmf_ctrlr *ctrlr)
1202 return &ctrlr->vcprop;
1206 nvmf_ctrlr_set_fatal_status(struct spdk_nvmf_ctrlr *ctrlr)
1208 ctrlr->vcprop.csts.bits.cfs = 1;
1212 nvmf_prop_get_cap(struct spdk_nvmf_ctrlr *ctrlr)
1214 return ctrlr->vcprop.cap.raw;
1218 nvmf_prop_get_vs(struct spdk_nvmf_ctrlr *ctrlr)
1220 return ctrlr->vcprop.vs.raw;
1224 nvmf_prop_get_cc(struct spdk_nvmf_ctrlr *ctrlr)
1226 return ctrlr->vcprop.cc.raw;
1230 nvmf_prop_set_cc(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1237 SPDK_DEBUGLOG(nvmf, "cur CC: 0x%08x\n", ctrlr->vcprop.cc.raw);
1244 diff.raw = cc.raw ^ ctrlr->vcprop.cc.raw;
1249 nvmf_ctrlr_stop_association_timer(ctrlr);
1251 ctrlr->vcprop.cc.bits.en = 1;
1252 ctrlr->vcprop.csts.bits.rdy = 1;
1255 if (ctrlr->disconnect_in_progress) {
1260 ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
1264 ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;
1266 ctrlr->vcprop.cc.bits.en = 0;
1267 ctrlr->disconnect_in_progress = true;
1268 ctrlr->disconnect_is_shn = false;
1269 spdk_for_each_channel(ctrlr->subsys->tgt,
1271 ctrlr,
1282 if (ctrlr->disconnect_in_progress) {
1287 ctrlr->cc_timeout_timer = SPDK_POLLER_REGISTER(nvmf_ctrlr_cc_timeout, ctrlr,
1291 ctrlr->cc_timeout_tsc = spdk_get_ticks() + cc_timeout_ms * spdk_get_ticks_hz() / (uint64_t)1000;
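cc_timeout_tsc converts a millisecond timeout into TSC ticks: ms * spdk_get_ticks_hz() / 1000. A worked example, assuming a 2.4 GHz tick rate and a hypothetical timeout_ms of 500:

    /* 500 * 2400000000 / 1000 = 1200000000 ticks from now */
    uint64_t deadline = spdk_get_ticks() +
                        timeout_ms * spdk_get_ticks_hz() / UINT64_C(1000);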
1293 ctrlr->vcprop.cc.bits.shn = cc.bits.shn;
1294 ctrlr->disconnect_in_progress = true;
1295 ctrlr->disconnect_is_shn = true;
1296 spdk_for_each_channel(ctrlr->subsys->tgt,
1298 ctrlr,
1303 nvmf_ctrlr_stop_keep_alive_timer(ctrlr);
1305 ctrlr->vcprop.cc.bits.shn = 0;
1317 ctrlr->vcprop.cc.bits.iosqes = cc.bits.iosqes;
1324 ctrlr->vcprop.cc.bits.iocqes = cc.bits.iocqes;
1357 nvmf_prop_get_csts(struct spdk_nvmf_ctrlr *ctrlr)
1359 return ctrlr->vcprop.csts.raw;
1363 nvmf_prop_get_aqa(struct spdk_nvmf_ctrlr *ctrlr)
1365 return ctrlr->vcprop.aqa.raw;
1369 nvmf_prop_set_aqa(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1385 ctrlr->vcprop.aqa.raw = value;
1391 nvmf_prop_get_asq(struct spdk_nvmf_ctrlr *ctrlr)
1393 return ctrlr->vcprop.asq;
1397 nvmf_prop_set_asq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1399 ctrlr->vcprop.asq = (ctrlr->vcprop.asq & (0xFFFFFFFFULL << 32ULL)) | value;
1405 nvmf_prop_set_asq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1407 ctrlr->vcprop.asq = (ctrlr->vcprop.asq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);
1413 nvmf_prop_get_acq(struct spdk_nvmf_ctrlr *ctrlr)
1415 return ctrlr->vcprop.acq;
1419 nvmf_prop_set_acq_lower(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1421 ctrlr->vcprop.acq = (ctrlr->vcprop.acq & (0xFFFFFFFFULL << 32ULL)) | value;
1427 nvmf_prop_set_acq_upper(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value)
1429 ctrlr->vcprop.acq = (ctrlr->vcprop.acq & 0xFFFFFFFFULL) | ((uint64_t)value << 32ULL);
1438 uint64_t (*get_cb)(struct spdk_nvmf_ctrlr *ctrlr);
1439 bool (*set_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
1440 bool (*set_upper_cb)(struct spdk_nvmf_ctrlr *ctrlr, uint32_t value);
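These callbacks describe one register in a property table; 8-byte registers such as ASQ are split across a lower and an upper 32-bit setter. A hedged sketch of what one entry plausibly looks like (field names are illustrative, and the file may build entries with a macro):

    {
            .ofst         = offsetof(struct spdk_nvme_registers, asq),
            .size         = 8,
            .get_cb       = nvmf_prop_get_asq,
            .set_cb       = nvmf_prop_set_asq_lower, /* low 32 bits */
            .set_upper_cb = nvmf_prop_set_asq_upper, /* high 32 bits */
    },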
1480 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1515 response->value.u64 = prop->get_cb(ctrlr);
1539 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1577 ret = prop->set_cb(ctrlr, (uint32_t)value);
1584 ret = prop->set_cb(ctrlr, (uint32_t)value);
1586 ret = prop->set_upper_cb(ctrlr, (uint32_t)value);
1589 ret = prop->set_cb(ctrlr, (uint32_t)value);
1591 ret = prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
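The four set_cb/set_upper_cb calls above cover the size combinations of PROPERTY SET: a 4-byte write lands in one half of an 8-byte register, an 8-byte write lands in both. A condensed sketch of that dispatch, with hypothetical cmd_size/offset/ok names:

    if (cmd_size == 4 && prop->size == 8) {
            if (offset == prop->ofst) {
                    ok = prop->set_cb(ctrlr, (uint32_t)value);       /* low half */
            } else {
                    ok = prop->set_upper_cb(ctrlr, (uint32_t)value); /* high half */
            }
    } else { /* 8-byte write to an 8-byte register */
            ok = prop->set_cb(ctrlr, (uint32_t)value) &&
                 prop->set_upper_cb(ctrlr, (uint32_t)(value >> 32));
    }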
1608 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1613 ctrlr->feat.arbitration.raw = cmd->cdw11;
1614 ctrlr->feat.arbitration.bits.reserved = 0;
1622 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1636 ctrlr->feat.power_management.raw = cmd->cdw11;
1637 ctrlr->feat.power_management.bits.reserved = 0;
1711 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1719 iv_conf.bits.cd = ctrlr->feat.interrupt_vector_configuration.bits.cd;
1728 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1745 ctrlr->feat.error_recovery.raw = cmd->cdw11;
1746 ctrlr->feat.error_recovery.bits.reserved = 0;
1754 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1759 ctrlr->feat.volatile_write_cache.raw = cmd->cdw11;
1760 ctrlr->feat.volatile_write_cache.bits.reserved = 0;
1763 ctrlr->feat.volatile_write_cache.bits.wce ? "Enabled" : "Disabled");
1770 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1775 ctrlr->feat.write_atomicity.raw = cmd->cdw11;
1776 ctrlr->feat.write_atomicity.bits.reserved = 0;
1794 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1808 if (req->iovcnt < 1 || req->length < sizeof(ctrlr->hostid)) {
1815 spdk_iov_xfer_from_buf(&ix, &ctrlr->hostid, sizeof(ctrlr->hostid));
1823 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1836 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1850 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1851 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
1866 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1880 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1887 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1906 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1914 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
1920 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns;
1921 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
1942 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1956 host_behavior.acre = ctrlr->acre_enabled;
1957 host_behavior.lbafee = ctrlr->lbafee_enabled;
1968 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
1988 ctrlr->acre_enabled = false;
1990 ctrlr->acre_enabled = true;
1998 ctrlr->lbafee_enabled = false;
2000 ctrlr->lbafee_enabled = true;
2013 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2026 ctrlr->feat.keep_alive_timer.bits.kato = MIN_KEEP_ALIVE_TIMEOUT_IN_MS;
2029 ctrlr->feat.keep_alive_timer.bits.kato = spdk_divide_round_up(
2040 if (ctrlr->keep_alive_poller != NULL) {
2041 spdk_poller_unregister(&ctrlr->keep_alive_poller);
2043 ctrlr->keep_alive_poller = SPDK_POLLER_REGISTER(nvmf_ctrlr_keep_alive_poll, ctrlr,
2044 ctrlr->feat.keep_alive_timer.bits.kato * 1000);
2048 ctrlr->feat.keep_alive_timer.bits.kato);
2056 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2070 count = spdk_bit_array_count_set(ctrlr->qpair_mask);
2080 rsp->cdw0 = ctrlr->feat.number_of_queues.raw;
2123 spdk_nvmf_ctrlr_save_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
2135 assert(spdk_get_thread() == ctrlr->thread);
2137 memcpy(&data_local.regs, &ctrlr->vcprop, sizeof(struct spdk_nvmf_registers));
2138 memcpy(&data_local.feat, &ctrlr->feat, sizeof(struct spdk_nvmf_ctrlr_feat));
2140 data_local.cntlid = ctrlr->cntlid;
2141 data_local.acre = ctrlr->acre_enabled;
2142 data_local.num_aer_cids = ctrlr->nr_aer_reqs;
2144 STAILQ_FOREACH_SAFE(event, &ctrlr->async_events, link, event_tmp) {
2146 SPDK_ERRLOG("ctrlr %p has too many pending AERs\n", ctrlr);
2153 for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
2154 struct spdk_nvmf_request *req = ctrlr->aer_req[i];
2157 data_local.notice_aen_mask = ctrlr->notice_aen_mask;
2164 spdk_nvmf_ctrlr_restore_migr_data(struct spdk_nvmf_ctrlr *ctrlr,
2175 assert(spdk_get_thread() == ctrlr->thread);
2179 memcpy(&ctrlr->vcprop, &data_local.regs, sizeof(struct spdk_nvmf_registers));
2180 memcpy(&ctrlr->feat, &data_local.feat, sizeof(struct spdk_nvmf_ctrlr_feat));
2182 ctrlr->cntlid = data_local.cntlid;
2183 ctrlr->acre_enabled = data_local.acre;
2194 STAILQ_INSERT_TAIL(&ctrlr->async_events, event, link);
2196 ctrlr->notice_aen_mask = data_local.notice_aen_mask;
2204 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2209 ctrlr->feat.async_event_configuration.raw = cmd->cdw11;
2210 ctrlr->feat.async_event_configuration.bits.reserved1 = 0;
2211 ctrlr->feat.async_event_configuration.bits.reserved2 = 0;
2218 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2225 if (ctrlr->nr_aer_reqs >= SPDK_NVMF_MAX_ASYNC_EVENTS) {
2232 if (!STAILQ_EMPTY(&ctrlr->async_events)) {
2233 pending_event = STAILQ_FIRST(&ctrlr->async_events);
2235 STAILQ_REMOVE(&ctrlr->async_events, pending_event, spdk_nvmf_async_event_completion, link);
2240 ctrlr->aer_req[ctrlr->nr_aer_reqs++] = req;
2284 nvmf_ctrlr_unmask_aen(struct spdk_nvmf_ctrlr *ctrlr,
2287 ctrlr->notice_aen_mask &= ~(1 << mask);
2291 nvmf_ctrlr_mask_aen(struct spdk_nvmf_ctrlr *ctrlr,
2294 if (ctrlr->notice_aen_mask & (1 << mask)) {
2297 ctrlr->notice_aen_mask |= (1 << mask);
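mask_aen and unmask_aen make each notice type one-shot: the first event of a type sets its mask bit and fires an AEN, later events of that type are suppressed until the host reads the matching log page, which unmasks it. A condensed sketch of the caller-side pattern:

    /* fire at most one AEN per type until the log page is read */
    if (nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT)) {
            nvmf_ctrlr_async_event_notification(ctrlr, &event);
    }
    /* ... later, in the Changed NS List log page handler ... */
    nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT);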
2306 nvmf_ctrlr_get_ana_state(struct spdk_nvmf_ctrlr *ctrlr, uint32_t anagrpid)
2308 if (!ctrlr->subsys->flags.ana_reporting) {
2312 if (spdk_unlikely(ctrlr->listener == NULL)) {
2316 assert(anagrpid - 1 < ctrlr->subsys->max_nsid);
2317 return ctrlr->listener->ana_state[anagrpid - 1];
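ANA state lives on the listener, one slot per ANA group (anagrpid is 1-based, the array 0-based). A sketch filling in the early returns the match filter elided; the two fallback values are assumptions consistent with the spdk/nvme_spec.h ANA states:

    if (!ctrlr->subsys->flags.ana_reporting) {
            return SPDK_NVME_ANA_OPTIMAL_STATE;      /* reporting disabled */
    }
    if (ctrlr->listener == NULL) {
            return SPDK_NVME_ANA_INACCESSIBLE_STATE; /* no listener yet */
    }
    return ctrlr->listener->ana_state[anagrpid - 1];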
2321 nvmf_ctrlr_get_ana_state_from_nsid(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
2329 ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
2334 return nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
2338 nvmf_get_error_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
2342 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT);
2349 nvmf_get_ana_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
2368 for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) {
2369 if (ctrlr->subsys->ana_group[anagrpid - 1] > 0) {
2391 for (anagrpid = 1; anagrpid <= ctrlr->subsys->max_nsid; anagrpid++) {
2392 if (ctrlr->subsys->ana_group[anagrpid - 1] == 0) {
2405 ana_desc.num_of_nsid = ctrlr->subsys->ana_group[anagrpid - 1];
2407 ana_desc.ana_state = nvmf_ctrlr_get_ana_state(ctrlr, anagrpid);
2428 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
2429 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
2454 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT);
2459 nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
2461 uint16_t max_changes = SPDK_COUNTOF(ctrlr->changed_ns_list.ns_list);
2465 for (i = 0; i < ctrlr->changed_ns_list_count; i++) {
2466 if (ctrlr->changed_ns_list.ns_list[i] == nsid) {
2474 if (ctrlr->changed_ns_list_count == max_changes) {
2476 ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu;
2478 ctrlr->changed_ns_list.ns_list[i] = 0;
2481 ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
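When changed_ns_list_count hits the list's capacity, the NVMe convention is a single sentinel entry of 0xFFFFFFFF meaning "more namespaces changed than can be reported", with the rest zeroed, which is what lines 2474-2478 do. A condensed sketch of both branches:

    if (ctrlr->changed_ns_list_count == max_changes) {
            ctrlr->changed_ns_list.ns_list[0] = 0xFFFFFFFFu; /* overflow sentinel */
            memset(&ctrlr->changed_ns_list.ns_list[1], 0,
                   sizeof(uint32_t) * (max_changes - 1));
    } else {
            ctrlr->changed_ns_list.ns_list[ctrlr->changed_ns_list_count++] = nsid;
    }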
2487 nvmf_get_changed_ns_list_log_page(struct spdk_nvmf_ctrlr *ctrlr,
2495 if (offset < sizeof(ctrlr->changed_ns_list)) {
2496 copy_length = spdk_min(length, sizeof(ctrlr->changed_ns_list) - offset);
2498 spdk_iov_xfer_from_buf(&ix, (char *)&ctrlr->changed_ns_list + offset, copy_length);
2503 ctrlr->changed_ns_list_count = 0;
2504 memset(&ctrlr->changed_ns_list, 0, sizeof(ctrlr->changed_ns_list));
2507 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT);
2555 nvmf_get_cmds_and_effects_log_page(struct spdk_nvmf_ctrlr *ctrlr, struct iovec *iovs, int iovcnt,
2564 if (!ctrlr->cdata.oncs.write_zeroes || !nvmf_ctrlr_write_zeroes_supported(ctrlr)) {
2567 if (!ctrlr->cdata.oncs.dsm || !nvmf_ctrlr_dsm_supported(ctrlr)) {
2570 if (!ctrlr->cdata.oncs.compare) {
2573 if (!nvmf_subsystem_has_zns_iocs(ctrlr->subsys)) {
2577 if (!nvmf_subsystem_zone_append_supported(ctrlr->subsys)) {
2580 if (!ctrlr->cdata.oncs.copy) {
2592 nvmf_get_reservation_notification_log_page(struct spdk_nvmf_ctrlr *ctrlr,
2603 if (!ctrlr->num_avail_log_pages) {
2607 avail_log_len = ctrlr->num_avail_log_pages * unit_log_len;
2613 TAILQ_FOREACH_SAFE(log, &ctrlr->log_head, link, log_tmp) {
2614 TAILQ_REMOVE(&ctrlr->log_head, log, link);
2615 ctrlr->num_avail_log_pages--;
2632 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT);
2640 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
2641 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2689 nvmf_get_discovery_log_page(subsystem->tgt, ctrlr->hostnqn, req->iov, req->iovcnt,
2692 nvmf_ctrlr_unmask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT);
2709 nvmf_get_error_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
2720 nvmf_get_ana_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae, rgo);
2726 nvmf_get_cmds_and_effects_log_page(ctrlr, req->iov, req->iovcnt, offset, len);
2729 nvmf_get_changed_ns_list_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
2732 nvmf_get_reservation_notification_log_page(ctrlr, req->iov, req->iovcnt, offset, len, rae);
2747 _nvmf_ctrlr_get_ns_safe(struct spdk_nvmf_ctrlr *ctrlr,
2752 if (nsid == 0 || nsid > ctrlr->subsys->max_nsid) {
2759 ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
2775 nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
2781 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2786 ns = _nvmf_ctrlr_get_ns_safe(ctrlr, nsid, rsp);
2791 nvmf_bdev_ctrlr_identify_ns(ns, nsdata, ctrlr->dif_insert_or_strip);
2793 assert(ctrlr->admin_qpair);
2798 max_num_blocks = ctrlr->admin_qpair->transport->opts.max_io_size /
2813 ana_state = nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
2822 spdk_nvmf_ctrlr_identify_ns(struct spdk_nvmf_ctrlr *ctrlr,
2827 nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, nsdata, cmd->nsid);
2836 struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
2845 nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, &nvmf_nsdata, req->orig_nsid);
2875 struct spdk_nvmf_ctrlr *ctrlr = spdk_nvmf_request_get_ctrlr(req);
2876 struct spdk_nvmf_ns *ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
2885 nvmf_ctrlr_identify_ns(ctrlr, cmd, rsp, &nsdata, cmd->nsid);
2907 nvmf_ctrlr_populate_oacs(struct spdk_nvmf_ctrlr *ctrlr,
2910 cdata->oacs = ctrlr->cdata.oacs;
2934 spdk_nvmf_ctrlr_identify_ctrlr(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_ctrlr_data *cdata)
2936 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
2942 assert(ctrlr->admin_qpair);
2943 transport = ctrlr->admin_qpair->transport;
2947 cdata->cntlid = ctrlr->cntlid;
2948 cdata->ver = ctrlr->vcprop.vs;
2949 cdata->aerl = ctrlr->cdata.aerl;
2953 cdata->sgls = ctrlr->cdata.sgls;
2954 cdata->fuses = ctrlr->cdata.fuses;
2961 SPDK_DEBUGLOG(nvmf, "ctrlr data: maxcmd 0x%x\n", cdata->maxcmd);
2972 cdata->vid = ctrlr->cdata.vid;
2973 cdata->ssvid = ctrlr->cdata.ssvid;
2974 cdata->ieee[0] = ctrlr->cdata.ieee[0];
2975 cdata->ieee[1] = ctrlr->cdata.ieee[1];
2976 cdata->ieee[2] = ctrlr->cdata.ieee[2];
2983 cdata->kas = ctrlr->cdata.kas;
2990 cdata->ctratt.bits.fdps = ctrlr->subsys->fdp_supported;
3009 cdata->nvmf_specific = ctrlr->cdata.nvmf_specific;
3011 cdata->oncs.compare = ctrlr->cdata.oncs.compare;
3012 cdata->oncs.dsm = ctrlr->cdata.oncs.dsm && nvmf_ctrlr_dsm_supported(ctrlr);
3013 cdata->oncs.write_zeroes = ctrlr->cdata.oncs.write_zeroes &&
3014 nvmf_ctrlr_write_zeroes_supported(ctrlr);
3015 cdata->oncs.reservations = ctrlr->cdata.oncs.reservations;
3016 cdata->oncs.copy = ctrlr->cdata.oncs.copy;
3036 nvmf_ctrlr_populate_oacs(ctrlr, cdata);
3043 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ioccsz 0x%x\n",
3045 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: iorcsz 0x%x\n",
3047 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: icdoff 0x%x\n",
3049 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: ctrattr 0x%x\n",
3051 SPDK_DEBUGLOG(nvmf, "ext ctrlr data: msdbd 0x%x\n",
3093 spdk_nvmf_ns_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr,
3100 struct spdk_nvmf_ns *ns = _nvmf_ctrlr_get_ns_safe(ctrlr, cmd->nsid, rsp);
3114 if (!ctrlr->dif_insert_or_strip) {
3132 nvmf_ctrlr_identify_iocs_nvm(struct spdk_nvmf_ctrlr *ctrlr,
3141 cdata_nvm->wzsl = spdk_u64log2(ctrlr->subsys->max_write_zeroes_size_kib >>
3142 (2 + ctrlr->vcprop.cap.bits.mpsmin));
3148 cdata_nvm->dmrsl = ctrlr->subsys->max_discard_size_kib << 1;
3157 nvmf_ctrlr_identify_iocs_zns(struct spdk_nvmf_ctrlr *ctrlr,
3166 cdata_zns->zasl = spdk_u64log2(ctrlr->subsys->max_zone_append_size_kib >>
3167 (12 + ctrlr->vcprop.cap.bits.mpsmin));
3175 spdk_nvmf_ctrlr_identify_iocs_specific(struct spdk_nvmf_ctrlr *ctrlr,
3187 return nvmf_ctrlr_identify_iocs_nvm(ctrlr, cmd, rsp, cdata);
3189 return nvmf_ctrlr_identify_iocs_zns(ctrlr, cmd, rsp, cdata);
3195 "Returning zero filled struct for the iocs specific ctrlr "
3204 nvmf_ctrlr_identify_active_ns_list(struct spdk_nvmf_ctrlr *ctrlr,
3209 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
3223 if (ns->opts.nsid <= cmd->nsid || !nvmf_ctrlr_ns_is_visible(ctrlr, ns->opts.nsid)) {
3266 struct spdk_nvmf_ctrlr *ctrlr,
3275 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
3307 nvmf_ctrlr_identify_iocs(struct spdk_nvmf_ctrlr *ctrlr,
3325 cmd->cdw10_bits.identify.cntid != ctrlr->cntlid) {
3334 for (ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys); ns != NULL;
3335 ns = spdk_nvmf_subsystem_get_next_ns(ctrlr->subsys, ns)) {
3353 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3356 struct spdk_nvmf_subsystem *subsystem = ctrlr->subsys;
3390 ret = spdk_nvmf_ctrlr_identify_ctrlr(ctrlr, (void *)&tmpbuf);
3393 ret = nvmf_ctrlr_identify_active_ns_list(ctrlr, cmd, rsp, (void *)&tmpbuf);
3396 ret = nvmf_ctrlr_identify_ns_id_descriptor_list(ctrlr, cmd, rsp,
3400 ret = spdk_nvmf_ns_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
3403 ret = spdk_nvmf_ctrlr_identify_iocs_specific(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
3406 ret = nvmf_ctrlr_identify_iocs(ctrlr, cmd, rsp, (void *)&tmpbuf, req->length);
3428 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
3436 assert(spdk_get_thread() == ctrlr->thread);
3438 for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
3439 if (ctrlr->aer_req[i]->cmd->nvme_cmd.cid == cid) {
3441 req = ctrlr->aer_req[i];
3442 ctrlr->aer_req[i] = NULL;
3443 ctrlr->nr_aer_reqs--;
3448 if (i < ctrlr->nr_aer_reqs) {
3449 ctrlr->aer_req[i] = ctrlr->aer_req[ctrlr->nr_aer_reqs];
3450 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;
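aer_req is a fixed array, so removal swaps the last outstanding request into the vacated slot, an O(1) unordered delete. A generic sketch with hypothetical arr/count names:

    arr[i] = arr[--count]; /* move last element into the hole */
    arr[count] = NULL;     /* clear the vacated tail slot */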
3489 SPDK_DEBUGLOG(nvmf, "abort ctrlr=%p sqid=%u cid=%u successful\n",
3490 qpair->ctrlr, qpair->qid, cid);
3523 if (qpair->ctrlr == req->qpair->ctrlr && qpair->qid == sqid) {
3546 /* Send a message to each poll group, searching for this ctrlr, sqid, and command. */
3547 spdk_for_each_channel(req->qpair->ctrlr->subsys->tgt,
3612 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3619 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3625 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
3627 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
3637 ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid);
3660 return get_features_generic(req, ctrlr->feat.arbitration.raw);
3662 return get_features_generic(req, ctrlr->feat.power_management.raw);
3666 return get_features_generic(req, ctrlr->feat.error_recovery.raw);
3668 return get_features_generic(req, ctrlr->feat.volatile_write_cache.raw);
3670 return get_features_generic(req, ctrlr->feat.number_of_queues.raw);
3672 return get_features_generic(req, ctrlr->feat.interrupt_coalescing.raw);
3676 return get_features_generic(req, ctrlr->feat.write_atomicity.raw);
3678 return get_features_generic(req, ctrlr->feat.async_event_configuration.raw);
3680 return get_features_generic(req, ctrlr->feat.keep_alive_timer.raw);
3700 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3717 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3735 ana_state = nvmf_ctrlr_get_ana_state_from_nsid(ctrlr, cmd->nsid);
3806 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3811 * ctrlr-based keep alive duration counter.
3817 ctrlr->last_keep_alive_tick = spdk_get_ticks();
3849 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
3855 assert(ctrlr != NULL);
3861 sgroup = &req->qpair->group->sgroups[ctrlr->subsys->id];
3866 assert(spdk_get_thread() == ctrlr->thread);
3878 if (ctrlr->vcprop.cc.bits.en != 1) {
3889 if (spdk_nvmf_subsystem_is_discovery(ctrlr->subsys)) {
3916 if (ctrlr->subsys->passthrough && cmd->nsid && cmd->nsid != SPDK_NVME_GLOBAL_NS_TAG) {
3962 if (qpair->ctrlr == NULL) {
3963 /* No ctrlr established yet; the only valid command is Connect */
4005 nvmf_ctrlr_queue_pending_async_event(struct spdk_nvmf_ctrlr *ctrlr,
4016 STAILQ_INSERT_TAIL(&ctrlr->async_events, nvmf_event, link);
4020 nvmf_ctrlr_async_event_notification(struct spdk_nvmf_ctrlr *ctrlr,
4026 assert(spdk_get_thread() == ctrlr->thread);
4032 if (ctrlr->nr_aer_reqs == 0) {
4033 nvmf_ctrlr_queue_pending_async_event(ctrlr, event);
4037 req = ctrlr->aer_req[--ctrlr->nr_aer_reqs];
4043 ctrlr->aer_req[ctrlr->nr_aer_reqs] = NULL;
4049 nvmf_ctrlr_async_event_ns_notice(struct spdk_nvmf_ctrlr *ctrlr)
4054 if (!ctrlr->feat.async_event_configuration.bits.ns_attr_notice) {
4058 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGE_MASK_BIT)) {
4066 return nvmf_ctrlr_async_event_notification(ctrlr, &event);
4070 nvmf_ctrlr_async_event_ana_change_notice(struct spdk_nvmf_ctrlr *ctrlr)
4075 if (!ctrlr->feat.async_event_configuration.bits.ana_change_notice) {
4079 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ANA_CHANGE_MASK_BIT)) {
4087 return nvmf_ctrlr_async_event_notification(ctrlr, &event);
4091 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
4095 if (!ctrlr->num_avail_log_pages) {
4099 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL_MASK_BIT)) {
4107 nvmf_ctrlr_async_event_notification(ctrlr, &event);
4114 struct spdk_nvmf_ctrlr *ctrlr = ctx;
4120 if (!ctrlr->feat.async_event_configuration.bits.discovery_log_change_notice) {
4124 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_DISCOVERY_LOG_CHANGE_MASK_BIT)) {
4132 nvmf_ctrlr_async_event_notification(ctrlr, &event);
4136 spdk_nvmf_ctrlr_async_event_error_event(struct spdk_nvmf_ctrlr *ctrlr,
4141 if (!nvmf_ctrlr_mask_aen(ctrlr, SPDK_NVME_ASYNC_EVENT_ERROR_MASK_BIT)) {
4153 return nvmf_ctrlr_async_event_notification(ctrlr, &event);
4159 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
4162 if (ctrlr == NULL || !nvmf_qpair_is_admin_queue(qpair)) {
4166 assert(spdk_get_thread() == ctrlr->thread);
4168 for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
4169 spdk_nvmf_request_free(ctrlr->aer_req[i]);
4170 ctrlr->aer_req[i] = NULL;
4173 ctrlr->nr_aer_reqs = 0;
4177 spdk_nvmf_ctrlr_abort_aer(struct spdk_nvmf_ctrlr *ctrlr)
4182 assert(spdk_get_thread() == ctrlr->thread);
4184 if (!ctrlr->nr_aer_reqs) {
4188 for (i = 0; i < ctrlr->nr_aer_reqs; i++) {
4189 req = ctrlr->aer_req[i];
4195 ctrlr->aer_req[i] = NULL;
4198 ctrlr->nr_aer_reqs = 0;
4205 struct spdk_nvmf_ctrlr *ctrlr = log->ctrlr;
4207 ctrlr->log_page_count++;
4210 if (ctrlr->num_avail_log_pages == 0xff) {
4212 entry = TAILQ_LAST(&ctrlr->log_head, log_page_head);
4213 entry->log.log_page_count = ctrlr->log_page_count;
4218 log->log.log_page_count = ctrlr->log_page_count;
4219 log->log.num_avail_log_pages = ctrlr->num_avail_log_pages++;
4220 TAILQ_INSERT_TAIL(&ctrlr->log_head, log, link);
4222 nvmf_ctrlr_async_event_reservation_notification(ctrlr);
4226 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
4259 log->ctrlr = ctrlr;
4263 spdk_thread_send_msg(ctrlr->thread, _nvmf_ctrlr_add_reservation_log, log);
4269 struct spdk_nvmf_ctrlr *ctrlr)
4274 if (!spdk_uuid_compare(&ns_info->reg_hostid[i], &ctrlr->hostid)) {
4287 struct spdk_nvmf_ctrlr *ctrlr,
4301 is_registrant = nvmf_ns_info_ctrlr_is_registrant(ns_info, ctrlr);
4302 /* All registrants type and current ctrlr is a valid registrant */
4306 } else if (!spdk_uuid_compare(&ns_info->holder_id, &ctrlr->hostid)) {
4476 ns = nvmf_ctrlr_get_ns(req->qpair->ctrlr, req->cmd->nvme_cmd.nsid);
4515 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
4525 assert(ctrlr != NULL);
4526 if (spdk_unlikely(ctrlr->vcprop.cc.bits.en != 1)) {
4533 ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
4541 ana_state = nvmf_ctrlr_get_ana_state(ctrlr, ns->anagrpid);
4551 if (spdk_likely(ctrlr->listener != NULL)) {
4553 ctrlr->listener->trid->traddr,
4554 ctrlr->listener->trid->trsvcid);
4559 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
4560 if (nvmf_ns_reservation_request_check(ns_info, ctrlr, req)) {
4576 ctrlr->cntlid, qpair->qid,
4586 if (ctrlr->subsys->passthrough) {
4605 if (spdk_unlikely(!ctrlr->cdata.oncs.compare)) {
4610 if (spdk_unlikely(!ctrlr->cdata.oncs.write_zeroes)) {
4615 if (spdk_unlikely(!ctrlr->cdata.oncs.dsm)) {
4623 if (spdk_unlikely(!ctrlr->cdata.oncs.reservations)) {
4626 spdk_thread_send_msg(ctrlr->subsys->thread, nvmf_ns_reservation_request, req);
4629 if (spdk_unlikely(!ctrlr->cdata.oncs.copy)) {
4698 if (spdk_likely(qpair->ctrlr)) {
4699 sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4713 qpair->ctrlr->acre_enabled)) {
4814 if (spdk_likely(qpair->ctrlr)) {
4815 sgroup = &qpair->group->sgroups[qpair->ctrlr->subsys->id];
4963 nvmf_ctrlr_get_dif_ctx(struct spdk_nvmf_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd,
4969 if (ctrlr == NULL || cmd == NULL) {
4973 ns = nvmf_ctrlr_get_ns(ctrlr, cmd->nsid);
4996 struct spdk_nvmf_ctrlr *ctrlr = qpair->ctrlr;
4998 if (spdk_likely(ctrlr == NULL || !ctrlr->dif_insert_or_strip)) {
5014 return nvmf_ctrlr_get_dif_ctx(ctrlr, &req->cmd->nvme_cmd, dif_ctx);
5030 struct spdk_nvmf_ctrlr *ctrlr;
5041 ctrlr = req->qpair->ctrlr;
5042 ns = nvmf_ctrlr_get_ns(ctrlr, bdev_nsid);
5067 nvmf_passthru_admin_cmd_for_ctrlr(struct spdk_nvmf_request *req, struct spdk_nvmf_ctrlr *ctrlr)
5072 ns = spdk_nvmf_subsystem_get_first_ns(ctrlr->subsys);
5094 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr;
5103 ns = nvmf_ctrlr_get_ns(ctrlr, nsid);
5109 ns_info = &group->sgroups[ctrlr->subsys->id].ns_info[nsid - 1];
5119 return req->qpair->ctrlr;
5134 return req->qpair->ctrlr->subsys;
5157 struct spdk_nvmf_subsystem *spdk_nvmf_ctrlr_get_subsystem(struct spdk_nvmf_ctrlr *ctrlr)
5159 return ctrlr->subsys;
5163 spdk_nvmf_ctrlr_get_id(struct spdk_nvmf_ctrlr *ctrlr)
5165 return ctrlr->cntlid;