1 /* 2 * BSD LICENSE 3 * 4 * Copyright (c) 2018-2019 Broadcom. All Rights Reserved. 5 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. 6 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * NVMe_FC transport functions. 37 */ 38 39 #include "spdk/env.h" 40 #include "spdk/assert.h" 41 #include "spdk/nvmf_transport.h" 42 #include "spdk/string.h" 43 #include "spdk/trace.h" 44 #include "spdk/util.h" 45 #include "spdk/likely.h" 46 #include "spdk/endian.h" 47 #include "spdk/log.h" 48 #include "spdk/thread.h" 49 50 #include "nvmf_fc.h" 51 #include "fc_lld.h" 52 53 #include "spdk_internal/trace_defs.h" 54 55 #ifndef DEV_VERIFY 56 #define DEV_VERIFY assert 57 #endif 58 59 #ifndef ASSERT_SPDK_FC_MAIN_THREAD 60 #define ASSERT_SPDK_FC_MAIN_THREAD() \ 61 DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread()); 62 #endif 63 64 /* 65 * PRLI service parameters 66 */ 67 enum spdk_nvmf_fc_service_parameters { 68 SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001, 69 SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008, 70 SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010, 71 SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020, 72 SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080, 73 }; 74 75 static char *fc_req_state_strs[] = { 76 "SPDK_NVMF_FC_REQ_INIT", 77 "SPDK_NVMF_FC_REQ_READ_BDEV", 78 "SPDK_NVMF_FC_REQ_READ_XFER", 79 "SPDK_NVMF_FC_REQ_READ_RSP", 80 "SPDK_NVMF_FC_REQ_WRITE_BUFFS", 81 "SPDK_NVMF_FC_REQ_WRITE_XFER", 82 "SPDK_NVMF_FC_REQ_WRITE_BDEV", 83 "SPDK_NVMF_FC_REQ_WRITE_RSP", 84 "SPDK_NVMF_FC_REQ_NONE_BDEV", 85 "SPDK_NVMF_FC_REQ_NONE_RSP", 86 "SPDK_NVMF_FC_REQ_SUCCESS", 87 "SPDK_NVMF_FC_REQ_FAILED", 88 "SPDK_NVMF_FC_REQ_ABORTED", 89 "SPDK_NVMF_FC_REQ_BDEV_ABORTED", 90 "SPDK_NVMF_FC_REQ_PENDING", 91 "SPDK_NVMF_FC_REQ_FUSED_WAITING" 92 }; 93 94 #define HWQP_CONN_TABLE_SIZE 8192 95 #define HWQP_RPI_TABLE_SIZE 4096 96 97 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC) 98 { 99 spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r'); 100 spdk_trace_register_description("FC_NEW", 101 TRACE_FC_REQ_INIT, 102 OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 103 SPDK_TRACE_ARG_TYPE_INT, ""); 104 spdk_trace_register_description("FC_READ_SBMT_TO_BDEV", 105 TRACE_FC_REQ_READ_BDEV, 106 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 107 SPDK_TRACE_ARG_TYPE_INT, ""); 108 spdk_trace_register_description("FC_READ_XFER_DATA", 109 TRACE_FC_REQ_READ_XFER, 110 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 111 SPDK_TRACE_ARG_TYPE_INT, ""); 112 spdk_trace_register_description("FC_READ_RSP", 113 TRACE_FC_REQ_READ_RSP, 114 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 115 SPDK_TRACE_ARG_TYPE_INT, ""); 116 spdk_trace_register_description("FC_WRITE_NEED_BUFFER", 117 TRACE_FC_REQ_WRITE_BUFFS, 118 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 119 SPDK_TRACE_ARG_TYPE_INT, ""); 120 spdk_trace_register_description("FC_WRITE_XFER_DATA", 121 TRACE_FC_REQ_WRITE_XFER, 122 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 123 SPDK_TRACE_ARG_TYPE_INT, ""); 124 spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV", 125 TRACE_FC_REQ_WRITE_BDEV, 126 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 127 SPDK_TRACE_ARG_TYPE_INT, ""); 128 spdk_trace_register_description("FC_WRITE_RSP", 129 TRACE_FC_REQ_WRITE_RSP, 130 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 131 SPDK_TRACE_ARG_TYPE_INT, ""); 132 spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV", 133 TRACE_FC_REQ_NONE_BDEV, 134 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 135 SPDK_TRACE_ARG_TYPE_INT, ""); 136 spdk_trace_register_description("FC_NONE_RSP", 137 TRACE_FC_REQ_NONE_RSP, 138 OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 139 SPDK_TRACE_ARG_TYPE_INT, ""); 140 spdk_trace_register_description("FC_SUCCESS", 141 TRACE_FC_REQ_SUCCESS, 142 OWNER_NONE, OBJECT_NONE, 0, 143 SPDK_TRACE_ARG_TYPE_INT, ""); 144 spdk_trace_register_description("FC_FAILED", 145 TRACE_FC_REQ_FAILED, 146 OWNER_NONE, OBJECT_NONE, 0, 147 SPDK_TRACE_ARG_TYPE_INT, ""); 148 spdk_trace_register_description("FC_ABRT", 149 TRACE_FC_REQ_ABORTED, 150 OWNER_NONE, OBJECT_NONE, 0, 151 SPDK_TRACE_ARG_TYPE_INT, ""); 152 spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV", 153 TRACE_FC_REQ_BDEV_ABORTED, 154 OWNER_NONE, OBJECT_NONE, 0, 155 SPDK_TRACE_ARG_TYPE_INT, ""); 156 spdk_trace_register_description("FC_PENDING", 157 TRACE_FC_REQ_PENDING, 158 OWNER_NONE, OBJECT_NONE, 0, 159 SPDK_TRACE_ARG_TYPE_INT, ""); 160 spdk_trace_register_description("FC_FUSED_WAITING", 161 TRACE_FC_REQ_FUSED_WAITING, 162 OWNER_NONE, OBJECT_NONE, 0, 163 SPDK_TRACE_ARG_TYPE_INT, ""); 164 } 165 166 /** 167 * The structure used by all fc adm functions 168 */ 169 struct spdk_nvmf_fc_adm_api_data { 170 void *api_args; 171 spdk_nvmf_fc_callback cb_func; 172 }; 173 174 /** 175 * The callback structure for nport-delete 176 */ 177 struct spdk_nvmf_fc_adm_nport_del_cb_data { 178 struct spdk_nvmf_fc_nport *nport; 179 uint8_t port_handle; 180 spdk_nvmf_fc_callback fc_cb_func; 181 void *fc_cb_ctx; 182 }; 183 184 /** 185 * The callback structure for it-delete 186 */ 187 struct spdk_nvmf_fc_adm_i_t_del_cb_data { 188 struct spdk_nvmf_fc_nport *nport; 189 struct spdk_nvmf_fc_remote_port_info *rport; 190 uint8_t port_handle; 191 spdk_nvmf_fc_callback fc_cb_func; 192 void *fc_cb_ctx; 193 }; 194 195 196 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err); 197 198 /** 199 * The callback structure for the it-delete-assoc callback 200 */ 201 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data { 202 struct spdk_nvmf_fc_nport *nport; 203 struct spdk_nvmf_fc_remote_port_info *rport; 204 uint8_t port_handle; 205 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func; 206 void *cb_ctx; 207 }; 208 209 /* 210 * Call back function pointer for HW port quiesce. 211 */ 212 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err); 213 214 /** 215 * Context structure for quiescing a hardware port 216 */ 217 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx { 218 int quiesce_count; 219 void *ctx; 220 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func; 221 }; 222 223 /** 224 * Context structure used to reset a hardware port 225 */ 226 struct spdk_nvmf_fc_adm_hw_port_reset_ctx { 227 void *reset_args; 228 spdk_nvmf_fc_callback reset_cb_func; 229 }; 230 231 struct spdk_nvmf_fc_transport { 232 struct spdk_nvmf_transport transport; 233 struct spdk_poller *accept_poller; 234 pthread_mutex_t lock; 235 }; 236 237 static struct spdk_nvmf_fc_transport *g_nvmf_ftransport; 238 239 static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL; 240 241 static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list = 242 TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list); 243 244 static struct spdk_thread *g_nvmf_fc_main_thread = NULL; 245 246 static uint32_t g_nvmf_fgroup_count = 0; 247 static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups = 248 TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups); 249 250 struct spdk_thread * 251 nvmf_fc_get_main_thread(void) 252 { 253 return g_nvmf_fc_main_thread; 254 } 255 256 static inline void 257 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req, 258 enum spdk_nvmf_fc_request_state state) 259 { 260 uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID; 261 262 switch (state) { 263 case SPDK_NVMF_FC_REQ_INIT: 264 /* Start IO tracing */ 265 tpoint_id = TRACE_FC_REQ_INIT; 266 break; 267 case SPDK_NVMF_FC_REQ_READ_BDEV: 268 tpoint_id = TRACE_FC_REQ_READ_BDEV; 269 break; 270 case SPDK_NVMF_FC_REQ_READ_XFER: 271 tpoint_id = TRACE_FC_REQ_READ_XFER; 272 break; 273 case SPDK_NVMF_FC_REQ_READ_RSP: 274 tpoint_id = TRACE_FC_REQ_READ_RSP; 275 break; 276 case SPDK_NVMF_FC_REQ_WRITE_BUFFS: 277 tpoint_id = TRACE_FC_REQ_WRITE_BUFFS; 278 break; 279 case SPDK_NVMF_FC_REQ_WRITE_XFER: 280 tpoint_id = TRACE_FC_REQ_WRITE_XFER; 281 break; 282 case SPDK_NVMF_FC_REQ_WRITE_BDEV: 283 tpoint_id = TRACE_FC_REQ_WRITE_BDEV; 284 break; 285 case SPDK_NVMF_FC_REQ_WRITE_RSP: 286 tpoint_id = TRACE_FC_REQ_WRITE_RSP; 287 break; 288 case SPDK_NVMF_FC_REQ_NONE_BDEV: 289 tpoint_id = TRACE_FC_REQ_NONE_BDEV; 290 break; 291 case SPDK_NVMF_FC_REQ_NONE_RSP: 292 tpoint_id = TRACE_FC_REQ_NONE_RSP; 293 break; 294 case SPDK_NVMF_FC_REQ_SUCCESS: 295 tpoint_id = TRACE_FC_REQ_SUCCESS; 296 break; 297 case SPDK_NVMF_FC_REQ_FAILED: 298 tpoint_id = TRACE_FC_REQ_FAILED; 299 break; 300 case SPDK_NVMF_FC_REQ_ABORTED: 301 tpoint_id = TRACE_FC_REQ_ABORTED; 302 break; 303 case SPDK_NVMF_FC_REQ_BDEV_ABORTED: 304 tpoint_id = TRACE_FC_REQ_ABORTED; 305 break; 306 case SPDK_NVMF_FC_REQ_PENDING: 307 tpoint_id = TRACE_FC_REQ_PENDING; 308 break; 309 case SPDK_NVMF_FC_REQ_FUSED_WAITING: 310 tpoint_id = TRACE_FC_REQ_FUSED_WAITING; 311 break; 312 default: 313 assert(0); 314 break; 315 } 316 if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) { 317 spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0, 318 (uint64_t)(&fc_req->req)); 319 } 320 } 321 322 static struct rte_hash * 323 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len) 324 { 325 struct rte_hash_parameters hash_params = { 0 }; 326 327 hash_params.entries = num_entries; 328 hash_params.key_len = key_len; 329 hash_params.name = name; 330 331 return rte_hash_create(&hash_params); 332 } 333 334 void 335 nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn) 336 { 337 free(fc_conn->pool_memory); 338 fc_conn->pool_memory = NULL; 339 } 340 341 int 342 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn) 343 { 344 uint32_t i, qd; 345 struct spdk_nvmf_fc_pooled_request *req; 346 347 /* 348 * Create number of fc-requests to be more than the actual SQ size. 349 * This is to handle race conditions where the target driver may send 350 * back a RSP and before the target driver gets to process the CQE 351 * for the RSP, the initiator may have sent a new command. 352 * Depending on the load on the HWQP, there is a slim possibility 353 * that the target reaps the RQE corresponding to the new 354 * command before processing the CQE corresponding to the RSP. 355 */ 356 qd = fc_conn->max_queue_depth * 2; 357 358 STAILQ_INIT(&fc_conn->pool_queue); 359 fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2), 360 sizeof(struct spdk_nvmf_fc_request)); 361 if (!fc_conn->pool_memory) { 362 SPDK_ERRLOG("create fc req ring objects failed\n"); 363 goto error; 364 } 365 fc_conn->pool_size = qd; 366 fc_conn->pool_free_elems = qd; 367 368 /* Initialise value in ring objects and link the objects */ 369 for (i = 0; i < qd; i++) { 370 req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory + 371 i * sizeof(struct spdk_nvmf_fc_request)); 372 373 STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link); 374 } 375 return 0; 376 error: 377 nvmf_fc_free_conn_reqpool(fc_conn); 378 return -1; 379 } 380 381 static inline struct spdk_nvmf_fc_request * 382 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn) 383 { 384 struct spdk_nvmf_fc_request *fc_req; 385 struct spdk_nvmf_fc_pooled_request *pooled_req; 386 struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp; 387 388 pooled_req = STAILQ_FIRST(&fc_conn->pool_queue); 389 if (!pooled_req) { 390 SPDK_ERRLOG("Alloc request buffer failed\n"); 391 return NULL; 392 } 393 STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link); 394 fc_conn->pool_free_elems -= 1; 395 396 fc_req = (struct spdk_nvmf_fc_request *)pooled_req; 397 memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request)); 398 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT); 399 400 TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link); 401 TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link); 402 TAILQ_INIT(&fc_req->abort_cbs); 403 return fc_req; 404 } 405 406 static inline void 407 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req) 408 { 409 if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) { 410 /* Log an error for debug purpose. */ 411 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED); 412 } 413 414 /* set the magic to mark req as no longer valid. */ 415 fc_req->magic = 0xDEADBEEF; 416 417 TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link); 418 TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link); 419 420 STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link); 421 fc_conn->pool_free_elems += 1; 422 } 423 424 static inline void 425 nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req) 426 { 427 STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req, 428 spdk_nvmf_request, buf_link); 429 } 430 431 int 432 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp) 433 { 434 char name[64]; 435 436 hwqp->fc_port = fc_port; 437 438 /* clear counters */ 439 memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors)); 440 441 TAILQ_INIT(&hwqp->in_use_reqs); 442 TAILQ_INIT(&hwqp->sync_cbs); 443 TAILQ_INIT(&hwqp->ls_pending_queue); 444 445 snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id); 446 hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE, 447 sizeof(uint64_t)); 448 if (!hwqp->connection_list_hash) { 449 SPDK_ERRLOG("Failed to create connection hash table.\n"); 450 return -ENOMEM; 451 } 452 453 snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id); 454 hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t)); 455 if (!hwqp->rport_list_hash) { 456 SPDK_ERRLOG("Failed to create rpi hash table.\n"); 457 rte_hash_free(hwqp->connection_list_hash); 458 return -ENOMEM; 459 } 460 461 /* Init low level driver queues */ 462 nvmf_fc_init_q(hwqp); 463 return 0; 464 } 465 466 static struct spdk_nvmf_fc_poll_group * 467 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp) 468 { 469 uint32_t max_count = UINT32_MAX; 470 struct spdk_nvmf_fc_poll_group *fgroup; 471 struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL; 472 473 pthread_mutex_lock(&g_nvmf_ftransport->lock); 474 /* find poll group with least number of hwqp's assigned to it */ 475 TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) { 476 if (fgroup->hwqp_count < max_count) { 477 ret_fgroup = fgroup; 478 max_count = fgroup->hwqp_count; 479 } 480 } 481 482 if (ret_fgroup) { 483 ret_fgroup->hwqp_count++; 484 hwqp->thread = ret_fgroup->group.group->thread; 485 hwqp->fgroup = ret_fgroup; 486 } 487 488 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 489 490 return ret_fgroup; 491 } 492 493 bool 494 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup) 495 { 496 struct spdk_nvmf_fc_poll_group *tmp; 497 bool rc = false; 498 499 pthread_mutex_lock(&g_nvmf_ftransport->lock); 500 TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) { 501 if (tmp == fgroup) { 502 rc = true; 503 break; 504 } 505 } 506 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 507 return rc; 508 } 509 510 void 511 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp) 512 { 513 assert(hwqp); 514 if (hwqp == NULL) { 515 SPDK_ERRLOG("Error: hwqp is NULL\n"); 516 return; 517 } 518 519 assert(g_nvmf_fgroup_count); 520 521 if (!nvmf_fc_assign_idlest_poll_group(hwqp)) { 522 SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id); 523 return; 524 } 525 526 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL); 527 } 528 529 static void 530 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 531 { 532 struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data; 533 534 if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) { 535 SPDK_DEBUGLOG(nvmf_fc_adm_api, 536 "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id); 537 } else { 538 SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id); 539 } 540 541 if (args->cb_fn) { 542 args->cb_fn(args->cb_ctx, 0); 543 } 544 545 free(args); 546 } 547 548 void 549 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp, 550 spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx) 551 { 552 struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args; 553 struct spdk_nvmf_fc_poll_group *tmp; 554 int rc = 0; 555 556 assert(hwqp); 557 558 SPDK_DEBUGLOG(nvmf_fc, 559 "Remove hwqp from poller: for port: %d, hwqp: %d\n", 560 hwqp->fc_port->port_hdl, hwqp->hwqp_id); 561 562 if (!hwqp->fgroup) { 563 SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id); 564 } else { 565 pthread_mutex_lock(&g_nvmf_ftransport->lock); 566 TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) { 567 if (tmp == hwqp->fgroup) { 568 hwqp->fgroup->hwqp_count--; 569 break; 570 } 571 } 572 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 573 574 if (tmp != hwqp->fgroup) { 575 /* Pollgroup was already removed. Dont bother. */ 576 goto done; 577 } 578 579 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args)); 580 if (args == NULL) { 581 rc = -ENOMEM; 582 SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id); 583 goto done; 584 } 585 586 args->hwqp = hwqp; 587 args->cb_fn = cb_fn; 588 args->cb_ctx = cb_ctx; 589 args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb; 590 args->cb_info.cb_data = args; 591 args->cb_info.cb_thread = spdk_get_thread(); 592 593 rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args); 594 if (rc) { 595 rc = -EINVAL; 596 SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id); 597 free(args); 598 goto done; 599 } 600 return; 601 } 602 done: 603 if (cb_fn) { 604 cb_fn(cb_ctx, rc); 605 } 606 } 607 608 /* 609 * Note: This needs to be used only on main poller. 610 */ 611 static uint64_t 612 nvmf_fc_get_abts_unique_id(void) 613 { 614 static uint32_t u_id = 0; 615 616 return (uint64_t)(++u_id); 617 } 618 619 static void 620 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 621 { 622 struct spdk_nvmf_fc_abts_ctx *ctx = cb_data; 623 struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg; 624 625 ctx->hwqps_responded++; 626 627 if (ctx->hwqps_responded < ctx->num_hwqps) { 628 /* Wait for all pollers to complete. */ 629 return; 630 } 631 632 /* Free the queue sync poller args. */ 633 free(ctx->sync_poller_args); 634 635 /* Mark as queue synced */ 636 ctx->queue_synced = true; 637 638 /* Reset the ctx values */ 639 ctx->hwqps_responded = 0; 640 ctx->handled = false; 641 642 SPDK_DEBUGLOG(nvmf_fc, 643 "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 644 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 645 646 /* Resend ABTS to pollers */ 647 args = ctx->abts_poller_args; 648 for (int i = 0; i < ctx->num_hwqps; i++) { 649 poller_arg = args + i; 650 nvmf_fc_poller_api_func(poller_arg->hwqp, 651 SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED, 652 poller_arg); 653 } 654 } 655 656 static int 657 nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx) 658 { 659 struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg; 660 struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg; 661 662 /* check if FC driver supports queue sync */ 663 if (!nvmf_fc_q_sync_available()) { 664 return -EPERM; 665 } 666 667 assert(ctx); 668 if (!ctx) { 669 SPDK_ERRLOG("NULL ctx pointer"); 670 return -EINVAL; 671 } 672 673 /* Reset the ctx values */ 674 ctx->hwqps_responded = 0; 675 676 args = calloc(ctx->num_hwqps, 677 sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args)); 678 if (!args) { 679 SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 680 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 681 return -ENOMEM; 682 } 683 ctx->sync_poller_args = args; 684 685 abts_args = ctx->abts_poller_args; 686 for (int i = 0; i < ctx->num_hwqps; i++) { 687 abts_poller_arg = abts_args + i; 688 poller_arg = args + i; 689 poller_arg->u_id = ctx->u_id; 690 poller_arg->hwqp = abts_poller_arg->hwqp; 691 poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb; 692 poller_arg->cb_info.cb_data = ctx; 693 poller_arg->cb_info.cb_thread = spdk_get_thread(); 694 695 /* Send a Queue sync message to interested pollers */ 696 nvmf_fc_poller_api_func(poller_arg->hwqp, 697 SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC, 698 poller_arg); 699 } 700 701 SPDK_DEBUGLOG(nvmf_fc, 702 "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 703 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 704 705 /* Post Marker to queue to track aborted request */ 706 nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id); 707 708 return 0; 709 } 710 711 static void 712 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 713 { 714 struct spdk_nvmf_fc_abts_ctx *ctx = cb_data; 715 struct spdk_nvmf_fc_nport *nport = NULL; 716 717 if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) { 718 ctx->handled = true; 719 } 720 721 ctx->hwqps_responded++; 722 723 if (ctx->hwqps_responded < ctx->num_hwqps) { 724 /* Wait for all pollers to complete. */ 725 return; 726 } 727 728 nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl); 729 730 if (ctx->nport != nport) { 731 /* Nport can be deleted while this abort is being 732 * processed by the pollers. 733 */ 734 SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 735 ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 736 } else { 737 if (!ctx->handled) { 738 /* Try syncing the queues and try one more time */ 739 if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) { 740 SPDK_DEBUGLOG(nvmf_fc, 741 "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 742 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 743 return; 744 } else { 745 /* Send Reject */ 746 nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue, 747 ctx->oxid, ctx->rxid, ctx->rpi, true, 748 FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL); 749 } 750 } else { 751 /* Send Accept */ 752 nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue, 753 ctx->oxid, ctx->rxid, ctx->rpi, false, 754 0, NULL, NULL); 755 } 756 } 757 SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 758 (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 759 760 free(ctx->abts_poller_args); 761 free(ctx); 762 } 763 764 void 765 nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi, 766 uint16_t oxid, uint16_t rxid) 767 { 768 struct spdk_nvmf_fc_abts_ctx *ctx = NULL; 769 struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg; 770 struct spdk_nvmf_fc_association *assoc = NULL; 771 struct spdk_nvmf_fc_conn *conn = NULL; 772 uint32_t hwqp_cnt = 0; 773 bool skip_hwqp_cnt; 774 struct spdk_nvmf_fc_hwqp **hwqps = NULL; 775 uint32_t i; 776 777 SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 778 nport->nport_hdl, rpi, oxid, rxid); 779 780 /* Allocate memory to track hwqp's with at least 1 active connection. */ 781 hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *)); 782 if (hwqps == NULL) { 783 SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n"); 784 goto bls_rej; 785 } 786 787 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 788 TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) { 789 if ((conn->rpi != rpi) || !conn->hwqp) { 790 continue; 791 } 792 793 skip_hwqp_cnt = false; 794 for (i = 0; i < hwqp_cnt; i++) { 795 if (hwqps[i] == conn->hwqp) { 796 /* Skip. This is already present */ 797 skip_hwqp_cnt = true; 798 break; 799 } 800 } 801 if (!skip_hwqp_cnt) { 802 assert(hwqp_cnt < nport->fc_port->num_io_queues); 803 hwqps[hwqp_cnt] = conn->hwqp; 804 hwqp_cnt++; 805 } 806 } 807 } 808 809 if (!hwqp_cnt) { 810 goto bls_rej; 811 } 812 813 args = calloc(hwqp_cnt, 814 sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args)); 815 if (!args) { 816 goto bls_rej; 817 } 818 819 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx)); 820 if (!ctx) { 821 goto bls_rej; 822 } 823 ctx->rpi = rpi; 824 ctx->oxid = oxid; 825 ctx->rxid = rxid; 826 ctx->nport = nport; 827 ctx->nport_hdl = nport->nport_hdl; 828 ctx->port_hdl = nport->fc_port->port_hdl; 829 ctx->num_hwqps = hwqp_cnt; 830 ctx->ls_hwqp = &nport->fc_port->ls_queue; 831 ctx->fcp_rq_id = nport->fc_port->fcp_rq_id; 832 ctx->abts_poller_args = args; 833 834 /* Get a unique context for this ABTS */ 835 ctx->u_id = nvmf_fc_get_abts_unique_id(); 836 837 for (i = 0; i < hwqp_cnt; i++) { 838 poller_arg = args + i; 839 poller_arg->hwqp = hwqps[i]; 840 poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb; 841 poller_arg->cb_info.cb_data = ctx; 842 poller_arg->cb_info.cb_thread = spdk_get_thread(); 843 poller_arg->ctx = ctx; 844 845 nvmf_fc_poller_api_func(poller_arg->hwqp, 846 SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED, 847 poller_arg); 848 } 849 850 free(hwqps); 851 852 return; 853 bls_rej: 854 free(args); 855 free(hwqps); 856 857 /* Send Reject */ 858 nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi, 859 true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL); 860 SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 861 nport->nport_hdl, rpi, oxid, rxid); 862 return; 863 } 864 865 /*** Accessor functions for the FC structures - BEGIN */ 866 /* 867 * Returns true if the port is in offline state. 868 */ 869 bool 870 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port) 871 { 872 if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) { 873 return true; 874 } 875 876 return false; 877 } 878 879 /* 880 * Returns true if the port is in online state. 881 */ 882 bool 883 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port) 884 { 885 if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) { 886 return true; 887 } 888 889 return false; 890 } 891 892 int 893 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port) 894 { 895 if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) { 896 fc_port->hw_port_status = SPDK_FC_PORT_ONLINE; 897 return 0; 898 } 899 900 return -EPERM; 901 } 902 903 int 904 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port) 905 { 906 if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) { 907 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 908 return 0; 909 } 910 911 return -EPERM; 912 } 913 914 int 915 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp) 916 { 917 if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) { 918 hwqp->state = SPDK_FC_HWQP_ONLINE; 919 /* reset some queue counters */ 920 hwqp->num_conns = 0; 921 return nvmf_fc_set_q_online_state(hwqp, true); 922 } 923 924 return -EPERM; 925 } 926 927 int 928 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp) 929 { 930 if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) { 931 hwqp->state = SPDK_FC_HWQP_OFFLINE; 932 return nvmf_fc_set_q_online_state(hwqp, false); 933 } 934 935 return -EPERM; 936 } 937 938 void 939 nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port) 940 { 941 TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link); 942 943 /* 944 * Let LLD add the port to its list. 945 */ 946 nvmf_fc_lld_port_add(fc_port); 947 } 948 949 static void 950 nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port) 951 { 952 TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link); 953 954 /* 955 * Let LLD remove the port from its list. 956 */ 957 nvmf_fc_lld_port_remove(fc_port); 958 } 959 960 struct spdk_nvmf_fc_port * 961 nvmf_fc_port_lookup(uint8_t port_hdl) 962 { 963 struct spdk_nvmf_fc_port *fc_port = NULL; 964 965 TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) { 966 if (fc_port->port_hdl == port_hdl) { 967 return fc_port; 968 } 969 } 970 return NULL; 971 } 972 973 uint32_t 974 nvmf_fc_get_prli_service_params(void) 975 { 976 return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION); 977 } 978 979 int 980 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port, 981 struct spdk_nvmf_fc_nport *nport) 982 { 983 if (fc_port) { 984 TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link); 985 fc_port->num_nports++; 986 return 0; 987 } 988 989 return -EINVAL; 990 } 991 992 int 993 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port, 994 struct spdk_nvmf_fc_nport *nport) 995 { 996 if (fc_port && nport) { 997 TAILQ_REMOVE(&fc_port->nport_list, nport, link); 998 fc_port->num_nports--; 999 return 0; 1000 } 1001 1002 return -EINVAL; 1003 } 1004 1005 static struct spdk_nvmf_fc_nport * 1006 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl) 1007 { 1008 struct spdk_nvmf_fc_nport *fc_nport = NULL; 1009 1010 TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) { 1011 if (fc_nport->nport_hdl == nport_hdl) { 1012 return fc_nport; 1013 } 1014 } 1015 1016 return NULL; 1017 } 1018 1019 struct spdk_nvmf_fc_nport * 1020 nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl) 1021 { 1022 struct spdk_nvmf_fc_port *fc_port = NULL; 1023 1024 fc_port = nvmf_fc_port_lookup(port_hdl); 1025 if (fc_port) { 1026 return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl); 1027 } 1028 1029 return NULL; 1030 } 1031 1032 static inline int 1033 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp, 1034 uint32_t d_id, struct spdk_nvmf_fc_nport **nport, 1035 uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport) 1036 { 1037 struct spdk_nvmf_fc_nport *n_port; 1038 struct spdk_nvmf_fc_remote_port_info *r_port; 1039 1040 assert(hwqp); 1041 if (hwqp == NULL) { 1042 SPDK_ERRLOG("Error: hwqp is NULL\n"); 1043 return -EINVAL; 1044 } 1045 assert(nport); 1046 if (nport == NULL) { 1047 SPDK_ERRLOG("Error: nport is NULL\n"); 1048 return -EINVAL; 1049 } 1050 assert(rport); 1051 if (rport == NULL) { 1052 SPDK_ERRLOG("Error: rport is NULL\n"); 1053 return -EINVAL; 1054 } 1055 1056 TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) { 1057 if (n_port->d_id == d_id) { 1058 TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) { 1059 if (r_port->s_id == s_id) { 1060 *nport = n_port; 1061 *rport = r_port; 1062 return 0; 1063 } 1064 } 1065 break; 1066 } 1067 } 1068 1069 return -ENOENT; 1070 } 1071 1072 /* Returns true if the Nport is empty of all rem_ports */ 1073 bool 1074 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport) 1075 { 1076 if (nport && TAILQ_EMPTY(&nport->rem_port_list)) { 1077 assert(nport->rport_count == 0); 1078 return true; 1079 } else { 1080 return false; 1081 } 1082 } 1083 1084 int 1085 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport, 1086 enum spdk_nvmf_fc_object_state state) 1087 { 1088 if (nport) { 1089 nport->nport_state = state; 1090 return 0; 1091 } else { 1092 return -EINVAL; 1093 } 1094 } 1095 1096 bool 1097 nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport, 1098 struct spdk_nvmf_fc_remote_port_info *rem_port) 1099 { 1100 if (nport && rem_port) { 1101 TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link); 1102 nport->rport_count++; 1103 return 0; 1104 } else { 1105 return -EINVAL; 1106 } 1107 } 1108 1109 bool 1110 nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport, 1111 struct spdk_nvmf_fc_remote_port_info *rem_port) 1112 { 1113 if (nport && rem_port) { 1114 TAILQ_REMOVE(&nport->rem_port_list, rem_port, link); 1115 nport->rport_count--; 1116 return 0; 1117 } else { 1118 return -EINVAL; 1119 } 1120 } 1121 1122 int 1123 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport, 1124 enum spdk_nvmf_fc_object_state state) 1125 { 1126 if (rport) { 1127 rport->rport_state = state; 1128 return 0; 1129 } else { 1130 return -EINVAL; 1131 } 1132 } 1133 int 1134 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc, 1135 enum spdk_nvmf_fc_object_state state) 1136 { 1137 if (assoc) { 1138 assoc->assoc_state = state; 1139 return 0; 1140 } else { 1141 return -EINVAL; 1142 } 1143 } 1144 1145 static struct spdk_nvmf_fc_association * 1146 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr) 1147 { 1148 struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair; 1149 struct spdk_nvmf_fc_conn *fc_conn; 1150 1151 if (!qpair) { 1152 SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid); 1153 return NULL; 1154 } 1155 1156 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 1157 1158 return fc_conn->fc_assoc; 1159 } 1160 1161 bool 1162 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl, 1163 struct spdk_nvmf_ctrlr *ctrlr) 1164 { 1165 struct spdk_nvmf_fc_nport *fc_nport = NULL; 1166 struct spdk_nvmf_fc_association *assoc = NULL; 1167 1168 if (!ctrlr) { 1169 return false; 1170 } 1171 1172 fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl); 1173 if (!fc_nport) { 1174 return false; 1175 } 1176 1177 assoc = nvmf_ctrlr_get_fc_assoc(ctrlr); 1178 if (assoc && assoc->tgtport == fc_nport) { 1179 SPDK_DEBUGLOG(nvmf_fc, 1180 "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n", 1181 ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl, 1182 nport_hdl); 1183 return true; 1184 } 1185 return false; 1186 } 1187 1188 static void 1189 nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp, 1190 struct spdk_nvmf_fc_ls_rqst *ls_rqst) 1191 { 1192 assert(ls_rqst); 1193 1194 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1195 1196 /* Return buffer to chip */ 1197 nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index); 1198 } 1199 1200 static int 1201 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp, 1202 struct spdk_nvmf_fc_nport *nport, 1203 struct spdk_nvmf_fc_remote_port_info *rport) 1204 { 1205 struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp; 1206 int num_deleted = 0; 1207 1208 assert(hwqp); 1209 assert(nport); 1210 assert(rport); 1211 1212 TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) { 1213 if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) { 1214 num_deleted++; 1215 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1216 } 1217 } 1218 return num_deleted; 1219 } 1220 1221 static void 1222 nvmf_fc_req_bdev_abort(void *arg1) 1223 { 1224 struct spdk_nvmf_fc_request *fc_req = arg1; 1225 struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr; 1226 int i; 1227 1228 /* Initial release - we don't have to abort Admin Queue or 1229 * Fabric commands. The AQ commands supported at this time are 1230 * Get-Log-Page, 1231 * Identify 1232 * Set Features 1233 * Get Features 1234 * AER -> Special case and handled differently. 1235 * Every one of the above Admin commands (except AER) run 1236 * to completion and so an Abort of such commands doesn't 1237 * make sense. 1238 */ 1239 /* The Fabric commands supported are 1240 * Property Set 1241 * Property Get 1242 * Connect -> Special case (async. handling). Not sure how to 1243 * handle at this point. Let it run to completion. 1244 */ 1245 for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) { 1246 if (ctrlr->aer_req[i] == &fc_req->req) { 1247 SPDK_NOTICELOG("Abort AER request\n"); 1248 nvmf_qpair_free_aer(fc_req->req.qpair); 1249 } 1250 } 1251 } 1252 1253 void 1254 nvmf_fc_request_abort_complete(void *arg1) 1255 { 1256 struct spdk_nvmf_fc_request *fc_req = 1257 (struct spdk_nvmf_fc_request *)arg1; 1258 struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp; 1259 struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL; 1260 TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs; 1261 1262 /* Make a copy of the cb list from fc_req */ 1263 TAILQ_INIT(&abort_cbs); 1264 TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link); 1265 1266 SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req, 1267 fc_req_state_strs[fc_req->state]); 1268 1269 _nvmf_fc_request_free(fc_req); 1270 1271 /* Request abort completed. Notify all the callbacks */ 1272 TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) { 1273 /* Notify */ 1274 ctx->cb(hwqp, 0, ctx->cb_args); 1275 /* Remove */ 1276 TAILQ_REMOVE(&abort_cbs, ctx, link); 1277 /* free */ 1278 free(ctx); 1279 } 1280 } 1281 1282 void 1283 nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts, 1284 spdk_nvmf_fc_caller_cb cb, void *cb_args) 1285 { 1286 struct spdk_nvmf_fc_caller_ctx *ctx = NULL; 1287 bool kill_req = false; 1288 1289 /* Add the cb to list */ 1290 if (cb) { 1291 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx)); 1292 if (!ctx) { 1293 SPDK_ERRLOG("ctx alloc failed.\n"); 1294 return; 1295 } 1296 ctx->cb = cb; 1297 ctx->cb_args = cb_args; 1298 1299 TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link); 1300 } 1301 1302 if (!fc_req->is_aborted) { 1303 /* Increment aborted command counter */ 1304 fc_req->hwqp->counters.num_aborted++; 1305 } 1306 1307 /* If port is dead, skip abort wqe */ 1308 kill_req = nvmf_fc_is_port_dead(fc_req->hwqp); 1309 if (kill_req && nvmf_fc_req_in_xfer(fc_req)) { 1310 fc_req->is_aborted = true; 1311 goto complete; 1312 } 1313 1314 /* Check if the request is already marked for deletion */ 1315 if (fc_req->is_aborted) { 1316 return; 1317 } 1318 1319 /* Mark request as aborted */ 1320 fc_req->is_aborted = true; 1321 1322 /* If xchg is allocated, then save if we need to send abts or not. */ 1323 if (fc_req->xchg) { 1324 fc_req->xchg->send_abts = send_abts; 1325 fc_req->xchg->aborted = true; 1326 } 1327 1328 switch (fc_req->state) { 1329 case SPDK_NVMF_FC_REQ_BDEV_ABORTED: 1330 /* Aborted by backend */ 1331 goto complete; 1332 1333 case SPDK_NVMF_FC_REQ_READ_BDEV: 1334 case SPDK_NVMF_FC_REQ_WRITE_BDEV: 1335 case SPDK_NVMF_FC_REQ_NONE_BDEV: 1336 /* Notify bdev */ 1337 spdk_thread_send_msg(fc_req->hwqp->thread, 1338 nvmf_fc_req_bdev_abort, (void *)fc_req); 1339 break; 1340 1341 case SPDK_NVMF_FC_REQ_READ_XFER: 1342 case SPDK_NVMF_FC_REQ_READ_RSP: 1343 case SPDK_NVMF_FC_REQ_WRITE_XFER: 1344 case SPDK_NVMF_FC_REQ_WRITE_RSP: 1345 case SPDK_NVMF_FC_REQ_NONE_RSP: 1346 /* Notify HBA to abort this exchange */ 1347 nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL); 1348 break; 1349 1350 case SPDK_NVMF_FC_REQ_PENDING: 1351 /* Remove from pending */ 1352 nvmf_fc_request_remove_from_pending(fc_req); 1353 goto complete; 1354 case SPDK_NVMF_FC_REQ_FUSED_WAITING: 1355 TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link); 1356 goto complete; 1357 default: 1358 SPDK_ERRLOG("Request in invalid state.\n"); 1359 goto complete; 1360 } 1361 1362 return; 1363 complete: 1364 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED); 1365 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE, 1366 (void *)fc_req); 1367 } 1368 1369 static int 1370 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req) 1371 { 1372 uint32_t length = fc_req->req.length; 1373 struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup; 1374 struct spdk_nvmf_transport_poll_group *group = &fgroup->group; 1375 struct spdk_nvmf_transport *transport = group->transport; 1376 1377 if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) { 1378 return -ENOMEM; 1379 } 1380 1381 return 0; 1382 } 1383 1384 static int 1385 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req) 1386 { 1387 /* Allocate an XCHG if we dont use send frame for this command. */ 1388 if (!nvmf_fc_use_send_frame(fc_req)) { 1389 fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp); 1390 if (!fc_req->xchg) { 1391 fc_req->hwqp->counters.no_xchg++; 1392 return -EAGAIN; 1393 } 1394 } 1395 1396 if (fc_req->req.length) { 1397 if (nvmf_fc_request_alloc_buffers(fc_req) < 0) { 1398 fc_req->hwqp->counters.buf_alloc_err++; 1399 if (fc_req->xchg) { 1400 nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg); 1401 fc_req->xchg = NULL; 1402 } 1403 return -EAGAIN; 1404 } 1405 fc_req->req.data = fc_req->req.iov[0].iov_base; 1406 } 1407 1408 if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 1409 SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n"); 1410 1411 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER); 1412 1413 if (nvmf_fc_recv_data(fc_req)) { 1414 /* Dropped return success to caller */ 1415 fc_req->hwqp->counters.unexpected_err++; 1416 _nvmf_fc_request_free(fc_req); 1417 } 1418 } else { 1419 SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n"); 1420 1421 if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1422 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV); 1423 } else { 1424 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV); 1425 } 1426 spdk_nvmf_request_exec(&fc_req->req); 1427 } 1428 1429 return 0; 1430 } 1431 1432 static void 1433 nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req, 1434 struct spdk_nvmf_fc_frame_hdr *fchdr) 1435 { 1436 uint8_t df_ctl = fchdr->df_ctl; 1437 uint32_t f_ctl = fchdr->f_ctl; 1438 1439 /* VMID */ 1440 if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) { 1441 struct spdk_nvmf_fc_vm_header *vhdr; 1442 uint32_t vmhdr_offset = 0; 1443 1444 if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) { 1445 vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE; 1446 } 1447 1448 if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) { 1449 vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE; 1450 } 1451 1452 vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr + 1453 sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset); 1454 fc_req->app_id = from_be32(&vhdr->src_vmid); 1455 } 1456 1457 /* Priority */ 1458 if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) { 1459 fc_req->csctl = fchdr->cs_ctl; 1460 } 1461 } 1462 1463 static int 1464 nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame, 1465 struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen) 1466 { 1467 uint16_t cmnd_len; 1468 uint64_t rqst_conn_id; 1469 struct spdk_nvmf_fc_request *fc_req = NULL; 1470 struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL; 1471 struct spdk_nvmf_fc_conn *fc_conn = NULL; 1472 enum spdk_nvme_data_transfer xfer; 1473 uint32_t s_id, d_id; 1474 1475 s_id = (uint32_t)frame->s_id; 1476 d_id = (uint32_t)frame->d_id; 1477 s_id = from_be32(&s_id) >> 8; 1478 d_id = from_be32(&d_id) >> 8; 1479 1480 cmd_iu = buffer->virt; 1481 cmnd_len = cmd_iu->cmnd_iu_len; 1482 cmnd_len = from_be16(&cmnd_len); 1483 1484 /* check for a valid cmnd_iu format */ 1485 if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) || 1486 (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) || 1487 (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) { 1488 SPDK_ERRLOG("IU CMD error\n"); 1489 hwqp->counters.nvme_cmd_iu_err++; 1490 return -ENXIO; 1491 } 1492 1493 xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags); 1494 if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) { 1495 SPDK_ERRLOG("IU CMD xfer error\n"); 1496 hwqp->counters.nvme_cmd_xfer_err++; 1497 return -EPERM; 1498 } 1499 1500 rqst_conn_id = from_be64(&cmd_iu->conn_id); 1501 1502 if (rte_hash_lookup_data(hwqp->connection_list_hash, 1503 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) { 1504 SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id); 1505 hwqp->counters.invalid_conn_err++; 1506 return -ENODEV; 1507 } 1508 1509 /* Validate s_id and d_id */ 1510 if (s_id != fc_conn->s_id) { 1511 hwqp->counters.rport_invalid++; 1512 SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id); 1513 return -ENODEV; 1514 } 1515 1516 if (d_id != fc_conn->d_id) { 1517 hwqp->counters.nport_invalid++; 1518 SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id); 1519 return -ENODEV; 1520 } 1521 1522 /* If association/connection is being deleted - return */ 1523 if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1524 SPDK_ERRLOG("Association %ld state = %d not valid\n", 1525 fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state); 1526 return -EACCES; 1527 } 1528 1529 if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1530 SPDK_ERRLOG("Connection %ld state = %d not valid\n", 1531 rqst_conn_id, fc_conn->conn_state); 1532 return -EACCES; 1533 } 1534 1535 if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) { 1536 SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n", 1537 rqst_conn_id, fc_conn->qpair.state); 1538 return -EACCES; 1539 } 1540 1541 /* Make sure xfer len is according to mdts */ 1542 if (from_be32(&cmd_iu->data_len) > 1543 hwqp->fgroup->group.transport->opts.max_io_size) { 1544 SPDK_ERRLOG("IO length requested is greater than MDTS\n"); 1545 return -EINVAL; 1546 } 1547 1548 /* allocate a request buffer */ 1549 fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn); 1550 if (fc_req == NULL) { 1551 return -ENOMEM; 1552 } 1553 1554 fc_req->req.length = from_be32(&cmd_iu->data_len); 1555 fc_req->req.qpair = &fc_conn->qpair; 1556 memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg)); 1557 fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd; 1558 fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp; 1559 fc_req->oxid = frame->ox_id; 1560 fc_req->oxid = from_be16(&fc_req->oxid); 1561 fc_req->rpi = fc_conn->rpi; 1562 fc_req->poller_lcore = hwqp->lcore_id; 1563 fc_req->poller_thread = hwqp->thread; 1564 fc_req->hwqp = hwqp; 1565 fc_req->fc_conn = fc_conn; 1566 fc_req->req.xfer = xfer; 1567 fc_req->s_id = s_id; 1568 fc_req->d_id = d_id; 1569 fc_req->csn = from_be32(&cmd_iu->cmnd_seq_num); 1570 nvmf_fc_set_vmid_priority(fc_req, frame); 1571 1572 nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT); 1573 1574 if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) { 1575 STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link); 1576 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING); 1577 } 1578 1579 return 0; 1580 } 1581 1582 /* 1583 * These functions are called from the FC LLD 1584 */ 1585 1586 void 1587 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req) 1588 { 1589 struct spdk_nvmf_fc_hwqp *hwqp; 1590 struct spdk_nvmf_transport_poll_group *group; 1591 1592 if (!fc_req) { 1593 return; 1594 } 1595 hwqp = fc_req->hwqp; 1596 1597 if (fc_req->xchg) { 1598 nvmf_fc_put_xchg(hwqp, fc_req->xchg); 1599 fc_req->xchg = NULL; 1600 } 1601 1602 /* Release IO buffers */ 1603 if (fc_req->req.data_from_pool) { 1604 group = &hwqp->fgroup->group; 1605 spdk_nvmf_request_free_buffers(&fc_req->req, group, 1606 group->transport); 1607 } 1608 fc_req->req.data = NULL; 1609 fc_req->req.iovcnt = 0; 1610 1611 /* Free Fc request */ 1612 nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req); 1613 } 1614 1615 void 1616 nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req, 1617 enum spdk_nvmf_fc_request_state state) 1618 { 1619 assert(fc_req->magic != 0xDEADBEEF); 1620 1621 SPDK_DEBUGLOG(nvmf_fc, 1622 "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req, 1623 nvmf_fc_request_get_state_str(fc_req->state), 1624 nvmf_fc_request_get_state_str(state)); 1625 nvmf_fc_record_req_trace_point(fc_req, state); 1626 fc_req->state = state; 1627 } 1628 1629 char * 1630 nvmf_fc_request_get_state_str(int state) 1631 { 1632 static char *unk_str = "unknown"; 1633 1634 return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ? 1635 fc_req_state_strs[state] : unk_str); 1636 } 1637 1638 int 1639 nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp, 1640 uint32_t buff_idx, 1641 struct spdk_nvmf_fc_frame_hdr *frame, 1642 struct spdk_nvmf_fc_buffer_desc *buffer, 1643 uint32_t plen) 1644 { 1645 int rc = 0; 1646 uint32_t s_id, d_id; 1647 struct spdk_nvmf_fc_nport *nport = NULL; 1648 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 1649 1650 s_id = (uint32_t)frame->s_id; 1651 d_id = (uint32_t)frame->d_id; 1652 s_id = from_be32(&s_id) >> 8; 1653 d_id = from_be32(&d_id) >> 8; 1654 1655 SPDK_DEBUGLOG(nvmf_fc, 1656 "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n", 1657 s_id, d_id, 1658 ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff), 1659 ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff)); 1660 1661 if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) && 1662 (frame->type == FCNVME_TYPE_NVMF_DATA)) { 1663 struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt; 1664 struct spdk_nvmf_fc_ls_rqst *ls_rqst; 1665 1666 SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n"); 1667 1668 rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport); 1669 if (rc) { 1670 if (nport == NULL) { 1671 SPDK_ERRLOG("Nport not found. Dropping\n"); 1672 /* increment invalid nport counter */ 1673 hwqp->counters.nport_invalid++; 1674 } else if (rport == NULL) { 1675 SPDK_ERRLOG("Rport not found. Dropping\n"); 1676 /* increment invalid rport counter */ 1677 hwqp->counters.rport_invalid++; 1678 } 1679 return rc; 1680 } 1681 1682 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED || 1683 rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1684 SPDK_ERRLOG("%s state not created. Dropping\n", 1685 nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ? 1686 "Nport" : "Rport"); 1687 return -EACCES; 1688 } 1689 1690 /* Use the RQ buffer for holding LS request. */ 1691 ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst; 1692 1693 /* Fill in the LS request structure */ 1694 ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst; 1695 ls_rqst->rqstbuf.phys = buffer->phys + 1696 offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst); 1697 ls_rqst->rqstbuf.buf_index = buff_idx; 1698 ls_rqst->rqst_len = plen; 1699 1700 ls_rqst->rspbuf.virt = (void *)&req_buf->resp; 1701 ls_rqst->rspbuf.phys = buffer->phys + 1702 offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp); 1703 ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE; 1704 1705 ls_rqst->private_data = (void *)hwqp; 1706 ls_rqst->rpi = rport->rpi; 1707 ls_rqst->oxid = (uint16_t)frame->ox_id; 1708 ls_rqst->oxid = from_be16(&ls_rqst->oxid); 1709 ls_rqst->s_id = s_id; 1710 ls_rqst->d_id = d_id; 1711 ls_rqst->nport = nport; 1712 ls_rqst->rport = rport; 1713 ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt; 1714 1715 if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) { 1716 ls_rqst->xchg = nvmf_fc_get_xri(hwqp); 1717 } else { 1718 ls_rqst->xchg = NULL; 1719 } 1720 1721 if (ls_rqst->xchg) { 1722 /* Handover the request to LS module */ 1723 nvmf_fc_handle_ls_rqst(ls_rqst); 1724 } else { 1725 /* No XCHG available. Add to pending list. */ 1726 hwqp->counters.no_xchg++; 1727 TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1728 } 1729 } else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) && 1730 (frame->type == FCNVME_TYPE_FC_EXCHANGE)) { 1731 1732 SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n"); 1733 rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen); 1734 if (!rc) { 1735 nvmf_fc_rqpair_buffer_release(hwqp, buff_idx); 1736 } 1737 } else { 1738 1739 SPDK_ERRLOG("Unknown frame received. Dropping\n"); 1740 hwqp->counters.unknown_frame++; 1741 rc = -EINVAL; 1742 } 1743 1744 return rc; 1745 } 1746 1747 void 1748 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp) 1749 { 1750 struct spdk_nvmf_request *req = NULL, *tmp; 1751 struct spdk_nvmf_fc_request *fc_req; 1752 int budget = 64; 1753 1754 if (!hwqp->fgroup) { 1755 /* LS queue is tied to acceptor_poll group and LS pending requests 1756 * are stagged and processed using hwqp->ls_pending_queue. 1757 */ 1758 return; 1759 } 1760 1761 STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) { 1762 fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req); 1763 if (!nvmf_fc_request_execute(fc_req)) { 1764 /* Successfully posted, Delete from pending. */ 1765 nvmf_fc_request_remove_from_pending(fc_req); 1766 } 1767 1768 if (budget) { 1769 budget--; 1770 } else { 1771 return; 1772 } 1773 } 1774 } 1775 1776 void 1777 nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp) 1778 { 1779 struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp; 1780 struct spdk_nvmf_fc_nport *nport = NULL; 1781 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 1782 1783 TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) { 1784 /* lookup nport and rport again - make sure they are still valid */ 1785 int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport); 1786 if (rc) { 1787 if (nport == NULL) { 1788 SPDK_ERRLOG("Nport not found. Dropping\n"); 1789 /* increment invalid nport counter */ 1790 hwqp->counters.nport_invalid++; 1791 } else if (rport == NULL) { 1792 SPDK_ERRLOG("Rport not found. Dropping\n"); 1793 /* increment invalid rport counter */ 1794 hwqp->counters.rport_invalid++; 1795 } 1796 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1797 continue; 1798 } 1799 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED || 1800 rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1801 SPDK_ERRLOG("%s state not created. Dropping\n", 1802 nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ? 1803 "Nport" : "Rport"); 1804 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1805 continue; 1806 } 1807 1808 ls_rqst->xchg = nvmf_fc_get_xri(hwqp); 1809 if (ls_rqst->xchg) { 1810 /* Got an XCHG */ 1811 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1812 /* Handover the request to LS module */ 1813 nvmf_fc_handle_ls_rqst(ls_rqst); 1814 } else { 1815 /* No more XCHGs. Stop processing. */ 1816 hwqp->counters.no_xchg++; 1817 return; 1818 } 1819 } 1820 } 1821 1822 int 1823 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req) 1824 { 1825 int rc = 0; 1826 struct spdk_nvmf_request *req = &fc_req->req; 1827 struct spdk_nvmf_qpair *qpair = req->qpair; 1828 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1829 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1830 uint16_t ersp_len = 0; 1831 1832 /* set sq head value in resp */ 1833 rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair); 1834 1835 /* Increment connection responses */ 1836 fc_conn->rsp_count++; 1837 1838 if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count, 1839 fc_req->transferred_len)) { 1840 /* Fill ERSP Len */ 1841 to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) / 1842 sizeof(uint32_t))); 1843 fc_req->ersp.ersp_len = ersp_len; 1844 1845 /* Fill RSN */ 1846 to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn); 1847 fc_conn->rsn++; 1848 1849 /* Fill transfer length */ 1850 to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len); 1851 1852 SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n"); 1853 rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp, 1854 sizeof(struct spdk_nvmf_fc_ersp_iu)); 1855 } else { 1856 SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n"); 1857 rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0); 1858 } 1859 1860 return rc; 1861 } 1862 1863 bool 1864 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req, 1865 uint32_t rsp_cnt, uint32_t xfer_len) 1866 { 1867 struct spdk_nvmf_request *req = &fc_req->req; 1868 struct spdk_nvmf_qpair *qpair = req->qpair; 1869 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1870 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1871 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1872 uint16_t status = *((uint16_t *)&rsp->status); 1873 1874 /* 1875 * Check if we need to send ERSP 1876 * 1) For every N responses where N == ersp_ratio 1877 * 2) Fabric commands. 1878 * 3) Completion status failed or Completion dw0 or dw1 valid. 1879 * 4) SQ == 90% full. 1880 * 5) Transfer length not equal to CMD IU length 1881 */ 1882 1883 if (!(rsp_cnt % fc_conn->esrp_ratio) || 1884 (cmd->opc == SPDK_NVME_OPC_FABRIC) || 1885 (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 || 1886 (req->length != xfer_len)) { 1887 return true; 1888 } 1889 return false; 1890 } 1891 1892 static int 1893 nvmf_fc_request_complete(struct spdk_nvmf_request *req) 1894 { 1895 int rc = 0; 1896 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 1897 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1898 1899 if (fc_req->is_aborted) { 1900 /* Defer this to make sure we dont call io cleanup in same context. */ 1901 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE, 1902 (void *)fc_req); 1903 } else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && 1904 req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1905 1906 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER); 1907 1908 rc = nvmf_fc_send_data(fc_req); 1909 } else { 1910 if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 1911 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP); 1912 } else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1913 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP); 1914 } else { 1915 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP); 1916 } 1917 1918 rc = nvmf_fc_handle_rsp(fc_req); 1919 } 1920 1921 if (rc) { 1922 SPDK_ERRLOG("Error in request complete.\n"); 1923 _nvmf_fc_request_free(fc_req); 1924 } 1925 return 0; 1926 } 1927 1928 struct spdk_nvmf_tgt * 1929 nvmf_fc_get_tgt(void) 1930 { 1931 if (g_nvmf_ftransport) { 1932 return g_nvmf_ftransport->transport.tgt; 1933 } 1934 return NULL; 1935 } 1936 1937 /* 1938 * FC Transport Public API begins here 1939 */ 1940 1941 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128 1942 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32 1943 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5 1944 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0 1945 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536 1946 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096 1947 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192 1948 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE / \ 1949 SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE) 1950 1951 static void 1952 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts) 1953 { 1954 opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH; 1955 opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR; 1956 opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE; 1957 opts->max_io_size = SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE; 1958 opts->io_unit_size = SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE; 1959 opts->max_aq_depth = SPDK_NVMF_FC_DEFAULT_AQ_DEPTH; 1960 opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS; 1961 } 1962 1963 static int 1964 nvmf_fc_accept(void *ctx); 1965 1966 static struct spdk_nvmf_transport * 1967 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts) 1968 { 1969 uint32_t sge_count; 1970 1971 SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n" 1972 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 1973 " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 1974 " max_aq_depth=%d\n", 1975 opts->max_queue_depth, 1976 opts->max_io_size, 1977 opts->max_qpairs_per_ctrlr - 1, 1978 opts->io_unit_size, 1979 opts->max_aq_depth); 1980 1981 if (g_nvmf_ftransport) { 1982 SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n"); 1983 return NULL; 1984 } 1985 1986 if (spdk_env_get_last_core() < 1) { 1987 SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n", 1988 spdk_env_get_last_core() + 1); 1989 return NULL; 1990 } 1991 1992 sge_count = opts->max_io_size / opts->io_unit_size; 1993 if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) { 1994 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 1995 return NULL; 1996 } 1997 1998 g_nvmf_fc_main_thread = spdk_get_thread(); 1999 g_nvmf_fgroup_count = 0; 2000 g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport)); 2001 2002 if (!g_nvmf_ftransport) { 2003 SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n"); 2004 return NULL; 2005 } 2006 2007 if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) { 2008 SPDK_ERRLOG("pthread_mutex_init() failed\n"); 2009 free(g_nvmf_ftransport); 2010 g_nvmf_ftransport = NULL; 2011 return NULL; 2012 } 2013 2014 g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept, 2015 &g_nvmf_ftransport->transport, opts->acceptor_poll_rate); 2016 if (!g_nvmf_ftransport->accept_poller) { 2017 free(g_nvmf_ftransport); 2018 g_nvmf_ftransport = NULL; 2019 return NULL; 2020 } 2021 2022 /* initialize the low level FC driver */ 2023 nvmf_fc_lld_init(); 2024 2025 return &g_nvmf_ftransport->transport; 2026 } 2027 2028 static void 2029 nvmf_fc_destroy_done_cb(void *cb_arg) 2030 { 2031 free(g_nvmf_ftransport); 2032 if (g_transport_destroy_done_cb) { 2033 g_transport_destroy_done_cb(cb_arg); 2034 g_transport_destroy_done_cb = NULL; 2035 } 2036 } 2037 2038 static int 2039 nvmf_fc_destroy(struct spdk_nvmf_transport *transport, 2040 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 2041 { 2042 if (transport) { 2043 struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp; 2044 2045 /* clean up any FC poll groups still around */ 2046 TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) { 2047 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2048 free(fgroup); 2049 } 2050 2051 spdk_poller_unregister(&g_nvmf_ftransport->accept_poller); 2052 g_nvmf_fgroup_count = 0; 2053 g_transport_destroy_done_cb = cb_fn; 2054 2055 /* low level FC driver clean up */ 2056 nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg); 2057 } 2058 2059 return 0; 2060 } 2061 2062 static int 2063 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, 2064 struct spdk_nvmf_listen_opts *listen_opts) 2065 { 2066 return 0; 2067 } 2068 2069 static void 2070 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport, 2071 const struct spdk_nvme_transport_id *_trid) 2072 { 2073 } 2074 2075 static int 2076 nvmf_fc_accept(void *ctx) 2077 { 2078 struct spdk_nvmf_fc_port *fc_port = NULL; 2079 uint32_t count = 0; 2080 static bool start_lld = false; 2081 2082 if (spdk_unlikely(!start_lld)) { 2083 start_lld = true; 2084 nvmf_fc_lld_start(); 2085 } 2086 2087 /* poll the LS queue on each port */ 2088 TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) { 2089 if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) { 2090 count += nvmf_fc_process_queue(&fc_port->ls_queue); 2091 } 2092 } 2093 2094 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 2095 } 2096 2097 static void 2098 nvmf_fc_discover(struct spdk_nvmf_transport *transport, 2099 struct spdk_nvme_transport_id *trid, 2100 struct spdk_nvmf_discovery_log_page_entry *entry) 2101 { 2102 entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC; 2103 entry->adrfam = trid->adrfam; 2104 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED; 2105 2106 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 2107 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 2108 } 2109 2110 static struct spdk_nvmf_transport_poll_group * 2111 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport, 2112 struct spdk_nvmf_poll_group *group) 2113 { 2114 struct spdk_nvmf_fc_poll_group *fgroup; 2115 struct spdk_nvmf_fc_transport *ftransport = 2116 SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport); 2117 2118 fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group)); 2119 if (!fgroup) { 2120 SPDK_ERRLOG("Unable to alloc FC poll group\n"); 2121 return NULL; 2122 } 2123 2124 TAILQ_INIT(&fgroup->hwqp_list); 2125 2126 pthread_mutex_lock(&ftransport->lock); 2127 TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link); 2128 g_nvmf_fgroup_count++; 2129 pthread_mutex_unlock(&ftransport->lock); 2130 2131 return &fgroup->group; 2132 } 2133 2134 static void 2135 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 2136 { 2137 struct spdk_nvmf_fc_poll_group *fgroup; 2138 struct spdk_nvmf_fc_transport *ftransport = 2139 SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport); 2140 2141 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2142 pthread_mutex_lock(&ftransport->lock); 2143 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2144 g_nvmf_fgroup_count--; 2145 pthread_mutex_unlock(&ftransport->lock); 2146 2147 free(fgroup); 2148 } 2149 2150 static int 2151 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2152 struct spdk_nvmf_qpair *qpair) 2153 { 2154 struct spdk_nvmf_fc_poll_group *fgroup; 2155 struct spdk_nvmf_fc_conn *fc_conn; 2156 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2157 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2158 bool hwqp_found = false; 2159 2160 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2161 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2162 2163 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2164 if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) { 2165 hwqp_found = true; 2166 break; 2167 } 2168 } 2169 2170 if (!hwqp_found) { 2171 SPDK_ERRLOG("No valid hwqp found for new QP.\n"); 2172 goto err; 2173 } 2174 2175 if (!nvmf_fc_assign_conn_to_hwqp(hwqp, 2176 &fc_conn->conn_id, 2177 fc_conn->max_queue_depth)) { 2178 SPDK_ERRLOG("Failed to get a connection id for new QP.\n"); 2179 goto err; 2180 } 2181 2182 fc_conn->hwqp = hwqp; 2183 2184 /* If this is for ADMIN connection, then update assoc ID. */ 2185 if (fc_conn->qpair.qid == 0) { 2186 fc_conn->fc_assoc->assoc_id = fc_conn->conn_id; 2187 } 2188 2189 api_data = &fc_conn->create_opd->u.add_conn; 2190 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args); 2191 return 0; 2192 err: 2193 return -1; 2194 } 2195 2196 static int 2197 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2198 { 2199 uint32_t count = 0; 2200 struct spdk_nvmf_fc_poll_group *fgroup; 2201 struct spdk_nvmf_fc_hwqp *hwqp; 2202 2203 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2204 2205 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2206 if (hwqp->state == SPDK_FC_HWQP_ONLINE) { 2207 count += nvmf_fc_process_queue(hwqp); 2208 } 2209 } 2210 2211 return (int) count; 2212 } 2213 2214 static int 2215 nvmf_fc_request_free(struct spdk_nvmf_request *req) 2216 { 2217 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 2218 2219 if (!fc_req->is_aborted) { 2220 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED); 2221 nvmf_fc_request_abort(fc_req, true, NULL, NULL); 2222 } else { 2223 nvmf_fc_request_abort_complete(fc_req); 2224 } 2225 2226 return 0; 2227 } 2228 2229 static void 2230 nvmf_fc_connection_delete_done_cb(void *arg) 2231 { 2232 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2233 2234 if (fc_ctx->cb_fn) { 2235 spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx); 2236 } 2237 free(fc_ctx); 2238 } 2239 2240 static void 2241 _nvmf_fc_close_qpair(void *arg) 2242 { 2243 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2244 struct spdk_nvmf_qpair *qpair = fc_ctx->qpair; 2245 struct spdk_nvmf_fc_conn *fc_conn; 2246 int rc; 2247 2248 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2249 if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) { 2250 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2251 2252 if (fc_conn->create_opd) { 2253 api_data = &fc_conn->create_opd->u.add_conn; 2254 2255 nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst, 2256 api_data->args.fc_conn, api_data->aq_conn); 2257 } 2258 } else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) { 2259 rc = nvmf_fc_delete_connection(fc_conn, false, true, 2260 nvmf_fc_connection_delete_done_cb, fc_ctx); 2261 if (!rc) { 2262 /* Wait for transport to complete its work. */ 2263 return; 2264 } 2265 2266 SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__); 2267 } 2268 2269 nvmf_fc_connection_delete_done_cb(fc_ctx); 2270 } 2271 2272 static void 2273 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair, 2274 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 2275 { 2276 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx; 2277 2278 fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx)); 2279 if (!fc_ctx) { 2280 SPDK_ERRLOG("Unable to allocate close_qpair ctx."); 2281 if (cb_fn) { 2282 cb_fn(cb_arg); 2283 } 2284 return; 2285 } 2286 fc_ctx->qpair = qpair; 2287 fc_ctx->cb_fn = cb_fn; 2288 fc_ctx->cb_ctx = cb_arg; 2289 fc_ctx->qpair_thread = spdk_get_thread(); 2290 2291 spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx); 2292 } 2293 2294 static int 2295 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2296 struct spdk_nvme_transport_id *trid) 2297 { 2298 struct spdk_nvmf_fc_conn *fc_conn; 2299 2300 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2301 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2302 return 0; 2303 } 2304 2305 static int 2306 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2307 struct spdk_nvme_transport_id *trid) 2308 { 2309 struct spdk_nvmf_fc_conn *fc_conn; 2310 2311 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2312 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2313 return 0; 2314 } 2315 2316 static int 2317 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2318 struct spdk_nvme_transport_id *trid) 2319 { 2320 struct spdk_nvmf_fc_conn *fc_conn; 2321 2322 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2323 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2324 return 0; 2325 } 2326 2327 static void 2328 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 2329 struct spdk_nvmf_request *req) 2330 { 2331 spdk_nvmf_request_complete(req); 2332 } 2333 2334 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = { 2335 .name = "FC", 2336 .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC, 2337 .opts_init = nvmf_fc_opts_init, 2338 .create = nvmf_fc_create, 2339 .destroy = nvmf_fc_destroy, 2340 2341 .listen = nvmf_fc_listen, 2342 .stop_listen = nvmf_fc_stop_listen, 2343 2344 .listener_discover = nvmf_fc_discover, 2345 2346 .poll_group_create = nvmf_fc_poll_group_create, 2347 .poll_group_destroy = nvmf_fc_poll_group_destroy, 2348 .poll_group_add = nvmf_fc_poll_group_add, 2349 .poll_group_poll = nvmf_fc_poll_group_poll, 2350 2351 .req_complete = nvmf_fc_request_complete, 2352 .req_free = nvmf_fc_request_free, 2353 .qpair_fini = nvmf_fc_close_qpair, 2354 .qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid, 2355 .qpair_get_local_trid = nvmf_fc_qpair_get_local_trid, 2356 .qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid, 2357 .qpair_abort_request = nvmf_fc_qpair_abort_request, 2358 }; 2359 2360 /* Initializes the data for the creation of a FC-Port object in the SPDK 2361 * library. The spdk_nvmf_fc_port is a well defined structure that is part of 2362 * the API to the library. The contents added to this well defined structure 2363 * is private to each vendors implementation. 2364 */ 2365 static int 2366 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port, 2367 struct spdk_nvmf_fc_hw_port_init_args *args) 2368 { 2369 int rc = 0; 2370 /* Used a high number for the LS HWQP so that it does not clash with the 2371 * IO HWQP's and immediately shows a LS queue during tracing. 2372 */ 2373 uint32_t i; 2374 2375 fc_port->port_hdl = args->port_handle; 2376 fc_port->lld_fc_port = args->lld_fc_port; 2377 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2378 fc_port->fcp_rq_id = args->fcp_rq_id; 2379 fc_port->num_io_queues = args->io_queue_cnt; 2380 2381 /* 2382 * Set port context from init args. Used for FCP port stats. 2383 */ 2384 fc_port->port_ctx = args->port_ctx; 2385 2386 /* 2387 * Initialize the LS queue wherever needed. 2388 */ 2389 fc_port->ls_queue.queues = args->ls_queue; 2390 fc_port->ls_queue.thread = nvmf_fc_get_main_thread(); 2391 fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues; 2392 fc_port->ls_queue.is_ls_queue = true; 2393 2394 /* 2395 * Initialize the LS queue. 2396 */ 2397 rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue); 2398 if (rc) { 2399 return rc; 2400 } 2401 2402 /* 2403 * Initialize the IO queues. 2404 */ 2405 for (i = 0; i < args->io_queue_cnt; i++) { 2406 struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i]; 2407 hwqp->hwqp_id = i; 2408 hwqp->queues = args->io_queues[i]; 2409 hwqp->is_ls_queue = false; 2410 rc = nvmf_fc_init_hwqp(fc_port, hwqp); 2411 if (rc) { 2412 for (; i > 0; --i) { 2413 rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash); 2414 rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash); 2415 } 2416 rte_hash_free(fc_port->ls_queue.connection_list_hash); 2417 rte_hash_free(fc_port->ls_queue.rport_list_hash); 2418 return rc; 2419 } 2420 } 2421 2422 /* 2423 * Initialize the LS processing for port 2424 */ 2425 nvmf_fc_ls_init(fc_port); 2426 2427 /* 2428 * Initialize the list of nport on this HW port. 2429 */ 2430 TAILQ_INIT(&fc_port->nport_list); 2431 fc_port->num_nports = 0; 2432 2433 return 0; 2434 } 2435 2436 /* 2437 * FC port must have all its nports deleted before transitioning to offline state. 2438 */ 2439 static void 2440 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port) 2441 { 2442 struct spdk_nvmf_fc_nport *nport = NULL; 2443 /* All nports must have been deleted at this point for this fc port */ 2444 DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list)); 2445 DEV_VERIFY(fc_port->num_nports == 0); 2446 /* Mark the nport states to be zombie, if they exist */ 2447 if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) { 2448 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 2449 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2450 } 2451 } 2452 } 2453 2454 static void 2455 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err) 2456 { 2457 ASSERT_SPDK_FC_MAIN_THREAD(); 2458 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args; 2459 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2460 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2461 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2462 int spdk_err = 0; 2463 uint8_t port_handle = cb_data->port_handle; 2464 uint32_t s_id = rport->s_id; 2465 uint32_t rpi = rport->rpi; 2466 uint32_t assoc_count = rport->assoc_count; 2467 uint32_t nport_hdl = nport->nport_hdl; 2468 uint32_t d_id = nport->d_id; 2469 char log_str[256]; 2470 2471 /* 2472 * Assert on any delete failure. 2473 */ 2474 if (0 != err) { 2475 DEV_VERIFY(!"Error in IT Delete callback."); 2476 goto out; 2477 } 2478 2479 if (cb_func != NULL) { 2480 (void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2481 } 2482 2483 out: 2484 free(cb_data); 2485 2486 snprintf(log_str, sizeof(log_str), 2487 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n", 2488 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err); 2489 2490 if (err != 0) { 2491 SPDK_ERRLOG("%s", log_str); 2492 } else { 2493 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2494 } 2495 } 2496 2497 static void 2498 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err) 2499 { 2500 ASSERT_SPDK_FC_MAIN_THREAD(); 2501 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args; 2502 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2503 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2504 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func; 2505 uint32_t s_id = rport->s_id; 2506 uint32_t rpi = rport->rpi; 2507 uint32_t assoc_count = rport->assoc_count; 2508 uint32_t nport_hdl = nport->nport_hdl; 2509 uint32_t d_id = nport->d_id; 2510 char log_str[256]; 2511 2512 /* 2513 * Assert on any association delete failure. We continue to delete other 2514 * associations in promoted builds. 2515 */ 2516 if (0 != err) { 2517 DEV_VERIFY(!"Nport's association delete callback returned error"); 2518 if (nport->assoc_count > 0) { 2519 nport->assoc_count--; 2520 } 2521 if (rport->assoc_count > 0) { 2522 rport->assoc_count--; 2523 } 2524 } 2525 2526 /* 2527 * If this is the last association being deleted for the ITN, 2528 * execute the callback(s). 2529 */ 2530 if (0 == rport->assoc_count) { 2531 /* Remove the rport from the remote port list. */ 2532 if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) { 2533 SPDK_ERRLOG("Error while removing rport from list.\n"); 2534 DEV_VERIFY(!"Error while removing rport from list."); 2535 } 2536 2537 if (cb_func != NULL) { 2538 /* 2539 * Callback function is provided by the caller 2540 * of nvmf_fc_adm_i_t_delete_assoc(). 2541 */ 2542 (void)cb_func(cb_data->cb_ctx, 0); 2543 } 2544 free(rport); 2545 free(args); 2546 } 2547 2548 snprintf(log_str, sizeof(log_str), 2549 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n", 2550 nport_hdl, s_id, d_id, rpi, assoc_count, err); 2551 2552 if (err != 0) { 2553 SPDK_ERRLOG("%s", log_str); 2554 } else { 2555 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2556 } 2557 } 2558 2559 /** 2560 * Process a IT delete. 2561 */ 2562 static void 2563 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport, 2564 struct spdk_nvmf_fc_remote_port_info *rport, 2565 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func, 2566 void *cb_ctx) 2567 { 2568 int err = 0; 2569 struct spdk_nvmf_fc_association *assoc = NULL; 2570 int assoc_err = 0; 2571 uint32_t num_assoc = 0; 2572 uint32_t num_assoc_del_scheduled = 0; 2573 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL; 2574 uint8_t port_hdl = nport->port_hdl; 2575 uint32_t s_id = rport->s_id; 2576 uint32_t rpi = rport->rpi; 2577 uint32_t assoc_count = rport->assoc_count; 2578 char log_str[256]; 2579 2580 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n", 2581 nport->nport_hdl); 2582 2583 /* 2584 * Allocate memory for callback data. 2585 * This memory will be freed by the callback function. 2586 */ 2587 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data)); 2588 if (NULL == cb_data) { 2589 SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl); 2590 err = -ENOMEM; 2591 goto out; 2592 } 2593 cb_data->nport = nport; 2594 cb_data->rport = rport; 2595 cb_data->port_handle = port_hdl; 2596 cb_data->cb_func = cb_func; 2597 cb_data->cb_ctx = cb_ctx; 2598 2599 /* 2600 * Delete all associations, if any, related with this ITN/remote_port. 2601 */ 2602 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 2603 num_assoc++; 2604 if (assoc->s_id == s_id) { 2605 assoc_err = nvmf_fc_delete_association(nport, 2606 assoc->assoc_id, 2607 false /* send abts */, false, 2608 nvmf_fc_adm_i_t_delete_assoc_cb, cb_data); 2609 if (0 != assoc_err) { 2610 /* 2611 * Mark this association as zombie. 2612 */ 2613 err = -EINVAL; 2614 DEV_VERIFY(!"Error while deleting association"); 2615 (void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2616 } else { 2617 num_assoc_del_scheduled++; 2618 } 2619 } 2620 } 2621 2622 out: 2623 if ((cb_data) && (num_assoc_del_scheduled == 0)) { 2624 /* 2625 * Since there are no association_delete calls 2626 * successfully scheduled, the association_delete 2627 * callback function will never be called. 2628 * In this case, call the callback function now. 2629 */ 2630 nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0); 2631 } 2632 2633 snprintf(log_str, sizeof(log_str), 2634 "IT delete associations on nport:%d end. " 2635 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n", 2636 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err); 2637 2638 if (err == 0) { 2639 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2640 } else { 2641 SPDK_ERRLOG("%s", log_str); 2642 } 2643 } 2644 2645 static void 2646 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 2647 { 2648 ASSERT_SPDK_FC_MAIN_THREAD(); 2649 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL; 2650 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2651 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2652 struct spdk_nvmf_fc_port *fc_port = NULL; 2653 int err = 0; 2654 2655 quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data; 2656 hwqp = quiesce_api_data->hwqp; 2657 fc_port = hwqp->fc_port; 2658 port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx; 2659 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func; 2660 2661 /* 2662 * Decrement the callback/quiesced queue count. 2663 */ 2664 port_quiesce_ctx->quiesce_count--; 2665 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id); 2666 2667 free(quiesce_api_data); 2668 /* 2669 * Wait for call backs i.e. max_ioq_queues + LS QUEUE. 2670 */ 2671 if (port_quiesce_ctx->quiesce_count > 0) { 2672 return; 2673 } 2674 2675 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2676 SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl); 2677 } else { 2678 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl); 2679 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2680 } 2681 2682 if (cb_func) { 2683 /* 2684 * Callback function for the called of quiesce. 2685 */ 2686 cb_func(port_quiesce_ctx->ctx, err); 2687 } 2688 2689 /* 2690 * Free the context structure. 2691 */ 2692 free(port_quiesce_ctx); 2693 2694 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl, 2695 err); 2696 } 2697 2698 static int 2699 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx, 2700 spdk_nvmf_fc_poller_api_cb cb_func) 2701 { 2702 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args; 2703 enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS; 2704 int err = 0; 2705 2706 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args)); 2707 2708 if (args == NULL) { 2709 err = -ENOMEM; 2710 SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id); 2711 goto done; 2712 } 2713 args->hwqp = fc_hwqp; 2714 args->ctx = ctx; 2715 args->cb_info.cb_func = cb_func; 2716 args->cb_info.cb_data = args; 2717 args->cb_info.cb_thread = spdk_get_thread(); 2718 2719 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id); 2720 rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args); 2721 if (rc) { 2722 free(args); 2723 err = -EINVAL; 2724 } 2725 2726 done: 2727 return err; 2728 } 2729 2730 /* 2731 * Hw port Quiesce 2732 */ 2733 static int 2734 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx, 2735 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func) 2736 { 2737 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2738 uint32_t i = 0; 2739 int err = 0; 2740 2741 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl); 2742 2743 /* 2744 * If the port is in an OFFLINE state, set the state to QUIESCED 2745 * and execute the callback. 2746 */ 2747 if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) { 2748 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2749 } 2750 2751 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2752 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n", 2753 fc_port->port_hdl); 2754 /* 2755 * Execute the callback function directly. 2756 */ 2757 cb_func(ctx, err); 2758 goto out; 2759 } 2760 2761 port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx)); 2762 2763 if (port_quiesce_ctx == NULL) { 2764 err = -ENOMEM; 2765 SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n", 2766 fc_port->port_hdl); 2767 goto out; 2768 } 2769 2770 port_quiesce_ctx->quiesce_count = 0; 2771 port_quiesce_ctx->ctx = ctx; 2772 port_quiesce_ctx->cb_func = cb_func; 2773 2774 /* 2775 * Quiesce the LS queue. 2776 */ 2777 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx, 2778 nvmf_fc_adm_queue_quiesce_cb); 2779 if (err != 0) { 2780 SPDK_ERRLOG("Failed to quiesce the LS queue.\n"); 2781 goto out; 2782 } 2783 port_quiesce_ctx->quiesce_count++; 2784 2785 /* 2786 * Quiesce the IO queues. 2787 */ 2788 for (i = 0; i < fc_port->num_io_queues; i++) { 2789 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i], 2790 port_quiesce_ctx, 2791 nvmf_fc_adm_queue_quiesce_cb); 2792 if (err != 0) { 2793 DEV_VERIFY(0); 2794 SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id); 2795 } 2796 port_quiesce_ctx->quiesce_count++; 2797 } 2798 2799 out: 2800 if (port_quiesce_ctx && err != 0) { 2801 free(port_quiesce_ctx); 2802 } 2803 return err; 2804 } 2805 2806 /* 2807 * Initialize and add a HW port entry to the global 2808 * HW port list. 2809 */ 2810 static void 2811 nvmf_fc_adm_evnt_hw_port_init(void *arg) 2812 { 2813 ASSERT_SPDK_FC_MAIN_THREAD(); 2814 struct spdk_nvmf_fc_port *fc_port = NULL; 2815 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2816 struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *) 2817 api_data->api_args; 2818 int err = 0; 2819 2820 if (args->io_queue_cnt > spdk_env_get_core_count()) { 2821 SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle); 2822 err = EINVAL; 2823 goto abort_port_init; 2824 } 2825 2826 /* 2827 * 1. Check for duplicate initialization. 2828 */ 2829 fc_port = nvmf_fc_port_lookup(args->port_handle); 2830 if (fc_port != NULL) { 2831 SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle); 2832 goto abort_port_init; 2833 } 2834 2835 /* 2836 * 2. Get the memory to instantiate a fc port. 2837 */ 2838 fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) + 2839 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp))); 2840 if (fc_port == NULL) { 2841 SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle); 2842 err = -ENOMEM; 2843 goto abort_port_init; 2844 } 2845 2846 /* assign the io_queues array */ 2847 fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof( 2848 struct spdk_nvmf_fc_port)); 2849 2850 /* 2851 * 3. Initialize the contents for the FC-port 2852 */ 2853 err = nvmf_fc_adm_hw_port_data_init(fc_port, args); 2854 2855 if (err != 0) { 2856 SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle); 2857 DEV_VERIFY(!"Data initialization failed for fc_port"); 2858 goto abort_port_init; 2859 } 2860 2861 /* 2862 * 4. Add this port to the global fc port list in the library. 2863 */ 2864 nvmf_fc_port_add(fc_port); 2865 2866 abort_port_init: 2867 if (err && fc_port) { 2868 free(fc_port); 2869 } 2870 if (api_data->cb_func != NULL) { 2871 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err); 2872 } 2873 2874 free(arg); 2875 2876 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n", 2877 args->port_handle, err); 2878 } 2879 2880 static void 2881 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp) 2882 { 2883 struct spdk_nvmf_fc_abts_ctx *ctx; 2884 struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL; 2885 2886 TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) { 2887 TAILQ_REMOVE(&hwqp->sync_cbs, args, link); 2888 ctx = args->cb_info.cb_data; 2889 if (ctx) { 2890 if (++ctx->hwqps_responded == ctx->num_hwqps) { 2891 free(ctx->sync_poller_args); 2892 free(ctx->abts_poller_args); 2893 free(ctx); 2894 } 2895 } 2896 } 2897 } 2898 2899 static void 2900 nvmf_fc_adm_evnt_hw_port_free(void *arg) 2901 { 2902 ASSERT_SPDK_FC_MAIN_THREAD(); 2903 int err = 0, i; 2904 struct spdk_nvmf_fc_port *fc_port = NULL; 2905 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2906 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2907 struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *) 2908 api_data->api_args; 2909 2910 fc_port = nvmf_fc_port_lookup(args->port_handle); 2911 if (!fc_port) { 2912 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2913 err = -EINVAL; 2914 goto out; 2915 } 2916 2917 if (!TAILQ_EMPTY(&fc_port->nport_list)) { 2918 SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle); 2919 err = -EIO; 2920 goto out; 2921 } 2922 2923 /* Clean up and free fc_port */ 2924 hwqp = &fc_port->ls_queue; 2925 nvmf_fc_adm_hwqp_clean_sync_cb(hwqp); 2926 rte_hash_free(hwqp->connection_list_hash); 2927 rte_hash_free(hwqp->rport_list_hash); 2928 2929 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2930 hwqp = &fc_port->io_queues[i]; 2931 2932 nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]); 2933 rte_hash_free(hwqp->connection_list_hash); 2934 rte_hash_free(hwqp->rport_list_hash); 2935 } 2936 2937 nvmf_fc_port_remove(fc_port); 2938 free(fc_port); 2939 out: 2940 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n", 2941 args->port_handle, err); 2942 if (api_data->cb_func != NULL) { 2943 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err); 2944 } 2945 2946 free(arg); 2947 } 2948 2949 /* 2950 * Online a HW port. 2951 */ 2952 static void 2953 nvmf_fc_adm_evnt_hw_port_online(void *arg) 2954 { 2955 ASSERT_SPDK_FC_MAIN_THREAD(); 2956 struct spdk_nvmf_fc_port *fc_port = NULL; 2957 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2958 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2959 struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *) 2960 api_data->api_args; 2961 int i = 0; 2962 int err = 0; 2963 2964 fc_port = nvmf_fc_port_lookup(args->port_handle); 2965 if (fc_port) { 2966 /* Set the port state to online */ 2967 err = nvmf_fc_port_set_online(fc_port); 2968 if (err != 0) { 2969 SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err); 2970 DEV_VERIFY(!"Hw port online failed"); 2971 goto out; 2972 } 2973 2974 hwqp = &fc_port->ls_queue; 2975 hwqp->context = NULL; 2976 (void)nvmf_fc_hwqp_set_online(hwqp); 2977 2978 /* Cycle through all the io queues and setup a hwqp poller for each. */ 2979 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2980 hwqp = &fc_port->io_queues[i]; 2981 hwqp->context = NULL; 2982 (void)nvmf_fc_hwqp_set_online(hwqp); 2983 nvmf_fc_poll_group_add_hwqp(hwqp); 2984 } 2985 } else { 2986 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2987 err = -EINVAL; 2988 } 2989 2990 out: 2991 if (api_data->cb_func != NULL) { 2992 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err); 2993 } 2994 2995 free(arg); 2996 2997 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle, 2998 err); 2999 } 3000 3001 static void 3002 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status) 3003 { 3004 int err = 0; 3005 struct spdk_nvmf_fc_port *fc_port = NULL; 3006 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx; 3007 struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args; 3008 3009 if (--remove_hwqp_args->pending_remove_hwqp) { 3010 return; 3011 } 3012 3013 fc_port = nvmf_fc_port_lookup(args->port_handle); 3014 if (!fc_port) { 3015 err = -EINVAL; 3016 SPDK_ERRLOG("fc_port not found.\n"); 3017 goto out; 3018 } 3019 3020 /* 3021 * Delete all the nports. Ideally, the nports should have been purged 3022 * before the offline event, in which case, only a validation is required. 3023 */ 3024 nvmf_fc_adm_hw_port_offline_nport_delete(fc_port); 3025 out: 3026 if (remove_hwqp_args->cb_fn) { 3027 remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3028 } 3029 3030 free(remove_hwqp_args); 3031 } 3032 3033 /* 3034 * Offline a HW port. 3035 */ 3036 static void 3037 nvmf_fc_adm_evnt_hw_port_offline(void *arg) 3038 { 3039 ASSERT_SPDK_FC_MAIN_THREAD(); 3040 struct spdk_nvmf_fc_port *fc_port = NULL; 3041 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 3042 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3043 struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *) 3044 api_data->api_args; 3045 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args; 3046 int i = 0; 3047 int err = 0; 3048 3049 fc_port = nvmf_fc_port_lookup(args->port_handle); 3050 if (fc_port) { 3051 /* Set the port state to offline, if it is not already. */ 3052 err = nvmf_fc_port_set_offline(fc_port); 3053 if (err != 0) { 3054 SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err); 3055 err = 0; 3056 goto out; 3057 } 3058 3059 remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args)); 3060 if (!remove_hwqp_args) { 3061 SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n"); 3062 err = -ENOMEM; 3063 goto out; 3064 } 3065 remove_hwqp_args->cb_fn = api_data->cb_func; 3066 remove_hwqp_args->cb_args = api_data->api_args; 3067 remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues; 3068 3069 hwqp = &fc_port->ls_queue; 3070 (void)nvmf_fc_hwqp_set_offline(hwqp); 3071 3072 /* Remove poller for all the io queues. */ 3073 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 3074 hwqp = &fc_port->io_queues[i]; 3075 (void)nvmf_fc_hwqp_set_offline(hwqp); 3076 nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb, 3077 remove_hwqp_args); 3078 } 3079 3080 free(arg); 3081 3082 /* Wait untill all the hwqps are removed from poll groups. */ 3083 return; 3084 } else { 3085 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3086 err = -EINVAL; 3087 } 3088 out: 3089 if (api_data->cb_func != NULL) { 3090 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3091 } 3092 3093 free(arg); 3094 3095 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle, 3096 err); 3097 } 3098 3099 struct nvmf_fc_add_rem_listener_ctx { 3100 struct spdk_nvmf_subsystem *subsystem; 3101 bool add_listener; 3102 struct spdk_nvme_transport_id trid; 3103 }; 3104 3105 static void 3106 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3107 { 3108 ASSERT_SPDK_FC_MAIN_THREAD(); 3109 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3110 free(ctx); 3111 } 3112 3113 static void 3114 nvmf_fc_adm_listen_done(void *cb_arg, int status) 3115 { 3116 ASSERT_SPDK_FC_MAIN_THREAD(); 3117 struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg; 3118 3119 if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) { 3120 SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn); 3121 free(ctx); 3122 } 3123 } 3124 3125 static void 3126 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3127 { 3128 ASSERT_SPDK_FC_MAIN_THREAD(); 3129 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3130 3131 if (ctx->add_listener) { 3132 spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx); 3133 } else { 3134 spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid); 3135 nvmf_fc_adm_listen_done(ctx, 0); 3136 } 3137 } 3138 3139 static int 3140 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add) 3141 { 3142 struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt(); 3143 struct spdk_nvmf_subsystem *subsystem; 3144 struct spdk_nvmf_listen_opts opts; 3145 3146 if (!tgt) { 3147 SPDK_ERRLOG("No nvmf target defined\n"); 3148 return -EINVAL; 3149 } 3150 3151 spdk_nvmf_listen_opts_init(&opts, sizeof(opts)); 3152 3153 subsystem = spdk_nvmf_subsystem_get_first(tgt); 3154 while (subsystem) { 3155 struct nvmf_fc_add_rem_listener_ctx *ctx; 3156 3157 if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) { 3158 ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx)); 3159 if (ctx) { 3160 ctx->add_listener = add; 3161 ctx->subsystem = subsystem; 3162 nvmf_fc_create_trid(&ctx->trid, 3163 nport->fc_nodename.u.wwn, 3164 nport->fc_portname.u.wwn); 3165 3166 if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) { 3167 SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n", 3168 ctx->trid.traddr); 3169 free(ctx); 3170 } else if (spdk_nvmf_subsystem_pause(subsystem, 3171 0, 3172 nvmf_fc_adm_subsystem_paused_cb, 3173 ctx)) { 3174 SPDK_ERRLOG("Failed to pause subsystem: %s\n", 3175 subsystem->subnqn); 3176 free(ctx); 3177 } 3178 } 3179 } 3180 3181 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 3182 } 3183 3184 return 0; 3185 } 3186 3187 /* 3188 * Create a Nport. 3189 */ 3190 static void 3191 nvmf_fc_adm_evnt_nport_create(void *arg) 3192 { 3193 ASSERT_SPDK_FC_MAIN_THREAD(); 3194 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3195 struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *) 3196 api_data->api_args; 3197 struct spdk_nvmf_fc_nport *nport = NULL; 3198 struct spdk_nvmf_fc_port *fc_port = NULL; 3199 int err = 0; 3200 3201 /* 3202 * Get the physical port. 3203 */ 3204 fc_port = nvmf_fc_port_lookup(args->port_handle); 3205 if (fc_port == NULL) { 3206 err = -EINVAL; 3207 goto out; 3208 } 3209 3210 /* 3211 * Check for duplicate initialization. 3212 */ 3213 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3214 if (nport != NULL) { 3215 SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle, 3216 args->port_handle); 3217 err = -EINVAL; 3218 goto out; 3219 } 3220 3221 /* 3222 * Get the memory to instantiate a fc nport. 3223 */ 3224 nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport)); 3225 if (nport == NULL) { 3226 SPDK_ERRLOG("Failed to allocate memory for nport %d.\n", 3227 args->nport_handle); 3228 err = -ENOMEM; 3229 goto out; 3230 } 3231 3232 /* 3233 * Initialize the contents for the nport 3234 */ 3235 nport->nport_hdl = args->nport_handle; 3236 nport->port_hdl = args->port_handle; 3237 nport->nport_state = SPDK_NVMF_FC_OBJECT_CREATED; 3238 nport->fc_nodename = args->fc_nodename; 3239 nport->fc_portname = args->fc_portname; 3240 nport->d_id = args->d_id; 3241 nport->fc_port = nvmf_fc_port_lookup(args->port_handle); 3242 3243 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED); 3244 TAILQ_INIT(&nport->rem_port_list); 3245 nport->rport_count = 0; 3246 TAILQ_INIT(&nport->fc_associations); 3247 nport->assoc_count = 0; 3248 3249 /* 3250 * Populate the nport address (as listening address) to the nvmf subsystems. 3251 */ 3252 err = nvmf_fc_adm_add_rem_nport_listener(nport, true); 3253 3254 (void)nvmf_fc_port_add_nport(fc_port, nport); 3255 out: 3256 if (err && nport) { 3257 free(nport); 3258 } 3259 3260 if (api_data->cb_func != NULL) { 3261 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err); 3262 } 3263 3264 free(arg); 3265 } 3266 3267 static void 3268 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type, 3269 void *cb_args, int spdk_err) 3270 { 3271 ASSERT_SPDK_FC_MAIN_THREAD(); 3272 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args; 3273 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 3274 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 3275 int err = 0; 3276 uint16_t nport_hdl = 0; 3277 char log_str[256]; 3278 3279 /* 3280 * Assert on any delete failure. 3281 */ 3282 if (nport == NULL) { 3283 SPDK_ERRLOG("Nport delete callback returned null nport"); 3284 DEV_VERIFY(!"nport is null."); 3285 goto out; 3286 } 3287 3288 nport_hdl = nport->nport_hdl; 3289 if (0 != spdk_err) { 3290 SPDK_ERRLOG("Nport delete callback returned error. FC Port: " 3291 "%d, Nport: %d\n", 3292 nport->port_hdl, nport->nport_hdl); 3293 DEV_VERIFY(!"nport delete callback error."); 3294 } 3295 3296 /* 3297 * Free the nport if this is the last rport being deleted and 3298 * execute the callback(s). 3299 */ 3300 if (nvmf_fc_nport_has_no_rport(nport)) { 3301 if (0 != nport->assoc_count) { 3302 SPDK_ERRLOG("association count != 0\n"); 3303 DEV_VERIFY(!"association count != 0"); 3304 } 3305 3306 err = nvmf_fc_port_remove_nport(nport->fc_port, nport); 3307 if (0 != err) { 3308 SPDK_ERRLOG("Nport delete callback: Failed to remove " 3309 "nport from nport list. FC Port:%d Nport:%d\n", 3310 nport->port_hdl, nport->nport_hdl); 3311 } 3312 /* Free the nport */ 3313 free(nport); 3314 3315 if (cb_func != NULL) { 3316 (void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err); 3317 } 3318 free(cb_data); 3319 } 3320 out: 3321 snprintf(log_str, sizeof(log_str), 3322 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n", 3323 port_handle, nport_hdl, event_type, spdk_err); 3324 3325 if (err != 0) { 3326 SPDK_ERRLOG("%s", log_str); 3327 } else { 3328 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3329 } 3330 } 3331 3332 /* 3333 * Delete Nport. 3334 */ 3335 static void 3336 nvmf_fc_adm_evnt_nport_delete(void *arg) 3337 { 3338 ASSERT_SPDK_FC_MAIN_THREAD(); 3339 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3340 struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *) 3341 api_data->api_args; 3342 struct spdk_nvmf_fc_nport *nport = NULL; 3343 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL; 3344 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3345 int err = 0; 3346 uint32_t rport_cnt = 0; 3347 int rc = 0; 3348 3349 /* 3350 * Make sure that the nport exists. 3351 */ 3352 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3353 if (nport == NULL) { 3354 SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle, 3355 args->port_handle); 3356 err = -EINVAL; 3357 goto out; 3358 } 3359 3360 /* 3361 * Allocate memory for callback data. 3362 */ 3363 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data)); 3364 if (NULL == cb_data) { 3365 SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle); 3366 err = -ENOMEM; 3367 goto out; 3368 } 3369 3370 cb_data->nport = nport; 3371 cb_data->port_handle = args->port_handle; 3372 cb_data->fc_cb_func = api_data->cb_func; 3373 cb_data->fc_cb_ctx = args->cb_ctx; 3374 3375 /* 3376 * Begin nport tear down 3377 */ 3378 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3379 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3380 } else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3381 /* 3382 * Deletion of this nport already in progress. Register callback 3383 * and return. 3384 */ 3385 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3386 err = -ENODEV; 3387 goto out; 3388 } else { 3389 /* nport partially created/deleted */ 3390 DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3391 DEV_VERIFY(0 != "Nport in zombie state"); 3392 err = -ENODEV; 3393 goto out; 3394 } 3395 3396 /* 3397 * Remove this nport from listening addresses across subsystems 3398 */ 3399 rc = nvmf_fc_adm_add_rem_nport_listener(nport, false); 3400 3401 if (0 != rc) { 3402 err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 3403 SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n", 3404 nport->nport_hdl); 3405 goto out; 3406 } 3407 3408 /* 3409 * Delete all the remote ports (if any) for the nport 3410 */ 3411 /* TODO - Need to do this with a "first" and a "next" accessor function 3412 * for completeness. Look at app-subsystem as examples. 3413 */ 3414 if (nvmf_fc_nport_has_no_rport(nport)) { 3415 /* No rports to delete. Complete the nport deletion. */ 3416 nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0); 3417 goto out; 3418 } 3419 3420 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3421 struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc( 3422 1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args)); 3423 3424 if (it_del_args == NULL) { 3425 err = -ENOMEM; 3426 SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n", 3427 rport_iter->rpi, rport_iter->s_id); 3428 DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory"); 3429 goto out; 3430 } 3431 3432 rport_cnt++; 3433 it_del_args->port_handle = nport->port_hdl; 3434 it_del_args->nport_handle = nport->nport_hdl; 3435 it_del_args->cb_ctx = (void *)cb_data; 3436 it_del_args->rpi = rport_iter->rpi; 3437 it_del_args->s_id = rport_iter->s_id; 3438 3439 nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args, 3440 nvmf_fc_adm_delete_nport_cb); 3441 } 3442 3443 out: 3444 /* On failure, execute the callback function now */ 3445 if ((err != 0) || (rc != 0)) { 3446 SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, " 3447 "rport_cnt:%d rc:%d.\n", 3448 args->nport_handle, err, args->port_handle, 3449 rport_cnt, rc); 3450 if (cb_data) { 3451 free(cb_data); 3452 } 3453 if (api_data->cb_func != NULL) { 3454 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err); 3455 } 3456 3457 } else { 3458 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3459 "NPort %d delete done successfully, fc port:%d. " 3460 "rport_cnt:%d\n", 3461 args->nport_handle, args->port_handle, rport_cnt); 3462 } 3463 3464 free(arg); 3465 } 3466 3467 /* 3468 * Process an PRLI/IT add. 3469 */ 3470 static void 3471 nvmf_fc_adm_evnt_i_t_add(void *arg) 3472 { 3473 ASSERT_SPDK_FC_MAIN_THREAD(); 3474 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3475 struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *) 3476 api_data->api_args; 3477 struct spdk_nvmf_fc_nport *nport = NULL; 3478 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3479 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3480 int err = 0; 3481 3482 /* 3483 * Make sure the nport port exists. 3484 */ 3485 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3486 if (nport == NULL) { 3487 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3488 err = -EINVAL; 3489 goto out; 3490 } 3491 3492 /* 3493 * Check for duplicate i_t_add. 3494 */ 3495 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3496 if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) { 3497 SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n", 3498 args->nport_handle, rport_iter->s_id, rport_iter->rpi); 3499 err = -EEXIST; 3500 goto out; 3501 } 3502 } 3503 3504 /* 3505 * Get the memory to instantiate the remote port 3506 */ 3507 rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info)); 3508 if (rport == NULL) { 3509 SPDK_ERRLOG("Memory allocation for rem port failed.\n"); 3510 err = -ENOMEM; 3511 goto out; 3512 } 3513 3514 /* 3515 * Initialize the contents for the rport 3516 */ 3517 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED); 3518 rport->s_id = args->s_id; 3519 rport->rpi = args->rpi; 3520 rport->fc_nodename = args->fc_nodename; 3521 rport->fc_portname = args->fc_portname; 3522 3523 /* 3524 * Add remote port to nport 3525 */ 3526 if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) { 3527 DEV_VERIFY(!"Error while adding rport to list"); 3528 }; 3529 3530 /* 3531 * TODO: Do we validate the initiators service parameters? 3532 */ 3533 3534 /* 3535 * Get the targets service parameters from the library 3536 * to return back to the driver. 3537 */ 3538 args->target_prli_info = nvmf_fc_get_prli_service_params(); 3539 3540 out: 3541 if (api_data->cb_func != NULL) { 3542 /* 3543 * Passing pointer to the args struct as the first argument. 3544 * The cb_func should handle this appropriately. 3545 */ 3546 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err); 3547 } 3548 3549 free(arg); 3550 3551 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3552 "IT add on nport %d done, rc = %d.\n", 3553 args->nport_handle, err); 3554 } 3555 3556 /** 3557 * Process a IT delete. 3558 */ 3559 static void 3560 nvmf_fc_adm_evnt_i_t_delete(void *arg) 3561 { 3562 ASSERT_SPDK_FC_MAIN_THREAD(); 3563 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3564 struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *) 3565 api_data->api_args; 3566 int rc = 0; 3567 struct spdk_nvmf_fc_nport *nport = NULL; 3568 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL; 3569 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3570 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3571 uint32_t num_rport = 0; 3572 char log_str[256]; 3573 3574 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle); 3575 3576 /* 3577 * Make sure the nport port exists. If it does not, error out. 3578 */ 3579 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3580 if (nport == NULL) { 3581 SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle); 3582 rc = -EINVAL; 3583 goto out; 3584 } 3585 3586 /* 3587 * Find this ITN / rport (remote port). 3588 */ 3589 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3590 num_rport++; 3591 if ((rport_iter->s_id == args->s_id) && 3592 (rport_iter->rpi == args->rpi) && 3593 (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) { 3594 rport = rport_iter; 3595 break; 3596 } 3597 } 3598 3599 /* 3600 * We should find either zero or exactly one rport. 3601 * 3602 * If we find zero rports, that means that a previous request has 3603 * removed the rport by the time we reached here. In this case, 3604 * simply return out. 3605 */ 3606 if (rport == NULL) { 3607 rc = -ENODEV; 3608 goto out; 3609 } 3610 3611 /* 3612 * We have the rport slated for deletion. At this point clean up 3613 * any LS requests that are sitting in the pending list. Do this 3614 * first, then, set the states of the rport so that new LS requests 3615 * are not accepted. Then start the cleanup. 3616 */ 3617 nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport); 3618 3619 /* 3620 * We have found exactly one rport. Allocate memory for callback data. 3621 */ 3622 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data)); 3623 if (NULL == cb_data) { 3624 SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle); 3625 rc = -ENOMEM; 3626 goto out; 3627 } 3628 3629 cb_data->nport = nport; 3630 cb_data->rport = rport; 3631 cb_data->port_handle = args->port_handle; 3632 cb_data->fc_cb_func = api_data->cb_func; 3633 cb_data->fc_cb_ctx = args->cb_ctx; 3634 3635 /* 3636 * Validate rport object state. 3637 */ 3638 if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3639 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3640 } else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3641 /* 3642 * Deletion of this rport already in progress. Register callback 3643 * and return. 3644 */ 3645 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3646 rc = -ENODEV; 3647 goto out; 3648 } else { 3649 /* rport partially created/deleted */ 3650 DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3651 DEV_VERIFY(!"Invalid rport_state"); 3652 rc = -ENODEV; 3653 goto out; 3654 } 3655 3656 /* 3657 * We have successfully found a rport to delete. Call 3658 * nvmf_fc_i_t_delete_assoc(), which will perform further 3659 * IT-delete processing as well as free the cb_data. 3660 */ 3661 nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb, 3662 (void *)cb_data); 3663 3664 out: 3665 if (rc != 0) { 3666 /* 3667 * We have entered here because either we encountered an 3668 * error, or we did not find a rport to delete. 3669 * As a result, we will not call the function 3670 * nvmf_fc_i_t_delete_assoc() for further IT-delete 3671 * processing. Therefore, execute the callback function now. 3672 */ 3673 if (cb_data) { 3674 free(cb_data); 3675 } 3676 if (api_data->cb_func != NULL) { 3677 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc); 3678 } 3679 } 3680 3681 snprintf(log_str, sizeof(log_str), 3682 "IT delete on nport:%d end. num_rport:%d rc = %d.\n", 3683 args->nport_handle, num_rport, rc); 3684 3685 if (rc != 0) { 3686 SPDK_ERRLOG("%s", log_str); 3687 } else { 3688 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3689 } 3690 3691 free(arg); 3692 } 3693 3694 /* 3695 * Process ABTS received 3696 */ 3697 static void 3698 nvmf_fc_adm_evnt_abts_recv(void *arg) 3699 { 3700 ASSERT_SPDK_FC_MAIN_THREAD(); 3701 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3702 struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args; 3703 struct spdk_nvmf_fc_nport *nport = NULL; 3704 int err = 0; 3705 3706 SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi, 3707 args->oxid, args->rxid); 3708 3709 /* 3710 * 1. Make sure the nport port exists. 3711 */ 3712 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3713 if (nport == NULL) { 3714 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3715 err = -EINVAL; 3716 goto out; 3717 } 3718 3719 /* 3720 * 2. If the nport is in the process of being deleted, drop the ABTS. 3721 */ 3722 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3723 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3724 "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n", 3725 args->rpi, args->oxid, args->rxid); 3726 err = 0; 3727 goto out; 3728 3729 } 3730 3731 /* 3732 * 3. Pass the received ABTS-LS to the library for handling. 3733 */ 3734 nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid); 3735 3736 out: 3737 if (api_data->cb_func != NULL) { 3738 /* 3739 * Passing pointer to the args struct as the first argument. 3740 * The cb_func should handle this appropriately. 3741 */ 3742 (void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err); 3743 } else { 3744 /* No callback set, free the args */ 3745 free(args); 3746 } 3747 3748 free(arg); 3749 } 3750 3751 /* 3752 * Callback function for hw port quiesce. 3753 */ 3754 static void 3755 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err) 3756 { 3757 ASSERT_SPDK_FC_MAIN_THREAD(); 3758 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx = 3759 (struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx; 3760 struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args; 3761 spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func; 3762 struct spdk_nvmf_fc_queue_dump_info dump_info; 3763 struct spdk_nvmf_fc_port *fc_port = NULL; 3764 char *dump_buf = NULL; 3765 uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE; 3766 3767 /* 3768 * Free the callback context struct. 3769 */ 3770 free(ctx); 3771 3772 if (err != 0) { 3773 SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle); 3774 goto out; 3775 } 3776 3777 if (args->dump_queues == false) { 3778 /* 3779 * Queues need not be dumped. 3780 */ 3781 goto out; 3782 } 3783 3784 SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle); 3785 3786 /* 3787 * Get the fc port. 3788 */ 3789 fc_port = nvmf_fc_port_lookup(args->port_handle); 3790 if (fc_port == NULL) { 3791 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3792 err = -EINVAL; 3793 goto out; 3794 } 3795 3796 /* 3797 * Allocate memory for the dump buffer. 3798 * This memory will be freed by FCT. 3799 */ 3800 dump_buf = (char *)calloc(1, dump_buf_size); 3801 if (dump_buf == NULL) { 3802 err = -ENOMEM; 3803 SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle); 3804 goto out; 3805 } 3806 *args->dump_buf = (uint32_t *)dump_buf; 3807 dump_info.buffer = dump_buf; 3808 dump_info.offset = 0; 3809 3810 /* 3811 * Add the dump reason to the top of the buffer. 3812 */ 3813 nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason); 3814 3815 /* 3816 * Dump the hwqp. 3817 */ 3818 nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues, 3819 fc_port->num_io_queues, &dump_info); 3820 3821 out: 3822 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n", 3823 args->port_handle, args->dump_queues, err); 3824 3825 if (cb_func != NULL) { 3826 (void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3827 } 3828 } 3829 3830 /* 3831 * HW port reset 3832 3833 */ 3834 static void 3835 nvmf_fc_adm_evnt_hw_port_reset(void *arg) 3836 { 3837 ASSERT_SPDK_FC_MAIN_THREAD(); 3838 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3839 struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *) 3840 api_data->api_args; 3841 struct spdk_nvmf_fc_port *fc_port = NULL; 3842 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL; 3843 int err = 0; 3844 3845 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle); 3846 3847 /* 3848 * Make sure the physical port exists. 3849 */ 3850 fc_port = nvmf_fc_port_lookup(args->port_handle); 3851 if (fc_port == NULL) { 3852 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3853 err = -EINVAL; 3854 goto out; 3855 } 3856 3857 /* 3858 * Save the reset event args and the callback in a context struct. 3859 */ 3860 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx)); 3861 3862 if (ctx == NULL) { 3863 err = -ENOMEM; 3864 SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle); 3865 goto fail; 3866 } 3867 3868 ctx->reset_args = args; 3869 ctx->reset_cb_func = api_data->cb_func; 3870 3871 /* 3872 * Quiesce the hw port. 3873 */ 3874 err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb); 3875 if (err != 0) { 3876 goto fail; 3877 } 3878 3879 /* 3880 * Once the ports are successfully quiesced the reset processing 3881 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb 3882 */ 3883 return; 3884 fail: 3885 free(ctx); 3886 3887 out: 3888 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle, 3889 err); 3890 3891 if (api_data->cb_func != NULL) { 3892 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3893 } 3894 3895 free(arg); 3896 } 3897 3898 static inline void 3899 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args) 3900 { 3901 if (nvmf_fc_get_main_thread()) { 3902 spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args); 3903 } 3904 } 3905 3906 /* 3907 * Queue up an event in the SPDK main threads event queue. 3908 * Used by the FC driver to notify the SPDK main thread of FC related events. 3909 */ 3910 int 3911 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args, 3912 spdk_nvmf_fc_callback cb_func) 3913 { 3914 int err = 0; 3915 struct spdk_nvmf_fc_adm_api_data *api_data = NULL; 3916 spdk_msg_fn event_fn = NULL; 3917 3918 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type); 3919 3920 if (event_type >= SPDK_FC_EVENT_MAX) { 3921 SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type); 3922 err = -EINVAL; 3923 goto done; 3924 } 3925 3926 if (args == NULL) { 3927 SPDK_ERRLOG("Null args for event %d.\n", event_type); 3928 err = -EINVAL; 3929 goto done; 3930 } 3931 3932 api_data = calloc(1, sizeof(*api_data)); 3933 3934 if (api_data == NULL) { 3935 SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type); 3936 err = -ENOMEM; 3937 goto done; 3938 } 3939 3940 api_data->api_args = args; 3941 api_data->cb_func = cb_func; 3942 3943 switch (event_type) { 3944 case SPDK_FC_HW_PORT_INIT: 3945 event_fn = nvmf_fc_adm_evnt_hw_port_init; 3946 break; 3947 3948 case SPDK_FC_HW_PORT_FREE: 3949 event_fn = nvmf_fc_adm_evnt_hw_port_free; 3950 break; 3951 3952 case SPDK_FC_HW_PORT_ONLINE: 3953 event_fn = nvmf_fc_adm_evnt_hw_port_online; 3954 break; 3955 3956 case SPDK_FC_HW_PORT_OFFLINE: 3957 event_fn = nvmf_fc_adm_evnt_hw_port_offline; 3958 break; 3959 3960 case SPDK_FC_NPORT_CREATE: 3961 event_fn = nvmf_fc_adm_evnt_nport_create; 3962 break; 3963 3964 case SPDK_FC_NPORT_DELETE: 3965 event_fn = nvmf_fc_adm_evnt_nport_delete; 3966 break; 3967 3968 case SPDK_FC_IT_ADD: 3969 event_fn = nvmf_fc_adm_evnt_i_t_add; 3970 break; 3971 3972 case SPDK_FC_IT_DELETE: 3973 event_fn = nvmf_fc_adm_evnt_i_t_delete; 3974 break; 3975 3976 case SPDK_FC_ABTS_RECV: 3977 event_fn = nvmf_fc_adm_evnt_abts_recv; 3978 break; 3979 3980 case SPDK_FC_HW_PORT_RESET: 3981 event_fn = nvmf_fc_adm_evnt_hw_port_reset; 3982 break; 3983 3984 case SPDK_FC_UNRECOVERABLE_ERR: 3985 default: 3986 SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type); 3987 err = -EINVAL; 3988 break; 3989 } 3990 3991 done: 3992 3993 if (err == 0) { 3994 assert(event_fn != NULL); 3995 nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data); 3996 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type); 3997 } else { 3998 SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err); 3999 if (api_data) { 4000 free(api_data); 4001 } 4002 } 4003 4004 return err; 4005 } 4006 4007 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc); 4008 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api) 4009 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc) 4010