1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2019 Intel Corporation. 3 * Copyright (c) 2018-2019 Broadcom. All Rights Reserved. 4 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. 5 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 6 */ 7 8 /* 9 * NVMe_FC transport functions. 10 */ 11 12 #include "spdk/env.h" 13 #include "spdk/assert.h" 14 #include "spdk/nvmf_transport.h" 15 #include "spdk/string.h" 16 #include "spdk/trace.h" 17 #include "spdk/util.h" 18 #include "spdk/likely.h" 19 #include "spdk/endian.h" 20 #include "spdk/log.h" 21 #include "spdk/thread.h" 22 23 #include "nvmf_fc.h" 24 #include "fc_lld.h" 25 26 #include "spdk_internal/trace_defs.h" 27 28 #ifndef DEV_VERIFY 29 #define DEV_VERIFY assert 30 #endif 31 32 #ifndef ASSERT_SPDK_FC_MAIN_THREAD 33 #define ASSERT_SPDK_FC_MAIN_THREAD() \ 34 DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread()); 35 #endif 36 37 /* 38 * PRLI service parameters 39 */ 40 enum spdk_nvmf_fc_service_parameters { 41 SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001, 42 SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008, 43 SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010, 44 SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020, 45 SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080, 46 }; 47 48 static char *fc_req_state_strs[] = { 49 "SPDK_NVMF_FC_REQ_INIT", 50 "SPDK_NVMF_FC_REQ_READ_BDEV", 51 "SPDK_NVMF_FC_REQ_READ_XFER", 52 "SPDK_NVMF_FC_REQ_READ_RSP", 53 "SPDK_NVMF_FC_REQ_WRITE_BUFFS", 54 "SPDK_NVMF_FC_REQ_WRITE_XFER", 55 "SPDK_NVMF_FC_REQ_WRITE_BDEV", 56 "SPDK_NVMF_FC_REQ_WRITE_RSP", 57 "SPDK_NVMF_FC_REQ_NONE_BDEV", 58 "SPDK_NVMF_FC_REQ_NONE_RSP", 59 "SPDK_NVMF_FC_REQ_SUCCESS", 60 "SPDK_NVMF_FC_REQ_FAILED", 61 "SPDK_NVMF_FC_REQ_ABORTED", 62 "SPDK_NVMF_FC_REQ_BDEV_ABORTED", 63 "SPDK_NVMF_FC_REQ_PENDING", 64 "SPDK_NVMF_FC_REQ_FUSED_WAITING" 65 }; 66 67 #define HWQP_CONN_TABLE_SIZE 8192 68 #define HWQP_RPI_TABLE_SIZE 4096 69 70 static void 71 nvmf_fc_trace(void) 72 { 73 spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r'); 74 spdk_trace_register_description("FC_NEW", 75 TRACE_FC_REQ_INIT, 76 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 1, 77 SPDK_TRACE_ARG_TYPE_INT, ""); 78 spdk_trace_register_description("FC_READ_SBMT_TO_BDEV", 79 TRACE_FC_REQ_READ_BDEV, 80 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 81 SPDK_TRACE_ARG_TYPE_INT, ""); 82 spdk_trace_register_description("FC_READ_XFER_DATA", 83 TRACE_FC_REQ_READ_XFER, 84 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 85 SPDK_TRACE_ARG_TYPE_INT, ""); 86 spdk_trace_register_description("FC_READ_RSP", 87 TRACE_FC_REQ_READ_RSP, 88 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 89 SPDK_TRACE_ARG_TYPE_INT, ""); 90 spdk_trace_register_description("FC_WRITE_NEED_BUFFER", 91 TRACE_FC_REQ_WRITE_BUFFS, 92 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 93 SPDK_TRACE_ARG_TYPE_INT, ""); 94 spdk_trace_register_description("FC_WRITE_XFER_DATA", 95 TRACE_FC_REQ_WRITE_XFER, 96 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 97 SPDK_TRACE_ARG_TYPE_INT, ""); 98 spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV", 99 TRACE_FC_REQ_WRITE_BDEV, 100 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 101 SPDK_TRACE_ARG_TYPE_INT, ""); 102 spdk_trace_register_description("FC_WRITE_RSP", 103 TRACE_FC_REQ_WRITE_RSP, 104 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 105 SPDK_TRACE_ARG_TYPE_INT, ""); 106 spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV", 107 TRACE_FC_REQ_NONE_BDEV, 108 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 109 SPDK_TRACE_ARG_TYPE_INT, ""); 110 spdk_trace_register_description("FC_NONE_RSP", 111 TRACE_FC_REQ_NONE_RSP, 112 OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0, 113 SPDK_TRACE_ARG_TYPE_INT, ""); 114 spdk_trace_register_description("FC_SUCCESS", 115 TRACE_FC_REQ_SUCCESS, 116 OWNER_TYPE_NONE, OBJECT_NONE, 0, 117 SPDK_TRACE_ARG_TYPE_INT, ""); 118 spdk_trace_register_description("FC_FAILED", 119 TRACE_FC_REQ_FAILED, 120 OWNER_TYPE_NONE, OBJECT_NONE, 0, 121 SPDK_TRACE_ARG_TYPE_INT, ""); 122 spdk_trace_register_description("FC_ABRT", 123 TRACE_FC_REQ_ABORTED, 124 OWNER_TYPE_NONE, OBJECT_NONE, 0, 125 SPDK_TRACE_ARG_TYPE_INT, ""); 126 spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV", 127 TRACE_FC_REQ_BDEV_ABORTED, 128 OWNER_TYPE_NONE, OBJECT_NONE, 0, 129 SPDK_TRACE_ARG_TYPE_INT, ""); 130 spdk_trace_register_description("FC_PENDING", 131 TRACE_FC_REQ_PENDING, 132 OWNER_TYPE_NONE, OBJECT_NONE, 0, 133 SPDK_TRACE_ARG_TYPE_INT, ""); 134 spdk_trace_register_description("FC_FUSED_WAITING", 135 TRACE_FC_REQ_FUSED_WAITING, 136 OWNER_TYPE_NONE, OBJECT_NONE, 0, 137 SPDK_TRACE_ARG_TYPE_INT, ""); 138 } 139 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC) 140 141 /** 142 * The structure used by all fc adm functions 143 */ 144 struct spdk_nvmf_fc_adm_api_data { 145 void *api_args; 146 spdk_nvmf_fc_callback cb_func; 147 }; 148 149 /** 150 * The callback structure for nport-delete 151 */ 152 struct spdk_nvmf_fc_adm_nport_del_cb_data { 153 struct spdk_nvmf_fc_nport *nport; 154 uint8_t port_handle; 155 spdk_nvmf_fc_callback fc_cb_func; 156 void *fc_cb_ctx; 157 }; 158 159 /** 160 * The callback structure for it-delete 161 */ 162 struct spdk_nvmf_fc_adm_i_t_del_cb_data { 163 struct spdk_nvmf_fc_nport *nport; 164 struct spdk_nvmf_fc_remote_port_info *rport; 165 uint8_t port_handle; 166 spdk_nvmf_fc_callback fc_cb_func; 167 void *fc_cb_ctx; 168 }; 169 170 171 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err); 172 173 /** 174 * The callback structure for the it-delete-assoc callback 175 */ 176 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data { 177 struct spdk_nvmf_fc_nport *nport; 178 struct spdk_nvmf_fc_remote_port_info *rport; 179 uint8_t port_handle; 180 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func; 181 void *cb_ctx; 182 }; 183 184 /* 185 * Call back function pointer for HW port quiesce. 186 */ 187 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err); 188 189 /** 190 * Context structure for quiescing a hardware port 191 */ 192 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx { 193 int quiesce_count; 194 void *ctx; 195 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func; 196 }; 197 198 /** 199 * Context structure used to reset a hardware port 200 */ 201 struct spdk_nvmf_fc_adm_hw_port_reset_ctx { 202 void *reset_args; 203 spdk_nvmf_fc_callback reset_cb_func; 204 }; 205 206 struct spdk_nvmf_fc_transport { 207 struct spdk_nvmf_transport transport; 208 struct spdk_poller *accept_poller; 209 pthread_mutex_t lock; 210 }; 211 212 static struct spdk_nvmf_fc_transport *g_nvmf_ftransport; 213 214 static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL; 215 216 static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list = 217 TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list); 218 219 static struct spdk_thread *g_nvmf_fc_main_thread = NULL; 220 221 static uint32_t g_nvmf_fgroup_count = 0; 222 static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups = 223 TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups); 224 225 struct spdk_thread * 226 nvmf_fc_get_main_thread(void) 227 { 228 return g_nvmf_fc_main_thread; 229 } 230 231 static inline void 232 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req, 233 enum spdk_nvmf_fc_request_state state) 234 { 235 uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID; 236 237 switch (state) { 238 case SPDK_NVMF_FC_REQ_INIT: 239 /* Start IO tracing */ 240 tpoint_id = TRACE_FC_REQ_INIT; 241 break; 242 case SPDK_NVMF_FC_REQ_READ_BDEV: 243 tpoint_id = TRACE_FC_REQ_READ_BDEV; 244 break; 245 case SPDK_NVMF_FC_REQ_READ_XFER: 246 tpoint_id = TRACE_FC_REQ_READ_XFER; 247 break; 248 case SPDK_NVMF_FC_REQ_READ_RSP: 249 tpoint_id = TRACE_FC_REQ_READ_RSP; 250 break; 251 case SPDK_NVMF_FC_REQ_WRITE_BUFFS: 252 tpoint_id = TRACE_FC_REQ_WRITE_BUFFS; 253 break; 254 case SPDK_NVMF_FC_REQ_WRITE_XFER: 255 tpoint_id = TRACE_FC_REQ_WRITE_XFER; 256 break; 257 case SPDK_NVMF_FC_REQ_WRITE_BDEV: 258 tpoint_id = TRACE_FC_REQ_WRITE_BDEV; 259 break; 260 case SPDK_NVMF_FC_REQ_WRITE_RSP: 261 tpoint_id = TRACE_FC_REQ_WRITE_RSP; 262 break; 263 case SPDK_NVMF_FC_REQ_NONE_BDEV: 264 tpoint_id = TRACE_FC_REQ_NONE_BDEV; 265 break; 266 case SPDK_NVMF_FC_REQ_NONE_RSP: 267 tpoint_id = TRACE_FC_REQ_NONE_RSP; 268 break; 269 case SPDK_NVMF_FC_REQ_SUCCESS: 270 tpoint_id = TRACE_FC_REQ_SUCCESS; 271 break; 272 case SPDK_NVMF_FC_REQ_FAILED: 273 tpoint_id = TRACE_FC_REQ_FAILED; 274 break; 275 case SPDK_NVMF_FC_REQ_ABORTED: 276 tpoint_id = TRACE_FC_REQ_ABORTED; 277 break; 278 case SPDK_NVMF_FC_REQ_BDEV_ABORTED: 279 tpoint_id = TRACE_FC_REQ_ABORTED; 280 break; 281 case SPDK_NVMF_FC_REQ_PENDING: 282 tpoint_id = TRACE_FC_REQ_PENDING; 283 break; 284 case SPDK_NVMF_FC_REQ_FUSED_WAITING: 285 tpoint_id = TRACE_FC_REQ_FUSED_WAITING; 286 break; 287 default: 288 assert(0); 289 break; 290 } 291 if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) { 292 spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0, 293 (uint64_t)(&fc_req->req)); 294 } 295 } 296 297 static struct rte_hash * 298 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len) 299 { 300 struct rte_hash_parameters hash_params = { 0 }; 301 302 hash_params.entries = num_entries; 303 hash_params.key_len = key_len; 304 hash_params.name = name; 305 306 return rte_hash_create(&hash_params); 307 } 308 309 void 310 nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn) 311 { 312 free(fc_conn->pool_memory); 313 fc_conn->pool_memory = NULL; 314 } 315 316 int 317 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn) 318 { 319 uint32_t i, qd; 320 struct spdk_nvmf_fc_pooled_request *req; 321 322 /* 323 * Create number of fc-requests to be more than the actual SQ size. 324 * This is to handle race conditions where the target driver may send 325 * back a RSP and before the target driver gets to process the CQE 326 * for the RSP, the initiator may have sent a new command. 327 * Depending on the load on the HWQP, there is a slim possibility 328 * that the target reaps the RQE corresponding to the new 329 * command before processing the CQE corresponding to the RSP. 330 */ 331 qd = fc_conn->max_queue_depth * 2; 332 333 STAILQ_INIT(&fc_conn->pool_queue); 334 fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2), 335 sizeof(struct spdk_nvmf_fc_request)); 336 if (!fc_conn->pool_memory) { 337 SPDK_ERRLOG("create fc req ring objects failed\n"); 338 goto error; 339 } 340 fc_conn->pool_size = qd; 341 fc_conn->pool_free_elems = qd; 342 343 /* Initialise value in ring objects and link the objects */ 344 for (i = 0; i < qd; i++) { 345 req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory + 346 i * sizeof(struct spdk_nvmf_fc_request)); 347 348 STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link); 349 } 350 return 0; 351 error: 352 nvmf_fc_free_conn_reqpool(fc_conn); 353 return -1; 354 } 355 356 static inline struct spdk_nvmf_fc_request * 357 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn) 358 { 359 struct spdk_nvmf_fc_request *fc_req; 360 struct spdk_nvmf_fc_pooled_request *pooled_req; 361 struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp; 362 363 pooled_req = STAILQ_FIRST(&fc_conn->pool_queue); 364 if (!pooled_req) { 365 SPDK_ERRLOG("Alloc request buffer failed\n"); 366 return NULL; 367 } 368 STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link); 369 fc_conn->pool_free_elems -= 1; 370 371 fc_req = (struct spdk_nvmf_fc_request *)pooled_req; 372 memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request)); 373 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT); 374 375 TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link); 376 TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link); 377 TAILQ_INIT(&fc_req->abort_cbs); 378 return fc_req; 379 } 380 381 static inline void 382 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req) 383 { 384 if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) { 385 /* Log an error for debug purpose. */ 386 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED); 387 } 388 389 /* set the magic to mark req as no longer valid. */ 390 fc_req->magic = 0xDEADBEEF; 391 392 TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link); 393 TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link); 394 395 STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link); 396 fc_conn->pool_free_elems += 1; 397 } 398 399 static inline void 400 nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req) 401 { 402 STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req, 403 spdk_nvmf_request, buf_link); 404 } 405 406 int 407 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp) 408 { 409 char name[64]; 410 411 hwqp->fc_port = fc_port; 412 413 /* clear counters */ 414 memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors)); 415 416 TAILQ_INIT(&hwqp->in_use_reqs); 417 TAILQ_INIT(&hwqp->sync_cbs); 418 TAILQ_INIT(&hwqp->ls_pending_queue); 419 420 snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id); 421 hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE, 422 sizeof(uint64_t)); 423 if (!hwqp->connection_list_hash) { 424 SPDK_ERRLOG("Failed to create connection hash table.\n"); 425 return -ENOMEM; 426 } 427 428 snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id); 429 hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t)); 430 if (!hwqp->rport_list_hash) { 431 SPDK_ERRLOG("Failed to create rpi hash table.\n"); 432 rte_hash_free(hwqp->connection_list_hash); 433 return -ENOMEM; 434 } 435 436 /* Init low level driver queues */ 437 nvmf_fc_init_q(hwqp); 438 return 0; 439 } 440 441 static struct spdk_nvmf_fc_poll_group * 442 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp) 443 { 444 uint32_t max_count = UINT32_MAX; 445 struct spdk_nvmf_fc_poll_group *fgroup; 446 struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL; 447 448 pthread_mutex_lock(&g_nvmf_ftransport->lock); 449 /* find poll group with least number of hwqp's assigned to it */ 450 TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) { 451 if (fgroup->hwqp_count < max_count) { 452 ret_fgroup = fgroup; 453 max_count = fgroup->hwqp_count; 454 } 455 } 456 457 if (ret_fgroup) { 458 ret_fgroup->hwqp_count++; 459 hwqp->thread = ret_fgroup->group.group->thread; 460 hwqp->fgroup = ret_fgroup; 461 } 462 463 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 464 465 return ret_fgroup; 466 } 467 468 bool 469 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup) 470 { 471 struct spdk_nvmf_fc_poll_group *tmp; 472 bool rc = false; 473 474 pthread_mutex_lock(&g_nvmf_ftransport->lock); 475 TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) { 476 if (tmp == fgroup) { 477 rc = true; 478 break; 479 } 480 } 481 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 482 return rc; 483 } 484 485 void 486 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp) 487 { 488 assert(hwqp); 489 if (hwqp == NULL) { 490 SPDK_ERRLOG("Error: hwqp is NULL\n"); 491 return; 492 } 493 494 assert(g_nvmf_fgroup_count); 495 496 if (!nvmf_fc_assign_idlest_poll_group(hwqp)) { 497 SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id); 498 return; 499 } 500 501 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL); 502 } 503 504 static void 505 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 506 { 507 struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data; 508 509 if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) { 510 SPDK_DEBUGLOG(nvmf_fc_adm_api, 511 "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id); 512 } else { 513 SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id); 514 } 515 516 if (args->cb_fn) { 517 args->cb_fn(args->cb_ctx, 0); 518 } 519 520 free(args); 521 } 522 523 void 524 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp, 525 spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx) 526 { 527 struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args; 528 struct spdk_nvmf_fc_poll_group *tmp; 529 int rc = 0; 530 531 assert(hwqp); 532 533 SPDK_DEBUGLOG(nvmf_fc, 534 "Remove hwqp from poller: for port: %d, hwqp: %d\n", 535 hwqp->fc_port->port_hdl, hwqp->hwqp_id); 536 537 if (!hwqp->fgroup) { 538 SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id); 539 } else { 540 pthread_mutex_lock(&g_nvmf_ftransport->lock); 541 TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) { 542 if (tmp == hwqp->fgroup) { 543 hwqp->fgroup->hwqp_count--; 544 break; 545 } 546 } 547 pthread_mutex_unlock(&g_nvmf_ftransport->lock); 548 549 if (tmp != hwqp->fgroup) { 550 /* Pollgroup was already removed. Dont bother. */ 551 goto done; 552 } 553 554 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args)); 555 if (args == NULL) { 556 rc = -ENOMEM; 557 SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id); 558 goto done; 559 } 560 561 args->hwqp = hwqp; 562 args->cb_fn = cb_fn; 563 args->cb_ctx = cb_ctx; 564 args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb; 565 args->cb_info.cb_data = args; 566 args->cb_info.cb_thread = spdk_get_thread(); 567 568 rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args); 569 if (rc) { 570 rc = -EINVAL; 571 SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id); 572 free(args); 573 goto done; 574 } 575 return; 576 } 577 done: 578 if (cb_fn) { 579 cb_fn(cb_ctx, rc); 580 } 581 } 582 583 /* 584 * Note: This needs to be used only on main poller. 585 */ 586 static uint64_t 587 nvmf_fc_get_abts_unique_id(void) 588 { 589 static uint32_t u_id = 0; 590 591 return (uint64_t)(++u_id); 592 } 593 594 static void 595 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 596 { 597 struct spdk_nvmf_fc_abts_ctx *ctx = cb_data; 598 struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg; 599 600 ctx->hwqps_responded++; 601 602 if (ctx->hwqps_responded < ctx->num_hwqps) { 603 /* Wait for all pollers to complete. */ 604 return; 605 } 606 607 /* Free the queue sync poller args. */ 608 free(ctx->sync_poller_args); 609 610 /* Mark as queue synced */ 611 ctx->queue_synced = true; 612 613 /* Reset the ctx values */ 614 ctx->hwqps_responded = 0; 615 ctx->handled = false; 616 617 SPDK_DEBUGLOG(nvmf_fc, 618 "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 619 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 620 621 /* Resend ABTS to pollers */ 622 args = ctx->abts_poller_args; 623 for (int i = 0; i < ctx->num_hwqps; i++) { 624 poller_arg = args + i; 625 nvmf_fc_poller_api_func(poller_arg->hwqp, 626 SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED, 627 poller_arg); 628 } 629 } 630 631 static int 632 nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx) 633 { 634 struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg; 635 struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg; 636 637 /* check if FC driver supports queue sync */ 638 if (!nvmf_fc_q_sync_available()) { 639 return -EPERM; 640 } 641 642 assert(ctx); 643 if (!ctx) { 644 SPDK_ERRLOG("NULL ctx pointer"); 645 return -EINVAL; 646 } 647 648 /* Reset the ctx values */ 649 ctx->hwqps_responded = 0; 650 651 args = calloc(ctx->num_hwqps, 652 sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args)); 653 if (!args) { 654 SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 655 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 656 return -ENOMEM; 657 } 658 ctx->sync_poller_args = args; 659 660 abts_args = ctx->abts_poller_args; 661 for (int i = 0; i < ctx->num_hwqps; i++) { 662 abts_poller_arg = abts_args + i; 663 poller_arg = args + i; 664 poller_arg->u_id = ctx->u_id; 665 poller_arg->hwqp = abts_poller_arg->hwqp; 666 poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb; 667 poller_arg->cb_info.cb_data = ctx; 668 poller_arg->cb_info.cb_thread = spdk_get_thread(); 669 670 /* Send a Queue sync message to interested pollers */ 671 nvmf_fc_poller_api_func(poller_arg->hwqp, 672 SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC, 673 poller_arg); 674 } 675 676 SPDK_DEBUGLOG(nvmf_fc, 677 "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 678 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 679 680 /* Post Marker to queue to track aborted request */ 681 nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id); 682 683 return 0; 684 } 685 686 static void 687 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 688 { 689 struct spdk_nvmf_fc_abts_ctx *ctx = cb_data; 690 struct spdk_nvmf_fc_nport *nport = NULL; 691 692 if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) { 693 ctx->handled = true; 694 } 695 696 ctx->hwqps_responded++; 697 698 if (ctx->hwqps_responded < ctx->num_hwqps) { 699 /* Wait for all pollers to complete. */ 700 return; 701 } 702 703 nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl); 704 705 if (ctx->nport != nport) { 706 /* Nport can be deleted while this abort is being 707 * processed by the pollers. 708 */ 709 SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 710 ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 711 } else { 712 if (!ctx->handled) { 713 /* Try syncing the queues and try one more time */ 714 if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) { 715 SPDK_DEBUGLOG(nvmf_fc, 716 "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 717 ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 718 return; 719 } else { 720 /* Send Reject */ 721 nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue, 722 ctx->oxid, ctx->rxid, ctx->rpi, true, 723 FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL); 724 } 725 } else { 726 /* Send Accept */ 727 nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue, 728 ctx->oxid, ctx->rxid, ctx->rpi, false, 729 0, NULL, NULL); 730 } 731 } 732 SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 733 (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid); 734 735 free(ctx->abts_poller_args); 736 free(ctx); 737 } 738 739 void 740 nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi, 741 uint16_t oxid, uint16_t rxid) 742 { 743 struct spdk_nvmf_fc_abts_ctx *ctx = NULL; 744 struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg; 745 struct spdk_nvmf_fc_association *assoc = NULL; 746 struct spdk_nvmf_fc_conn *conn = NULL; 747 uint32_t hwqp_cnt = 0; 748 bool skip_hwqp_cnt; 749 struct spdk_nvmf_fc_hwqp **hwqps = NULL; 750 uint32_t i; 751 752 SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 753 nport->nport_hdl, rpi, oxid, rxid); 754 755 /* Allocate memory to track hwqp's with at least 1 active connection. */ 756 hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *)); 757 if (hwqps == NULL) { 758 SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n"); 759 goto bls_rej; 760 } 761 762 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 763 TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) { 764 if ((conn->rpi != rpi) || !conn->hwqp) { 765 continue; 766 } 767 768 skip_hwqp_cnt = false; 769 for (i = 0; i < hwqp_cnt; i++) { 770 if (hwqps[i] == conn->hwqp) { 771 /* Skip. This is already present */ 772 skip_hwqp_cnt = true; 773 break; 774 } 775 } 776 if (!skip_hwqp_cnt) { 777 assert(hwqp_cnt < nport->fc_port->num_io_queues); 778 hwqps[hwqp_cnt] = conn->hwqp; 779 hwqp_cnt++; 780 } 781 } 782 } 783 784 if (!hwqp_cnt) { 785 goto bls_rej; 786 } 787 788 args = calloc(hwqp_cnt, 789 sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args)); 790 if (!args) { 791 goto bls_rej; 792 } 793 794 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx)); 795 if (!ctx) { 796 goto bls_rej; 797 } 798 ctx->rpi = rpi; 799 ctx->oxid = oxid; 800 ctx->rxid = rxid; 801 ctx->nport = nport; 802 ctx->nport_hdl = nport->nport_hdl; 803 ctx->port_hdl = nport->fc_port->port_hdl; 804 ctx->num_hwqps = hwqp_cnt; 805 ctx->ls_hwqp = &nport->fc_port->ls_queue; 806 ctx->fcp_rq_id = nport->fc_port->fcp_rq_id; 807 ctx->abts_poller_args = args; 808 809 /* Get a unique context for this ABTS */ 810 ctx->u_id = nvmf_fc_get_abts_unique_id(); 811 812 for (i = 0; i < hwqp_cnt; i++) { 813 poller_arg = args + i; 814 poller_arg->hwqp = hwqps[i]; 815 poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb; 816 poller_arg->cb_info.cb_data = ctx; 817 poller_arg->cb_info.cb_thread = spdk_get_thread(); 818 poller_arg->ctx = ctx; 819 820 nvmf_fc_poller_api_func(poller_arg->hwqp, 821 SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED, 822 poller_arg); 823 } 824 825 free(hwqps); 826 827 return; 828 bls_rej: 829 free(args); 830 free(hwqps); 831 832 /* Send Reject */ 833 nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi, 834 true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL); 835 SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n", 836 nport->nport_hdl, rpi, oxid, rxid); 837 return; 838 } 839 840 /*** Accessor functions for the FC structures - BEGIN */ 841 /* 842 * Returns true if the port is in offline state. 843 */ 844 bool 845 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port) 846 { 847 if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) { 848 return true; 849 } 850 851 return false; 852 } 853 854 /* 855 * Returns true if the port is in online state. 856 */ 857 bool 858 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port) 859 { 860 if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) { 861 return true; 862 } 863 864 return false; 865 } 866 867 int 868 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port) 869 { 870 if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) { 871 fc_port->hw_port_status = SPDK_FC_PORT_ONLINE; 872 return 0; 873 } 874 875 return -EPERM; 876 } 877 878 int 879 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port) 880 { 881 if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) { 882 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 883 return 0; 884 } 885 886 return -EPERM; 887 } 888 889 int 890 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp) 891 { 892 if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) { 893 hwqp->state = SPDK_FC_HWQP_ONLINE; 894 /* reset some queue counters */ 895 hwqp->num_conns = 0; 896 return nvmf_fc_set_q_online_state(hwqp, true); 897 } 898 899 return -EPERM; 900 } 901 902 int 903 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp) 904 { 905 if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) { 906 hwqp->state = SPDK_FC_HWQP_OFFLINE; 907 return nvmf_fc_set_q_online_state(hwqp, false); 908 } 909 910 return -EPERM; 911 } 912 913 void 914 nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port) 915 { 916 TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link); 917 918 /* 919 * Let LLD add the port to its list. 920 */ 921 nvmf_fc_lld_port_add(fc_port); 922 } 923 924 static void 925 nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port) 926 { 927 TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link); 928 929 /* 930 * Let LLD remove the port from its list. 931 */ 932 nvmf_fc_lld_port_remove(fc_port); 933 } 934 935 struct spdk_nvmf_fc_port * 936 nvmf_fc_port_lookup(uint8_t port_hdl) 937 { 938 struct spdk_nvmf_fc_port *fc_port = NULL; 939 940 TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) { 941 if (fc_port->port_hdl == port_hdl) { 942 return fc_port; 943 } 944 } 945 return NULL; 946 } 947 948 uint32_t 949 nvmf_fc_get_prli_service_params(void) 950 { 951 return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION); 952 } 953 954 int 955 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port, 956 struct spdk_nvmf_fc_nport *nport) 957 { 958 if (fc_port) { 959 TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link); 960 fc_port->num_nports++; 961 return 0; 962 } 963 964 return -EINVAL; 965 } 966 967 int 968 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port, 969 struct spdk_nvmf_fc_nport *nport) 970 { 971 if (fc_port && nport) { 972 TAILQ_REMOVE(&fc_port->nport_list, nport, link); 973 fc_port->num_nports--; 974 return 0; 975 } 976 977 return -EINVAL; 978 } 979 980 static struct spdk_nvmf_fc_nport * 981 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl) 982 { 983 struct spdk_nvmf_fc_nport *fc_nport = NULL; 984 985 TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) { 986 if (fc_nport->nport_hdl == nport_hdl) { 987 return fc_nport; 988 } 989 } 990 991 return NULL; 992 } 993 994 struct spdk_nvmf_fc_nport * 995 nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl) 996 { 997 struct spdk_nvmf_fc_port *fc_port = NULL; 998 999 fc_port = nvmf_fc_port_lookup(port_hdl); 1000 if (fc_port) { 1001 return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl); 1002 } 1003 1004 return NULL; 1005 } 1006 1007 static inline int 1008 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp, 1009 uint32_t d_id, struct spdk_nvmf_fc_nport **nport, 1010 uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport) 1011 { 1012 struct spdk_nvmf_fc_nport *n_port; 1013 struct spdk_nvmf_fc_remote_port_info *r_port; 1014 1015 assert(hwqp); 1016 if (hwqp == NULL) { 1017 SPDK_ERRLOG("Error: hwqp is NULL\n"); 1018 return -EINVAL; 1019 } 1020 assert(nport); 1021 if (nport == NULL) { 1022 SPDK_ERRLOG("Error: nport is NULL\n"); 1023 return -EINVAL; 1024 } 1025 assert(rport); 1026 if (rport == NULL) { 1027 SPDK_ERRLOG("Error: rport is NULL\n"); 1028 return -EINVAL; 1029 } 1030 1031 TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) { 1032 if (n_port->d_id == d_id) { 1033 TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) { 1034 if (r_port->s_id == s_id) { 1035 *nport = n_port; 1036 *rport = r_port; 1037 return 0; 1038 } 1039 } 1040 break; 1041 } 1042 } 1043 1044 return -ENOENT; 1045 } 1046 1047 /* Returns true if the Nport is empty of all rem_ports */ 1048 bool 1049 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport) 1050 { 1051 if (nport && TAILQ_EMPTY(&nport->rem_port_list)) { 1052 assert(nport->rport_count == 0); 1053 return true; 1054 } else { 1055 return false; 1056 } 1057 } 1058 1059 int 1060 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport, 1061 enum spdk_nvmf_fc_object_state state) 1062 { 1063 if (nport) { 1064 nport->nport_state = state; 1065 return 0; 1066 } else { 1067 return -EINVAL; 1068 } 1069 } 1070 1071 bool 1072 nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport, 1073 struct spdk_nvmf_fc_remote_port_info *rem_port) 1074 { 1075 if (nport && rem_port) { 1076 TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link); 1077 nport->rport_count++; 1078 return 0; 1079 } else { 1080 return -EINVAL; 1081 } 1082 } 1083 1084 bool 1085 nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport, 1086 struct spdk_nvmf_fc_remote_port_info *rem_port) 1087 { 1088 if (nport && rem_port) { 1089 TAILQ_REMOVE(&nport->rem_port_list, rem_port, link); 1090 nport->rport_count--; 1091 return 0; 1092 } else { 1093 return -EINVAL; 1094 } 1095 } 1096 1097 int 1098 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport, 1099 enum spdk_nvmf_fc_object_state state) 1100 { 1101 if (rport) { 1102 rport->rport_state = state; 1103 return 0; 1104 } else { 1105 return -EINVAL; 1106 } 1107 } 1108 int 1109 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc, 1110 enum spdk_nvmf_fc_object_state state) 1111 { 1112 if (assoc) { 1113 assoc->assoc_state = state; 1114 return 0; 1115 } else { 1116 return -EINVAL; 1117 } 1118 } 1119 1120 static struct spdk_nvmf_fc_association * 1121 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr) 1122 { 1123 struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair; 1124 struct spdk_nvmf_fc_conn *fc_conn; 1125 1126 if (!qpair) { 1127 SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid); 1128 return NULL; 1129 } 1130 1131 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 1132 1133 return fc_conn->fc_assoc; 1134 } 1135 1136 bool 1137 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl, 1138 struct spdk_nvmf_ctrlr *ctrlr) 1139 { 1140 struct spdk_nvmf_fc_nport *fc_nport = NULL; 1141 struct spdk_nvmf_fc_association *assoc = NULL; 1142 1143 if (!ctrlr) { 1144 return false; 1145 } 1146 1147 fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl); 1148 if (!fc_nport) { 1149 return false; 1150 } 1151 1152 assoc = nvmf_ctrlr_get_fc_assoc(ctrlr); 1153 if (assoc && assoc->tgtport == fc_nport) { 1154 SPDK_DEBUGLOG(nvmf_fc, 1155 "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n", 1156 ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl, 1157 nport_hdl); 1158 return true; 1159 } 1160 return false; 1161 } 1162 1163 static void 1164 nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp, 1165 struct spdk_nvmf_fc_ls_rqst *ls_rqst) 1166 { 1167 assert(ls_rqst); 1168 1169 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1170 1171 /* Return buffer to chip */ 1172 nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index); 1173 } 1174 1175 static int 1176 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp, 1177 struct spdk_nvmf_fc_nport *nport, 1178 struct spdk_nvmf_fc_remote_port_info *rport) 1179 { 1180 struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp; 1181 int num_deleted = 0; 1182 1183 assert(hwqp); 1184 assert(nport); 1185 assert(rport); 1186 1187 TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) { 1188 if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) { 1189 num_deleted++; 1190 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1191 } 1192 } 1193 return num_deleted; 1194 } 1195 1196 static void 1197 nvmf_fc_req_bdev_abort(void *arg1) 1198 { 1199 struct spdk_nvmf_fc_request *fc_req = arg1; 1200 struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr; 1201 int i; 1202 1203 /* Initial release - we don't have to abort Admin Queue or 1204 * Fabric commands. The AQ commands supported at this time are 1205 * Get-Log-Page, 1206 * Identify 1207 * Set Features 1208 * Get Features 1209 * AER -> Special case and handled differently. 1210 * Every one of the above Admin commands (except AER) run 1211 * to completion and so an Abort of such commands doesn't 1212 * make sense. 1213 */ 1214 /* The Fabric commands supported are 1215 * Property Set 1216 * Property Get 1217 * Connect -> Special case (async. handling). Not sure how to 1218 * handle at this point. Let it run to completion. 1219 */ 1220 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 1221 if (ctrlr->aer_req[i] == &fc_req->req) { 1222 SPDK_NOTICELOG("Abort AER request\n"); 1223 nvmf_qpair_free_aer(fc_req->req.qpair); 1224 } 1225 } 1226 } 1227 1228 void 1229 nvmf_fc_request_abort_complete(void *arg1) 1230 { 1231 struct spdk_nvmf_fc_request *fc_req = 1232 (struct spdk_nvmf_fc_request *)arg1; 1233 struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp; 1234 struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL; 1235 TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs; 1236 1237 /* Make a copy of the cb list from fc_req */ 1238 TAILQ_INIT(&abort_cbs); 1239 TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link); 1240 1241 SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req, 1242 fc_req_state_strs[fc_req->state]); 1243 1244 _nvmf_fc_request_free(fc_req); 1245 1246 /* Request abort completed. Notify all the callbacks */ 1247 TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) { 1248 /* Notify */ 1249 ctx->cb(hwqp, 0, ctx->cb_args); 1250 /* Remove */ 1251 TAILQ_REMOVE(&abort_cbs, ctx, link); 1252 /* free */ 1253 free(ctx); 1254 } 1255 } 1256 1257 void 1258 nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts, 1259 spdk_nvmf_fc_caller_cb cb, void *cb_args) 1260 { 1261 struct spdk_nvmf_fc_caller_ctx *ctx = NULL; 1262 bool kill_req = false; 1263 1264 /* Add the cb to list */ 1265 if (cb) { 1266 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx)); 1267 if (!ctx) { 1268 SPDK_ERRLOG("ctx alloc failed.\n"); 1269 return; 1270 } 1271 ctx->cb = cb; 1272 ctx->cb_args = cb_args; 1273 1274 TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link); 1275 } 1276 1277 if (!fc_req->is_aborted) { 1278 /* Increment aborted command counter */ 1279 fc_req->hwqp->counters.num_aborted++; 1280 } 1281 1282 /* If port is dead, skip abort wqe */ 1283 kill_req = nvmf_fc_is_port_dead(fc_req->hwqp); 1284 if (kill_req && nvmf_fc_req_in_xfer(fc_req)) { 1285 fc_req->is_aborted = true; 1286 goto complete; 1287 } 1288 1289 /* Check if the request is already marked for deletion */ 1290 if (fc_req->is_aborted) { 1291 return; 1292 } 1293 1294 /* Mark request as aborted */ 1295 fc_req->is_aborted = true; 1296 1297 /* If xchg is allocated, then save if we need to send abts or not. */ 1298 if (fc_req->xchg) { 1299 fc_req->xchg->send_abts = send_abts; 1300 fc_req->xchg->aborted = true; 1301 } 1302 1303 switch (fc_req->state) { 1304 case SPDK_NVMF_FC_REQ_BDEV_ABORTED: 1305 /* Aborted by backend */ 1306 goto complete; 1307 1308 case SPDK_NVMF_FC_REQ_READ_BDEV: 1309 case SPDK_NVMF_FC_REQ_WRITE_BDEV: 1310 case SPDK_NVMF_FC_REQ_NONE_BDEV: 1311 /* Notify bdev */ 1312 spdk_thread_send_msg(fc_req->hwqp->thread, 1313 nvmf_fc_req_bdev_abort, (void *)fc_req); 1314 break; 1315 1316 case SPDK_NVMF_FC_REQ_READ_XFER: 1317 case SPDK_NVMF_FC_REQ_READ_RSP: 1318 case SPDK_NVMF_FC_REQ_WRITE_XFER: 1319 case SPDK_NVMF_FC_REQ_WRITE_RSP: 1320 case SPDK_NVMF_FC_REQ_NONE_RSP: 1321 /* Notify HBA to abort this exchange */ 1322 nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL); 1323 break; 1324 1325 case SPDK_NVMF_FC_REQ_PENDING: 1326 /* Remove from pending */ 1327 nvmf_fc_request_remove_from_pending(fc_req); 1328 goto complete; 1329 case SPDK_NVMF_FC_REQ_FUSED_WAITING: 1330 TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link); 1331 goto complete; 1332 default: 1333 SPDK_ERRLOG("Request in invalid state.\n"); 1334 goto complete; 1335 } 1336 1337 return; 1338 complete: 1339 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED); 1340 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE, 1341 (void *)fc_req); 1342 } 1343 1344 static int 1345 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req) 1346 { 1347 uint32_t length = fc_req->req.length; 1348 struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup; 1349 struct spdk_nvmf_transport_poll_group *group = &fgroup->group; 1350 struct spdk_nvmf_transport *transport = group->transport; 1351 1352 if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) { 1353 return -ENOMEM; 1354 } 1355 1356 return 0; 1357 } 1358 1359 static int 1360 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req) 1361 { 1362 /* Allocate an XCHG if we dont use send frame for this command. */ 1363 if (!nvmf_fc_use_send_frame(fc_req)) { 1364 fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp); 1365 if (!fc_req->xchg) { 1366 fc_req->hwqp->counters.no_xchg++; 1367 return -EAGAIN; 1368 } 1369 } 1370 1371 if (fc_req->req.length) { 1372 if (nvmf_fc_request_alloc_buffers(fc_req) < 0) { 1373 fc_req->hwqp->counters.buf_alloc_err++; 1374 if (fc_req->xchg) { 1375 nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg); 1376 fc_req->xchg = NULL; 1377 } 1378 return -EAGAIN; 1379 } 1380 } 1381 1382 if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 1383 SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n"); 1384 1385 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER); 1386 1387 if (nvmf_fc_recv_data(fc_req)) { 1388 /* Dropped return success to caller */ 1389 fc_req->hwqp->counters.unexpected_err++; 1390 _nvmf_fc_request_free(fc_req); 1391 } 1392 } else { 1393 SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n"); 1394 1395 if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1396 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV); 1397 } else { 1398 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV); 1399 } 1400 spdk_nvmf_request_exec(&fc_req->req); 1401 } 1402 1403 return 0; 1404 } 1405 1406 static void 1407 nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req, 1408 struct spdk_nvmf_fc_frame_hdr *fchdr) 1409 { 1410 uint8_t df_ctl = fchdr->df_ctl; 1411 uint32_t f_ctl = fchdr->f_ctl; 1412 1413 /* VMID */ 1414 if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) { 1415 struct spdk_nvmf_fc_vm_header *vhdr; 1416 uint32_t vmhdr_offset = 0; 1417 1418 if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) { 1419 vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE; 1420 } 1421 1422 if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) { 1423 vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE; 1424 } 1425 1426 vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr + 1427 sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset); 1428 fc_req->app_id = from_be32(&vhdr->src_vmid); 1429 } 1430 1431 /* Priority */ 1432 if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) { 1433 fc_req->csctl = fchdr->cs_ctl; 1434 } 1435 } 1436 1437 static int 1438 nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame, 1439 struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen) 1440 { 1441 uint16_t cmnd_len; 1442 uint64_t rqst_conn_id; 1443 struct spdk_nvmf_fc_request *fc_req = NULL; 1444 struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL; 1445 struct spdk_nvmf_fc_conn *fc_conn = NULL; 1446 enum spdk_nvme_data_transfer xfer; 1447 uint32_t s_id, d_id; 1448 1449 s_id = (uint32_t)frame->s_id; 1450 d_id = (uint32_t)frame->d_id; 1451 s_id = from_be32(&s_id) >> 8; 1452 d_id = from_be32(&d_id) >> 8; 1453 1454 cmd_iu = buffer->virt; 1455 cmnd_len = cmd_iu->cmnd_iu_len; 1456 cmnd_len = from_be16(&cmnd_len); 1457 1458 /* check for a valid cmnd_iu format */ 1459 if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) || 1460 (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) || 1461 (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) { 1462 SPDK_ERRLOG("IU CMD error\n"); 1463 hwqp->counters.nvme_cmd_iu_err++; 1464 return -ENXIO; 1465 } 1466 1467 xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags); 1468 if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) { 1469 SPDK_ERRLOG("IU CMD xfer error\n"); 1470 hwqp->counters.nvme_cmd_xfer_err++; 1471 return -EPERM; 1472 } 1473 1474 rqst_conn_id = from_be64(&cmd_iu->conn_id); 1475 1476 if (rte_hash_lookup_data(hwqp->connection_list_hash, 1477 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) { 1478 SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id); 1479 hwqp->counters.invalid_conn_err++; 1480 return -ENODEV; 1481 } 1482 1483 /* Validate s_id and d_id */ 1484 if (s_id != fc_conn->s_id) { 1485 hwqp->counters.rport_invalid++; 1486 SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id); 1487 return -ENODEV; 1488 } 1489 1490 if (d_id != fc_conn->d_id) { 1491 hwqp->counters.nport_invalid++; 1492 SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id); 1493 return -ENODEV; 1494 } 1495 1496 /* If association/connection is being deleted - return */ 1497 if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1498 SPDK_ERRLOG("Association %ld state = %d not valid\n", 1499 fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state); 1500 return -EACCES; 1501 } 1502 1503 if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1504 SPDK_ERRLOG("Connection %ld state = %d not valid\n", 1505 rqst_conn_id, fc_conn->conn_state); 1506 return -EACCES; 1507 } 1508 1509 if (!spdk_nvmf_qpair_is_active(&fc_conn->qpair)) { 1510 SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n", 1511 rqst_conn_id, fc_conn->qpair.state); 1512 return -EACCES; 1513 } 1514 1515 /* Make sure xfer len is according to mdts */ 1516 if (from_be32(&cmd_iu->data_len) > 1517 hwqp->fgroup->group.transport->opts.max_io_size) { 1518 SPDK_ERRLOG("IO length requested is greater than MDTS\n"); 1519 return -EINVAL; 1520 } 1521 1522 /* allocate a request buffer */ 1523 fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn); 1524 if (fc_req == NULL) { 1525 return -ENOMEM; 1526 } 1527 1528 fc_req->req.length = from_be32(&cmd_iu->data_len); 1529 fc_req->req.qpair = &fc_conn->qpair; 1530 memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg)); 1531 fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd; 1532 fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp; 1533 fc_req->oxid = frame->ox_id; 1534 fc_req->oxid = from_be16(&fc_req->oxid); 1535 fc_req->rpi = fc_conn->rpi; 1536 fc_req->poller_lcore = hwqp->lcore_id; 1537 fc_req->poller_thread = hwqp->thread; 1538 fc_req->hwqp = hwqp; 1539 fc_req->fc_conn = fc_conn; 1540 fc_req->req.xfer = xfer; 1541 fc_req->s_id = s_id; 1542 fc_req->d_id = d_id; 1543 fc_req->csn = from_be32(&cmd_iu->cmnd_seq_num); 1544 nvmf_fc_set_vmid_priority(fc_req, frame); 1545 1546 nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT); 1547 1548 if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) { 1549 STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link); 1550 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING); 1551 } 1552 1553 return 0; 1554 } 1555 1556 /* 1557 * These functions are called from the FC LLD 1558 */ 1559 1560 void 1561 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req) 1562 { 1563 struct spdk_nvmf_fc_hwqp *hwqp; 1564 struct spdk_nvmf_transport_poll_group *group; 1565 1566 if (!fc_req) { 1567 return; 1568 } 1569 hwqp = fc_req->hwqp; 1570 1571 if (fc_req->xchg) { 1572 nvmf_fc_put_xchg(hwqp, fc_req->xchg); 1573 fc_req->xchg = NULL; 1574 } 1575 1576 /* Release IO buffers */ 1577 if (fc_req->req.data_from_pool) { 1578 group = &hwqp->fgroup->group; 1579 spdk_nvmf_request_free_buffers(&fc_req->req, group, 1580 group->transport); 1581 } 1582 fc_req->req.iovcnt = 0; 1583 1584 /* Free Fc request */ 1585 nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req); 1586 } 1587 1588 void 1589 nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req, 1590 enum spdk_nvmf_fc_request_state state) 1591 { 1592 assert(fc_req->magic != 0xDEADBEEF); 1593 1594 SPDK_DEBUGLOG(nvmf_fc, 1595 "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req, 1596 nvmf_fc_request_get_state_str(fc_req->state), 1597 nvmf_fc_request_get_state_str(state)); 1598 nvmf_fc_record_req_trace_point(fc_req, state); 1599 fc_req->state = state; 1600 } 1601 1602 char * 1603 nvmf_fc_request_get_state_str(int state) 1604 { 1605 static char *unk_str = "unknown"; 1606 1607 return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ? 1608 fc_req_state_strs[state] : unk_str); 1609 } 1610 1611 int 1612 nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp, 1613 uint32_t buff_idx, 1614 struct spdk_nvmf_fc_frame_hdr *frame, 1615 struct spdk_nvmf_fc_buffer_desc *buffer, 1616 uint32_t plen) 1617 { 1618 int rc = 0; 1619 uint32_t s_id, d_id; 1620 struct spdk_nvmf_fc_nport *nport = NULL; 1621 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 1622 1623 s_id = (uint32_t)frame->s_id; 1624 d_id = (uint32_t)frame->d_id; 1625 s_id = from_be32(&s_id) >> 8; 1626 d_id = from_be32(&d_id) >> 8; 1627 1628 SPDK_DEBUGLOG(nvmf_fc, 1629 "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n", 1630 s_id, d_id, 1631 ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff), 1632 ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff)); 1633 1634 if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) && 1635 (frame->type == FCNVME_TYPE_NVMF_DATA)) { 1636 struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt; 1637 struct spdk_nvmf_fc_ls_rqst *ls_rqst; 1638 1639 SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n"); 1640 1641 rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport); 1642 if (rc) { 1643 if (nport == NULL) { 1644 SPDK_ERRLOG("Nport not found. Dropping\n"); 1645 /* increment invalid nport counter */ 1646 hwqp->counters.nport_invalid++; 1647 } else if (rport == NULL) { 1648 SPDK_ERRLOG("Rport not found. Dropping\n"); 1649 /* increment invalid rport counter */ 1650 hwqp->counters.rport_invalid++; 1651 } 1652 return rc; 1653 } 1654 1655 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED || 1656 rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1657 SPDK_ERRLOG("%s state not created. Dropping\n", 1658 nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ? 1659 "Nport" : "Rport"); 1660 return -EACCES; 1661 } 1662 1663 /* Use the RQ buffer for holding LS request. */ 1664 ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst; 1665 1666 /* Fill in the LS request structure */ 1667 ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst; 1668 ls_rqst->rqstbuf.phys = buffer->phys + 1669 offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst); 1670 ls_rqst->rqstbuf.buf_index = buff_idx; 1671 ls_rqst->rqst_len = plen; 1672 1673 ls_rqst->rspbuf.virt = (void *)&req_buf->resp; 1674 ls_rqst->rspbuf.phys = buffer->phys + 1675 offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp); 1676 ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE; 1677 1678 ls_rqst->private_data = (void *)hwqp; 1679 ls_rqst->rpi = rport->rpi; 1680 ls_rqst->oxid = (uint16_t)frame->ox_id; 1681 ls_rqst->oxid = from_be16(&ls_rqst->oxid); 1682 ls_rqst->s_id = s_id; 1683 ls_rqst->d_id = d_id; 1684 ls_rqst->nport = nport; 1685 ls_rqst->rport = rport; 1686 ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt; 1687 1688 if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) { 1689 ls_rqst->xchg = nvmf_fc_get_xri(hwqp); 1690 } else { 1691 ls_rqst->xchg = NULL; 1692 } 1693 1694 if (ls_rqst->xchg) { 1695 /* Handover the request to LS module */ 1696 nvmf_fc_handle_ls_rqst(ls_rqst); 1697 } else { 1698 /* No XCHG available. Add to pending list. */ 1699 hwqp->counters.no_xchg++; 1700 TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1701 } 1702 } else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) && 1703 (frame->type == FCNVME_TYPE_FC_EXCHANGE)) { 1704 1705 SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n"); 1706 rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen); 1707 if (!rc) { 1708 nvmf_fc_rqpair_buffer_release(hwqp, buff_idx); 1709 } 1710 } else { 1711 1712 SPDK_ERRLOG("Unknown frame received. Dropping\n"); 1713 hwqp->counters.unknown_frame++; 1714 rc = -EINVAL; 1715 } 1716 1717 return rc; 1718 } 1719 1720 void 1721 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp) 1722 { 1723 struct spdk_nvmf_request *req = NULL, *tmp; 1724 struct spdk_nvmf_fc_request *fc_req; 1725 int budget = 64; 1726 1727 if (!hwqp->fgroup) { 1728 /* LS queue is tied to acceptor_poll group and LS pending requests 1729 * are stagged and processed using hwqp->ls_pending_queue. 1730 */ 1731 return; 1732 } 1733 1734 STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) { 1735 fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req); 1736 if (!nvmf_fc_request_execute(fc_req)) { 1737 /* Successfully posted, Delete from pending. */ 1738 nvmf_fc_request_remove_from_pending(fc_req); 1739 } 1740 1741 if (budget) { 1742 budget--; 1743 } else { 1744 return; 1745 } 1746 } 1747 } 1748 1749 void 1750 nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp) 1751 { 1752 struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp; 1753 struct spdk_nvmf_fc_nport *nport = NULL; 1754 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 1755 1756 TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) { 1757 /* lookup nport and rport again - make sure they are still valid */ 1758 int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport); 1759 if (rc) { 1760 if (nport == NULL) { 1761 SPDK_ERRLOG("Nport not found. Dropping\n"); 1762 /* increment invalid nport counter */ 1763 hwqp->counters.nport_invalid++; 1764 } else if (rport == NULL) { 1765 SPDK_ERRLOG("Rport not found. Dropping\n"); 1766 /* increment invalid rport counter */ 1767 hwqp->counters.rport_invalid++; 1768 } 1769 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1770 continue; 1771 } 1772 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED || 1773 rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1774 SPDK_ERRLOG("%s state not created. Dropping\n", 1775 nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ? 1776 "Nport" : "Rport"); 1777 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1778 continue; 1779 } 1780 1781 ls_rqst->xchg = nvmf_fc_get_xri(hwqp); 1782 if (ls_rqst->xchg) { 1783 /* Got an XCHG */ 1784 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1785 /* Handover the request to LS module */ 1786 nvmf_fc_handle_ls_rqst(ls_rqst); 1787 } else { 1788 /* No more XCHGs. Stop processing. */ 1789 hwqp->counters.no_xchg++; 1790 return; 1791 } 1792 } 1793 } 1794 1795 int 1796 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req) 1797 { 1798 int rc = 0; 1799 struct spdk_nvmf_request *req = &fc_req->req; 1800 struct spdk_nvmf_qpair *qpair = req->qpair; 1801 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1802 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1803 uint16_t ersp_len = 0; 1804 1805 /* set sq head value in resp */ 1806 rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair); 1807 1808 /* Increment connection responses */ 1809 fc_conn->rsp_count++; 1810 1811 if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count, 1812 fc_req->transferred_len)) { 1813 /* Fill ERSP Len */ 1814 to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) / 1815 sizeof(uint32_t))); 1816 fc_req->ersp.ersp_len = ersp_len; 1817 1818 /* Fill RSN */ 1819 to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn); 1820 fc_conn->rsn++; 1821 1822 /* Fill transfer length */ 1823 to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len); 1824 1825 SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n"); 1826 rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp, 1827 sizeof(struct spdk_nvmf_fc_ersp_iu)); 1828 } else { 1829 SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n"); 1830 rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0); 1831 } 1832 1833 return rc; 1834 } 1835 1836 bool 1837 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req, 1838 uint32_t rsp_cnt, uint32_t xfer_len) 1839 { 1840 struct spdk_nvmf_request *req = &fc_req->req; 1841 struct spdk_nvmf_qpair *qpair = req->qpair; 1842 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1843 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1844 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1845 uint16_t status = *((uint16_t *)&rsp->status); 1846 1847 /* 1848 * Check if we need to send ERSP 1849 * 1) For every N responses where N == ersp_ratio 1850 * 2) Fabric commands. 1851 * 3) Completion status failed or Completion dw0 or dw1 valid. 1852 * 4) SQ == 90% full. 1853 * 5) Transfer length not equal to CMD IU length 1854 */ 1855 1856 if (!(rsp_cnt % fc_conn->esrp_ratio) || 1857 (cmd->opc == SPDK_NVME_OPC_FABRIC) || 1858 (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 || 1859 (req->length != xfer_len)) { 1860 return true; 1861 } 1862 return false; 1863 } 1864 1865 static int 1866 nvmf_fc_request_complete(struct spdk_nvmf_request *req) 1867 { 1868 int rc = 0; 1869 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 1870 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1871 1872 if (fc_req->is_aborted) { 1873 /* Defer this to make sure we dont call io cleanup in same context. */ 1874 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE, 1875 (void *)fc_req); 1876 } else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS && 1877 req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1878 1879 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER); 1880 1881 rc = nvmf_fc_send_data(fc_req); 1882 } else { 1883 if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) { 1884 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP); 1885 } else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) { 1886 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP); 1887 } else { 1888 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP); 1889 } 1890 1891 rc = nvmf_fc_handle_rsp(fc_req); 1892 } 1893 1894 if (rc) { 1895 SPDK_ERRLOG("Error in request complete.\n"); 1896 _nvmf_fc_request_free(fc_req); 1897 } 1898 return 0; 1899 } 1900 1901 struct spdk_nvmf_tgt * 1902 nvmf_fc_get_tgt(void) 1903 { 1904 if (g_nvmf_ftransport) { 1905 return g_nvmf_ftransport->transport.tgt; 1906 } 1907 return NULL; 1908 } 1909 1910 /* 1911 * FC Transport Public API begins here 1912 */ 1913 1914 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128 1915 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32 1916 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5 1917 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0 1918 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536 1919 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096 1920 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192 1921 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE / \ 1922 SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE) 1923 1924 static void 1925 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts) 1926 { 1927 opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH; 1928 opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR; 1929 opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE; 1930 opts->max_io_size = SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE; 1931 opts->io_unit_size = SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE; 1932 opts->max_aq_depth = SPDK_NVMF_FC_DEFAULT_AQ_DEPTH; 1933 opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS; 1934 } 1935 1936 static int nvmf_fc_accept(void *ctx); 1937 1938 static struct spdk_nvmf_transport * 1939 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts) 1940 { 1941 uint32_t sge_count; 1942 1943 SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n" 1944 " Transport opts: max_ioq_depth=%d, max_io_size=%d,\n" 1945 " max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n" 1946 " max_aq_depth=%d\n", 1947 opts->max_queue_depth, 1948 opts->max_io_size, 1949 opts->max_qpairs_per_ctrlr - 1, 1950 opts->io_unit_size, 1951 opts->max_aq_depth); 1952 1953 if (g_nvmf_ftransport) { 1954 SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n"); 1955 return NULL; 1956 } 1957 1958 if (spdk_env_get_last_core() < 1) { 1959 SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n", 1960 spdk_env_get_last_core() + 1); 1961 return NULL; 1962 } 1963 1964 sge_count = opts->max_io_size / opts->io_unit_size; 1965 if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) { 1966 SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size); 1967 return NULL; 1968 } 1969 1970 g_nvmf_fc_main_thread = spdk_get_thread(); 1971 g_nvmf_fgroup_count = 0; 1972 g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport)); 1973 1974 if (!g_nvmf_ftransport) { 1975 SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n"); 1976 return NULL; 1977 } 1978 1979 if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) { 1980 SPDK_ERRLOG("pthread_mutex_init() failed\n"); 1981 free(g_nvmf_ftransport); 1982 g_nvmf_ftransport = NULL; 1983 return NULL; 1984 } 1985 1986 g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept, 1987 &g_nvmf_ftransport->transport, opts->acceptor_poll_rate); 1988 if (!g_nvmf_ftransport->accept_poller) { 1989 free(g_nvmf_ftransport); 1990 g_nvmf_ftransport = NULL; 1991 return NULL; 1992 } 1993 1994 /* initialize the low level FC driver */ 1995 nvmf_fc_lld_init(); 1996 1997 return &g_nvmf_ftransport->transport; 1998 } 1999 2000 static void 2001 nvmf_fc_destroy_done_cb(void *cb_arg) 2002 { 2003 free(g_nvmf_ftransport); 2004 if (g_transport_destroy_done_cb) { 2005 g_transport_destroy_done_cb(cb_arg); 2006 g_transport_destroy_done_cb = NULL; 2007 } 2008 } 2009 2010 static int 2011 nvmf_fc_destroy(struct spdk_nvmf_transport *transport, 2012 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 2013 { 2014 if (transport) { 2015 struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp; 2016 2017 /* clean up any FC poll groups still around */ 2018 TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) { 2019 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2020 free(fgroup); 2021 } 2022 2023 spdk_poller_unregister(&g_nvmf_ftransport->accept_poller); 2024 g_nvmf_fgroup_count = 0; 2025 g_transport_destroy_done_cb = cb_fn; 2026 2027 /* low level FC driver clean up */ 2028 nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg); 2029 } 2030 2031 return 0; 2032 } 2033 2034 static int 2035 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, 2036 struct spdk_nvmf_listen_opts *listen_opts) 2037 { 2038 return 0; 2039 } 2040 2041 static void 2042 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport, 2043 const struct spdk_nvme_transport_id *_trid) 2044 { 2045 } 2046 2047 static int 2048 nvmf_fc_accept(void *ctx) 2049 { 2050 struct spdk_nvmf_fc_port *fc_port = NULL; 2051 uint32_t count = 0; 2052 static bool start_lld = false; 2053 2054 if (spdk_unlikely(!start_lld)) { 2055 start_lld = true; 2056 nvmf_fc_lld_start(); 2057 } 2058 2059 /* poll the LS queue on each port */ 2060 TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) { 2061 if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) { 2062 count += nvmf_fc_process_queue(&fc_port->ls_queue); 2063 } 2064 } 2065 2066 return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 2067 } 2068 2069 static void 2070 nvmf_fc_discover(struct spdk_nvmf_transport *transport, 2071 struct spdk_nvme_transport_id *trid, 2072 struct spdk_nvmf_discovery_log_page_entry *entry) 2073 { 2074 entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC; 2075 entry->adrfam = trid->adrfam; 2076 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED; 2077 2078 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 2079 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 2080 } 2081 2082 static struct spdk_nvmf_transport_poll_group * 2083 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport, 2084 struct spdk_nvmf_poll_group *group) 2085 { 2086 struct spdk_nvmf_fc_poll_group *fgroup; 2087 struct spdk_nvmf_fc_transport *ftransport = 2088 SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport); 2089 2090 if (spdk_interrupt_mode_is_enabled()) { 2091 SPDK_ERRLOG("FC transport does not support interrupt mode\n"); 2092 return NULL; 2093 } 2094 2095 fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group)); 2096 if (!fgroup) { 2097 SPDK_ERRLOG("Unable to alloc FC poll group\n"); 2098 return NULL; 2099 } 2100 2101 TAILQ_INIT(&fgroup->hwqp_list); 2102 2103 pthread_mutex_lock(&ftransport->lock); 2104 TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link); 2105 g_nvmf_fgroup_count++; 2106 pthread_mutex_unlock(&ftransport->lock); 2107 2108 return &fgroup->group; 2109 } 2110 2111 static void 2112 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 2113 { 2114 struct spdk_nvmf_fc_poll_group *fgroup; 2115 struct spdk_nvmf_fc_transport *ftransport = 2116 SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport); 2117 2118 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2119 pthread_mutex_lock(&ftransport->lock); 2120 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2121 g_nvmf_fgroup_count--; 2122 pthread_mutex_unlock(&ftransport->lock); 2123 2124 free(fgroup); 2125 } 2126 2127 static int 2128 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2129 struct spdk_nvmf_qpair *qpair) 2130 { 2131 struct spdk_nvmf_fc_poll_group *fgroup; 2132 struct spdk_nvmf_fc_conn *fc_conn; 2133 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2134 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2135 bool hwqp_found = false; 2136 2137 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2138 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2139 2140 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2141 if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) { 2142 hwqp_found = true; 2143 break; 2144 } 2145 } 2146 2147 if (!hwqp_found) { 2148 SPDK_ERRLOG("No valid hwqp found for new QP.\n"); 2149 goto err; 2150 } 2151 2152 if (!nvmf_fc_assign_conn_to_hwqp(hwqp, 2153 &fc_conn->conn_id, 2154 fc_conn->max_queue_depth)) { 2155 SPDK_ERRLOG("Failed to get a connection id for new QP.\n"); 2156 goto err; 2157 } 2158 2159 fc_conn->hwqp = hwqp; 2160 2161 /* If this is for ADMIN connection, then update assoc ID. */ 2162 if (fc_conn->qpair.qid == 0) { 2163 fc_conn->fc_assoc->assoc_id = fc_conn->conn_id; 2164 } 2165 2166 api_data = &fc_conn->create_opd->u.add_conn; 2167 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args); 2168 return 0; 2169 err: 2170 return -1; 2171 } 2172 2173 static int 2174 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2175 { 2176 uint32_t count = 0; 2177 struct spdk_nvmf_fc_poll_group *fgroup; 2178 struct spdk_nvmf_fc_hwqp *hwqp; 2179 2180 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2181 2182 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2183 if (hwqp->state == SPDK_FC_HWQP_ONLINE) { 2184 count += nvmf_fc_process_queue(hwqp); 2185 } 2186 } 2187 2188 return (int) count; 2189 } 2190 2191 static int 2192 nvmf_fc_request_free(struct spdk_nvmf_request *req) 2193 { 2194 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 2195 2196 if (!fc_req->is_aborted) { 2197 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED); 2198 nvmf_fc_request_abort(fc_req, true, NULL, NULL); 2199 } else { 2200 nvmf_fc_request_abort_complete(fc_req); 2201 } 2202 2203 return 0; 2204 } 2205 2206 static void 2207 nvmf_fc_connection_delete_done_cb(void *arg) 2208 { 2209 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2210 2211 if (fc_ctx->cb_fn) { 2212 spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx); 2213 } 2214 free(fc_ctx); 2215 } 2216 2217 static void 2218 _nvmf_fc_close_qpair(void *arg) 2219 { 2220 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2221 struct spdk_nvmf_qpair *qpair = fc_ctx->qpair; 2222 struct spdk_nvmf_fc_conn *fc_conn; 2223 int rc; 2224 2225 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2226 if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) { 2227 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2228 2229 if (fc_conn->create_opd) { 2230 api_data = &fc_conn->create_opd->u.add_conn; 2231 2232 nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst, 2233 api_data->args.fc_conn, api_data->aq_conn); 2234 } 2235 } else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) { 2236 rc = nvmf_fc_delete_connection(fc_conn, false, true, 2237 nvmf_fc_connection_delete_done_cb, fc_ctx); 2238 if (!rc) { 2239 /* Wait for transport to complete its work. */ 2240 return; 2241 } 2242 2243 SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__); 2244 } else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 2245 /* This is the case where deletion started from FC layer. */ 2246 spdk_thread_send_msg(fc_ctx->qpair_thread, fc_conn->qpair_disconnect_cb_fn, 2247 fc_conn->qpair_disconnect_ctx); 2248 } 2249 2250 nvmf_fc_connection_delete_done_cb(fc_ctx); 2251 } 2252 2253 static void 2254 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair, 2255 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 2256 { 2257 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx; 2258 2259 fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx)); 2260 if (!fc_ctx) { 2261 SPDK_ERRLOG("Unable to allocate close_qpair ctx."); 2262 if (cb_fn) { 2263 cb_fn(cb_arg); 2264 } 2265 return; 2266 } 2267 fc_ctx->qpair = qpair; 2268 fc_ctx->cb_fn = cb_fn; 2269 fc_ctx->cb_ctx = cb_arg; 2270 fc_ctx->qpair_thread = spdk_get_thread(); 2271 2272 spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx); 2273 } 2274 2275 static int 2276 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2277 struct spdk_nvme_transport_id *trid) 2278 { 2279 struct spdk_nvmf_fc_conn *fc_conn; 2280 2281 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2282 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2283 return 0; 2284 } 2285 2286 static int 2287 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2288 struct spdk_nvme_transport_id *trid) 2289 { 2290 struct spdk_nvmf_fc_conn *fc_conn; 2291 2292 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2293 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2294 return 0; 2295 } 2296 2297 static int 2298 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2299 struct spdk_nvme_transport_id *trid) 2300 { 2301 struct spdk_nvmf_fc_conn *fc_conn; 2302 2303 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2304 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2305 return 0; 2306 } 2307 2308 static void 2309 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 2310 struct spdk_nvmf_request *req) 2311 { 2312 spdk_nvmf_request_complete(req); 2313 } 2314 2315 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = { 2316 .name = "FC", 2317 .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC, 2318 .opts_init = nvmf_fc_opts_init, 2319 .create = nvmf_fc_create, 2320 .destroy = nvmf_fc_destroy, 2321 2322 .listen = nvmf_fc_listen, 2323 .stop_listen = nvmf_fc_stop_listen, 2324 2325 .listener_discover = nvmf_fc_discover, 2326 2327 .poll_group_create = nvmf_fc_poll_group_create, 2328 .poll_group_destroy = nvmf_fc_poll_group_destroy, 2329 .poll_group_add = nvmf_fc_poll_group_add, 2330 .poll_group_poll = nvmf_fc_poll_group_poll, 2331 2332 .req_complete = nvmf_fc_request_complete, 2333 .req_free = nvmf_fc_request_free, 2334 .qpair_fini = nvmf_fc_close_qpair, 2335 .qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid, 2336 .qpair_get_local_trid = nvmf_fc_qpair_get_local_trid, 2337 .qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid, 2338 .qpair_abort_request = nvmf_fc_qpair_abort_request, 2339 }; 2340 2341 /* Initializes the data for the creation of a FC-Port object in the SPDK 2342 * library. The spdk_nvmf_fc_port is a well defined structure that is part of 2343 * the API to the library. The contents added to this well defined structure 2344 * is private to each vendors implementation. 2345 */ 2346 static int 2347 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port, 2348 struct spdk_nvmf_fc_hw_port_init_args *args) 2349 { 2350 int rc = 0; 2351 /* Used a high number for the LS HWQP so that it does not clash with the 2352 * IO HWQP's and immediately shows a LS queue during tracing. 2353 */ 2354 uint32_t i; 2355 2356 fc_port->port_hdl = args->port_handle; 2357 fc_port->lld_fc_port = args->lld_fc_port; 2358 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2359 fc_port->fcp_rq_id = args->fcp_rq_id; 2360 fc_port->num_io_queues = args->io_queue_cnt; 2361 2362 /* 2363 * Set port context from init args. Used for FCP port stats. 2364 */ 2365 fc_port->port_ctx = args->port_ctx; 2366 2367 /* 2368 * Initialize the LS queue wherever needed. 2369 */ 2370 fc_port->ls_queue.queues = args->ls_queue; 2371 fc_port->ls_queue.thread = nvmf_fc_get_main_thread(); 2372 fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues; 2373 fc_port->ls_queue.is_ls_queue = true; 2374 2375 /* 2376 * Initialize the LS queue. 2377 */ 2378 rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue); 2379 if (rc) { 2380 return rc; 2381 } 2382 2383 /* 2384 * Initialize the IO queues. 2385 */ 2386 for (i = 0; i < args->io_queue_cnt; i++) { 2387 struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i]; 2388 hwqp->hwqp_id = i; 2389 hwqp->queues = args->io_queues[i]; 2390 hwqp->is_ls_queue = false; 2391 rc = nvmf_fc_init_hwqp(fc_port, hwqp); 2392 if (rc) { 2393 for (; i > 0; --i) { 2394 rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash); 2395 rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash); 2396 } 2397 rte_hash_free(fc_port->ls_queue.connection_list_hash); 2398 rte_hash_free(fc_port->ls_queue.rport_list_hash); 2399 return rc; 2400 } 2401 } 2402 2403 /* 2404 * Initialize the LS processing for port 2405 */ 2406 nvmf_fc_ls_init(fc_port); 2407 2408 /* 2409 * Initialize the list of nport on this HW port. 2410 */ 2411 TAILQ_INIT(&fc_port->nport_list); 2412 fc_port->num_nports = 0; 2413 2414 return 0; 2415 } 2416 2417 /* 2418 * FC port must have all its nports deleted before transitioning to offline state. 2419 */ 2420 static void 2421 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port) 2422 { 2423 struct spdk_nvmf_fc_nport *nport = NULL; 2424 /* All nports must have been deleted at this point for this fc port */ 2425 DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list)); 2426 DEV_VERIFY(fc_port->num_nports == 0); 2427 /* Mark the nport states to be zombie, if they exist */ 2428 if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) { 2429 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 2430 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2431 } 2432 } 2433 } 2434 2435 static void 2436 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err) 2437 { 2438 ASSERT_SPDK_FC_MAIN_THREAD(); 2439 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args; 2440 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2441 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2442 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2443 int spdk_err = 0; 2444 uint8_t port_handle = cb_data->port_handle; 2445 uint32_t s_id = rport->s_id; 2446 uint32_t rpi = rport->rpi; 2447 uint32_t assoc_count = rport->assoc_count; 2448 uint32_t nport_hdl = nport->nport_hdl; 2449 uint32_t d_id = nport->d_id; 2450 char log_str[256]; 2451 2452 /* 2453 * Assert on any delete failure. 2454 */ 2455 if (0 != err) { 2456 DEV_VERIFY(!"Error in IT Delete callback."); 2457 goto out; 2458 } 2459 2460 if (cb_func != NULL) { 2461 (void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2462 } 2463 2464 out: 2465 free(cb_data); 2466 2467 snprintf(log_str, sizeof(log_str), 2468 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n", 2469 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err); 2470 2471 if (err != 0) { 2472 SPDK_ERRLOG("%s", log_str); 2473 } else { 2474 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2475 } 2476 } 2477 2478 static void 2479 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err) 2480 { 2481 ASSERT_SPDK_FC_MAIN_THREAD(); 2482 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args; 2483 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2484 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2485 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func; 2486 uint32_t s_id = rport->s_id; 2487 uint32_t rpi = rport->rpi; 2488 uint32_t assoc_count = rport->assoc_count; 2489 uint32_t nport_hdl = nport->nport_hdl; 2490 uint32_t d_id = nport->d_id; 2491 char log_str[256]; 2492 2493 /* 2494 * Assert on any association delete failure. We continue to delete other 2495 * associations in promoted builds. 2496 */ 2497 if (0 != err) { 2498 DEV_VERIFY(!"Nport's association delete callback returned error"); 2499 if (nport->assoc_count > 0) { 2500 nport->assoc_count--; 2501 } 2502 if (rport->assoc_count > 0) { 2503 rport->assoc_count--; 2504 } 2505 } 2506 2507 /* 2508 * If this is the last association being deleted for the ITN, 2509 * execute the callback(s). 2510 */ 2511 if (0 == rport->assoc_count) { 2512 /* Remove the rport from the remote port list. */ 2513 if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) { 2514 SPDK_ERRLOG("Error while removing rport from list.\n"); 2515 DEV_VERIFY(!"Error while removing rport from list."); 2516 } 2517 2518 if (cb_func != NULL) { 2519 /* 2520 * Callback function is provided by the caller 2521 * of nvmf_fc_adm_i_t_delete_assoc(). 2522 */ 2523 (void)cb_func(cb_data->cb_ctx, 0); 2524 } 2525 free(rport); 2526 free(args); 2527 } 2528 2529 snprintf(log_str, sizeof(log_str), 2530 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n", 2531 nport_hdl, s_id, d_id, rpi, assoc_count, err); 2532 2533 if (err != 0) { 2534 SPDK_ERRLOG("%s", log_str); 2535 } else { 2536 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2537 } 2538 } 2539 2540 /** 2541 * Process a IT delete. 2542 */ 2543 static void 2544 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport, 2545 struct spdk_nvmf_fc_remote_port_info *rport, 2546 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func, 2547 void *cb_ctx) 2548 { 2549 int err = 0; 2550 struct spdk_nvmf_fc_association *assoc = NULL; 2551 int assoc_err = 0; 2552 uint32_t num_assoc = 0; 2553 uint32_t num_assoc_del_scheduled = 0; 2554 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL; 2555 uint8_t port_hdl = nport->port_hdl; 2556 uint32_t s_id = rport->s_id; 2557 uint32_t rpi = rport->rpi; 2558 uint32_t assoc_count = rport->assoc_count; 2559 char log_str[256]; 2560 2561 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n", 2562 nport->nport_hdl); 2563 2564 /* 2565 * Allocate memory for callback data. 2566 * This memory will be freed by the callback function. 2567 */ 2568 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data)); 2569 if (NULL == cb_data) { 2570 SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl); 2571 err = -ENOMEM; 2572 goto out; 2573 } 2574 cb_data->nport = nport; 2575 cb_data->rport = rport; 2576 cb_data->port_handle = port_hdl; 2577 cb_data->cb_func = cb_func; 2578 cb_data->cb_ctx = cb_ctx; 2579 2580 /* 2581 * Delete all associations, if any, related with this ITN/remote_port. 2582 */ 2583 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 2584 num_assoc++; 2585 if (assoc->s_id == s_id) { 2586 assoc_err = nvmf_fc_delete_association(nport, 2587 assoc->assoc_id, 2588 false /* send abts */, false, 2589 nvmf_fc_adm_i_t_delete_assoc_cb, cb_data); 2590 if (0 != assoc_err) { 2591 /* 2592 * Mark this association as zombie. 2593 */ 2594 err = -EINVAL; 2595 DEV_VERIFY(!"Error while deleting association"); 2596 (void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2597 } else { 2598 num_assoc_del_scheduled++; 2599 } 2600 } 2601 } 2602 2603 out: 2604 if ((cb_data) && (num_assoc_del_scheduled == 0)) { 2605 /* 2606 * Since there are no association_delete calls 2607 * successfully scheduled, the association_delete 2608 * callback function will never be called. 2609 * In this case, call the callback function now. 2610 */ 2611 nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0); 2612 } 2613 2614 snprintf(log_str, sizeof(log_str), 2615 "IT delete associations on nport:%d end. " 2616 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n", 2617 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err); 2618 2619 if (err == 0) { 2620 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2621 } else { 2622 SPDK_ERRLOG("%s", log_str); 2623 } 2624 } 2625 2626 static void 2627 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 2628 { 2629 ASSERT_SPDK_FC_MAIN_THREAD(); 2630 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL; 2631 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2632 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2633 struct spdk_nvmf_fc_port *fc_port = NULL; 2634 int err = 0; 2635 2636 quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data; 2637 hwqp = quiesce_api_data->hwqp; 2638 fc_port = hwqp->fc_port; 2639 port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx; 2640 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func; 2641 2642 /* 2643 * Decrement the callback/quiesced queue count. 2644 */ 2645 port_quiesce_ctx->quiesce_count--; 2646 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id); 2647 2648 free(quiesce_api_data); 2649 /* 2650 * Wait for call backs i.e. max_ioq_queues + LS QUEUE. 2651 */ 2652 if (port_quiesce_ctx->quiesce_count > 0) { 2653 return; 2654 } 2655 2656 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2657 SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl); 2658 } else { 2659 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl); 2660 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2661 } 2662 2663 if (cb_func) { 2664 /* 2665 * Callback function for the called of quiesce. 2666 */ 2667 cb_func(port_quiesce_ctx->ctx, err); 2668 } 2669 2670 /* 2671 * Free the context structure. 2672 */ 2673 free(port_quiesce_ctx); 2674 2675 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl, 2676 err); 2677 } 2678 2679 static int 2680 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx, 2681 spdk_nvmf_fc_poller_api_cb cb_func) 2682 { 2683 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args; 2684 enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS; 2685 int err = 0; 2686 2687 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args)); 2688 2689 if (args == NULL) { 2690 err = -ENOMEM; 2691 SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id); 2692 goto done; 2693 } 2694 args->hwqp = fc_hwqp; 2695 args->ctx = ctx; 2696 args->cb_info.cb_func = cb_func; 2697 args->cb_info.cb_data = args; 2698 args->cb_info.cb_thread = spdk_get_thread(); 2699 2700 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id); 2701 rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args); 2702 if (rc) { 2703 free(args); 2704 err = -EINVAL; 2705 } 2706 2707 done: 2708 return err; 2709 } 2710 2711 /* 2712 * Hw port Quiesce 2713 */ 2714 static int 2715 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx, 2716 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func) 2717 { 2718 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2719 uint32_t i = 0; 2720 int err = 0; 2721 2722 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl); 2723 2724 /* 2725 * If the port is in an OFFLINE state, set the state to QUIESCED 2726 * and execute the callback. 2727 */ 2728 if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) { 2729 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2730 } 2731 2732 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2733 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n", 2734 fc_port->port_hdl); 2735 /* 2736 * Execute the callback function directly. 2737 */ 2738 cb_func(ctx, err); 2739 goto out; 2740 } 2741 2742 port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx)); 2743 2744 if (port_quiesce_ctx == NULL) { 2745 err = -ENOMEM; 2746 SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n", 2747 fc_port->port_hdl); 2748 goto out; 2749 } 2750 2751 port_quiesce_ctx->quiesce_count = 0; 2752 port_quiesce_ctx->ctx = ctx; 2753 port_quiesce_ctx->cb_func = cb_func; 2754 2755 /* 2756 * Quiesce the LS queue. 2757 */ 2758 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx, 2759 nvmf_fc_adm_queue_quiesce_cb); 2760 if (err != 0) { 2761 SPDK_ERRLOG("Failed to quiesce the LS queue.\n"); 2762 goto out; 2763 } 2764 port_quiesce_ctx->quiesce_count++; 2765 2766 /* 2767 * Quiesce the IO queues. 2768 */ 2769 for (i = 0; i < fc_port->num_io_queues; i++) { 2770 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i], 2771 port_quiesce_ctx, 2772 nvmf_fc_adm_queue_quiesce_cb); 2773 if (err != 0) { 2774 DEV_VERIFY(0); 2775 SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id); 2776 } 2777 port_quiesce_ctx->quiesce_count++; 2778 } 2779 2780 out: 2781 if (port_quiesce_ctx && err != 0) { 2782 free(port_quiesce_ctx); 2783 } 2784 return err; 2785 } 2786 2787 /* 2788 * Initialize and add a HW port entry to the global 2789 * HW port list. 2790 */ 2791 static void 2792 nvmf_fc_adm_evnt_hw_port_init(void *arg) 2793 { 2794 ASSERT_SPDK_FC_MAIN_THREAD(); 2795 struct spdk_nvmf_fc_port *fc_port = NULL; 2796 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2797 struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *) 2798 api_data->api_args; 2799 int err = 0; 2800 2801 if (args->io_queue_cnt > spdk_env_get_core_count()) { 2802 SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle); 2803 err = EINVAL; 2804 goto abort_port_init; 2805 } 2806 2807 /* 2808 * 1. Check for duplicate initialization. 2809 */ 2810 fc_port = nvmf_fc_port_lookup(args->port_handle); 2811 if (fc_port != NULL) { 2812 SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle); 2813 goto abort_port_init; 2814 } 2815 2816 /* 2817 * 2. Get the memory to instantiate a fc port. 2818 */ 2819 fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) + 2820 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp))); 2821 if (fc_port == NULL) { 2822 SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle); 2823 err = -ENOMEM; 2824 goto abort_port_init; 2825 } 2826 2827 /* assign the io_queues array */ 2828 fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof( 2829 struct spdk_nvmf_fc_port)); 2830 2831 /* 2832 * 3. Initialize the contents for the FC-port 2833 */ 2834 err = nvmf_fc_adm_hw_port_data_init(fc_port, args); 2835 2836 if (err != 0) { 2837 SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle); 2838 DEV_VERIFY(!"Data initialization failed for fc_port"); 2839 goto abort_port_init; 2840 } 2841 2842 /* 2843 * 4. Add this port to the global fc port list in the library. 2844 */ 2845 nvmf_fc_port_add(fc_port); 2846 2847 abort_port_init: 2848 if (err && fc_port) { 2849 free(fc_port); 2850 } 2851 if (api_data->cb_func != NULL) { 2852 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err); 2853 } 2854 2855 free(arg); 2856 2857 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n", 2858 args->port_handle, err); 2859 } 2860 2861 static void 2862 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp) 2863 { 2864 struct spdk_nvmf_fc_abts_ctx *ctx; 2865 struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL; 2866 2867 TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) { 2868 TAILQ_REMOVE(&hwqp->sync_cbs, args, link); 2869 ctx = args->cb_info.cb_data; 2870 if (ctx) { 2871 if (++ctx->hwqps_responded == ctx->num_hwqps) { 2872 free(ctx->sync_poller_args); 2873 free(ctx->abts_poller_args); 2874 free(ctx); 2875 } 2876 } 2877 } 2878 } 2879 2880 static void 2881 nvmf_fc_adm_evnt_hw_port_free(void *arg) 2882 { 2883 ASSERT_SPDK_FC_MAIN_THREAD(); 2884 int err = 0, i; 2885 struct spdk_nvmf_fc_port *fc_port = NULL; 2886 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2887 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2888 struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *) 2889 api_data->api_args; 2890 2891 fc_port = nvmf_fc_port_lookup(args->port_handle); 2892 if (!fc_port) { 2893 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2894 err = -EINVAL; 2895 goto out; 2896 } 2897 2898 if (!TAILQ_EMPTY(&fc_port->nport_list)) { 2899 SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle); 2900 err = -EIO; 2901 goto out; 2902 } 2903 2904 /* Clean up and free fc_port */ 2905 hwqp = &fc_port->ls_queue; 2906 nvmf_fc_adm_hwqp_clean_sync_cb(hwqp); 2907 rte_hash_free(hwqp->connection_list_hash); 2908 rte_hash_free(hwqp->rport_list_hash); 2909 2910 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2911 hwqp = &fc_port->io_queues[i]; 2912 2913 nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]); 2914 rte_hash_free(hwqp->connection_list_hash); 2915 rte_hash_free(hwqp->rport_list_hash); 2916 } 2917 2918 nvmf_fc_port_remove(fc_port); 2919 free(fc_port); 2920 out: 2921 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n", 2922 args->port_handle, err); 2923 if (api_data->cb_func != NULL) { 2924 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err); 2925 } 2926 2927 free(arg); 2928 } 2929 2930 /* 2931 * Online a HW port. 2932 */ 2933 static void 2934 nvmf_fc_adm_evnt_hw_port_online(void *arg) 2935 { 2936 ASSERT_SPDK_FC_MAIN_THREAD(); 2937 struct spdk_nvmf_fc_port *fc_port = NULL; 2938 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2939 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2940 struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *) 2941 api_data->api_args; 2942 int i = 0; 2943 int err = 0; 2944 2945 fc_port = nvmf_fc_port_lookup(args->port_handle); 2946 if (fc_port) { 2947 /* Set the port state to online */ 2948 err = nvmf_fc_port_set_online(fc_port); 2949 if (err != 0) { 2950 SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err); 2951 DEV_VERIFY(!"Hw port online failed"); 2952 goto out; 2953 } 2954 2955 hwqp = &fc_port->ls_queue; 2956 hwqp->context = NULL; 2957 (void)nvmf_fc_hwqp_set_online(hwqp); 2958 2959 /* Cycle through all the io queues and setup a hwqp poller for each. */ 2960 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2961 hwqp = &fc_port->io_queues[i]; 2962 hwqp->context = NULL; 2963 (void)nvmf_fc_hwqp_set_online(hwqp); 2964 nvmf_fc_poll_group_add_hwqp(hwqp); 2965 } 2966 } else { 2967 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2968 err = -EINVAL; 2969 } 2970 2971 out: 2972 if (api_data->cb_func != NULL) { 2973 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err); 2974 } 2975 2976 free(arg); 2977 2978 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle, 2979 err); 2980 } 2981 2982 static void 2983 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status) 2984 { 2985 int err = 0; 2986 struct spdk_nvmf_fc_port *fc_port = NULL; 2987 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx; 2988 struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args; 2989 2990 if (--remove_hwqp_args->pending_remove_hwqp) { 2991 return; 2992 } 2993 2994 fc_port = nvmf_fc_port_lookup(args->port_handle); 2995 if (!fc_port) { 2996 err = -EINVAL; 2997 SPDK_ERRLOG("fc_port not found.\n"); 2998 goto out; 2999 } 3000 3001 /* 3002 * Delete all the nports. Ideally, the nports should have been purged 3003 * before the offline event, in which case, only a validation is required. 3004 */ 3005 nvmf_fc_adm_hw_port_offline_nport_delete(fc_port); 3006 out: 3007 if (remove_hwqp_args->cb_fn) { 3008 remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3009 } 3010 3011 free(remove_hwqp_args); 3012 } 3013 3014 /* 3015 * Offline a HW port. 3016 */ 3017 static void 3018 nvmf_fc_adm_evnt_hw_port_offline(void *arg) 3019 { 3020 ASSERT_SPDK_FC_MAIN_THREAD(); 3021 struct spdk_nvmf_fc_port *fc_port = NULL; 3022 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 3023 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3024 struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *) 3025 api_data->api_args; 3026 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args; 3027 int i = 0; 3028 int err = 0; 3029 3030 fc_port = nvmf_fc_port_lookup(args->port_handle); 3031 if (fc_port) { 3032 /* Set the port state to offline, if it is not already. */ 3033 err = nvmf_fc_port_set_offline(fc_port); 3034 if (err != 0) { 3035 SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err); 3036 err = 0; 3037 goto out; 3038 } 3039 3040 remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args)); 3041 if (!remove_hwqp_args) { 3042 SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n"); 3043 err = -ENOMEM; 3044 goto out; 3045 } 3046 remove_hwqp_args->cb_fn = api_data->cb_func; 3047 remove_hwqp_args->cb_args = api_data->api_args; 3048 remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues; 3049 3050 hwqp = &fc_port->ls_queue; 3051 (void)nvmf_fc_hwqp_set_offline(hwqp); 3052 3053 /* Remove poller for all the io queues. */ 3054 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 3055 hwqp = &fc_port->io_queues[i]; 3056 (void)nvmf_fc_hwqp_set_offline(hwqp); 3057 nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb, 3058 remove_hwqp_args); 3059 } 3060 3061 free(arg); 3062 3063 /* Wait until all the hwqps are removed from poll groups. */ 3064 return; 3065 } else { 3066 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3067 err = -EINVAL; 3068 } 3069 out: 3070 if (api_data->cb_func != NULL) { 3071 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3072 } 3073 3074 free(arg); 3075 3076 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle, 3077 err); 3078 } 3079 3080 struct nvmf_fc_add_rem_listener_ctx { 3081 struct spdk_nvmf_subsystem *subsystem; 3082 bool add_listener; 3083 struct spdk_nvme_transport_id trid; 3084 }; 3085 3086 static void 3087 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3088 { 3089 ASSERT_SPDK_FC_MAIN_THREAD(); 3090 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3091 free(ctx); 3092 } 3093 3094 static void 3095 nvmf_fc_adm_listen_done(void *cb_arg, int status) 3096 { 3097 ASSERT_SPDK_FC_MAIN_THREAD(); 3098 struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg; 3099 3100 if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) { 3101 SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn); 3102 free(ctx); 3103 } 3104 } 3105 3106 static void 3107 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3108 { 3109 ASSERT_SPDK_FC_MAIN_THREAD(); 3110 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3111 3112 if (ctx->add_listener) { 3113 spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx); 3114 } else { 3115 spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid); 3116 nvmf_fc_adm_listen_done(ctx, 0); 3117 } 3118 } 3119 3120 static int 3121 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add) 3122 { 3123 struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt(); 3124 struct spdk_nvmf_subsystem *subsystem; 3125 struct spdk_nvmf_listen_opts opts; 3126 3127 if (!tgt) { 3128 SPDK_ERRLOG("No nvmf target defined\n"); 3129 return -EINVAL; 3130 } 3131 3132 spdk_nvmf_listen_opts_init(&opts, sizeof(opts)); 3133 3134 subsystem = spdk_nvmf_subsystem_get_first(tgt); 3135 while (subsystem) { 3136 struct nvmf_fc_add_rem_listener_ctx *ctx; 3137 3138 if (spdk_nvmf_subsystem_any_listener_allowed(subsystem) == true) { 3139 ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx)); 3140 if (ctx) { 3141 ctx->add_listener = add; 3142 ctx->subsystem = subsystem; 3143 nvmf_fc_create_trid(&ctx->trid, 3144 nport->fc_nodename.u.wwn, 3145 nport->fc_portname.u.wwn); 3146 3147 if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) { 3148 SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n", 3149 ctx->trid.traddr); 3150 free(ctx); 3151 } else if (spdk_nvmf_subsystem_pause(subsystem, 3152 0, 3153 nvmf_fc_adm_subsystem_paused_cb, 3154 ctx)) { 3155 SPDK_ERRLOG("Failed to pause subsystem: %s\n", 3156 subsystem->subnqn); 3157 free(ctx); 3158 } 3159 } 3160 } 3161 3162 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 3163 } 3164 3165 return 0; 3166 } 3167 3168 /* 3169 * Create a Nport. 3170 */ 3171 static void 3172 nvmf_fc_adm_evnt_nport_create(void *arg) 3173 { 3174 ASSERT_SPDK_FC_MAIN_THREAD(); 3175 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3176 struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *) 3177 api_data->api_args; 3178 struct spdk_nvmf_fc_nport *nport = NULL; 3179 struct spdk_nvmf_fc_port *fc_port = NULL; 3180 int err = 0; 3181 3182 /* 3183 * Get the physical port. 3184 */ 3185 fc_port = nvmf_fc_port_lookup(args->port_handle); 3186 if (fc_port == NULL) { 3187 err = -EINVAL; 3188 goto out; 3189 } 3190 3191 /* 3192 * Check for duplicate initialization. 3193 */ 3194 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3195 if (nport != NULL) { 3196 SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle, 3197 args->port_handle); 3198 err = -EINVAL; 3199 goto out; 3200 } 3201 3202 /* 3203 * Get the memory to instantiate a fc nport. 3204 */ 3205 nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport)); 3206 if (nport == NULL) { 3207 SPDK_ERRLOG("Failed to allocate memory for nport %d.\n", 3208 args->nport_handle); 3209 err = -ENOMEM; 3210 goto out; 3211 } 3212 3213 /* 3214 * Initialize the contents for the nport 3215 */ 3216 nport->nport_hdl = args->nport_handle; 3217 nport->port_hdl = args->port_handle; 3218 nport->nport_state = SPDK_NVMF_FC_OBJECT_CREATED; 3219 nport->fc_nodename = args->fc_nodename; 3220 nport->fc_portname = args->fc_portname; 3221 nport->d_id = args->d_id; 3222 nport->fc_port = nvmf_fc_port_lookup(args->port_handle); 3223 3224 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED); 3225 TAILQ_INIT(&nport->rem_port_list); 3226 nport->rport_count = 0; 3227 TAILQ_INIT(&nport->fc_associations); 3228 nport->assoc_count = 0; 3229 3230 /* 3231 * Populate the nport address (as listening address) to the nvmf subsystems. 3232 */ 3233 err = nvmf_fc_adm_add_rem_nport_listener(nport, true); 3234 3235 (void)nvmf_fc_port_add_nport(fc_port, nport); 3236 out: 3237 if (err && nport) { 3238 free(nport); 3239 } 3240 3241 if (api_data->cb_func != NULL) { 3242 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err); 3243 } 3244 3245 free(arg); 3246 } 3247 3248 static void 3249 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type, 3250 void *cb_args, int spdk_err) 3251 { 3252 ASSERT_SPDK_FC_MAIN_THREAD(); 3253 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args; 3254 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 3255 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 3256 int err = 0; 3257 uint16_t nport_hdl = 0; 3258 char log_str[256]; 3259 3260 /* 3261 * Assert on any delete failure. 3262 */ 3263 if (nport == NULL) { 3264 SPDK_ERRLOG("Nport delete callback returned null nport"); 3265 DEV_VERIFY(!"nport is null."); 3266 goto out; 3267 } 3268 3269 nport_hdl = nport->nport_hdl; 3270 if (0 != spdk_err) { 3271 SPDK_ERRLOG("Nport delete callback returned error. FC Port: " 3272 "%d, Nport: %d\n", 3273 nport->port_hdl, nport->nport_hdl); 3274 DEV_VERIFY(!"nport delete callback error."); 3275 } 3276 3277 /* 3278 * Free the nport if this is the last rport being deleted and 3279 * execute the callback(s). 3280 */ 3281 if (nvmf_fc_nport_has_no_rport(nport)) { 3282 if (0 != nport->assoc_count) { 3283 SPDK_ERRLOG("association count != 0\n"); 3284 DEV_VERIFY(!"association count != 0"); 3285 } 3286 3287 err = nvmf_fc_port_remove_nport(nport->fc_port, nport); 3288 if (0 != err) { 3289 SPDK_ERRLOG("Nport delete callback: Failed to remove " 3290 "nport from nport list. FC Port:%d Nport:%d\n", 3291 nport->port_hdl, nport->nport_hdl); 3292 } 3293 /* Free the nport */ 3294 free(nport); 3295 3296 if (cb_func != NULL) { 3297 (void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err); 3298 } 3299 free(cb_data); 3300 } 3301 out: 3302 snprintf(log_str, sizeof(log_str), 3303 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n", 3304 port_handle, nport_hdl, event_type, spdk_err); 3305 3306 if (err != 0) { 3307 SPDK_ERRLOG("%s", log_str); 3308 } else { 3309 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3310 } 3311 } 3312 3313 /* 3314 * Delete Nport. 3315 */ 3316 static void 3317 nvmf_fc_adm_evnt_nport_delete(void *arg) 3318 { 3319 ASSERT_SPDK_FC_MAIN_THREAD(); 3320 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3321 struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *) 3322 api_data->api_args; 3323 struct spdk_nvmf_fc_nport *nport = NULL; 3324 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL; 3325 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3326 int err = 0; 3327 uint32_t rport_cnt = 0; 3328 int rc = 0; 3329 3330 /* 3331 * Make sure that the nport exists. 3332 */ 3333 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3334 if (nport == NULL) { 3335 SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle, 3336 args->port_handle); 3337 err = -EINVAL; 3338 goto out; 3339 } 3340 3341 /* 3342 * Allocate memory for callback data. 3343 */ 3344 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data)); 3345 if (NULL == cb_data) { 3346 SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle); 3347 err = -ENOMEM; 3348 goto out; 3349 } 3350 3351 cb_data->nport = nport; 3352 cb_data->port_handle = args->port_handle; 3353 cb_data->fc_cb_func = api_data->cb_func; 3354 cb_data->fc_cb_ctx = args->cb_ctx; 3355 3356 /* 3357 * Begin nport tear down 3358 */ 3359 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3360 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3361 } else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3362 /* 3363 * Deletion of this nport already in progress. Register callback 3364 * and return. 3365 */ 3366 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3367 err = -ENODEV; 3368 goto out; 3369 } else { 3370 /* nport partially created/deleted */ 3371 DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3372 DEV_VERIFY(0 != "Nport in zombie state"); 3373 err = -ENODEV; 3374 goto out; 3375 } 3376 3377 /* 3378 * Remove this nport from listening addresses across subsystems 3379 */ 3380 rc = nvmf_fc_adm_add_rem_nport_listener(nport, false); 3381 3382 if (0 != rc) { 3383 err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 3384 SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n", 3385 nport->nport_hdl); 3386 goto out; 3387 } 3388 3389 /* 3390 * Delete all the remote ports (if any) for the nport 3391 */ 3392 /* TODO - Need to do this with a "first" and a "next" accessor function 3393 * for completeness. Look at app-subsystem as examples. 3394 */ 3395 if (nvmf_fc_nport_has_no_rport(nport)) { 3396 /* No rports to delete. Complete the nport deletion. */ 3397 nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0); 3398 goto out; 3399 } 3400 3401 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3402 struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc( 3403 1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args)); 3404 3405 if (it_del_args == NULL) { 3406 err = -ENOMEM; 3407 SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n", 3408 rport_iter->rpi, rport_iter->s_id); 3409 DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory"); 3410 goto out; 3411 } 3412 3413 rport_cnt++; 3414 it_del_args->port_handle = nport->port_hdl; 3415 it_del_args->nport_handle = nport->nport_hdl; 3416 it_del_args->cb_ctx = (void *)cb_data; 3417 it_del_args->rpi = rport_iter->rpi; 3418 it_del_args->s_id = rport_iter->s_id; 3419 3420 err = nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args, 3421 nvmf_fc_adm_delete_nport_cb); 3422 if (err) { 3423 free(it_del_args); 3424 } 3425 } 3426 3427 out: 3428 /* On failure, execute the callback function now */ 3429 if ((err != 0) || (rc != 0)) { 3430 SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, " 3431 "rport_cnt:%d rc:%d.\n", 3432 args->nport_handle, err, args->port_handle, 3433 rport_cnt, rc); 3434 if (cb_data) { 3435 free(cb_data); 3436 } 3437 if (api_data->cb_func != NULL) { 3438 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err); 3439 } 3440 3441 } else { 3442 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3443 "NPort %d delete done successfully, fc port:%d. " 3444 "rport_cnt:%d\n", 3445 args->nport_handle, args->port_handle, rport_cnt); 3446 } 3447 3448 free(arg); 3449 } 3450 3451 /* 3452 * Process an PRLI/IT add. 3453 */ 3454 static void 3455 nvmf_fc_adm_evnt_i_t_add(void *arg) 3456 { 3457 ASSERT_SPDK_FC_MAIN_THREAD(); 3458 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3459 struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *) 3460 api_data->api_args; 3461 struct spdk_nvmf_fc_nport *nport = NULL; 3462 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3463 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3464 int err = 0; 3465 3466 /* 3467 * Make sure the nport port exists. 3468 */ 3469 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3470 if (nport == NULL) { 3471 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3472 err = -EINVAL; 3473 goto out; 3474 } 3475 3476 /* 3477 * Check for duplicate i_t_add. 3478 */ 3479 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3480 if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) { 3481 SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n", 3482 args->nport_handle, rport_iter->s_id, rport_iter->rpi); 3483 err = -EEXIST; 3484 goto out; 3485 } 3486 } 3487 3488 /* 3489 * Get the memory to instantiate the remote port 3490 */ 3491 rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info)); 3492 if (rport == NULL) { 3493 SPDK_ERRLOG("Memory allocation for rem port failed.\n"); 3494 err = -ENOMEM; 3495 goto out; 3496 } 3497 3498 /* 3499 * Initialize the contents for the rport 3500 */ 3501 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED); 3502 rport->s_id = args->s_id; 3503 rport->rpi = args->rpi; 3504 rport->fc_nodename = args->fc_nodename; 3505 rport->fc_portname = args->fc_portname; 3506 3507 /* 3508 * Add remote port to nport 3509 */ 3510 if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) { 3511 DEV_VERIFY(!"Error while adding rport to list"); 3512 }; 3513 3514 /* 3515 * TODO: Do we validate the initiators service parameters? 3516 */ 3517 3518 /* 3519 * Get the targets service parameters from the library 3520 * to return back to the driver. 3521 */ 3522 args->target_prli_info = nvmf_fc_get_prli_service_params(); 3523 3524 out: 3525 if (api_data->cb_func != NULL) { 3526 /* 3527 * Passing pointer to the args struct as the first argument. 3528 * The cb_func should handle this appropriately. 3529 */ 3530 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err); 3531 } 3532 3533 free(arg); 3534 3535 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3536 "IT add on nport %d done, rc = %d.\n", 3537 args->nport_handle, err); 3538 } 3539 3540 /** 3541 * Process a IT delete. 3542 */ 3543 static void 3544 nvmf_fc_adm_evnt_i_t_delete(void *arg) 3545 { 3546 ASSERT_SPDK_FC_MAIN_THREAD(); 3547 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3548 struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *) 3549 api_data->api_args; 3550 int rc = 0; 3551 struct spdk_nvmf_fc_nport *nport = NULL; 3552 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL; 3553 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3554 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3555 uint32_t num_rport = 0; 3556 char log_str[256]; 3557 3558 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle); 3559 3560 /* 3561 * Make sure the nport port exists. If it does not, error out. 3562 */ 3563 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3564 if (nport == NULL) { 3565 SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle); 3566 rc = -EINVAL; 3567 goto out; 3568 } 3569 3570 /* 3571 * Find this ITN / rport (remote port). 3572 */ 3573 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3574 num_rport++; 3575 if ((rport_iter->s_id == args->s_id) && 3576 (rport_iter->rpi == args->rpi) && 3577 (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) { 3578 rport = rport_iter; 3579 break; 3580 } 3581 } 3582 3583 /* 3584 * We should find either zero or exactly one rport. 3585 * 3586 * If we find zero rports, that means that a previous request has 3587 * removed the rport by the time we reached here. In this case, 3588 * simply return out. 3589 */ 3590 if (rport == NULL) { 3591 rc = -ENODEV; 3592 goto out; 3593 } 3594 3595 /* 3596 * We have the rport slated for deletion. At this point clean up 3597 * any LS requests that are sitting in the pending list. Do this 3598 * first, then, set the states of the rport so that new LS requests 3599 * are not accepted. Then start the cleanup. 3600 */ 3601 nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport); 3602 3603 /* 3604 * We have found exactly one rport. Allocate memory for callback data. 3605 */ 3606 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data)); 3607 if (NULL == cb_data) { 3608 SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle); 3609 rc = -ENOMEM; 3610 goto out; 3611 } 3612 3613 cb_data->nport = nport; 3614 cb_data->rport = rport; 3615 cb_data->port_handle = args->port_handle; 3616 cb_data->fc_cb_func = api_data->cb_func; 3617 cb_data->fc_cb_ctx = args->cb_ctx; 3618 3619 /* 3620 * Validate rport object state. 3621 */ 3622 if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3623 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3624 } else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3625 /* 3626 * Deletion of this rport already in progress. Register callback 3627 * and return. 3628 */ 3629 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3630 rc = -ENODEV; 3631 goto out; 3632 } else { 3633 /* rport partially created/deleted */ 3634 DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3635 DEV_VERIFY(!"Invalid rport_state"); 3636 rc = -ENODEV; 3637 goto out; 3638 } 3639 3640 /* 3641 * We have successfully found a rport to delete. Call 3642 * nvmf_fc_i_t_delete_assoc(), which will perform further 3643 * IT-delete processing as well as free the cb_data. 3644 */ 3645 nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb, 3646 (void *)cb_data); 3647 3648 out: 3649 if (rc != 0) { 3650 /* 3651 * We have entered here because either we encountered an 3652 * error, or we did not find a rport to delete. 3653 * As a result, we will not call the function 3654 * nvmf_fc_i_t_delete_assoc() for further IT-delete 3655 * processing. Therefore, execute the callback function now. 3656 */ 3657 if (cb_data) { 3658 free(cb_data); 3659 } 3660 if (api_data->cb_func != NULL) { 3661 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc); 3662 } 3663 } 3664 3665 snprintf(log_str, sizeof(log_str), 3666 "IT delete on nport:%d end. num_rport:%d rc = %d.\n", 3667 args->nport_handle, num_rport, rc); 3668 3669 if (rc != 0) { 3670 SPDK_ERRLOG("%s", log_str); 3671 } else { 3672 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3673 } 3674 3675 free(arg); 3676 } 3677 3678 /* 3679 * Process ABTS received 3680 */ 3681 static void 3682 nvmf_fc_adm_evnt_abts_recv(void *arg) 3683 { 3684 ASSERT_SPDK_FC_MAIN_THREAD(); 3685 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3686 struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args; 3687 struct spdk_nvmf_fc_nport *nport = NULL; 3688 int err = 0; 3689 3690 SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi, 3691 args->oxid, args->rxid); 3692 3693 /* 3694 * 1. Make sure the nport port exists. 3695 */ 3696 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3697 if (nport == NULL) { 3698 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3699 err = -EINVAL; 3700 goto out; 3701 } 3702 3703 /* 3704 * 2. If the nport is in the process of being deleted, drop the ABTS. 3705 */ 3706 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3707 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3708 "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n", 3709 args->rpi, args->oxid, args->rxid); 3710 err = 0; 3711 goto out; 3712 3713 } 3714 3715 /* 3716 * 3. Pass the received ABTS-LS to the library for handling. 3717 */ 3718 nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid); 3719 3720 out: 3721 if (api_data->cb_func != NULL) { 3722 /* 3723 * Passing pointer to the args struct as the first argument. 3724 * The cb_func should handle this appropriately. 3725 */ 3726 (void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err); 3727 } else { 3728 /* No callback set, free the args */ 3729 free(args); 3730 } 3731 3732 free(arg); 3733 } 3734 3735 /* 3736 * Callback function for hw port quiesce. 3737 */ 3738 static void 3739 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err) 3740 { 3741 ASSERT_SPDK_FC_MAIN_THREAD(); 3742 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx = 3743 (struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx; 3744 struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args; 3745 spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func; 3746 struct spdk_nvmf_fc_queue_dump_info dump_info; 3747 struct spdk_nvmf_fc_port *fc_port = NULL; 3748 char *dump_buf = NULL; 3749 uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE; 3750 3751 /* 3752 * Free the callback context struct. 3753 */ 3754 free(ctx); 3755 3756 if (err != 0) { 3757 SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle); 3758 goto out; 3759 } 3760 3761 if (args->dump_queues == false) { 3762 /* 3763 * Queues need not be dumped. 3764 */ 3765 goto out; 3766 } 3767 3768 SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle); 3769 3770 /* 3771 * Get the fc port. 3772 */ 3773 fc_port = nvmf_fc_port_lookup(args->port_handle); 3774 if (fc_port == NULL) { 3775 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3776 err = -EINVAL; 3777 goto out; 3778 } 3779 3780 /* 3781 * Allocate memory for the dump buffer. 3782 * This memory will be freed by FCT. 3783 */ 3784 dump_buf = (char *)calloc(1, dump_buf_size); 3785 if (dump_buf == NULL) { 3786 err = -ENOMEM; 3787 SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle); 3788 goto out; 3789 } 3790 *args->dump_buf = (uint32_t *)dump_buf; 3791 dump_info.buffer = dump_buf; 3792 dump_info.offset = 0; 3793 3794 /* 3795 * Add the dump reason to the top of the buffer. 3796 */ 3797 nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason); 3798 3799 /* 3800 * Dump the hwqp. 3801 */ 3802 nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues, 3803 fc_port->num_io_queues, &dump_info); 3804 3805 out: 3806 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n", 3807 args->port_handle, args->dump_queues, err); 3808 3809 if (cb_func != NULL) { 3810 (void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3811 } 3812 } 3813 3814 /* 3815 * HW port reset 3816 3817 */ 3818 static void 3819 nvmf_fc_adm_evnt_hw_port_reset(void *arg) 3820 { 3821 ASSERT_SPDK_FC_MAIN_THREAD(); 3822 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3823 struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *) 3824 api_data->api_args; 3825 struct spdk_nvmf_fc_port *fc_port = NULL; 3826 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL; 3827 int err = 0; 3828 3829 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle); 3830 3831 /* 3832 * Make sure the physical port exists. 3833 */ 3834 fc_port = nvmf_fc_port_lookup(args->port_handle); 3835 if (fc_port == NULL) { 3836 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3837 err = -EINVAL; 3838 goto out; 3839 } 3840 3841 /* 3842 * Save the reset event args and the callback in a context struct. 3843 */ 3844 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx)); 3845 3846 if (ctx == NULL) { 3847 err = -ENOMEM; 3848 SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle); 3849 goto fail; 3850 } 3851 3852 ctx->reset_args = args; 3853 ctx->reset_cb_func = api_data->cb_func; 3854 3855 /* 3856 * Quiesce the hw port. 3857 */ 3858 err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb); 3859 if (err != 0) { 3860 goto fail; 3861 } 3862 3863 /* 3864 * Once the ports are successfully quiesced the reset processing 3865 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb 3866 */ 3867 return; 3868 fail: 3869 free(ctx); 3870 3871 out: 3872 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle, 3873 err); 3874 3875 if (api_data->cb_func != NULL) { 3876 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3877 } 3878 3879 free(arg); 3880 } 3881 3882 static inline void 3883 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args) 3884 { 3885 if (nvmf_fc_get_main_thread()) { 3886 spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args); 3887 } 3888 } 3889 3890 /* 3891 * Queue up an event in the SPDK main threads event queue. 3892 * Used by the FC driver to notify the SPDK main thread of FC related events. 3893 */ 3894 int 3895 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args, 3896 spdk_nvmf_fc_callback cb_func) 3897 { 3898 int err = 0; 3899 struct spdk_nvmf_fc_adm_api_data *api_data = NULL; 3900 spdk_msg_fn event_fn = NULL; 3901 3902 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type); 3903 3904 if (event_type >= SPDK_FC_EVENT_MAX) { 3905 SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type); 3906 err = -EINVAL; 3907 goto done; 3908 } 3909 3910 if (args == NULL) { 3911 SPDK_ERRLOG("Null args for event %d.\n", event_type); 3912 err = -EINVAL; 3913 goto done; 3914 } 3915 3916 api_data = calloc(1, sizeof(*api_data)); 3917 3918 if (api_data == NULL) { 3919 SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type); 3920 err = -ENOMEM; 3921 goto done; 3922 } 3923 3924 api_data->api_args = args; 3925 api_data->cb_func = cb_func; 3926 3927 switch (event_type) { 3928 case SPDK_FC_HW_PORT_INIT: 3929 event_fn = nvmf_fc_adm_evnt_hw_port_init; 3930 break; 3931 3932 case SPDK_FC_HW_PORT_FREE: 3933 event_fn = nvmf_fc_adm_evnt_hw_port_free; 3934 break; 3935 3936 case SPDK_FC_HW_PORT_ONLINE: 3937 event_fn = nvmf_fc_adm_evnt_hw_port_online; 3938 break; 3939 3940 case SPDK_FC_HW_PORT_OFFLINE: 3941 event_fn = nvmf_fc_adm_evnt_hw_port_offline; 3942 break; 3943 3944 case SPDK_FC_NPORT_CREATE: 3945 event_fn = nvmf_fc_adm_evnt_nport_create; 3946 break; 3947 3948 case SPDK_FC_NPORT_DELETE: 3949 event_fn = nvmf_fc_adm_evnt_nport_delete; 3950 break; 3951 3952 case SPDK_FC_IT_ADD: 3953 event_fn = nvmf_fc_adm_evnt_i_t_add; 3954 break; 3955 3956 case SPDK_FC_IT_DELETE: 3957 event_fn = nvmf_fc_adm_evnt_i_t_delete; 3958 break; 3959 3960 case SPDK_FC_ABTS_RECV: 3961 event_fn = nvmf_fc_adm_evnt_abts_recv; 3962 break; 3963 3964 case SPDK_FC_HW_PORT_RESET: 3965 event_fn = nvmf_fc_adm_evnt_hw_port_reset; 3966 break; 3967 3968 case SPDK_FC_UNRECOVERABLE_ERR: 3969 default: 3970 SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type); 3971 err = -EINVAL; 3972 break; 3973 } 3974 3975 done: 3976 3977 if (err == 0) { 3978 assert(event_fn != NULL); 3979 nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data); 3980 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type); 3981 } else { 3982 SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err); 3983 if (api_data) { 3984 free(api_data); 3985 } 3986 } 3987 3988 return err; 3989 } 3990 3991 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc); 3992 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api) 3993 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc) 3994