/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Intel Corporation.
 * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe_FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#include "spdk_internal/trace_defs.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
	DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};

#define HWQP_CONN_TABLE_SIZE	8192
#define HWQP_RPI_TABLE_SIZE	4096

SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FUSED_WAITING",
					TRACE_FC_REQ_FUSED_WAITING,
					OWNER_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
}
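/*
 * The tracepoints registered above let an individual FC request be followed
 * through its state machine (see nvmf_fc_record_req_trace_point() below).
 * Captured traces can typically be decoded offline, e.g. with SPDK's
 * spdk_trace tool; the 'r' object type identifies FC IO objects in the
 * trace stream. (The decoding tool is an assumption about the surrounding
 * build environment, not something this file enforces.)
 */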
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};


typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Callback function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	struct spdk_poller *accept_poller;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req));
	}
}

static struct rte_hash *
nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
{
	struct rte_hash_parameters hash_params = { 0 };

	hash_params.entries = num_entries;
	hash_params.key_len = key_len;
	hash_params.name = name;

	return rte_hash_create(&hash_params);
}
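/*
 * nvmf_fc_create_hash_table() wraps DPDK's rte_hash library. A minimal
 * lookup sketch (hypothetical conn_id value, mirroring the real lookup in
 * nvmf_fc_hwqp_handle_request() below):
 *
 *	struct spdk_nvmf_fc_conn *fc_conn = NULL;
 *	uint64_t conn_id = 0x1234;
 *
 *	if (rte_hash_lookup_data(hwqp->connection_list_hash,
 *				 (void *)&conn_id, (void **)&fc_conn) >= 0) {
 *		// found: fc_conn points at the stored connection
 *	}
 *
 * Keys must match the key_len passed to rte_hash_create(): sizeof(uint64_t)
 * for connection IDs and sizeof(uint16_t) for RPIs.
 */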
void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;
}

int
nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	uint32_t i, qd;
	struct spdk_nvmf_fc_pooled_request *req;

	/*
	 * Create more fc-requests than the actual SQ size.
	 * This is to handle race conditions where the target driver may send
	 * back an RSP and, before the target driver gets to process the CQE
	 * for the RSP, the initiator may have sent a new command.
	 * Depending on the load on the HWQP, there is a slim possibility
	 * that the target reaps the RQE corresponding to the new
	 * command before processing the CQE corresponding to the RSP.
	 */
	qd = fc_conn->max_queue_depth * 2;

	STAILQ_INIT(&fc_conn->pool_queue);
	fc_conn->pool_memory = calloc(qd, sizeof(struct spdk_nvmf_fc_request));
	if (!fc_conn->pool_memory) {
		SPDK_ERRLOG("create fc req ring objects failed\n");
		goto error;
	}
	fc_conn->pool_size = qd;
	fc_conn->pool_free_elems = qd;

	/* Initialise value in ring objects and link the objects */
	for (i = 0; i < qd; i++) {
		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
				i * sizeof(struct spdk_nvmf_fc_request));

		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
	}
	return 0;
error:
	nvmf_fc_free_conn_reqpool(fc_conn);
	return -1;
}
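/*
 * Sizing example (illustrative numbers, not a requirement of this code):
 * with max_queue_depth = 128, the pool holds qd = 256 spdk_nvmf_fc_request
 * objects, so a response whose CQE has not yet been reaped never starves a
 * newly arrived command of a request slot.
 */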
static inline struct spdk_nvmf_fc_request *
nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
{
	struct spdk_nvmf_fc_request *fc_req;
	struct spdk_nvmf_fc_pooled_request *pooled_req;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;

	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
	if (!pooled_req) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
	fc_conn->pool_free_elems -= 1;

	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);

	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Log an error for debug purpose. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* set the magic to mark req as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);

	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
	fc_conn->pool_free_elems += 1;
}

static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}

int
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	char name[64];

	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	TAILQ_INIT(&hwqp->in_use_reqs);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);

	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
				     sizeof(uint64_t));
	if (!hwqp->connection_list_hash) {
		SPDK_ERRLOG("Failed to create connection hash table.\n");
		return -ENOMEM;
	}

	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
	if (!hwqp->rport_list_hash) {
		SPDK_ERRLOG("Failed to create rpi hash table.\n");
		rte_hash_free(hwqp->connection_list_hash);
		return -ENOMEM;
	}

	/* Init low level driver queues */
	nvmf_fc_init_q(hwqp);
	return 0;
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t max_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	/* find poll group with least number of hwqp's assigned to it */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < max_count) {
			ret_fgroup = fgroup;
			max_count = fgroup->hwqp_count;
		}
	}

	if (ret_fgroup) {
		ret_fgroup->hwqp_count++;
		hwqp->thread = ret_fgroup->group.group->thread;
		hwqp->fgroup = ret_fgroup;
	}

	pthread_mutex_unlock(&g_nvmf_ftransport->lock);

	return ret_fgroup;
}
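/*
 * Example of the least-loaded selection above (illustrative only): with
 * poll groups currently carrying 2, 1 and 3 hwqps, the scan settles on the
 * group with count 1, which becomes 2 after the assignment. Ties go to the
 * earliest group in the list.
 */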
bool
nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
{
	struct spdk_nvmf_fc_poll_group *tmp;
	bool rc = false;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
		if (tmp == fgroup) {
			rc = true;
			break;
		}
	}
	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
	return rc;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

static void
nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;

	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
	} else {
		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
	}

	if (args->cb_fn) {
		args->cb_fn(args->cb_ctx, 0);
	}

	free(args);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		if (tmp != hwqp->fgroup) {
			/* Poll group was already removed. Don't bother. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp = hwqp;
		args->cb_fn = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}

/*
 * Note: This needs to be used only on main poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}
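/*
 * ABTS handling overview: an incoming ABTS is broadcast to every hwqp that
 * carries a connection for the initiator's RPI. If no poller finds the
 * outstanding exchange (OX_ID), the code below syncs each involved queue
 * once, to flush completions still in flight, and then replays the ABTS
 * before the final accept/reject decision is made.
 */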
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
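/*
 * Final ABTS disposition: once every targeted hwqp has responded,
 * nvmf_fc_abts_handled_cb() below either retries after a one-time queue
 * sync (guarded by ctx->queue_synced) or transmits the BLS accept/reject
 * on the port's LS queue.
 */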
static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}

void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}
/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}

static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}
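/*
 * The lookup below resolves a frame's destination (d_id -> local nport) and
 * source (s_id -> remote port) in a single pass over the port's nport list;
 * it returns -ENOENT if either side is unknown, leaving *nport/*rport
 * untouched so callers can tell which lookup failed.
 */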
static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}

/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}
"Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n", 1154 ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl, 1155 nport_hdl); 1156 return true; 1157 } 1158 return false; 1159 } 1160 1161 static void 1162 nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp, 1163 struct spdk_nvmf_fc_ls_rqst *ls_rqst) 1164 { 1165 assert(ls_rqst); 1166 1167 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1168 1169 /* Return buffer to chip */ 1170 nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index); 1171 } 1172 1173 static int 1174 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp, 1175 struct spdk_nvmf_fc_nport *nport, 1176 struct spdk_nvmf_fc_remote_port_info *rport) 1177 { 1178 struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp; 1179 int num_deleted = 0; 1180 1181 assert(hwqp); 1182 assert(nport); 1183 assert(rport); 1184 1185 TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) { 1186 if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) { 1187 num_deleted++; 1188 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1189 } 1190 } 1191 return num_deleted; 1192 } 1193 1194 static void 1195 nvmf_fc_req_bdev_abort(void *arg1) 1196 { 1197 struct spdk_nvmf_fc_request *fc_req = arg1; 1198 struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr; 1199 int i; 1200 1201 /* Initial release - we don't have to abort Admin Queue or 1202 * Fabric commands. The AQ commands supported at this time are 1203 * Get-Log-Page, 1204 * Identify 1205 * Set Features 1206 * Get Features 1207 * AER -> Special case and handled differently. 1208 * Every one of the above Admin commands (except AER) run 1209 * to completion and so an Abort of such commands doesn't 1210 * make sense. 1211 */ 1212 /* The Fabric commands supported are 1213 * Property Set 1214 * Property Get 1215 * Connect -> Special case (async. handling). Not sure how to 1216 * handle at this point. Let it run to completion. 1217 */ 1218 for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) { 1219 if (ctrlr->aer_req[i] == &fc_req->req) { 1220 SPDK_NOTICELOG("Abort AER request\n"); 1221 nvmf_qpair_free_aer(fc_req->req.qpair); 1222 } 1223 } 1224 } 1225 1226 void 1227 nvmf_fc_request_abort_complete(void *arg1) 1228 { 1229 struct spdk_nvmf_fc_request *fc_req = 1230 (struct spdk_nvmf_fc_request *)arg1; 1231 struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp; 1232 struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL; 1233 TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs; 1234 1235 /* Make a copy of the cb list from fc_req */ 1236 TAILQ_INIT(&abort_cbs); 1237 TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link); 1238 1239 SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req, 1240 fc_req_state_strs[fc_req->state]); 1241 1242 _nvmf_fc_request_free(fc_req); 1243 1244 /* Request abort completed. 
void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}

void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted = true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
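/*
 * Request execution: an exchange (XCHG) and data buffers are reserved up
 * front; a WRITE first fetches data from the host (typically an XFER_RDY
 * issued by the LLD via nvmf_fc_recv_data()), while READ and non-data
 * commands go straight to the backend through spdk_nvmf_request_exec().
 * Either resource shortfall returns -EAGAIN so the command can be retried
 * later from the pending queue.
 */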
static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped. Return success to caller */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}

static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}
*)&rqst_conn_id, (void **)&fc_conn) < 0) { 1477 SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id); 1478 hwqp->counters.invalid_conn_err++; 1479 return -ENODEV; 1480 } 1481 1482 /* Validate s_id and d_id */ 1483 if (s_id != fc_conn->s_id) { 1484 hwqp->counters.rport_invalid++; 1485 SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id); 1486 return -ENODEV; 1487 } 1488 1489 if (d_id != fc_conn->d_id) { 1490 hwqp->counters.nport_invalid++; 1491 SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id); 1492 return -ENODEV; 1493 } 1494 1495 /* If association/connection is being deleted - return */ 1496 if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1497 SPDK_ERRLOG("Association %ld state = %d not valid\n", 1498 fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state); 1499 return -EACCES; 1500 } 1501 1502 if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1503 SPDK_ERRLOG("Connection %ld state = %d not valid\n", 1504 rqst_conn_id, fc_conn->conn_state); 1505 return -EACCES; 1506 } 1507 1508 if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) { 1509 SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n", 1510 rqst_conn_id, fc_conn->qpair.state); 1511 return -EACCES; 1512 } 1513 1514 /* Make sure xfer len is according to mdts */ 1515 if (from_be32(&cmd_iu->data_len) > 1516 hwqp->fgroup->group.transport->opts.max_io_size) { 1517 SPDK_ERRLOG("IO length requested is greater than MDTS\n"); 1518 return -EINVAL; 1519 } 1520 1521 /* allocate a request buffer */ 1522 fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn); 1523 if (fc_req == NULL) { 1524 return -ENOMEM; 1525 } 1526 1527 fc_req->req.length = from_be32(&cmd_iu->data_len); 1528 fc_req->req.qpair = &fc_conn->qpair; 1529 memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg)); 1530 fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd; 1531 fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp; 1532 fc_req->oxid = frame->ox_id; 1533 fc_req->oxid = from_be16(&fc_req->oxid); 1534 fc_req->rpi = fc_conn->rpi; 1535 fc_req->poller_lcore = hwqp->lcore_id; 1536 fc_req->poller_thread = hwqp->thread; 1537 fc_req->hwqp = hwqp; 1538 fc_req->fc_conn = fc_conn; 1539 fc_req->req.xfer = xfer; 1540 fc_req->s_id = s_id; 1541 fc_req->d_id = d_id; 1542 fc_req->csn = from_be32(&cmd_iu->cmnd_seq_num); 1543 nvmf_fc_set_vmid_priority(fc_req, frame); 1544 1545 nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT); 1546 1547 if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) { 1548 STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link); 1549 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING); 1550 } 1551 1552 return 0; 1553 } 1554 1555 /* 1556 * These functions are called from the FC LLD 1557 */ 1558 1559 void 1560 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req) 1561 { 1562 struct spdk_nvmf_fc_hwqp *hwqp; 1563 struct spdk_nvmf_transport_poll_group *group; 1564 1565 if (!fc_req) { 1566 return; 1567 } 1568 hwqp = fc_req->hwqp; 1569 1570 if (fc_req->xchg) { 1571 nvmf_fc_put_xchg(hwqp, fc_req->xchg); 1572 fc_req->xchg = NULL; 1573 } 1574 1575 /* Release IO buffers */ 1576 if (fc_req->req.data_from_pool) { 1577 group = &hwqp->fgroup->group; 1578 spdk_nvmf_request_free_buffers(&fc_req->req, group, 1579 group->transport); 1580 } 1581 fc_req->req.data = NULL; 1582 fc_req->req.iovcnt = 0; 1583 1584 /* Free Fc request */ 1585 nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req); 1586 } 1587 1588 void 1589 
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
		fc_req_state_strs[state] : unk_str);
}
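/*
 * Frame dispatch: LS (link service) frames are staged in the RQ buffer and
 * handed to the LS module, while FCP command frames become fc_requests via
 * nvmf_fc_hwqp_handle_request(). Anything else is counted as an unknown
 * frame and dropped.
 */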
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
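/*
 * Pending-queue processing below is bounded by a fixed budget (64 requests
 * per invocation) so a deep backlog on one hwqp cannot monopolize the
 * poller thread; requests that still fail with -EAGAIN simply remain queued
 * for the next pass.
 */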
Dropping\n"); 1766 /* increment invalid rport counter */ 1767 hwqp->counters.rport_invalid++; 1768 } 1769 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1770 continue; 1771 } 1772 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED || 1773 rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 1774 SPDK_ERRLOG("%s state not created. Dropping\n", 1775 nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ? 1776 "Nport" : "Rport"); 1777 nvmf_fc_release_ls_rqst(hwqp, ls_rqst); 1778 continue; 1779 } 1780 1781 ls_rqst->xchg = nvmf_fc_get_xri(hwqp); 1782 if (ls_rqst->xchg) { 1783 /* Got an XCHG */ 1784 TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link); 1785 /* Handover the request to LS module */ 1786 nvmf_fc_handle_ls_rqst(ls_rqst); 1787 } else { 1788 /* No more XCHGs. Stop processing. */ 1789 hwqp->counters.no_xchg++; 1790 return; 1791 } 1792 } 1793 } 1794 1795 int 1796 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req) 1797 { 1798 int rc = 0; 1799 struct spdk_nvmf_request *req = &fc_req->req; 1800 struct spdk_nvmf_qpair *qpair = req->qpair; 1801 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1802 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1803 uint16_t ersp_len = 0; 1804 1805 /* set sq head value in resp */ 1806 rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair); 1807 1808 /* Increment connection responses */ 1809 fc_conn->rsp_count++; 1810 1811 if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count, 1812 fc_req->transferred_len)) { 1813 /* Fill ERSP Len */ 1814 to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) / 1815 sizeof(uint32_t))); 1816 fc_req->ersp.ersp_len = ersp_len; 1817 1818 /* Fill RSN */ 1819 to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn); 1820 fc_conn->rsn++; 1821 1822 /* Fill transfer length */ 1823 to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len); 1824 1825 SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n"); 1826 rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp, 1827 sizeof(struct spdk_nvmf_fc_ersp_iu)); 1828 } else { 1829 SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n"); 1830 rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0); 1831 } 1832 1833 return rc; 1834 } 1835 1836 bool 1837 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req, 1838 uint32_t rsp_cnt, uint32_t xfer_len) 1839 { 1840 struct spdk_nvmf_request *req = &fc_req->req; 1841 struct spdk_nvmf_qpair *qpair = req->qpair; 1842 struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair); 1843 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 1844 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1845 uint16_t status = *((uint16_t *)&rsp->status); 1846 1847 /* 1848 * Check if we need to send ERSP 1849 * 1) For every N responses where N == ersp_ratio 1850 * 2) Fabric commands. 1851 * 3) Completion status failed or Completion dw0 or dw1 valid. 1852 * 4) SQ == 90% full. 1853 * 5) Transfer length not equal to CMD IU length 1854 */ 1855 1856 if (!(rsp_cnt % fc_conn->esrp_ratio) || 1857 (cmd->opc == SPDK_NVME_OPC_FABRIC) || 1858 (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 || 1859 (req->length != xfer_len)) { 1860 return true; 1861 } 1862 return false; 1863 } 1864 1865 static int 1866 nvmf_fc_request_complete(struct spdk_nvmf_request *req) 1867 { 1868 int rc = 0; 1869 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 1870 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl; 1871 1872 if (fc_req->is_aborted) { 1873 /* Defer this to make sure we dont call io cleanup in same context. 
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we don't call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}

struct spdk_nvmf_tgt *
nvmf_fc_get_tgt(void)
{
	if (g_nvmf_ftransport) {
		return g_nvmf_ftransport->transport.tgt;
	}
	return NULL;
}

/*
 * FC Transport Public API begins here
 */

#define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
#define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
#define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
#define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
#define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
#define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
#define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
#define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)

static void
nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
{
	opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
	opts->max_io_size = SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
	opts->io_unit_size = SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
	opts->max_aq_depth = SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
	opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
}
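/*
 * With the defaults above, SPDK_NVMF_FC_DEFAULT_MAX_SGE works out to
 * 65536 / 4096 = 16 scatter-gather elements; nvmf_fc_create() below rejects
 * any opts combination whose max_io_size / io_unit_size exceeds this.
 */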
(pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) { 1980 SPDK_ERRLOG("pthread_mutex_init() failed\n"); 1981 free(g_nvmf_ftransport); 1982 g_nvmf_ftransport = NULL; 1983 return NULL; 1984 } 1985 1986 g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept, 1987 &g_nvmf_ftransport->transport, opts->acceptor_poll_rate); 1988 if (!g_nvmf_ftransport->accept_poller) { 1989 free(g_nvmf_ftransport); 1990 g_nvmf_ftransport = NULL; 1991 return NULL; 1992 } 1993 1994 /* initialize the low level FC driver */ 1995 nvmf_fc_lld_init(); 1996 1997 return &g_nvmf_ftransport->transport; 1998 } 1999 2000 static void 2001 nvmf_fc_destroy_done_cb(void *cb_arg) 2002 { 2003 free(g_nvmf_ftransport); 2004 if (g_transport_destroy_done_cb) { 2005 g_transport_destroy_done_cb(cb_arg); 2006 g_transport_destroy_done_cb = NULL; 2007 } 2008 } 2009 2010 static int 2011 nvmf_fc_destroy(struct spdk_nvmf_transport *transport, 2012 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 2013 { 2014 if (transport) { 2015 struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp; 2016 2017 /* clean up any FC poll groups still around */ 2018 TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) { 2019 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2020 free(fgroup); 2021 } 2022 2023 spdk_poller_unregister(&g_nvmf_ftransport->accept_poller); 2024 g_nvmf_fgroup_count = 0; 2025 g_transport_destroy_done_cb = cb_fn; 2026 2027 /* low level FC driver clean up */ 2028 nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg); 2029 } 2030 2031 return 0; 2032 } 2033 2034 static int 2035 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid, 2036 struct spdk_nvmf_listen_opts *listen_opts) 2037 { 2038 return 0; 2039 } 2040 2041 static void 2042 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport, 2043 const struct spdk_nvme_transport_id *_trid) 2044 { 2045 } 2046 2047 static int 2048 nvmf_fc_accept(void *ctx) 2049 { 2050 struct spdk_nvmf_fc_port *fc_port = NULL; 2051 uint32_t count = 0; 2052 static bool start_lld = false; 2053 2054 if (spdk_unlikely(!start_lld)) { 2055 start_lld = true; 2056 nvmf_fc_lld_start(); 2057 } 2058 2059 /* poll the LS queue on each port */ 2060 TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) { 2061 if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) { 2062 count += nvmf_fc_process_queue(&fc_port->ls_queue); 2063 } 2064 } 2065 2066 return count > 0 ? 
SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 2067 } 2068 2069 static void 2070 nvmf_fc_discover(struct spdk_nvmf_transport *transport, 2071 struct spdk_nvme_transport_id *trid, 2072 struct spdk_nvmf_discovery_log_page_entry *entry) 2073 { 2074 entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC; 2075 entry->adrfam = trid->adrfam; 2076 entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED; 2077 2078 spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' '); 2079 spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' '); 2080 } 2081 2082 static struct spdk_nvmf_transport_poll_group * 2083 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport, 2084 struct spdk_nvmf_poll_group *group) 2085 { 2086 struct spdk_nvmf_fc_poll_group *fgroup; 2087 struct spdk_nvmf_fc_transport *ftransport = 2088 SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport); 2089 2090 fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group)); 2091 if (!fgroup) { 2092 SPDK_ERRLOG("Unable to alloc FC poll group\n"); 2093 return NULL; 2094 } 2095 2096 TAILQ_INIT(&fgroup->hwqp_list); 2097 2098 pthread_mutex_lock(&ftransport->lock); 2099 TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link); 2100 g_nvmf_fgroup_count++; 2101 pthread_mutex_unlock(&ftransport->lock); 2102 2103 return &fgroup->group; 2104 } 2105 2106 static void 2107 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 2108 { 2109 struct spdk_nvmf_fc_poll_group *fgroup; 2110 struct spdk_nvmf_fc_transport *ftransport = 2111 SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport); 2112 2113 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2114 pthread_mutex_lock(&ftransport->lock); 2115 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 2116 g_nvmf_fgroup_count--; 2117 pthread_mutex_unlock(&ftransport->lock); 2118 2119 free(fgroup); 2120 } 2121 2122 static int 2123 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 2124 struct spdk_nvmf_qpair *qpair) 2125 { 2126 struct spdk_nvmf_fc_poll_group *fgroup; 2127 struct spdk_nvmf_fc_conn *fc_conn; 2128 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2129 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2130 bool hwqp_found = false; 2131 2132 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2133 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2134 2135 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2136 if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) { 2137 hwqp_found = true; 2138 break; 2139 } 2140 } 2141 2142 if (!hwqp_found) { 2143 SPDK_ERRLOG("No valid hwqp found for new QP.\n"); 2144 goto err; 2145 } 2146 2147 if (!nvmf_fc_assign_conn_to_hwqp(hwqp, 2148 &fc_conn->conn_id, 2149 fc_conn->max_queue_depth)) { 2150 SPDK_ERRLOG("Failed to get a connection id for new QP.\n"); 2151 goto err; 2152 } 2153 2154 fc_conn->hwqp = hwqp; 2155 2156 /* If this is for ADMIN connection, then update assoc ID. 
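* By this module's convention, the association ID is the connection ID of
* the admin queue (qid 0), which lets the association be located from its
* admin connection.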
*/ 2157 if (fc_conn->qpair.qid == 0) { 2158 fc_conn->fc_assoc->assoc_id = fc_conn->conn_id; 2159 } 2160 2161 api_data = &fc_conn->create_opd->u.add_conn; 2162 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args); 2163 return 0; 2164 err: 2165 return -1; 2166 } 2167 2168 static int 2169 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2170 { 2171 uint32_t count = 0; 2172 struct spdk_nvmf_fc_poll_group *fgroup; 2173 struct spdk_nvmf_fc_hwqp *hwqp; 2174 2175 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2176 2177 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2178 if (hwqp->state == SPDK_FC_HWQP_ONLINE) { 2179 count += nvmf_fc_process_queue(hwqp); 2180 } 2181 } 2182 2183 return (int) count; 2184 } 2185 2186 static int 2187 nvmf_fc_request_free(struct spdk_nvmf_request *req) 2188 { 2189 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 2190 2191 if (!fc_req->is_aborted) { 2192 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED); 2193 nvmf_fc_request_abort(fc_req, true, NULL, NULL); 2194 } else { 2195 nvmf_fc_request_abort_complete(fc_req); 2196 } 2197 2198 return 0; 2199 } 2200 2201 static void 2202 nvmf_fc_connection_delete_done_cb(void *arg) 2203 { 2204 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2205 2206 if (fc_ctx->cb_fn) { 2207 spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx); 2208 } 2209 free(fc_ctx); 2210 } 2211 2212 static void 2213 _nvmf_fc_close_qpair(void *arg) 2214 { 2215 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg; 2216 struct spdk_nvmf_qpair *qpair = fc_ctx->qpair; 2217 struct spdk_nvmf_fc_conn *fc_conn; 2218 int rc; 2219 2220 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2221 if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) { 2222 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 2223 2224 if (fc_conn->create_opd) { 2225 api_data = &fc_conn->create_opd->u.add_conn; 2226 2227 nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst, 2228 api_data->args.fc_conn, api_data->aq_conn); 2229 } 2230 } else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) { 2231 rc = nvmf_fc_delete_connection(fc_conn, false, true, 2232 nvmf_fc_connection_delete_done_cb, fc_ctx); 2233 if (!rc) { 2234 /* Wait for transport to complete its work. */ 2235 return; 2236 } 2237 2238 SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__); 2239 } else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 2240 /* This is the case where deletion started from FC layer. 
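* Bounce the FC layer's disconnect callback to the thread that owns the
* qpair; no additional transport cleanup is needed here.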
*/ 2241 spdk_thread_send_msg(fc_ctx->qpair_thread, fc_conn->qpair_disconnect_cb_fn, 2242 fc_conn->qpair_disconnect_ctx); 2243 } 2244 2245 nvmf_fc_connection_delete_done_cb(fc_ctx); 2246 } 2247 2248 static void 2249 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair, 2250 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 2251 { 2252 struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx; 2253 2254 fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx)); 2255 if (!fc_ctx) { 2256 SPDK_ERRLOG("Unable to allocate close_qpair ctx.\n"); 2257 if (cb_fn) { 2258 cb_fn(cb_arg); 2259 } 2260 return; 2261 } 2262 fc_ctx->qpair = qpair; 2263 fc_ctx->cb_fn = cb_fn; 2264 fc_ctx->cb_ctx = cb_arg; 2265 fc_ctx->qpair_thread = spdk_get_thread(); 2266 2267 spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx); 2268 } 2269 2270 static int 2271 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2272 struct spdk_nvme_transport_id *trid) 2273 { 2274 struct spdk_nvmf_fc_conn *fc_conn; 2275 2276 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2277 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2278 return 0; 2279 } 2280 2281 static int 2282 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2283 struct spdk_nvme_transport_id *trid) 2284 { 2285 struct spdk_nvmf_fc_conn *fc_conn; 2286 2287 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2288 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2289 return 0; 2290 } 2291 2292 static int 2293 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2294 struct spdk_nvme_transport_id *trid) 2295 { 2296 struct spdk_nvmf_fc_conn *fc_conn; 2297 2298 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2299 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2300 return 0; 2301 } 2302 2303 static void 2304 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 2305 struct spdk_nvmf_request *req) 2306 { 2307 spdk_nvmf_request_complete(req); 2308 } 2309 2310 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = { 2311 .name = "FC", 2312 .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC, 2313 .opts_init = nvmf_fc_opts_init, 2314 .create = nvmf_fc_create, 2315 .destroy = nvmf_fc_destroy, 2316 2317 .listen = nvmf_fc_listen, 2318 .stop_listen = nvmf_fc_stop_listen, 2319 2320 .listener_discover = nvmf_fc_discover, 2321 2322 .poll_group_create = nvmf_fc_poll_group_create, 2323 .poll_group_destroy = nvmf_fc_poll_group_destroy, 2324 .poll_group_add = nvmf_fc_poll_group_add, 2325 .poll_group_poll = nvmf_fc_poll_group_poll, 2326 2327 .req_complete = nvmf_fc_request_complete, 2328 .req_free = nvmf_fc_request_free, 2329 .qpair_fini = nvmf_fc_close_qpair, 2330 .qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid, 2331 .qpair_get_local_trid = nvmf_fc_qpair_get_local_trid, 2332 .qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid, 2333 .qpair_abort_request = nvmf_fc_qpair_abort_request, 2334 }; 2335 2336 /* Initializes the data for the creation of an FC-Port object in the SPDK 2337 * library. The spdk_nvmf_fc_port is a well-defined structure that is part of 2338 * the API to the library. The contents added to this well-defined structure 2339 * are private to each vendor's implementation.
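* For example, a low-level driver fills in spdk_nvmf_fc_hw_port_init_args
* (port handle, LS queue, IO queue array, etc.) and this routine copies
* that state into the fc_port and its hwqps.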
2340 */ 2341 static int 2342 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port, 2343 struct spdk_nvmf_fc_hw_port_init_args *args) 2344 { 2345 int rc = 0; 2346 /* Use a high number for the LS HWQP so that it does not clash with the 2347 * IO HWQPs and immediately shows up as an LS queue during tracing. 2348 */ 2349 uint32_t i; 2350 2351 fc_port->port_hdl = args->port_handle; 2352 fc_port->lld_fc_port = args->lld_fc_port; 2353 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2354 fc_port->fcp_rq_id = args->fcp_rq_id; 2355 fc_port->num_io_queues = args->io_queue_cnt; 2356 2357 /* 2358 * Set the port context from the init args. Used for FCP port stats. 2359 */ 2360 fc_port->port_ctx = args->port_ctx; 2361 2362 /* 2363 * Set up the LS queue from the init args. 2364 */ 2365 fc_port->ls_queue.queues = args->ls_queue; 2366 fc_port->ls_queue.thread = nvmf_fc_get_main_thread(); 2367 fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues; 2368 fc_port->ls_queue.is_ls_queue = true; 2369 2370 /* 2371 * Initialize the LS queue. 2372 */ 2373 rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue); 2374 if (rc) { 2375 return rc; 2376 } 2377 2378 /* 2379 * Initialize the IO queues. 2380 */ 2381 for (i = 0; i < args->io_queue_cnt; i++) { 2382 struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i]; 2383 hwqp->hwqp_id = i; 2384 hwqp->queues = args->io_queues[i]; 2385 hwqp->is_ls_queue = false; 2386 rc = nvmf_fc_init_hwqp(fc_port, hwqp); 2387 if (rc) { 2388 for (; i > 0; --i) { 2389 rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash); 2390 rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash); 2391 } 2392 rte_hash_free(fc_port->ls_queue.connection_list_hash); 2393 rte_hash_free(fc_port->ls_queue.rport_list_hash); 2394 return rc; 2395 } 2396 } 2397 2398 /* 2399 * Initialize LS processing for the port. 2400 */ 2401 nvmf_fc_ls_init(fc_port); 2402 2403 /* 2404 * Initialize the list of nports on this HW port. 2405 */ 2406 TAILQ_INIT(&fc_port->nport_list); 2407 fc_port->num_nports = 0; 2408 2409 return 0; 2410 } 2411 2412 /* 2413 * FC port must have all its nports deleted before transitioning to offline state. 2414 */ 2415 static void 2416 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port) 2417 { 2418 struct spdk_nvmf_fc_nport *nport = NULL; 2419 /* All nports must have been deleted at this point for this fc port */ 2420 DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list)); 2421 DEV_VERIFY(fc_port->num_nports == 0); 2422 /* Mark any remaining nports as zombies */ 2423 if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) { 2424 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 2425 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2426 } 2427 } 2428 } 2429 2430 static void 2431 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err) 2432 { 2433 ASSERT_SPDK_FC_MAIN_THREAD(); 2434 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args; 2435 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2436 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2437 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2438 int spdk_err = 0; 2439 uint8_t port_handle = cb_data->port_handle; 2440 uint32_t s_id = rport->s_id; 2441 uint32_t rpi = rport->rpi; 2442 uint32_t assoc_count = rport->assoc_count; 2443 uint32_t nport_hdl = nport->nport_hdl; 2444 uint32_t d_id = nport->d_id; 2445 char log_str[256]; 2446 2447 /* 2448 * Assert on any delete failure.
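* Note that on a delete failure the driver callback is skipped; only the
* error log below runs.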
2449 */ 2450 if (0 != err) { 2451 DEV_VERIFY(!"Error in IT Delete callback."); 2452 goto out; 2453 } 2454 2455 if (cb_func != NULL) { 2456 (void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2457 } 2458 2459 out: 2460 free(cb_data); 2461 2462 snprintf(log_str, sizeof(log_str), 2463 "IT delete cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n", 2464 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err); 2465 2466 if (err != 0) { 2467 SPDK_ERRLOG("%s", log_str); 2468 } else { 2469 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2470 } 2471 } 2472 2473 static void 2474 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err) 2475 { 2476 ASSERT_SPDK_FC_MAIN_THREAD(); 2477 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args; 2478 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2479 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2480 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func; 2481 uint32_t s_id = rport->s_id; 2482 uint32_t rpi = rport->rpi; 2483 uint32_t assoc_count = rport->assoc_count; 2484 uint32_t nport_hdl = nport->nport_hdl; 2485 uint32_t d_id = nport->d_id; 2486 char log_str[256]; 2487 2488 /* 2489 * Assert on any association delete failure. We continue to delete other 2490 * associations in release builds. 2491 */ 2492 if (0 != err) { 2493 DEV_VERIFY(!"Nport's association delete callback returned error"); 2494 if (nport->assoc_count > 0) { 2495 nport->assoc_count--; 2496 } 2497 if (rport->assoc_count > 0) { 2498 rport->assoc_count--; 2499 } 2500 } 2501 2502 /* 2503 * If this is the last association being deleted for the ITN, 2504 * execute the callback(s). 2505 */ 2506 if (0 == rport->assoc_count) { 2507 /* Remove the rport from the remote port list. */ 2508 if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) { 2509 SPDK_ERRLOG("Error while removing rport from list.\n"); 2510 DEV_VERIFY(!"Error while removing rport from list."); 2511 } 2512 2513 if (cb_func != NULL) { 2514 /* 2515 * Callback function is provided by the caller 2516 * of nvmf_fc_adm_i_t_delete_assoc(). 2517 */ 2518 (void)cb_func(cb_data->cb_ctx, 0); 2519 } 2520 free(rport); 2521 free(args); 2522 } 2523 2524 snprintf(log_str, sizeof(log_str), 2525 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n", 2526 nport_hdl, s_id, d_id, rpi, assoc_count, err); 2527 2528 if (err != 0) { 2529 SPDK_ERRLOG("%s", log_str); 2530 } else { 2531 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2532 } 2533 } 2534 2535 /** 2536 * Process an IT delete: tear down all associations on this I_T nexus. 2537 */ 2538 static void 2539 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport, 2540 struct spdk_nvmf_fc_remote_port_info *rport, 2541 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func, 2542 void *cb_ctx) 2543 { 2544 int err = 0; 2545 struct spdk_nvmf_fc_association *assoc = NULL; 2546 int assoc_err = 0; 2547 uint32_t num_assoc = 0; 2548 uint32_t num_assoc_del_scheduled = 0; 2549 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL; 2550 uint8_t port_hdl = nport->port_hdl; 2551 uint32_t s_id = rport->s_id; 2552 uint32_t rpi = rport->rpi; 2553 uint32_t assoc_count = rport->assoc_count; 2554 char log_str[256]; 2555 2556 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n", 2557 nport->nport_hdl); 2558 2559 /* 2560 * Allocate memory for callback data. 2561 * This memory will be freed by the callback function.
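* (nvmf_fc_adm_i_t_delete_assoc_cb frees both the rport and this cb_data
* once the final association delete completes.)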
2562 */ 2563 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data)); 2564 if (NULL == cb_data) { 2565 SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl); 2566 err = -ENOMEM; 2567 goto out; 2568 } 2569 cb_data->nport = nport; 2570 cb_data->rport = rport; 2571 cb_data->port_handle = port_hdl; 2572 cb_data->cb_func = cb_func; 2573 cb_data->cb_ctx = cb_ctx; 2574 2575 /* 2576 * Delete all associations, if any, related to this ITN/remote_port. 2577 */ 2578 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 2579 num_assoc++; 2580 if (assoc->s_id == s_id) { 2581 assoc_err = nvmf_fc_delete_association(nport, 2582 assoc->assoc_id, 2583 false /* send abts */, false, 2584 nvmf_fc_adm_i_t_delete_assoc_cb, cb_data); 2585 if (0 != assoc_err) { 2586 /* 2587 * Mark this association as a zombie. 2588 */ 2589 err = -EINVAL; 2590 DEV_VERIFY(!"Error while deleting association"); 2591 (void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2592 } else { 2593 num_assoc_del_scheduled++; 2594 } 2595 } 2596 } 2597 2598 out: 2599 if ((cb_data) && (num_assoc_del_scheduled == 0)) { 2600 /* 2601 * Since there are no association_delete calls 2602 * successfully scheduled, the association_delete 2603 * callback function will never be called. 2604 * In this case, call the callback function now. 2605 */ 2606 nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0); 2607 } 2608 2609 snprintf(log_str, sizeof(log_str), 2610 "IT delete associations on nport:%d end. " 2611 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n", 2612 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err); 2613 2614 if (err == 0) { 2615 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2616 } else { 2617 SPDK_ERRLOG("%s", log_str); 2618 } 2619 } 2620 2621 static void 2622 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 2623 { 2624 ASSERT_SPDK_FC_MAIN_THREAD(); 2625 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL; 2626 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2627 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2628 struct spdk_nvmf_fc_port *fc_port = NULL; 2629 int err = 0; 2630 2631 quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data; 2632 hwqp = quiesce_api_data->hwqp; 2633 fc_port = hwqp->fc_port; 2634 port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx; 2635 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func; 2636 2637 /* 2638 * Decrement the callback/quiesced queue count. 2639 */ 2640 port_quiesce_ctx->quiesce_count--; 2641 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue %d quiesced\n", quiesce_api_data->hwqp->hwqp_id); 2642 2643 free(quiesce_api_data); 2644 /* 2645 * Wait for callbacks from all queues, i.e., max_ioq_queues + the LS queue. 2646 */ 2647 if (port_quiesce_ctx->quiesce_count > 0) { 2648 return; 2649 } 2650 2651 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2652 SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl); 2653 } else { 2654 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl); 2655 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2656 } 2657 2658 if (cb_func) { 2659 /* 2660 * Callback function for the caller of quiesce. 2661 */ 2662 cb_func(port_quiesce_ctx->ctx, err); 2663 } 2664 2665 /* 2666 * Free the context structure.
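* Safe to free now: every hwqp has called back (quiesce_count reached zero
* above), so nothing else references the context.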
2667 */ 2668 free(port_quiesce_ctx); 2669 2670 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl, 2671 err); 2672 } 2673 2674 static int 2675 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx, 2676 spdk_nvmf_fc_poller_api_cb cb_func) 2677 { 2678 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args; 2679 enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS; 2680 int err = 0; 2681 2682 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args)); 2683 2684 if (args == NULL) { 2685 err = -ENOMEM; 2686 SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id); 2687 goto done; 2688 } 2689 args->hwqp = fc_hwqp; 2690 args->ctx = ctx; 2691 args->cb_info.cb_func = cb_func; 2692 args->cb_info.cb_data = args; 2693 args->cb_info.cb_thread = spdk_get_thread(); 2694 2695 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id); 2696 rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args); 2697 if (rc) { 2698 free(args); 2699 err = -EINVAL; 2700 } 2701 2702 done: 2703 return err; 2704 } 2705 2706 /* 2707 * Hw port Quiesce 2708 */ 2709 static int 2710 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx, 2711 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func) 2712 { 2713 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2714 uint32_t i = 0; 2715 int err = 0; 2716 2717 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl); 2718 2719 /* 2720 * If the port is in an OFFLINE state, set the state to QUIESCED 2721 * and execute the callback. 2722 */ 2723 if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) { 2724 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2725 } 2726 2727 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2728 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n", 2729 fc_port->port_hdl); 2730 /* 2731 * Execute the callback function directly. 2732 */ 2733 cb_func(ctx, err); 2734 goto out; 2735 } 2736 2737 port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx)); 2738 2739 if (port_quiesce_ctx == NULL) { 2740 err = -ENOMEM; 2741 SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n", 2742 fc_port->port_hdl); 2743 goto out; 2744 } 2745 2746 port_quiesce_ctx->quiesce_count = 0; 2747 port_quiesce_ctx->ctx = ctx; 2748 port_quiesce_ctx->cb_func = cb_func; 2749 2750 /* 2751 * Quiesce the LS queue. 2752 */ 2753 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx, 2754 nvmf_fc_adm_queue_quiesce_cb); 2755 if (err != 0) { 2756 SPDK_ERRLOG("Failed to quiesce the LS queue.\n"); 2757 goto out; 2758 } 2759 port_quiesce_ctx->quiesce_count++; 2760 2761 /* 2762 * Quiesce the IO queues. 2763 */ 2764 for (i = 0; i < fc_port->num_io_queues; i++) { 2765 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i], 2766 port_quiesce_ctx, 2767 nvmf_fc_adm_queue_quiesce_cb); 2768 if (err != 0) { 2769 DEV_VERIFY(0); 2770 SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id); 2771 } 2772 port_quiesce_ctx->quiesce_count++; 2773 } 2774 2775 out: 2776 if (port_quiesce_ctx && err != 0) { 2777 free(port_quiesce_ctx); 2778 } 2779 return err; 2780 } 2781 2782 /* 2783 * Initialize and add a HW port entry to the global 2784 * HW port list. 
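* A low-level driver typically kicks this off through the event API, e.g.
* (sketch; the driver-side names are hypothetical):
*
*	args->port_handle = lld_port_id;
*	args->io_queue_cnt = num_hwqps;
*	nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, args, lld_init_done_cb);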
2785 */ 2786 static void 2787 nvmf_fc_adm_evnt_hw_port_init(void *arg) 2788 { 2789 ASSERT_SPDK_FC_MAIN_THREAD(); 2790 struct spdk_nvmf_fc_port *fc_port = NULL; 2791 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2792 struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *) 2793 api_data->api_args; 2794 int err = 0; 2795 2796 if (args->io_queue_cnt > spdk_env_get_core_count()) { 2797 SPDK_ERRLOG("IO queue count greater than core count for port %d.\n", args->port_handle); 2798 err = -EINVAL; 2799 goto abort_port_init; 2800 } 2801 2802 /* 2803 * 1. Check for duplicate initialization. 2804 */ 2805 fc_port = nvmf_fc_port_lookup(args->port_handle); 2806 if (fc_port != NULL) { 2807 SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle); err = -EINVAL; 2808 fc_port = NULL; /* don't free the existing port in the abort path */ goto abort_port_init; 2809 } 2810 2811 /* 2812 * 2. Get the memory to instantiate a fc port. 2813 */ 2814 fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) + 2815 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp))); 2816 if (fc_port == NULL) { 2817 SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle); 2818 err = -ENOMEM; 2819 goto abort_port_init; 2820 } 2821 2822 /* assign the io_queues array */ 2823 fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof( 2824 struct spdk_nvmf_fc_port)); 2825 2826 /* 2827 * 3. Initialize the contents for the FC-port 2828 */ 2829 err = nvmf_fc_adm_hw_port_data_init(fc_port, args); 2830 2831 if (err != 0) { 2832 SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle); 2833 DEV_VERIFY(!"Data initialization failed for fc_port"); 2834 goto abort_port_init; 2835 } 2836 2837 /* 2838 * 4. Add this port to the global fc port list in the library. 2839 */ 2840 nvmf_fc_port_add(fc_port); 2841 2842 abort_port_init: 2843 if (err && fc_port) { 2844 free(fc_port); 2845 } 2846 if (api_data->cb_func != NULL) { 2847 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err); 2848 } 2849 2850 free(arg); 2851 2852 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n", 2853 args->port_handle, err); 2854 } 2855 2856 static void 2857 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp) 2858 { 2859 struct spdk_nvmf_fc_abts_ctx *ctx; 2860 struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL; 2861 2862 TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) { 2863 TAILQ_REMOVE(&hwqp->sync_cbs, args, link); 2864 ctx = args->cb_info.cb_data; 2865 if (ctx) { 2866 if (++ctx->hwqps_responded == ctx->num_hwqps) { 2867 free(ctx->sync_poller_args); 2868 free(ctx->abts_poller_args); 2869 free(ctx); 2870 } 2871 } 2872 } 2873 } 2874 2875 static void 2876 nvmf_fc_adm_evnt_hw_port_free(void *arg) 2877 { 2878 ASSERT_SPDK_FC_MAIN_THREAD(); 2879 int err = 0, i; 2880 struct spdk_nvmf_fc_port *fc_port = NULL; 2881 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2882 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2883 struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *) 2884 api_data->api_args; 2885 2886 fc_port = nvmf_fc_port_lookup(args->port_handle); 2887 if (!fc_port) { 2888 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2889 err = -EINVAL; 2890 goto out; 2891 } 2892 2893 if (!TAILQ_EMPTY(&fc_port->nport_list)) { 2894 SPDK_ERRLOG("Hw port %d: nports not yet cleaned up.\n", args->port_handle); 2895 err = -EIO; 2896 goto out; 2897 } 2898 2899 /* Clean up and free
fc_port */ 2900 hwqp = &fc_port->ls_queue; 2901 nvmf_fc_adm_hwqp_clean_sync_cb(hwqp); 2902 rte_hash_free(hwqp->connection_list_hash); 2903 rte_hash_free(hwqp->rport_list_hash); 2904 2905 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2906 hwqp = &fc_port->io_queues[i]; 2907 2908 nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]); 2909 rte_hash_free(hwqp->connection_list_hash); 2910 rte_hash_free(hwqp->rport_list_hash); 2911 } 2912 2913 nvmf_fc_port_remove(fc_port); 2914 free(fc_port); 2915 out: 2916 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n", 2917 args->port_handle, err); 2918 if (api_data->cb_func != NULL) { 2919 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err); 2920 } 2921 2922 free(arg); 2923 } 2924 2925 /* 2926 * Online a HW port. 2927 */ 2928 static void 2929 nvmf_fc_adm_evnt_hw_port_online(void *arg) 2930 { 2931 ASSERT_SPDK_FC_MAIN_THREAD(); 2932 struct spdk_nvmf_fc_port *fc_port = NULL; 2933 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2934 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2935 struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *) 2936 api_data->api_args; 2937 int i = 0; 2938 int err = 0; 2939 2940 fc_port = nvmf_fc_port_lookup(args->port_handle); 2941 if (fc_port) { 2942 /* Set the port state to online */ 2943 err = nvmf_fc_port_set_online(fc_port); 2944 if (err != 0) { 2945 SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err); 2946 DEV_VERIFY(!"Hw port online failed"); 2947 goto out; 2948 } 2949 2950 hwqp = &fc_port->ls_queue; 2951 hwqp->context = NULL; 2952 (void)nvmf_fc_hwqp_set_online(hwqp); 2953 2954 /* Cycle through all the io queues and setup a hwqp poller for each. */ 2955 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2956 hwqp = &fc_port->io_queues[i]; 2957 hwqp->context = NULL; 2958 (void)nvmf_fc_hwqp_set_online(hwqp); 2959 nvmf_fc_poll_group_add_hwqp(hwqp); 2960 } 2961 } else { 2962 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2963 err = -EINVAL; 2964 } 2965 2966 out: 2967 if (api_data->cb_func != NULL) { 2968 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err); 2969 } 2970 2971 free(arg); 2972 2973 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle, 2974 err); 2975 } 2976 2977 static void 2978 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status) 2979 { 2980 int err = 0; 2981 struct spdk_nvmf_fc_port *fc_port = NULL; 2982 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx; 2983 struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args; 2984 2985 if (--remove_hwqp_args->pending_remove_hwqp) { 2986 return; 2987 } 2988 2989 fc_port = nvmf_fc_port_lookup(args->port_handle); 2990 if (!fc_port) { 2991 err = -EINVAL; 2992 SPDK_ERRLOG("fc_port not found.\n"); 2993 goto out; 2994 } 2995 2996 /* 2997 * Delete all the nports. Ideally, the nports should have been purged 2998 * before the offline event, in which case, only a validation is required. 2999 */ 3000 nvmf_fc_adm_hw_port_offline_nport_delete(fc_port); 3001 out: 3002 if (remove_hwqp_args->cb_fn) { 3003 remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3004 } 3005 3006 free(remove_hwqp_args); 3007 } 3008 3009 /* 3010 * Offline a HW port. 
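* The LS queue and all IO hwqps are taken offline first; the completion
* callback runs once every IO hwqp has been removed from its poll group.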
3011 */ 3012 static void 3013 nvmf_fc_adm_evnt_hw_port_offline(void *arg) 3014 { 3015 ASSERT_SPDK_FC_MAIN_THREAD(); 3016 struct spdk_nvmf_fc_port *fc_port = NULL; 3017 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 3018 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3019 struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *) 3020 api_data->api_args; 3021 struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args; 3022 int i = 0; 3023 int err = 0; 3024 3025 fc_port = nvmf_fc_port_lookup(args->port_handle); 3026 if (fc_port) { 3027 /* Set the port state to offline, if it is not already. */ 3028 err = nvmf_fc_port_set_offline(fc_port); 3029 if (err != 0) { 3030 SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err); 3031 err = 0; 3032 goto out; 3033 } 3034 3035 remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args)); 3036 if (!remove_hwqp_args) { 3037 SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n"); 3038 err = -ENOMEM; 3039 goto out; 3040 } 3041 remove_hwqp_args->cb_fn = api_data->cb_func; 3042 remove_hwqp_args->cb_args = api_data->api_args; 3043 remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues; 3044 3045 hwqp = &fc_port->ls_queue; 3046 (void)nvmf_fc_hwqp_set_offline(hwqp); 3047 3048 /* Remove poller for all the io queues. */ 3049 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 3050 hwqp = &fc_port->io_queues[i]; 3051 (void)nvmf_fc_hwqp_set_offline(hwqp); 3052 nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb, 3053 remove_hwqp_args); 3054 } 3055 3056 free(arg); 3057 3058 /* Wait until all the hwqps are removed from poll groups. */ 3059 return; 3060 } else { 3061 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3062 err = -EINVAL; 3063 } 3064 out: 3065 if (api_data->cb_func != NULL) { 3066 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 3067 } 3068 3069 free(arg); 3070 3071 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle, 3072 err); 3073 } 3074 3075 struct nvmf_fc_add_rem_listener_ctx { 3076 struct spdk_nvmf_subsystem *subsystem; 3077 bool add_listener; 3078 struct spdk_nvme_transport_id trid; 3079 }; 3080 3081 static void 3082 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3083 { 3084 ASSERT_SPDK_FC_MAIN_THREAD(); 3085 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3086 free(ctx); 3087 } 3088 3089 static void 3090 nvmf_fc_adm_listen_done(void *cb_arg, int status) 3091 { 3092 ASSERT_SPDK_FC_MAIN_THREAD(); 3093 struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg; 3094 3095 if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) { 3096 SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn); 3097 free(ctx); 3098 } 3099 } 3100 3101 static void 3102 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 3103 { 3104 ASSERT_SPDK_FC_MAIN_THREAD(); 3105 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 3106 3107 if (ctx->add_listener) { 3108 spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx); 3109 } else { 3110 spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid); 3111 nvmf_fc_adm_listen_done(ctx, 0); 3112 } 3113 } 3114 3115 static int 3116 nvmf_fc_adm_add_rem_nport_listener(struct 
spdk_nvmf_fc_nport *nport, bool add) 3117 { 3118 struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt(); 3119 struct spdk_nvmf_subsystem *subsystem; 3120 struct spdk_nvmf_listen_opts opts; 3121 3122 if (!tgt) { 3123 SPDK_ERRLOG("No nvmf target defined\n"); 3124 return -EINVAL; 3125 } 3126 3127 spdk_nvmf_listen_opts_init(&opts, sizeof(opts)); 3128 3129 subsystem = spdk_nvmf_subsystem_get_first(tgt); 3130 while (subsystem) { 3131 struct nvmf_fc_add_rem_listener_ctx *ctx; 3132 3133 if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) { 3134 ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx)); 3135 if (ctx) { 3136 ctx->add_listener = add; 3137 ctx->subsystem = subsystem; 3138 nvmf_fc_create_trid(&ctx->trid, 3139 nport->fc_nodename.u.wwn, 3140 nport->fc_portname.u.wwn); 3141 3142 if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) { 3143 SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n", 3144 ctx->trid.traddr); 3145 free(ctx); 3146 } else if (spdk_nvmf_subsystem_pause(subsystem, 3147 0, 3148 nvmf_fc_adm_subsystem_paused_cb, 3149 ctx)) { 3150 SPDK_ERRLOG("Failed to pause subsystem: %s\n", 3151 subsystem->subnqn); 3152 free(ctx); 3153 } 3154 } 3155 } 3156 3157 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 3158 } 3159 3160 return 0; 3161 } 3162 3163 /* 3164 * Create a Nport. 3165 */ 3166 static void 3167 nvmf_fc_adm_evnt_nport_create(void *arg) 3168 { 3169 ASSERT_SPDK_FC_MAIN_THREAD(); 3170 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3171 struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *) 3172 api_data->api_args; 3173 struct spdk_nvmf_fc_nport *nport = NULL; 3174 struct spdk_nvmf_fc_port *fc_port = NULL; 3175 int err = 0; 3176 3177 /* 3178 * Get the physical port. 3179 */ 3180 fc_port = nvmf_fc_port_lookup(args->port_handle); 3181 if (fc_port == NULL) { 3182 err = -EINVAL; 3183 goto out; 3184 } 3185 3186 /* 3187 * Check for duplicate initialization. 3188 */ 3189 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3190 if (nport != NULL) { 3191 SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle, 3192 args->port_handle); 3193 err = -EINVAL; 3194 goto out; 3195 } 3196 3197 /* 3198 * Get the memory to instantiate a fc nport. 3199 */ 3200 nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport)); 3201 if (nport == NULL) { 3202 SPDK_ERRLOG("Failed to allocate memory for nport %d.\n", 3203 args->nport_handle); 3204 err = -ENOMEM; 3205 goto out; 3206 } 3207 3208 /* 3209 * Initialize the contents for the nport 3210 */ 3211 nport->nport_hdl = args->nport_handle; 3212 nport->port_hdl = args->port_handle; 3213 nport->nport_state = SPDK_NVMF_FC_OBJECT_CREATED; 3214 nport->fc_nodename = args->fc_nodename; 3215 nport->fc_portname = args->fc_portname; 3216 nport->d_id = args->d_id; 3217 nport->fc_port = nvmf_fc_port_lookup(args->port_handle); 3218 3219 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED); 3220 TAILQ_INIT(&nport->rem_port_list); 3221 nport->rport_count = 0; 3222 TAILQ_INIT(&nport->fc_associations); 3223 nport->assoc_count = 0; 3224 3225 /* 3226 * Populate the nport address (as listening address) to the nvmf subsystems. 
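* Each subsystem that allows any listener gets the nport's WWNN/WWPN trid
* registered via spdk_nvmf_tgt_listen_ext(), with the listener added (or
* removed) while the subsystem is paused and then resumed.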
3227 */ 3228 err = nvmf_fc_adm_add_rem_nport_listener(nport, true); 3229 if (err) { goto out; /* don't publish a partially initialized nport */ } 3230 (void)nvmf_fc_port_add_nport(fc_port, nport); 3231 out: 3232 if (err && nport) { 3233 free(nport); 3234 } 3235 3236 if (api_data->cb_func != NULL) { 3237 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err); 3238 } 3239 3240 free(arg); 3241 } 3242 3243 static void 3244 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type, 3245 void *cb_args, int spdk_err) 3246 { 3247 ASSERT_SPDK_FC_MAIN_THREAD(); 3248 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args; 3249 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 3250 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 3251 int err = 0; 3252 uint16_t nport_hdl = 0; 3253 char log_str[256]; 3254 3255 /* 3256 * Assert on any delete failure. 3257 */ 3258 if (nport == NULL) { 3259 SPDK_ERRLOG("Nport delete callback returned null nport\n"); 3260 DEV_VERIFY(!"nport is null."); 3261 goto out; 3262 } 3263 3264 nport_hdl = nport->nport_hdl; 3265 if (0 != spdk_err) { 3266 SPDK_ERRLOG("Nport delete callback returned error. FC Port: " 3267 "%d, Nport: %d\n", 3268 nport->port_hdl, nport->nport_hdl); 3269 DEV_VERIFY(!"nport delete callback error."); 3270 } 3271 3272 /* 3273 * Free the nport if this is the last rport being deleted and 3274 * execute the callback(s). 3275 */ 3276 if (nvmf_fc_nport_has_no_rport(nport)) { 3277 if (0 != nport->assoc_count) { 3278 SPDK_ERRLOG("association count != 0\n"); 3279 DEV_VERIFY(!"association count != 0"); 3280 } 3281 3282 err = nvmf_fc_port_remove_nport(nport->fc_port, nport); 3283 if (0 != err) { 3284 SPDK_ERRLOG("Nport delete callback: Failed to remove " 3285 "nport from nport list. FC Port:%d Nport:%d\n", 3286 nport->port_hdl, nport->nport_hdl); 3287 } 3288 /* Free the nport */ 3289 free(nport); 3290 3291 if (cb_func != NULL) { 3292 (void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err); 3293 } 3294 free(cb_data); 3295 } 3296 out: 3297 snprintf(log_str, sizeof(log_str), 3298 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n", 3299 port_handle, nport_hdl, event_type, spdk_err); 3300 3301 if (err != 0) { 3302 SPDK_ERRLOG("%s", log_str); 3303 } else { 3304 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3305 } 3306 } 3307 3308 /* 3309 * Delete Nport. 3310 */ 3311 static void 3312 nvmf_fc_adm_evnt_nport_delete(void *arg) 3313 { 3314 ASSERT_SPDK_FC_MAIN_THREAD(); 3315 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3316 struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *) 3317 api_data->api_args; 3318 struct spdk_nvmf_fc_nport *nport = NULL; 3319 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL; 3320 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3321 int err = 0; 3322 uint32_t rport_cnt = 0; 3323 int rc = 0; 3324 3325 /* 3326 * Make sure that the nport exists. 3327 */ 3328 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3329 if (nport == NULL) { 3330 SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle, 3331 args->port_handle); 3332 err = -EINVAL; 3333 goto out; 3334 } 3335 3336 /* 3337 * Allocate memory for callback data.
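* Freed in nvmf_fc_adm_delete_nport_cb once the last rport is gone, or
* below on an early failure.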
3338 */ 3339 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data)); 3340 if (NULL == cb_data) { 3341 SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle); 3342 err = -ENOMEM; 3343 goto out; 3344 } 3345 3346 cb_data->nport = nport; 3347 cb_data->port_handle = args->port_handle; 3348 cb_data->fc_cb_func = api_data->cb_func; 3349 cb_data->fc_cb_ctx = args->cb_ctx; 3350 3351 /* 3352 * Begin nport teardown. 3353 */ 3354 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3355 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3356 } else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3357 /* 3358 * Deletion of this nport is already in progress. Register callback 3359 * and return. 3360 */ 3361 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3362 err = -ENODEV; 3363 goto out; 3364 } else { 3365 /* nport partially created/deleted */ 3366 DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3367 DEV_VERIFY(!"Nport in zombie state"); 3368 err = -ENODEV; 3369 goto out; 3370 } 3371 3372 /* 3373 * Remove this nport from listening addresses across subsystems 3374 */ 3375 rc = nvmf_fc_adm_add_rem_nport_listener(nport, false); 3376 3377 if (0 != rc) { 3378 err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 3379 SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n", 3380 nport->nport_hdl); 3381 goto out; 3382 } 3383 3384 /* 3385 * Delete all the remote ports (if any) for the nport 3386 */ 3387 /* TODO - Need to do this with a "first" and a "next" accessor function 3388 * for completeness. Look at app-subsystem as an example. 3389 */ 3390 if (nvmf_fc_nport_has_no_rport(nport)) { 3391 /* No rports to delete. Complete the nport deletion. */ 3392 nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0); 3393 goto out; 3394 } 3395 3396 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3397 struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc( 3398 1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args)); 3399 3400 if (it_del_args == NULL) { 3401 err = -ENOMEM; 3402 SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n", 3403 rport_iter->rpi, rport_iter->s_id); 3404 DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory"); 3405 goto out; 3406 } 3407 3408 rport_cnt++; 3409 it_del_args->port_handle = nport->port_hdl; 3410 it_del_args->nport_handle = nport->nport_hdl; 3411 it_del_args->cb_ctx = (void *)cb_data; 3412 it_del_args->rpi = rport_iter->rpi; 3413 it_del_args->s_id = rport_iter->s_id; 3414 3415 err = nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args, 3416 nvmf_fc_adm_delete_nport_cb); 3417 if (err) { 3418 free(it_del_args); 3419 } 3420 } 3421 3422 out: 3423 /* On failure, execute the callback function now */ 3424 if ((err != 0) || (rc != 0)) { 3425 SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, " 3426 "rport_cnt:%d rc:%d.\n", 3427 args->nport_handle, err, args->port_handle, 3428 rport_cnt, rc); 3429 if (cb_data) { 3430 free(cb_data); 3431 } 3432 if (api_data->cb_func != NULL) { 3433 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err); 3434 } 3435 3436 } else { 3437 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3438 "NPort %d delete done successfully, fc port:%d. " 3439 "rport_cnt:%d\n", 3440 args->nport_handle, args->port_handle, rport_cnt); 3441 } 3442 3443 free(arg); 3444 } 3445 3446 /* 3447 * Process a PRLI/IT add.
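* A PRLI from an initiator establishes the I_T nexus: the remote port is
* recorded on the nport and the target's PRLI service parameters are
* returned to the driver through the args.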
3448 */ 3449 static void 3450 nvmf_fc_adm_evnt_i_t_add(void *arg) 3451 { 3452 ASSERT_SPDK_FC_MAIN_THREAD(); 3453 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3454 struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *) 3455 api_data->api_args; 3456 struct spdk_nvmf_fc_nport *nport = NULL; 3457 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3458 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3459 int err = 0; 3460 3461 /* 3462 * Make sure the nport exists. 3463 */ 3464 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3465 if (nport == NULL) { 3466 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3467 err = -EINVAL; 3468 goto out; 3469 } 3470 3471 /* 3472 * Check for duplicate i_t_add. 3473 */ 3474 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3475 if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) { 3476 SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n", 3477 args->nport_handle, rport_iter->s_id, rport_iter->rpi); 3478 err = -EEXIST; 3479 goto out; 3480 } 3481 } 3482 3483 /* 3484 * Get the memory to instantiate the remote port 3485 */ 3486 rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info)); 3487 if (rport == NULL) { 3488 SPDK_ERRLOG("Memory allocation for rem port failed.\n"); 3489 err = -ENOMEM; 3490 goto out; 3491 } 3492 3493 /* 3494 * Initialize the contents for the rport 3495 */ 3496 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED); 3497 rport->s_id = args->s_id; 3498 rport->rpi = args->rpi; 3499 rport->fc_nodename = args->fc_nodename; 3500 rport->fc_portname = args->fc_portname; 3501 3502 /* 3503 * Add remote port to nport 3504 */ 3505 if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) { 3506 DEV_VERIFY(!"Error while adding rport to list"); 3507 } 3508 3509 /* 3510 * TODO: Do we validate the initiator's service parameters? 3511 */ 3512 3513 /* 3514 * Get the target's service parameters from the library 3515 * to return to the driver. 3516 */ 3517 args->target_prli_info = nvmf_fc_get_prli_service_params(); 3518 3519 out: 3520 if (api_data->cb_func != NULL) { 3521 /* 3522 * Notify the driver of the IT add result; the target's PRLI service 3523 * parameters are returned through args->target_prli_info. 3524 */ 3525 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err); 3526 } 3527 3528 free(arg); 3529 3530 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3531 "IT add on nport %d done, rc = %d.\n", 3532 args->nport_handle, err); 3533 } 3534 3535 /** 3536 * Process an IT delete. 3537 */ 3538 static void 3539 nvmf_fc_adm_evnt_i_t_delete(void *arg) 3540 { 3541 ASSERT_SPDK_FC_MAIN_THREAD(); 3542 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3543 struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *) 3544 api_data->api_args; 3545 int rc = 0; 3546 struct spdk_nvmf_fc_nport *nport = NULL; 3547 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL; 3548 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3549 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3550 uint32_t num_rport = 0; 3551 char log_str[256]; 3552 3553 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle); 3554 3555 /* 3556 * Make sure the nport exists. If it does not, error out.
3557 */ 3558 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3559 if (nport == NULL) { 3560 SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle); 3561 rc = -EINVAL; 3562 goto out; 3563 } 3564 3565 /* 3566 * Find this ITN / rport (remote port). 3567 */ 3568 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3569 num_rport++; 3570 if ((rport_iter->s_id == args->s_id) && 3571 (rport_iter->rpi == args->rpi) && 3572 (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) { 3573 rport = rport_iter; 3574 break; 3575 } 3576 } 3577 3578 /* 3579 * We should find either zero or exactly one rport. 3580 * 3581 * If we find zero rports, that means that a previous request has 3582 * removed the rport by the time we reached here. In this case, 3583 * simply return. 3584 */ 3585 if (rport == NULL) { 3586 rc = -ENODEV; 3587 goto out; 3588 } 3589 3590 /* 3591 * We have the rport slated for deletion. At this point, clean up 3592 * any LS requests that are sitting in the pending list. Do this 3593 * first, then set the state of the rport so that new LS requests 3594 * are not accepted, and start the cleanup. 3595 */ 3596 nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport); 3597 3598 /* 3599 * We have found exactly one rport. Allocate memory for callback data. 3600 */ 3601 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data)); 3602 if (NULL == cb_data) { 3603 SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle); 3604 rc = -ENOMEM; 3605 goto out; 3606 } 3607 3608 cb_data->nport = nport; 3609 cb_data->rport = rport; 3610 cb_data->port_handle = args->port_handle; 3611 cb_data->fc_cb_func = api_data->cb_func; 3612 cb_data->fc_cb_ctx = args->cb_ctx; 3613 3614 /* 3615 * Validate rport object state. 3616 */ 3617 if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3618 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3619 } else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3620 /* 3621 * Deletion of this rport is already in progress. Register callback 3622 * and return. 3623 */ 3624 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3625 rc = -ENODEV; 3626 goto out; 3627 } else { 3628 /* rport partially created/deleted */ 3629 DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3630 DEV_VERIFY(!"Invalid rport_state"); 3631 rc = -ENODEV; 3632 goto out; 3633 } 3634 3635 /* 3636 * We have successfully found a rport to delete. Call 3637 * nvmf_fc_adm_i_t_delete_assoc(), which will perform further 3638 * IT-delete processing as well as free the cb_data. 3639 */ 3640 nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb, 3641 (void *)cb_data); 3642 3643 out: 3644 if (rc != 0) { 3645 /* 3646 * We have entered here because either we encountered an 3647 * error, or we did not find a rport to delete. 3648 * As a result, we will not call the function 3649 * nvmf_fc_adm_i_t_delete_assoc() for further IT-delete 3650 * processing. Therefore, execute the callback function now. 3651 */ 3652 if (cb_data) { 3653 free(cb_data); 3654 } 3655 if (api_data->cb_func != NULL) { 3656 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc); 3657 } 3658 } 3659 3660 snprintf(log_str, sizeof(log_str), 3661 "IT delete on nport:%d end.
num_rport:%d rc = %d.\n", 3662 args->nport_handle, num_rport, rc); 3663 3664 if (rc != 0) { 3665 SPDK_ERRLOG("%s", log_str); 3666 } else { 3667 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3668 } 3669 3670 free(arg); 3671 } 3672 3673 /* 3674 * Process a received ABTS. 3675 */ 3676 static void 3677 nvmf_fc_adm_evnt_abts_recv(void *arg) 3678 { 3679 ASSERT_SPDK_FC_MAIN_THREAD(); 3680 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3681 struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args; 3682 struct spdk_nvmf_fc_nport *nport = NULL; 3683 int err = 0; 3684 3685 SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi, 3686 args->oxid, args->rxid); 3687 3688 /* 3689 * 1. Make sure the nport exists. 3690 */ 3691 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3692 if (nport == NULL) { 3693 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3694 err = -EINVAL; 3695 goto out; 3696 } 3697 3698 /* 3699 * 2. If the nport is in the process of being deleted, drop the ABTS. 3700 */ 3701 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3702 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3703 "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n", 3704 args->rpi, args->oxid, args->rxid); 3705 err = 0; 3706 goto out; 3707 3708 } 3709 3710 /* 3711 * 3. Pass the received ABTS-LS to the library for handling. 3712 */ 3713 nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid); 3714 3715 out: 3716 if (api_data->cb_func != NULL) { 3717 /* 3718 * Pass the args struct in the cb_ctx position; the cb_func takes 3719 * ownership and is expected to free it. 3720 */ 3721 (void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err); 3722 } else { 3723 /* No callback set, free the args */ 3724 free(args); 3725 } 3726 3727 free(arg); 3728 } 3729 3730 /* 3731 * Callback function for hw port quiesce. 3732 */ 3733 static void 3734 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err) 3735 { 3736 ASSERT_SPDK_FC_MAIN_THREAD(); 3737 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx = 3738 (struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx; 3739 struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args; 3740 spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func; 3741 struct spdk_nvmf_fc_queue_dump_info dump_info; 3742 struct spdk_nvmf_fc_port *fc_port = NULL; 3743 char *dump_buf = NULL; 3744 uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE; 3745 3746 /* 3747 * Free the callback context struct. 3748 */ 3749 free(ctx); 3750 3751 if (err != 0) { 3752 SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle); 3753 goto out; 3754 } 3755 3756 if (args->dump_queues == false) { 3757 /* 3758 * Queues need not be dumped. 3759 */ 3760 goto out; 3761 } 3762 3763 SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle); 3764 3765 /* 3766 * Get the fc port. 3767 */ 3768 fc_port = nvmf_fc_port_lookup(args->port_handle); 3769 if (fc_port == NULL) { 3770 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3771 err = -EINVAL; 3772 goto out; 3773 } 3774 3775 /* 3776 * Allocate memory for the dump buffer. 3777 * This memory will be freed by FCT.
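* (SPDK_FC_HW_DUMP_BUF_SIZE bytes; the buffer pointer is handed back to
* the caller through args->dump_buf.)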
3778 */ 3779 dump_buf = (char *)calloc(1, dump_buf_size); 3780 if (dump_buf == NULL) { 3781 err = -ENOMEM; 3782 SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle); 3783 goto out; 3784 } 3785 *args->dump_buf = (uint32_t *)dump_buf; 3786 dump_info.buffer = dump_buf; 3787 dump_info.offset = 0; 3788 3789 /* 3790 * Add the dump reason to the top of the buffer. 3791 */ 3792 nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason); 3793 3794 /* 3795 * Dump the hwqp. 3796 */ 3797 nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues, 3798 fc_port->num_io_queues, &dump_info); 3799 3800 out: 3801 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n", 3802 args->port_handle, args->dump_queues, err); 3803 3804 if (cb_func != NULL) { 3805 (void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3806 } 3807 } 3808 3809 /* 3810 * HW port reset. 3811 * The port is quiesced and, optionally, its queues are dumped. 3812 */ 3813 static void 3814 nvmf_fc_adm_evnt_hw_port_reset(void *arg) 3815 { 3816 ASSERT_SPDK_FC_MAIN_THREAD(); 3817 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3818 struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *) 3819 api_data->api_args; 3820 struct spdk_nvmf_fc_port *fc_port = NULL; 3821 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL; 3822 int err = 0; 3823 3824 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle); 3825 3826 /* 3827 * Make sure the physical port exists. 3828 */ 3829 fc_port = nvmf_fc_port_lookup(args->port_handle); 3830 if (fc_port == NULL) { 3831 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3832 err = -EINVAL; 3833 goto out; 3834 } 3835 3836 /* 3837 * Save the reset event args and the callback in a context struct. 3838 */ 3839 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx)); 3840 3841 if (ctx == NULL) { 3842 err = -ENOMEM; 3843 SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle); 3844 goto fail; 3845 } 3846 3847 ctx->reset_args = args; 3848 ctx->reset_cb_func = api_data->cb_func; 3849 3850 /* 3851 * Quiesce the hw port. 3852 */ 3853 err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb); 3854 if (err != 0) { 3855 goto fail; 3856 } 3857 3858 /* 3859 * Once the ports are successfully quiesced the reset processing 3860 * will continue in the callback function: nvmf_fc_adm_hw_port_quiesce_reset_cb 3861 */ 3862 return; 3863 fail: 3864 free(ctx); 3865 3866 out: 3867 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle, 3868 err); 3869 3870 if (api_data->cb_func != NULL) { 3871 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3872 } 3873 3874 free(arg); 3875 } 3876 3877 static inline void 3878 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args) 3879 { 3880 if (nvmf_fc_get_main_thread()) { 3881 spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args); 3882 } 3883 } 3884 3885 /* 3886 * Queue up an event in the SPDK main thread's event queue. 3887 * Used by the FC driver to notify the SPDK main thread of FC-related events.
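* Example (sketch; my_abts_done_cb is a hypothetical driver callback):
*
*	struct spdk_nvmf_fc_abts_args *args = calloc(1, sizeof(*args));
*	args->port_handle = ...; args->oxid = ...; args->rxid = ...;
*	if (nvmf_fc_main_enqueue_event(SPDK_FC_ABTS_RECV, args, my_abts_done_cb)) {
*		free(args);
*	}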
3888 */ 3889 int 3890 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args, 3891 spdk_nvmf_fc_callback cb_func) 3892 { 3893 int err = 0; 3894 struct spdk_nvmf_fc_adm_api_data *api_data = NULL; 3895 spdk_msg_fn event_fn = NULL; 3896 3897 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type); 3898 3899 if (event_type >= SPDK_FC_EVENT_MAX) { 3900 SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type); 3901 err = -EINVAL; 3902 goto done; 3903 } 3904 3905 if (args == NULL) { 3906 SPDK_ERRLOG("Null args for event %d.\n", event_type); 3907 err = -EINVAL; 3908 goto done; 3909 } 3910 3911 api_data = calloc(1, sizeof(*api_data)); 3912 3913 if (api_data == NULL) { 3914 SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type); 3915 err = -ENOMEM; 3916 goto done; 3917 } 3918 3919 api_data->api_args = args; 3920 api_data->cb_func = cb_func; 3921 3922 switch (event_type) { 3923 case SPDK_FC_HW_PORT_INIT: 3924 event_fn = nvmf_fc_adm_evnt_hw_port_init; 3925 break; 3926 3927 case SPDK_FC_HW_PORT_FREE: 3928 event_fn = nvmf_fc_adm_evnt_hw_port_free; 3929 break; 3930 3931 case SPDK_FC_HW_PORT_ONLINE: 3932 event_fn = nvmf_fc_adm_evnt_hw_port_online; 3933 break; 3934 3935 case SPDK_FC_HW_PORT_OFFLINE: 3936 event_fn = nvmf_fc_adm_evnt_hw_port_offline; 3937 break; 3938 3939 case SPDK_FC_NPORT_CREATE: 3940 event_fn = nvmf_fc_adm_evnt_nport_create; 3941 break; 3942 3943 case SPDK_FC_NPORT_DELETE: 3944 event_fn = nvmf_fc_adm_evnt_nport_delete; 3945 break; 3946 3947 case SPDK_FC_IT_ADD: 3948 event_fn = nvmf_fc_adm_evnt_i_t_add; 3949 break; 3950 3951 case SPDK_FC_IT_DELETE: 3952 event_fn = nvmf_fc_adm_evnt_i_t_delete; 3953 break; 3954 3955 case SPDK_FC_ABTS_RECV: 3956 event_fn = nvmf_fc_adm_evnt_abts_recv; 3957 break; 3958 3959 case SPDK_FC_HW_PORT_RESET: 3960 event_fn = nvmf_fc_adm_evnt_hw_port_reset; 3961 break; 3962 3963 case SPDK_FC_UNRECOVERABLE_ERR: 3964 default: 3965 SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type); 3966 err = -EINVAL; 3967 break; 3968 } 3969 3970 done: 3971 3972 if (err == 0) { 3973 assert(event_fn != NULL); 3974 nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data); 3975 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type); 3976 } else { 3977 SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err); 3978 if (api_data) { 3979 free(api_data); 3980 } 3981 } 3982 3983 return err; 3984 } 3985 3986 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc); 3987 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api) 3988 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc) 3989
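/*
 * With the registration above, the "FC" transport is selectable by name. A
 * minimal creation sketch (assuming this SPDK version still exposes the
 * synchronous spdk_nvmf_transport_create(); newer releases use an _async
 * variant), normally driven through the nvmf_create_transport RPC:
 *
 *	struct spdk_nvmf_transport_opts opts;
 *	struct spdk_nvmf_transport *transport;
 *
 *	spdk_nvmf_transport_opts_init("FC", &opts, sizeof(opts));
 *	transport = spdk_nvmf_transport_create("FC", &opts);
 */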