/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of the copyright holder nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe/FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
	DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED		= 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE			= 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION			= 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION			= 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED	= 0x0080,
};

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
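/*
 * Must be kept in sync with enum spdk_nvmf_fc_request_state;
 * nvmf_fc_request_get_state_str() below indexes this array by state.
 */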
#define OBJECT_NVMF_FC_IO		0xA0

#define TRACE_GROUP_NVMF_FC		0x8
#define TRACE_FC_REQ_INIT		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED	SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)

SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
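/*
 * These tracepoints record per-request state transitions under the
 * "nvmf_fc" trace group; when that group is enabled on a running target,
 * the I/O lifecycle below can be reconstructed with SPDK's trace tooling.
 */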
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Callback function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req), 0);
	}
}
static void
nvmf_fc_handle_connection_failure(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

	if (!fc_conn->create_opd) {
		return;
	}
	api_data = &fc_conn->create_opd->u.add_conn;

	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
				    api_data->args.fc_conn, api_data->aq_conn);
}

static void
nvmf_fc_handle_assoc_deletion(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;

	nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
				   fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
}

static int
nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t i;
	struct spdk_nvmf_fc_request *fc_req;

	TAILQ_INIT(&hwqp->free_reqs);
	TAILQ_INIT(&hwqp->in_use_reqs);

	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
	if (hwqp->fc_reqs_buf == NULL) {
		SPDK_ERRLOG("create fc request pool failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hwqp->rq_size; i++) {
		fc_req = hwqp->fc_reqs_buf + i;

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
	}

	return 0;
}
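/*
 * Request objects are pre-allocated per hwqp by the routine above and then
 * recycled through the free_reqs/in_use_reqs lists, so the hot-path
 * alloc/free below are O(1) list operations with no allocator calls.
 */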
static inline struct spdk_nvmf_fc_request *
nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_request *fc_req;

	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}

	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);

	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Record the failure state for debug purposes. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* set the magic to mark req as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
}

struct spdk_nvmf_fc_conn *
nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
		if (fc_conn->conn_id == conn_id) {
			return fc_conn;
		}
	}

	return NULL;
}

static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}

void
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	if (&fc_port->ls_queue != hwqp) {
		nvmf_fc_create_req_mempool(hwqp);
	}

	TAILQ_INIT(&hwqp->connection_list);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);

	/* Init low level driver queues */
	nvmf_fc_init_q(hwqp);
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_get_idlest_poll_group(void)
{
	uint32_t min_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	/* find the poll group with the least number of hwqp's assigned to it */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < min_count) {
			ret_fgroup = fgroup;
			min_count = fgroup->hwqp_count;
		}
	}

	return ret_fgroup;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_poll_group *fgroup = NULL;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	fgroup = nvmf_fc_get_idlest_poll_group();
	if (!fgroup) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	hwqp->thread = fgroup->group.group->thread;
	hwqp->fgroup = fgroup;
	fgroup->hwqp_count++;
	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		hwqp->fgroup->hwqp_count--;
		nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
	}
}

/*
 * Note: This needs to be used only on the main poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}
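/*
 * ABTS handling overview:
 *  1. nvmf_fc_handle_abts_frame() fans the ABTS out to every hwqp that has
 *     a connection for the initiator's RPI (poller API ABTS_RECEIVED).
 *  2. nvmf_fc_abts_handled_cb() collects the responses. If no hwqp matched
 *     the exchange, the queues are synchronized once via
 *     nvmf_fc_handle_abts_notfound()/nvmf_fc_queue_synced_cb() and the ABTS
 *     is replayed, closing the race with commands still in flight.
 *  3. Finally, a BLS accept or reject is transmitted to the initiator.
 */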
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if the FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer\n");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post a marker to the queue to track the aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	/* Use the saved handle here; ctx->nport may have been deleted above. */
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This one is already present. */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}

/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}
/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

static void
nvmf_fc_port_cleanup(void)
{
	struct spdk_nvmf_fc_port *fc_port, *tmp;
	struct spdk_nvmf_fc_hwqp *hwqp;
	uint32_t i;

	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);
		for (i = 0; i < fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			if (hwqp->fc_reqs_buf) {
				free(hwqp->fc_reqs_buf);
			}
		}
		free(fc_port);
	}
}
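/*
 * PRLI service parameters advertised by this target: the discovery service
 * bit plus the target function bit, i.e. 0x0008 | 0x0010 = 0x0018.
 */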
uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}

static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}
/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

/* Note: these return 0/-EINVAL, so they are declared int, not bool. */
int
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}

bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}

static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) runs
	 * to completion, so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}

void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}

	SPDK_NOTICELOG("FC Request(%p) in state %s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);
}
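/*
 * Abort a request. The action depends on where the request currently is:
 * queued at the bdev layer (message the bdev abort handler), in an active
 * FC transfer (abort the exchange on the HBA), or still pending buffers
 * (simply dequeue). In every case the completion is reported through the
 * poller API so cleanup happens on the owning poller thread.
 */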
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to the list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If the port is dead, skip the abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If an xchg is allocated, then save whether we need to send an abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted = true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}
static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send frame for this command. */
	if (!nvmf_fc_use_send_frame(&fc_req->req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped; return success to the caller. */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
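/*
 * Validate and start a received FCP command: check the command IU format,
 * look up the connection by ID, verify the frame's S_ID/D_ID against the
 * connection, enforce MDTS, then either execute the request or park it on
 * the poll group's pending-buffer queue.
 */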
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if the conn id is valid */
	fc_conn = nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If the association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure the xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}

/*
 * These functions are called from the FC LLD
 */

void
_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_fc_hwqp *hwqp;
	struct spdk_nvmf_transport_poll_group *group;

	if (!fc_req) {
		return;
	}
	/* Only dereference fc_req after the NULL check above. */
	hwqp = fc_req->hwqp;

	if (fc_req->xchg) {
		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		group = &hwqp->fgroup->group;
		spdk_nvmf_request_free_buffers(&fc_req->req, group,
					       group->transport);
	}
	fc_req->req.data = NULL;
	fc_req->req.iovcnt = 0;

	/* Free the FC request */
	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
}
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
		fc_req_state_strs[state] : unk_str);
}

int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding the LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Hand over the request to the LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to the pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
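/*
 * Retry requests that are waiting for buffers or exchanges. The fixed
 * budget bounds how many pending requests are retried per poll so a single
 * hwqp cannot monopolize the poller thread.
 */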
void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* LS queue is tied to the acceptor_poll group and LS pending requests
		 * are staged and processed using hwqp->ls_pending_queue.
		 */
		return;
	}

	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
		if (!nvmf_fc_request_execute(fc_req)) {
			/* Successfully posted. Delete from pending. */
			nvmf_fc_request_remove_from_pending(fc_req);
		}

		if (budget) {
			budget--;
		} else {
			return;
		}
	}
}

void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Hand over the request to the LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
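/*
 * Response path: a full extended response (ERSP IU) is only built when
 * nvmf_fc_send_ersp_required() says so; otherwise a lightweight RSP is
 * posted, letting the initiator complete the exchange without a payload.
 */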
int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set the sq head value in the resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transfered_len)) {
		/* Fill ERSP Len */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
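/*
 * For example, with esrp_ratio == 16 and a clean completion whose transfer
 * length matches the command IU, only every 16th response on the connection
 * carries an ERSP; error status, Fabric commands, CDW0/CDW1 payloads, or a
 * short transfer force one immediately. (status & 0xFFFE masks off the
 * phase-tag bit, so any non-zero status code/type triggers an ERSP.)
 */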
bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send an ERSP:
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}

static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we don't call io cleanup in the same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}

struct spdk_nvmf_tgt *
nvmf_fc_get_tgt(void)
{
	if (g_nvmf_ftransport) {
		return g_nvmf_ftransport->transport.tgt;
	}
	return NULL;
}

/*
 * FC Transport Public API begins here
 */

#define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
#define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
#define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
#define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
#define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
#define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
#define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
#define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)

static void
nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
{
	opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
	opts->max_io_size = SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
	opts->io_unit_size = SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
	opts->max_aq_depth = SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
	opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
}
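/*
 * With the defaults above, SPDK_NVMF_FC_DEFAULT_MAX_SGE works out to
 * 65536 / 4096 = 16 scatter-gather entries; nvmf_fc_create() below rejects
 * any opts combination that would require more.
 */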
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_main_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}

static int
nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	if (transport) {
		struct spdk_nvmf_fc_transport *ftransport;
		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;

		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);

		free(ftransport);
		/* Clear the global so stale lookups (e.g. nvmf_fc_get_tgt) fail safely. */
		g_nvmf_ftransport = NULL;

		/* clean up any FC poll groups still around */
		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
			free(fgroup);
		}
		g_nvmf_fgroup_count = 0;

		/* low level FC driver clean up */
		nvmf_fc_lld_fini();

		nvmf_fc_port_cleanup();
	}

	if (cb_fn) {
		cb_fn(cb_arg);
	}
	return 0;
}

static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}

static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}

static uint32_t
nvmf_fc_accept(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	uint32_t count = 0;
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			count += nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}

	return count;
}
SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 1935 pthread_mutex_lock(&ftransport->lock); 1936 TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 1937 g_nvmf_fgroup_count--; 1938 pthread_mutex_unlock(&ftransport->lock); 1939 1940 free(fgroup); 1941 } 1942 1943 static int 1944 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 1945 struct spdk_nvmf_qpair *qpair) 1946 { 1947 struct spdk_nvmf_fc_poll_group *fgroup; 1948 struct spdk_nvmf_fc_conn *fc_conn; 1949 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 1950 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 1951 bool hwqp_found = false; 1952 1953 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 1954 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 1955 1956 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 1957 if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) { 1958 hwqp_found = true; 1959 break; 1960 } 1961 } 1962 1963 if (!hwqp_found) { 1964 SPDK_ERRLOG("No valid hwqp found for new QP.\n"); 1965 goto err; 1966 } 1967 1968 if (!nvmf_fc_assign_conn_to_hwqp(hwqp, 1969 &fc_conn->conn_id, 1970 fc_conn->max_queue_depth)) { 1971 SPDK_ERRLOG("Failed to get a connection id for new QP.\n"); 1972 goto err; 1973 } 1974 1975 fc_conn->hwqp = hwqp; 1976 1977 /* If this is for the admin connection, then update the association ID. */ 1978 if (fc_conn->qpair.qid == 0) { 1979 fc_conn->fc_assoc->assoc_id = fc_conn->conn_id; 1980 } 1981 1982 api_data = &fc_conn->create_opd->u.add_conn; 1983 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args); 1984 return 0; 1985 err: 1986 return -1; 1987 } 1988 1989 static int 1990 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 1991 { 1992 uint32_t count = 0; 1993 struct spdk_nvmf_fc_poll_group *fgroup; 1994 struct spdk_nvmf_fc_hwqp *hwqp; 1995 1996 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 1997 1998 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 1999 if (hwqp->state == SPDK_FC_HWQP_ONLINE) { 2000 count += nvmf_fc_process_queue(hwqp); 2001 } 2002 } 2003 2004 return (int) count; 2005 } 2006 2007 static int 2008 nvmf_fc_request_free(struct spdk_nvmf_request *req) 2009 { 2010 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 2011 2012 if (!fc_req->is_aborted) { 2013 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED); 2014 nvmf_fc_request_abort(fc_req, true, NULL, NULL); 2015 } else { 2016 nvmf_fc_request_abort_complete(fc_req); 2017 } 2018 2019 return 0; 2020 } 2021 2022 static void 2023 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair, 2024 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 2025 { 2026 struct spdk_nvmf_fc_conn *fc_conn; 2027 2028 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2029 2030 if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) { 2031 /* QP creation failure in FC transport. Cleanup.
*/ 2032 spdk_thread_send_msg(nvmf_fc_get_main_thread(), 2033 nvmf_fc_handle_connection_failure, fc_conn); 2034 } else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id && 2035 fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 2036 /* Admin connection */ 2037 spdk_thread_send_msg(nvmf_fc_get_main_thread(), 2038 nvmf_fc_handle_assoc_deletion, fc_conn); 2039 } 2040 2041 if (cb_fn) { 2042 cb_fn(cb_arg); 2043 } 2044 } 2045 2046 static int 2047 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2048 struct spdk_nvme_transport_id *trid) 2049 { 2050 struct spdk_nvmf_fc_conn *fc_conn; 2051 2052 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2053 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2054 return 0; 2055 } 2056 2057 static int 2058 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2059 struct spdk_nvme_transport_id *trid) 2060 { 2061 struct spdk_nvmf_fc_conn *fc_conn; 2062 2063 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2064 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2065 return 0; 2066 } 2067 2068 static int 2069 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2070 struct spdk_nvme_transport_id *trid) 2071 { 2072 struct spdk_nvmf_fc_conn *fc_conn; 2073 2074 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2075 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2076 return 0; 2077 } 2078 2079 static void 2080 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 2081 struct spdk_nvmf_request *req) 2082 { 2083 spdk_nvmf_request_complete(req); 2084 } 2085 2086 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = { 2087 .name = "FC", 2088 .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC, 2089 .opts_init = nvmf_fc_opts_init, 2090 .create = nvmf_fc_create, 2091 .destroy = nvmf_fc_destroy, 2092 2093 .listen = nvmf_fc_listen, 2094 .stop_listen = nvmf_fc_stop_listen, 2095 .accept = nvmf_fc_accept, 2096 2097 .listener_discover = nvmf_fc_discover, 2098 2099 .poll_group_create = nvmf_fc_poll_group_create, 2100 .poll_group_destroy = nvmf_fc_poll_group_destroy, 2101 .poll_group_add = nvmf_fc_poll_group_add, 2102 .poll_group_poll = nvmf_fc_poll_group_poll, 2103 2104 .req_complete = nvmf_fc_request_complete, 2105 .req_free = nvmf_fc_request_free, 2106 .qpair_fini = nvmf_fc_close_qpair, 2107 .qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid, 2108 .qpair_get_local_trid = nvmf_fc_qpair_get_local_trid, 2109 .qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid, 2110 .qpair_abort_request = nvmf_fc_qpair_abort_request, 2111 }; 2112 2113 /* Initializes the data for the creation of an FC-Port object in the SPDK 2114 * library. The spdk_nvmf_fc_port is a well-defined structure that is part of 2115 * the API to the library. The contents added to this well-defined structure 2116 * are private to each vendor's implementation. 2117 */ 2118 static int 2119 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port, 2120 struct spdk_nvmf_fc_hw_port_init_args *args) 2121 { 2122 /* Use a high number for the LS HWQP so that it does not clash with the 2123 * IO HWQPs and immediately shows up as an LS queue during tracing. (For 2123a * example, if SPDK_MAX_NUM_OF_FC_PORTS were 4, a port with 4 IO queues 2123b * would give its LS queue hwqp_id 4 * 4 = 16, clear of its IO hwqp_ids 0-3.) 2124 */ 2125 uint32_t i; 2126 2127 fc_port->port_hdl = args->port_handle; 2128 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2129 fc_port->fcp_rq_id = args->fcp_rq_id; 2130 fc_port->num_io_queues = args->io_queue_cnt; 2131 2132 /* 2133 * Set port context from init args. Used for FCP port stats.
2134 */ 2135 fc_port->port_ctx = args->port_ctx; 2136 2137 /* 2138 * Initialize the LS queue wherever needed. 2139 */ 2140 fc_port->ls_queue.queues = args->ls_queue; 2141 fc_port->ls_queue.thread = nvmf_fc_get_main_thread(); 2142 fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues; 2143 2144 /* 2145 * Initialize the LS queue. 2146 */ 2147 nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue); 2148 2149 /* 2150 * Initialize the IO queues. 2151 */ 2152 for (i = 0; i < args->io_queue_cnt; i++) { 2153 struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i]; 2154 hwqp->hwqp_id = i; 2155 hwqp->queues = args->io_queues[i]; 2156 hwqp->rq_size = args->io_queue_size; 2157 nvmf_fc_init_hwqp(fc_port, hwqp); 2158 } 2159 2160 /* 2161 * Initialize the LS processing for port 2162 */ 2163 nvmf_fc_ls_init(fc_port); 2164 2165 /* 2166 * Initialize the list of nport on this HW port. 2167 */ 2168 TAILQ_INIT(&fc_port->nport_list); 2169 fc_port->num_nports = 0; 2170 2171 return 0; 2172 } 2173 2174 /* 2175 * FC port must have all its nports deleted before transitioning to offline state. 2176 */ 2177 static void 2178 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port) 2179 { 2180 struct spdk_nvmf_fc_nport *nport = NULL; 2181 /* All nports must have been deleted at this point for this fc port */ 2182 DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list)); 2183 DEV_VERIFY(fc_port->num_nports == 0); 2184 /* Mark the nport states to be zombie, if they exist */ 2185 if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) { 2186 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 2187 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2188 } 2189 } 2190 } 2191 2192 static void 2193 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err) 2194 { 2195 ASSERT_SPDK_FC_MAIN_THREAD(); 2196 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args; 2197 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2198 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2199 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2200 int spdk_err = 0; 2201 uint8_t port_handle = cb_data->port_handle; 2202 uint32_t s_id = rport->s_id; 2203 uint32_t rpi = rport->rpi; 2204 uint32_t assoc_count = rport->assoc_count; 2205 uint32_t nport_hdl = nport->nport_hdl; 2206 uint32_t d_id = nport->d_id; 2207 char log_str[256]; 2208 2209 /* 2210 * Assert on any delete failure. 
*/ 2212 if (0 != err) { 2213 DEV_VERIFY(!"Error in IT Delete callback."); 2214 goto out; 2215 } 2216 2217 if (cb_func != NULL) { 2218 (void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2219 } 2220 2221 out: 2222 free(cb_data); 2223 2224 snprintf(log_str, sizeof(log_str), 2225 "IT delete cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n", 2226 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err); 2227 2228 if (err != 0) { 2229 SPDK_ERRLOG("%s", log_str); 2230 } else { 2231 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2232 } 2233 } 2234 2235 static void 2236 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err) 2237 { 2238 ASSERT_SPDK_FC_MAIN_THREAD(); 2239 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args; 2240 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2241 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2242 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func; 2243 uint32_t s_id = rport->s_id; 2244 uint32_t rpi = rport->rpi; 2245 uint32_t assoc_count = rport->assoc_count; 2246 uint32_t nport_hdl = nport->nport_hdl; 2247 uint32_t d_id = nport->d_id; 2248 char log_str[256]; 2249 2250 /* 2251 * Assert on any association delete failure. We continue to delete the other 2252 * associations in release builds, where DEV_VERIFY is compiled out. 2253 */ 2254 if (0 != err) { 2255 DEV_VERIFY(!"Nport's association delete callback returned error"); 2256 if (nport->assoc_count > 0) { 2257 nport->assoc_count--; 2258 } 2259 if (rport->assoc_count > 0) { 2260 rport->assoc_count--; 2261 } 2262 } 2263 2264 /* 2265 * If this is the last association being deleted for the ITN, 2266 * execute the callback(s). 2267 */ 2268 if (0 == rport->assoc_count) { 2269 /* Remove the rport from the remote port list. */ 2270 if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) { 2271 SPDK_ERRLOG("Error while removing rport from list.\n"); 2272 DEV_VERIFY(!"Error while removing rport from list."); 2273 } 2274 2275 if (cb_func != NULL) { 2276 /* 2277 * Callback function is provided by the caller 2278 * of nvmf_fc_adm_i_t_delete_assoc(). 2279 */ 2280 (void)cb_func(cb_data->cb_ctx, 0); 2281 } 2282 free(rport); 2283 free(args); 2284 } 2285 2286 snprintf(log_str, sizeof(log_str), 2287 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n", 2288 nport_hdl, s_id, d_id, rpi, assoc_count, err); 2289 2290 if (err != 0) { 2291 SPDK_ERRLOG("%s", log_str); 2292 } else { 2293 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2294 } 2295 } 2296 2297 /** 2298 * Delete all associations on an I-T nexus (remote port). 2299 */ 2300 static void 2301 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport, 2302 struct spdk_nvmf_fc_remote_port_info *rport, 2303 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func, 2304 void *cb_ctx) 2305 { 2306 int err = 0; 2307 struct spdk_nvmf_fc_association *assoc = NULL; 2308 int assoc_err = 0; 2309 uint32_t num_assoc = 0; 2310 uint32_t num_assoc_del_scheduled = 0; 2311 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL; 2312 uint8_t port_hdl = nport->port_hdl; 2313 uint32_t s_id = rport->s_id; 2314 uint32_t rpi = rport->rpi; 2315 uint32_t assoc_count = rport->assoc_count; 2316 char log_str[256]; 2317 2318 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n", 2319 nport->nport_hdl); 2320 2321 /* 2322 * Allocate memory for callback data. 2323 * This memory will be freed by the callback function.
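 * (Specifically: nvmf_fc_adm_i_t_delete_assoc_cb() frees both this cb_data
 * and the rport once the last association delete for the I-T nexus completes.)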
*/ 2325 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data)); 2326 if (NULL == cb_data) { 2327 SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl); 2328 err = -ENOMEM; 2329 goto out; 2330 } 2331 cb_data->nport = nport; 2332 cb_data->rport = rport; 2333 cb_data->port_handle = port_hdl; 2334 cb_data->cb_func = cb_func; 2335 cb_data->cb_ctx = cb_ctx; 2336 2337 /* 2338 * Delete all associations, if any, associated with this ITN/remote_port. 2339 */ 2340 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 2341 num_assoc++; 2342 if (assoc->s_id == s_id) { 2343 assoc_err = nvmf_fc_delete_association(nport, 2344 assoc->assoc_id, 2345 false /* send abts */, false, 2346 nvmf_fc_adm_i_t_delete_assoc_cb, cb_data); 2347 if (0 != assoc_err) { 2348 /* 2349 * Mark this association as zombie. 2350 */ 2351 err = -EINVAL; 2352 DEV_VERIFY(!"Error while deleting association"); 2353 (void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2354 } else { 2355 num_assoc_del_scheduled++; 2356 } 2357 } 2358 } 2359 2360 out: 2361 if ((cb_data) && (num_assoc_del_scheduled == 0)) { 2362 /* 2363 * Since there are no association_delete calls 2364 * successfully scheduled, the association_delete 2365 * callback function will never be called. 2366 * In this case, call the callback function now. 2367 */ 2368 nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0); 2369 } 2370 2371 snprintf(log_str, sizeof(log_str), 2372 "IT delete associations on nport:%d end. " 2373 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n", 2374 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err); 2375 2376 if (err == 0) { 2377 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2378 } else { 2379 SPDK_ERRLOG("%s", log_str); 2380 } 2381 } 2382 2383 static void 2384 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 2385 { 2386 ASSERT_SPDK_FC_MAIN_THREAD(); 2387 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL; 2388 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2389 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2390 struct spdk_nvmf_fc_port *fc_port = NULL; 2391 int err = 0; 2392 2393 quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data; 2394 hwqp = quiesce_api_data->hwqp; 2395 fc_port = hwqp->fc_port; 2396 port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx; 2397 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func; 2398 2399 /* 2400 * Decrement the callback/quiesced queue count. 2401 */ 2402 port_quiesce_ctx->quiesce_count--; 2403 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue %d quiesced\n", quiesce_api_data->hwqp->hwqp_id); 2404 2405 free(quiesce_api_data); 2406 /* 2407 * Wait for all callbacks, i.e., one per IO queue plus the LS queue. 2408 */ 2409 if (port_quiesce_ctx->quiesce_count > 0) { 2410 return; 2411 } 2412 2413 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2414 SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl); 2415 } else { 2416 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl); 2417 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2418 } 2419 2420 if (cb_func) { 2421 /* 2422 * Callback function for the caller of quiesce. 2423 */ 2424 cb_func(port_quiesce_ctx->ctx, err); 2425 } 2426 2427 /* 2428 * Free the context structure.
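 * (Only the last quiesce callback gets here: quiesce_count is incremented
 * once for the LS queue and once per IO queue when the requests are issued,
 * and each invocation of this callback decrements it and returns early while
 * it is still non-zero.)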
2429 */ 2430 free(port_quiesce_ctx); 2431 2432 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl, 2433 err); 2434 } 2435 2436 static int 2437 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx, 2438 spdk_nvmf_fc_poller_api_cb cb_func) 2439 { 2440 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args; 2441 enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS; 2442 int err = 0; 2443 2444 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args)); 2445 2446 if (args == NULL) { 2447 err = -ENOMEM; 2448 SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id); 2449 goto done; 2450 } 2451 args->hwqp = fc_hwqp; 2452 args->ctx = ctx; 2453 args->cb_info.cb_func = cb_func; 2454 args->cb_info.cb_data = args; 2455 args->cb_info.cb_thread = spdk_get_thread(); 2456 2457 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id); 2458 rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args); 2459 if (rc) { 2460 free(args); 2461 err = -EINVAL; 2462 } 2463 2464 done: 2465 return err; 2466 } 2467 2468 /* 2469 * Hw port Quiesce 2470 */ 2471 static int 2472 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx, 2473 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func) 2474 { 2475 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2476 uint32_t i = 0; 2477 int err = 0; 2478 2479 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl); 2480 2481 /* 2482 * If the port is in an OFFLINE state, set the state to QUIESCED 2483 * and execute the callback. 2484 */ 2485 if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) { 2486 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2487 } 2488 2489 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2490 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n", 2491 fc_port->port_hdl); 2492 /* 2493 * Execute the callback function directly. 2494 */ 2495 cb_func(ctx, err); 2496 goto out; 2497 } 2498 2499 port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx)); 2500 2501 if (port_quiesce_ctx == NULL) { 2502 err = -ENOMEM; 2503 SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n", 2504 fc_port->port_hdl); 2505 goto out; 2506 } 2507 2508 port_quiesce_ctx->quiesce_count = 0; 2509 port_quiesce_ctx->ctx = ctx; 2510 port_quiesce_ctx->cb_func = cb_func; 2511 2512 /* 2513 * Quiesce the LS queue. 2514 */ 2515 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx, 2516 nvmf_fc_adm_queue_quiesce_cb); 2517 if (err != 0) { 2518 SPDK_ERRLOG("Failed to quiesce the LS queue.\n"); 2519 goto out; 2520 } 2521 port_quiesce_ctx->quiesce_count++; 2522 2523 /* 2524 * Quiesce the IO queues. 2525 */ 2526 for (i = 0; i < fc_port->num_io_queues; i++) { 2527 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i], 2528 port_quiesce_ctx, 2529 nvmf_fc_adm_queue_quiesce_cb); 2530 if (err != 0) { 2531 DEV_VERIFY(0); 2532 SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id); 2533 } 2534 port_quiesce_ctx->quiesce_count++; 2535 } 2536 2537 out: 2538 if (port_quiesce_ctx && err != 0) { 2539 free(port_quiesce_ctx); 2540 } 2541 return err; 2542 } 2543 2544 /* 2545 * Initialize and add a HW port entry to the global 2546 * HW port list. 
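 * (The port object and its IO queue array come from a single allocation: the
 * calloc below lays out the spdk_nvmf_fc_port followed by io_queue_cnt
 * spdk_nvmf_fc_hwqp entries, with io_queues pointed just past the port.)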
*/ 2548 static void 2549 nvmf_fc_adm_evnt_hw_port_init(void *arg) 2550 { 2551 ASSERT_SPDK_FC_MAIN_THREAD(); 2552 struct spdk_nvmf_fc_port *fc_port = NULL; 2553 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2554 struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *) 2555 api_data->api_args; 2556 int err = 0; 2557 2558 if (args->io_queue_cnt > spdk_env_get_core_count()) { 2559 SPDK_ERRLOG("IO queue count is greater than the core count for port %d.\n", args->port_handle); 2560 err = -EINVAL; 2561 goto abort_port_init; 2562 } 2563 2564 /* 2565 * 1. Check for duplicate initialization. 2566 */ 2567 fc_port = nvmf_fc_port_lookup(args->port_handle); 2568 if (fc_port != NULL) { 2569 SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle); 2570 goto abort_port_init; 2571 } 2572 2573 /* 2574 * 2. Get the memory to instantiate an FC port. 2575 */ 2576 fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) + 2577 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp))); 2578 if (fc_port == NULL) { 2579 SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle); 2580 err = -ENOMEM; 2581 goto abort_port_init; 2582 } 2583 2584 /* assign the io_queues array */ 2585 fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof( 2586 struct spdk_nvmf_fc_port)); 2587 2588 /* 2589 * 3. Initialize the contents of the FC port. 2590 */ 2591 err = nvmf_fc_adm_hw_port_data_init(fc_port, args); 2592 2593 if (err != 0) { 2594 SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle); 2595 DEV_VERIFY(!"Data initialization failed for fc_port"); 2596 goto abort_port_init; 2597 } 2598 2599 /* 2600 * 4. Add this port to the global fc port list in the library. 2601 */ 2602 nvmf_fc_port_add(fc_port); 2603 2604 abort_port_init: 2605 if (err && fc_port) { 2606 free(fc_port); 2607 } 2608 if (api_data->cb_func != NULL) { 2609 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err); 2610 } 2611 2612 free(arg); 2613 2614 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n", 2615 args->port_handle, err); 2616 } 2617 2618 /* 2619 * Online a HW port. 2620 */ 2621 static void 2622 nvmf_fc_adm_evnt_hw_port_online(void *arg) 2623 { 2624 ASSERT_SPDK_FC_MAIN_THREAD(); 2625 struct spdk_nvmf_fc_port *fc_port = NULL; 2626 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2627 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2628 struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *) 2629 api_data->api_args; 2630 int i = 0; 2631 int err = 0; 2632 2633 fc_port = nvmf_fc_port_lookup(args->port_handle); 2634 if (fc_port) { 2635 /* Set the port state to online */ 2636 err = nvmf_fc_port_set_online(fc_port); 2637 if (err != 0) { 2638 SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err); 2639 DEV_VERIFY(!"Hw port online failed"); 2640 goto out; 2641 } 2642 2643 hwqp = &fc_port->ls_queue; 2644 hwqp->context = NULL; 2645 (void)nvmf_fc_hwqp_set_online(hwqp); 2646 2647 /* Cycle through all the IO queues and set up a hwqp poller for each.
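 * (nvmf_fc_poll_group_add_hwqp() attaches each hwqp to one of the FC poll
 * groups created in nvmf_fc_poll_group_create(); nvmf_fc_poll_group_poll()
 * then services the hwqp from that group's hwqp_list.)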
*/ 2648 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2649 hwqp = &fc_port->io_queues[i]; 2650 hwqp->context = NULL; 2651 (void)nvmf_fc_hwqp_set_online(hwqp); 2652 nvmf_fc_poll_group_add_hwqp(hwqp); 2653 } 2654 } else { 2655 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2656 err = -EINVAL; 2657 } 2658 2659 out: 2660 if (api_data->cb_func != NULL) { 2661 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err); 2662 } 2663 2664 free(arg); 2665 2666 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle, 2667 err); 2668 } 2669 2670 /* 2671 * Offline a HW port. 2672 */ 2673 static void 2674 nvmf_fc_adm_evnt_hw_port_offline(void *arg) 2675 { 2676 ASSERT_SPDK_FC_MAIN_THREAD(); 2677 struct spdk_nvmf_fc_port *fc_port = NULL; 2678 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2679 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2680 struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *) 2681 api_data->api_args; 2682 int i = 0; 2683 int err = 0; 2684 2685 fc_port = nvmf_fc_port_lookup(args->port_handle); 2686 if (fc_port) { 2687 /* Set the port state to offline, if it is not already. */ 2688 err = nvmf_fc_port_set_offline(fc_port); 2689 if (err != 0) { 2690 SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err); 2691 err = 0; 2692 goto out; 2693 } 2694 2695 hwqp = &fc_port->ls_queue; 2696 (void)nvmf_fc_hwqp_set_offline(hwqp); 2697 2698 /* Remove poller for all the io queues. */ 2699 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2700 hwqp = &fc_port->io_queues[i]; 2701 (void)nvmf_fc_hwqp_set_offline(hwqp); 2702 nvmf_fc_poll_group_remove_hwqp(hwqp); 2703 } 2704 2705 /* 2706 * Delete all the nports. Ideally, the nports should have been purged 2707 * before the offline event, in which case, only a validation is required. 
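 * (In a clean shutdown the driver is expected to have issued
 * SPDK_FC_NPORT_DELETE for every nport beforehand, so the call below should
 * see an empty nport list and act purely as a sanity check.)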
2708 */ 2709 nvmf_fc_adm_hw_port_offline_nport_delete(fc_port); 2710 } else { 2711 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2712 err = -EINVAL; 2713 } 2714 out: 2715 if (api_data->cb_func != NULL) { 2716 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 2717 } 2718 2719 free(arg); 2720 2721 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle, 2722 err); 2723 } 2724 2725 struct nvmf_fc_add_rem_listener_ctx { 2726 struct spdk_nvmf_subsystem *subsystem; 2727 bool add_listener; 2728 struct spdk_nvme_transport_id trid; 2729 }; 2730 2731 static void 2732 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 2733 { 2734 ASSERT_SPDK_FC_MAIN_THREAD(); 2735 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 2736 free(ctx); 2737 } 2738 2739 static void 2740 nvmf_fc_adm_listen_done(void *cb_arg, int status) 2741 { 2742 ASSERT_SPDK_FC_MAIN_THREAD(); 2743 struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg; 2744 2745 if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) { 2746 SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn); 2747 free(ctx); 2748 } 2749 } 2750 2751 static void 2752 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 2753 { 2754 ASSERT_SPDK_FC_MAIN_THREAD(); 2755 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 2756 2757 if (ctx->add_listener) { 2758 spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx); 2759 } else { 2760 spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid); 2761 nvmf_fc_adm_listen_done(ctx, 0); 2762 } 2763 } 2764 2765 static int 2766 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add) 2767 { 2768 struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt(); 2769 struct spdk_nvmf_subsystem *subsystem; 2770 2771 if (!tgt) { 2772 SPDK_ERRLOG("No nvmf target defined\n"); 2773 return -EINVAL; 2774 } 2775 2776 subsystem = spdk_nvmf_subsystem_get_first(tgt); 2777 while (subsystem) { 2778 struct nvmf_fc_add_rem_listener_ctx *ctx; 2779 2780 if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) { 2781 ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx)); 2782 if (ctx) { 2783 ctx->add_listener = add; 2784 ctx->subsystem = subsystem; 2785 nvmf_fc_create_trid(&ctx->trid, 2786 nport->fc_nodename.u.wwn, 2787 nport->fc_portname.u.wwn); 2788 2789 if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) { 2790 SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n", 2791 ctx->trid.traddr); 2792 free(ctx); 2793 } else if (spdk_nvmf_subsystem_pause(subsystem, 2794 nvmf_fc_adm_subsystem_paused_cb, 2795 ctx)) { 2796 SPDK_ERRLOG("Failed to pause subsystem: %s\n", 2797 subsystem->subnqn); 2798 free(ctx); 2799 } 2800 } 2801 } 2802 2803 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 2804 } 2805 2806 return 0; 2807 } 2808 2809 /* 2810 * Create a Nport. 
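 * (Typically queued by the low level driver after a physical or NPIV virtual
 * port logs into the fabric; the port and nport handles in the args are
 * chosen by the driver.)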
*/ 2812 static void 2813 nvmf_fc_adm_evnt_nport_create(void *arg) 2814 { 2815 ASSERT_SPDK_FC_MAIN_THREAD(); 2816 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2817 struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *) 2818 api_data->api_args; 2819 struct spdk_nvmf_fc_nport *nport = NULL; 2820 struct spdk_nvmf_fc_port *fc_port = NULL; 2821 int err = 0; 2822 2823 /* 2824 * Get the physical port. 2825 */ 2826 fc_port = nvmf_fc_port_lookup(args->port_handle); 2827 if (fc_port == NULL) { 2828 err = -EINVAL; 2829 goto out; 2830 } 2831 2832 /* 2833 * Check for duplicate initialization. 2834 */ 2835 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 2836 if (nport != NULL) { 2837 SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle, 2838 args->port_handle); 2839 err = -EINVAL; 2840 goto out; 2841 } 2842 2843 /* 2844 * Get the memory to instantiate an FC nport. 2845 */ 2846 nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport)); 2847 if (nport == NULL) { 2848 SPDK_ERRLOG("Failed to allocate memory for nport %d.\n", 2849 args->nport_handle); 2850 err = -ENOMEM; 2851 goto out; 2852 } 2853 2854 /* 2855 * Initialize the contents of the nport. 2856 */ 2857 nport->nport_hdl = args->nport_handle; 2858 nport->port_hdl = args->port_handle; 2859 nport->nport_state = SPDK_NVMF_FC_OBJECT_CREATED; 2860 nport->fc_nodename = args->fc_nodename; 2861 nport->fc_portname = args->fc_portname; 2862 nport->d_id = args->d_id; 2863 nport->fc_port = fc_port; 2864 2865 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED); 2866 TAILQ_INIT(&nport->rem_port_list); 2867 nport->rport_count = 0; 2868 TAILQ_INIT(&nport->fc_associations); 2869 nport->assoc_count = 0; 2870 2871 /* 2872 * Add the nport address as a listening address to the nvmf subsystems. 2873 */ 2874 err = nvmf_fc_adm_add_rem_nport_listener(nport, true); 2875 2876 (void)nvmf_fc_port_add_nport(fc_port, nport); 2877 out: 2878 if (err && nport) { 2879 free(nport); 2880 } 2881 2882 if (api_data->cb_func != NULL) { 2883 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err); 2884 } 2885 2886 free(arg); 2887 } 2888 2889 static void 2890 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type, 2891 void *cb_args, int spdk_err) 2892 { 2893 ASSERT_SPDK_FC_MAIN_THREAD(); 2894 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args; 2895 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2896 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2897 int err = 0; 2898 uint16_t nport_hdl = 0; 2899 char log_str[256]; 2900 2901 /* 2902 * Assert on any delete failure. 2903 */ 2904 if (nport == NULL) { 2905 SPDK_ERRLOG("Nport delete callback returned null nport.\n"); 2906 DEV_VERIFY(!"nport is null."); 2907 goto out; 2908 } 2909 2910 nport_hdl = nport->nport_hdl; 2911 if (0 != spdk_err) { 2912 SPDK_ERRLOG("Nport delete callback returned error. FC Port: " 2913 "%d, Nport: %d\n", 2914 nport->port_hdl, nport->nport_hdl); 2915 DEV_VERIFY(!"nport delete callback error."); 2916 } 2917 2918 /* 2919 * Free the nport if this is the last rport being deleted and 2920 * execute the callback(s).
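 * (Each SPDK_FC_IT_DELETE event scheduled by nvmf_fc_adm_evnt_nport_delete()
 * below funnels back into this callback; only the invocation that finds the
 * rport list empty completes the nport delete.)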
2921 */ 2922 if (nvmf_fc_nport_has_no_rport(nport)) { 2923 if (0 != nport->assoc_count) { 2924 SPDK_ERRLOG("association count != 0\n"); 2925 DEV_VERIFY(!"association count != 0"); 2926 } 2927 2928 err = nvmf_fc_port_remove_nport(nport->fc_port, nport); 2929 if (0 != err) { 2930 SPDK_ERRLOG("Nport delete callback: Failed to remove " 2931 "nport from nport list. FC Port:%d Nport:%d\n", 2932 nport->port_hdl, nport->nport_hdl); 2933 } 2934 /* Free the nport */ 2935 free(nport); 2936 2937 if (cb_func != NULL) { 2938 (void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2939 } 2940 free(cb_data); 2941 } 2942 out: 2943 snprintf(log_str, sizeof(log_str), 2944 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n", 2945 port_handle, nport_hdl, event_type, spdk_err); 2946 2947 if (err != 0) { 2948 SPDK_ERRLOG("%s", log_str); 2949 } else { 2950 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2951 } 2952 } 2953 2954 /* 2955 * Delete Nport. 2956 */ 2957 static void 2958 nvmf_fc_adm_evnt_nport_delete(void *arg) 2959 { 2960 ASSERT_SPDK_FC_MAIN_THREAD(); 2961 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2962 struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *) 2963 api_data->api_args; 2964 struct spdk_nvmf_fc_nport *nport = NULL; 2965 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL; 2966 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 2967 int err = 0; 2968 uint32_t rport_cnt = 0; 2969 int rc = 0; 2970 2971 /* 2972 * Make sure that the nport exists. 2973 */ 2974 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 2975 if (nport == NULL) { 2976 SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle, 2977 args->port_handle); 2978 err = -EINVAL; 2979 goto out; 2980 } 2981 2982 /* 2983 * Allocate memory for callback data. 2984 */ 2985 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data)); 2986 if (NULL == cb_data) { 2987 SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle); 2988 err = -ENOMEM; 2989 goto out; 2990 } 2991 2992 cb_data->nport = nport; 2993 cb_data->port_handle = args->port_handle; 2994 cb_data->fc_cb_func = api_data->cb_func; 2995 cb_data->fc_cb_ctx = args->cb_ctx; 2996 2997 /* 2998 * Begin nport tear down 2999 */ 3000 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3001 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3002 } else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3003 /* 3004 * Deletion of this nport already in progress. Register callback 3005 * and return. 3006 */ 3007 /* TODO: Register callback in callback vector. For now, set the error and return. 
*/ 3008 err = -ENODEV; 3009 goto out; 3010 } else { 3011 /* nport partially created/deleted */ 3012 DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3013 DEV_VERIFY(!"Nport in zombie state"); 3014 err = -ENODEV; 3015 goto out; 3016 } 3017 3018 /* 3019 * Remove this nport from listening addresses across subsystems. 3020 */ 3021 rc = nvmf_fc_adm_add_rem_nport_listener(nport, false); 3022 3023 if (0 != rc) { 3024 err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 3025 SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n", 3026 nport->nport_hdl); 3027 goto out; 3028 } 3029 3030 /* 3031 * Delete all the remote ports (if any) for the nport. 3032 */ 3033 /* TODO - Need to do this with a "first" and a "next" accessor function 3034 * for completeness. Look at app-subsystem as examples. 3035 */ 3036 if (nvmf_fc_nport_has_no_rport(nport)) { 3037 /* No rports to delete. Complete the nport deletion. */ 3038 nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0); 3039 goto out; 3040 } 3041 3042 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3043 struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc( 3044 1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args)); 3045 3046 if (it_del_args == NULL) { 3047 err = -ENOMEM; 3048 SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n", 3049 rport_iter->rpi, rport_iter->s_id); 3050 DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory"); 3051 goto out; 3052 } 3053 3054 rport_cnt++; 3055 it_del_args->port_handle = nport->port_hdl; 3056 it_del_args->nport_handle = nport->nport_hdl; 3057 it_del_args->cb_ctx = (void *)cb_data; 3058 it_del_args->rpi = rport_iter->rpi; 3059 it_del_args->s_id = rport_iter->s_id; 3060 3061 nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args, 3062 nvmf_fc_adm_delete_nport_cb); 3063 } 3064 3065 out: 3066 /* On failure, execute the callback function now */ 3067 if ((err != 0) || (rc != 0)) { 3068 SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, " 3069 "rport_cnt:%d rc:%d.\n", 3070 args->nport_handle, err, args->port_handle, 3071 rport_cnt, rc); 3072 if (cb_data) { 3073 free(cb_data); 3074 } 3075 if (api_data->cb_func != NULL) { 3076 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err); 3077 } 3078 3079 } else { 3080 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3081 "NPort %d delete done successfully, fc port:%d. " 3082 "rport_cnt:%d\n", 3083 args->nport_handle, args->port_handle, rport_cnt); 3084 } 3085 3086 free(arg); 3087 } 3088 3089 /* 3090 * Process a PRLI/IT add. 3091 */ 3092 static void 3093 nvmf_fc_adm_evnt_i_t_add(void *arg) 3094 { 3095 ASSERT_SPDK_FC_MAIN_THREAD(); 3096 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3097 struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *) 3098 api_data->api_args; 3099 struct spdk_nvmf_fc_nport *nport = NULL; 3100 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3101 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3102 int err = 0; 3103 3104 /* 3105 * Make sure the nport exists. 3106 */ 3107 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3108 if (nport == NULL) { 3109 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3110 err = -EINVAL; 3111 goto out; 3112 } 3113 3114 /* 3115 * Check for a duplicate i_t_add.
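 * (An initiator that sends PRLI twice with the same s_id/rpi pair is rejected
 * below with -EEXIST rather than instantiating a second rport.)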
3116 */ 3117 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3118 if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) { 3119 SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n", 3120 args->nport_handle, rport_iter->s_id, rport_iter->rpi); 3121 err = -EEXIST; 3122 goto out; 3123 } 3124 } 3125 3126 /* 3127 * Get the memory to instantiate the remote port. 3128 */ 3129 rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info)); 3130 if (rport == NULL) { 3131 SPDK_ERRLOG("Memory allocation for rem port failed.\n"); 3132 err = -ENOMEM; 3133 goto out; 3134 } 3135 3136 /* 3137 * Initialize the contents of the rport. 3138 */ 3139 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED); 3140 rport->s_id = args->s_id; 3141 rport->rpi = args->rpi; 3142 rport->fc_nodename = args->fc_nodename; 3143 rport->fc_portname = args->fc_portname; 3144 3145 /* 3146 * Add the remote port to the nport. 3147 */ 3148 if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) { 3149 DEV_VERIFY(!"Error while adding rport to list"); 3150 } 3151 3152 /* 3153 * TODO: Do we validate the initiator's service parameters? 3154 */ 3155 3156 /* 3157 * Get the target's service parameters from the library 3158 * to return to the driver. 3159 */ 3160 args->target_prli_info = nvmf_fc_get_prli_service_params(); 3161 3162 out: 3163 if (api_data->cb_func != NULL) { 3164 /* 3165 * Passing pointer to the args struct as the first argument. 3166 * The cb_func should handle this appropriately. 3167 */ 3168 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err); 3169 } 3170 3171 free(arg); 3172 3173 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3174 "IT add on nport %d done, rc = %d.\n", 3175 args->nport_handle, err); 3176 } 3177 3178 /** 3179 * Process an IT delete. 3180 */ 3181 static void 3182 nvmf_fc_adm_evnt_i_t_delete(void *arg) 3183 { 3184 ASSERT_SPDK_FC_MAIN_THREAD(); 3185 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3186 struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *) 3187 api_data->api_args; 3188 int rc = 0; 3189 struct spdk_nvmf_fc_nport *nport = NULL; 3190 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL; 3191 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3192 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3193 uint32_t num_rport = 0; 3194 char log_str[256]; 3195 3196 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle); 3197 3198 /* 3199 * Make sure the nport exists. If it does not, error out. 3200 */ 3201 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3202 if (nport == NULL) { 3203 SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle); 3204 rc = -EINVAL; 3205 goto out; 3206 } 3207 3208 /* 3209 * Find this ITN / rport (remote port). 3210 */ 3211 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3212 num_rport++; 3213 if ((rport_iter->s_id == args->s_id) && 3214 (rport_iter->rpi == args->rpi) && 3215 (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) { 3216 rport = rport_iter; 3217 break; 3218 } 3219 } 3220 3221 /* 3222 * We should find either zero or exactly one rport. 3223 * 3224 * If we find zero rports, that means that a previous request has 3225 * removed the rport by the time we reached here. In this case, 3226 * simply return. 3227 */ 3228 if (rport == NULL) { 3229 rc = -ENODEV; 3230 goto out; 3231 } 3232 3233 /* 3234 * We have found exactly one rport.
Allocate memory for callback data. 3235 */ 3236 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data)); 3237 if (NULL == cb_data) { 3238 SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle); 3239 rc = -ENOMEM; 3240 goto out; 3241 } 3242 3243 cb_data->nport = nport; 3244 cb_data->rport = rport; 3245 cb_data->port_handle = args->port_handle; 3246 cb_data->fc_cb_func = api_data->cb_func; 3247 cb_data->fc_cb_ctx = args->cb_ctx; 3248 3249 /* 3250 * Validate rport object state. 3251 */ 3252 if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3253 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3254 } else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3255 /* 3256 * Deletion of this rport is already in progress. Register the 3257 * callback and return. 3258 */ 3259 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3260 rc = -ENODEV; 3261 goto out; 3262 } else { 3263 /* rport partially created/deleted */ 3264 DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3265 DEV_VERIFY(!"Invalid rport_state"); 3266 rc = -ENODEV; 3267 goto out; 3268 } 3269 3270 /* 3271 * We have successfully found a rport to delete. Call 3272 * nvmf_fc_adm_i_t_delete_assoc(), which will perform further 3273 * IT-delete processing as well as free the cb_data. 3274 */ 3275 nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb, 3276 (void *)cb_data); 3277 3278 out: 3279 if (rc != 0) { 3280 /* 3281 * We have entered here because either we encountered an 3282 * error, or we did not find a rport to delete. 3283 * As a result, we will not call the function 3284 * nvmf_fc_adm_i_t_delete_assoc() for further IT-delete 3285 * processing. Therefore, execute the callback function now. 3286 */ 3287 if (cb_data) { 3288 free(cb_data); 3289 } 3290 if (api_data->cb_func != NULL) { 3291 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc); 3292 } 3293 } 3294 3295 snprintf(log_str, sizeof(log_str), 3296 "IT delete on nport:%d end. num_rport:%d rc = %d.\n", 3297 args->nport_handle, num_rport, rc); 3298 3299 if (rc != 0) { 3300 SPDK_ERRLOG("%s", log_str); 3301 } else { 3302 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3303 } 3304 3305 free(arg); 3306 } 3307 3308 /* 3309 * Process a received ABTS. 3310 */ 3311 static void 3312 nvmf_fc_adm_evnt_abts_recv(void *arg) 3313 { 3314 ASSERT_SPDK_FC_MAIN_THREAD(); 3315 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3316 struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args; 3317 struct spdk_nvmf_fc_nport *nport = NULL; 3318 int err = 0; 3319 3320 SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi, 3321 args->oxid, args->rxid); 3322 3323 /* 3324 * 1. Make sure the nport exists. 3325 */ 3326 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3327 if (nport == NULL) { 3328 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3329 err = -EINVAL; 3330 goto out; 3331 } 3332 3333 /* 3334 * 2. If the nport is in the process of being deleted, drop the ABTS. 3335 */ 3336 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3337 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3338 "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n", 3339 args->rpi, args->oxid, args->rxid); 3340 err = 0; 3341 goto out; 3342 3343 } 3344 3345 /* 3346 * 3.
Pass the received ABTS-LS to the library for handling. 3347 */ 3348 nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid); 3349 3350 out: 3351 if (api_data->cb_func != NULL) { 3352 /* 3353 * Passing pointer to the args struct as the first argument. 3354 * The cb_func should handle this appropriately. 3355 */ 3356 (void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err); 3357 } else { 3358 /* No callback set, free the args */ 3359 free(args); 3360 } 3361 3362 free(arg); 3363 } 3364 3365 /* 3366 * Callback function for hw port quiesce. 3367 */ 3368 static void 3369 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err) 3370 { 3371 ASSERT_SPDK_FC_MAIN_THREAD(); 3372 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx = 3373 (struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx; 3374 struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args; 3375 spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func; 3376 struct spdk_nvmf_fc_queue_dump_info dump_info; 3377 struct spdk_nvmf_fc_port *fc_port = NULL; 3378 char *dump_buf = NULL; 3379 uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE; 3380 3381 /* 3382 * Free the callback context struct. 3383 */ 3384 free(ctx); 3385 3386 if (err != 0) { 3387 SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle); 3388 goto out; 3389 } 3390 3391 if (args->dump_queues == false) { 3392 /* 3393 * Queues need not be dumped. 3394 */ 3395 goto out; 3396 } 3397 3398 SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle); 3399 3400 /* 3401 * Get the fc port. 3402 */ 3403 fc_port = nvmf_fc_port_lookup(args->port_handle); 3404 if (fc_port == NULL) { 3405 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3406 err = -EINVAL; 3407 goto out; 3408 } 3409 3410 /* 3411 * Allocate memory for the dump buffer. 3412 * This memory will be freed by FCT. 3413 */ 3414 dump_buf = (char *)calloc(1, dump_buf_size); 3415 if (dump_buf == NULL) { 3416 err = -ENOMEM; 3417 SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle); 3418 goto out; 3419 } 3420 *args->dump_buf = (uint32_t *)dump_buf; 3421 dump_info.buffer = dump_buf; 3422 dump_info.offset = 0; 3423 3424 /* 3425 * Add the dump reason to the top of the buffer. 3426 */ 3427 nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason); 3428 3429 /* 3430 * Dump the hwqp. 3431 */ 3432 nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues, 3433 fc_port->num_io_queues, &dump_info); 3434 3435 out: 3436 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n", 3437 args->port_handle, args->dump_queues, err); 3438 3439 if (cb_func != NULL) { 3440 (void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3441 } 3442 } 3443 3444 /* 3445 * HW port reset 3446 3447 */ 3448 static void 3449 nvmf_fc_adm_evnt_hw_port_reset(void *arg) 3450 { 3451 ASSERT_SPDK_FC_MAIN_THREAD(); 3452 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3453 struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *) 3454 api_data->api_args; 3455 struct spdk_nvmf_fc_port *fc_port = NULL; 3456 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL; 3457 int err = 0; 3458 3459 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle); 3460 3461 /* 3462 * Make sure the physical port exists. 
*/ 3464 fc_port = nvmf_fc_port_lookup(args->port_handle); 3465 if (fc_port == NULL) { 3466 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3467 err = -EINVAL; 3468 goto out; 3469 } 3470 3471 /* 3472 * Save the reset event args and the callback in a context struct. 3473 */ 3474 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx)); 3475 3476 if (ctx == NULL) { 3477 err = -ENOMEM; 3478 SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle); 3479 goto fail; 3480 } 3481 3482 ctx->reset_args = args; 3483 ctx->reset_cb_func = api_data->cb_func; 3484 3485 /* 3486 * Quiesce the hw port. 3487 */ 3488 err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb); 3489 if (err != 0) { 3490 goto fail; 3491 } 3492 3493 /* 3494 * Once the port is successfully quiesced, the reset processing 3495 * will continue in the callback function nvmf_fc_adm_hw_port_quiesce_reset_cb(). 3496 */ 3497 return; 3498 fail: 3499 free(ctx); 3500 3501 out: 3502 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle, 3503 err); 3504 3505 if (api_data->cb_func != NULL) { 3506 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3507 } 3508 3509 free(arg); 3510 } 3511 3512 static inline void 3513 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args) 3514 { 3515 if (nvmf_fc_get_main_thread()) { 3516 spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args); 3517 } 3518 } 3519 3520 /* 3521 * Queue up an event in the SPDK main thread's event queue. 3522 * Used by the FC driver to notify the SPDK main thread of FC related events. 3523 */ 3524 int 3525 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args, 3526 spdk_nvmf_fc_callback cb_func) 3527 { 3528 int err = 0; 3529 struct spdk_nvmf_fc_adm_api_data *api_data = NULL; 3530 spdk_msg_fn event_fn = NULL; 3531 3532 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type); 3533 3534 if (event_type >= SPDK_FC_EVENT_MAX) { 3535 SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type); 3536 err = -EINVAL; 3537 goto done; 3538 } 3539 3540 if (args == NULL) { 3541 SPDK_ERRLOG("Null args for event %d.\n", event_type); 3542 err = -EINVAL; 3543 goto done; 3544 } 3545 3546 api_data = calloc(1, sizeof(*api_data)); 3547 3548 if (api_data == NULL) { 3549 SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type); 3550 err = -ENOMEM; 3551 goto done; 3552 } 3553 3554 api_data->api_args = args; 3555 api_data->cb_func = cb_func; 3556 3557 switch (event_type) { 3558 case SPDK_FC_HW_PORT_INIT: 3559 event_fn = nvmf_fc_adm_evnt_hw_port_init; 3560 break; 3561 3562 case SPDK_FC_HW_PORT_ONLINE: 3563 event_fn = nvmf_fc_adm_evnt_hw_port_online; 3564 break; 3565 3566 case SPDK_FC_HW_PORT_OFFLINE: 3567 event_fn = nvmf_fc_adm_evnt_hw_port_offline; 3568 break; 3569 3570 case SPDK_FC_NPORT_CREATE: 3571 event_fn = nvmf_fc_adm_evnt_nport_create; 3572 break; 3573 3574 case SPDK_FC_NPORT_DELETE: 3575 event_fn = nvmf_fc_adm_evnt_nport_delete; 3576 break; 3577 3578 case SPDK_FC_IT_ADD: 3579 event_fn = nvmf_fc_adm_evnt_i_t_add; 3580 break; 3581 3582 case SPDK_FC_IT_DELETE: 3583 event_fn = nvmf_fc_adm_evnt_i_t_delete; 3584 break; 3585 3586 case SPDK_FC_ABTS_RECV: 3587 event_fn = nvmf_fc_adm_evnt_abts_recv; 3588 break; 3589 3590 case SPDK_FC_HW_PORT_RESET: 3591 event_fn = nvmf_fc_adm_evnt_hw_port_reset; 3592 break; 3593 3594 case SPDK_FC_UNRECOVERABLE_ERR: 3595 default: 3596 SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3597 err = -EINVAL; 3598 break; 3599 } 3600 3601 done: 3602 3603 if (err == 0) { 3604 assert(event_fn != NULL); 3605 nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data); 3606 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type); 3607 } else { 3608 SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err); 3609 if (api_data) { 3610 free(api_data); 3611 } 3612 } 3613 3614 return err; 3615 } 3616 3617 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc); 3618 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api) 3619 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc) 3620
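/*
 * Usage sketch (hypothetical, for illustration only -- the port handle,
 * function names and values below are made up and are not part of this
 * file): a low level driver drives this transport by allocating an args
 * struct and queueing an event onto the SPDK main thread with
 * nvmf_fc_main_enqueue_event(). The args memory must remain valid until
 * the callback fires; on success the event handler runs on the main
 * thread and invokes the callback with the resulting error code.
 *
 *	static void
 *	lld_port_init_done(uint8_t port_handle, enum spdk_fc_event event_type,
 *			   void *cb_arg, int err)
 *	{
 *		SPDK_NOTICELOG("port %d init done, err = %d\n", port_handle, err);
 *	}
 *
 *	static int
 *	lld_bring_up_port(struct spdk_nvmf_fc_hw_port_init_args *init_args)
 *	{
 *		// init_args->port_handle, init_args->io_queue_cnt,
 *		// init_args->io_queues[], init_args->ls_queue, etc. are
 *		// filled in by the driver beforehand.
 *		return nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT,
 *						  init_args,
 *						  lld_port_init_done);
 *	}
 */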