/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NVMe_FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
	DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_master_thread());
#endif

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};

#define OBJECT_NVMF_FC_IO 0xA0

#define TRACE_GROUP_NVMF_FC 0x8
#define TRACE_FC_REQ_INIT SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)

SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
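
/*
 * Illustrative note (not from the original source): the descriptions above
 * tie a human-readable name to each tpoint id. A state change is then emitted
 * with spdk_trace_record(), exactly as nvmf_fc_record_req_trace_point() does
 * further down in this file:
 *
 *	spdk_trace_record(TRACE_FC_REQ_INIT, fc_req->poller_lcore, 0,
 *			  (uint64_t)(&fc_req->req), 0);
 *
 * The recorded events carry the request pointer as the object id, so one IO
 * can be followed through all of its state transitions with SPDK's trace
 * tooling (the exact capture/decode workflow depends on the SPDK release).
 */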

/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};


typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Callback function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

/**
 * The callback structure for HW port link break event
 */
struct spdk_nvmf_fc_adm_port_link_break_cb_data {
	struct spdk_nvmf_hw_port_link_break_args *args;
	struct spdk_nvmf_fc_nport_delete_args nport_del_args;
	spdk_nvmf_fc_callback cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_master_thread(void)
{
	return g_nvmf_fc_master_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Use the BDEV_ABORTED tracepoint; mapping it to
		 * TRACE_FC_REQ_ABORTED would conflate the two states.
		 */
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req), 0);
	}
}

static void
nvmf_fc_handle_connection_failure(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

	if (!fc_conn->create_opd) {
		return;
	}
	api_data = &fc_conn->create_opd->u.add_conn;

	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
				    api_data->args.fc_conn, api_data->aq_conn);
}

static void
nvmf_fc_handle_assoc_deletion(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;

	nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
				   fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
}

static int
nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t i;
	struct spdk_nvmf_fc_request *fc_req;

	TAILQ_INIT(&hwqp->free_reqs);
	TAILQ_INIT(&hwqp->in_use_reqs);

	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
	if (hwqp->fc_reqs_buf == NULL) {
		SPDK_ERRLOG("create fc request pool failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < hwqp->rq_size; i++) {
		fc_req = hwqp->fc_reqs_buf + i;

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
	}

	return 0;
}

static inline struct spdk_nvmf_fc_request *
nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_request *fc_req;

	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}

	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);

	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Mark the request as failed; the state transition is recorded
		 * for debugging.
		 */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* Set the magic to mark the request as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
}
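
/*
 * Request-pool lifecycle (a summary sketch of the three helpers above, not
 * additional API): each hwqp preallocates rq_size request slots in
 * fc_reqs_buf and threads them onto free_reqs. An arriving command moves a
 * slot from free_reqs to in_use_reqs; freeing reverses that and stamps
 * fc_req->magic = 0xDEADBEEF so a stale reference trips the assert in
 * nvmf_fc_request_set_state().
 *
 *	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);	// free_reqs -> in_use_reqs
 *	...
 *	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);	// in_use_reqs -> free_reqs
 */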

static inline bool
nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
{
	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		return true;
	default:
		return false;
	}
}

void
nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	nvmf_fc_init_rqpair_buffers(hwqp);
}

struct spdk_nvmf_fc_conn *
nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
		if (fc_conn->conn_id == conn_id) {
			return fc_conn;
		}
	}

	return NULL;
}

void
nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}

void
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	nvmf_fc_init_poller_queues(hwqp);
	if (&fc_port->ls_queue != hwqp) {
		nvmf_fc_create_req_mempool(hwqp);
	}

	nvmf_fc_init_q(hwqp);
	TAILQ_INIT(&hwqp->connection_list);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_get_idlest_poll_group(void)
{
	uint32_t min_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	/* find the poll group with the least number of hwqp's assigned to it */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < min_count) {
			ret_fgroup = fgroup;
			min_count = fgroup->hwqp_count;
		}
	}

	return ret_fgroup;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_poll_group *fgroup = NULL;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	fgroup = nvmf_fc_get_idlest_poll_group();
	if (!fgroup) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	hwqp->thread = fgroup->group.group->thread;
	hwqp->fgroup = fgroup;
	fgroup->hwqp_count++;
	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		hwqp->fgroup->hwqp_count--;
		nvmf_fc_poller_api_func(hwqp,
					SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
	}
}
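
/*
 * HWQP-to-poll-group balancing (describes the policy implemented above):
 * poll groups are created one per SPDK thread by the generic nvmf layer, and
 * nvmf_fc_poll_group_add_hwqp() always picks the group currently polling the
 * fewest hwqps, so IO queues spread evenly; e.g. six hwqps across three poll
 * groups end up two per group. The actual add/remove runs on the poller
 * thread via the SPDK_NVMF_FC_POLLER_API_ADD_HWQP/REMOVE_HWQP messages.
 */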

/*
 * Note: This needs to be used only on master poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}

static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
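
/*
 * ABTS retry protocol (overview of nvmf_fc_queue_synced_cb() and
 * nvmf_fc_handle_abts_notfound() above, and nvmf_fc_abts_handled_cb()
 * below): an ABTS is fanned out to every hwqp that has a connection for the
 * initiator's RPI. If no poller finds the OXID on the first pass and the LLD
 * supports queue sync, a sync marker is posted so in-flight work drains;
 * nvmf_fc_queue_synced_cb() then replays the ABTS exactly once. The final
 * callback answers with BLS ACC if any poller handled the exchange and BLS
 * RJT otherwise.
 */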

static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}

void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present. */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}

/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

static void
nvmf_fc_port_cleanup(void)
{
	struct spdk_nvmf_fc_port *fc_port, *tmp;
	struct spdk_nvmf_fc_hwqp *hwqp;
	uint32_t i;

	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);
		for (i = 0; i < fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			if (hwqp->fc_reqs_buf) {
				free(hwqp->fc_reqs_buf);
			}
		}
		free(fc_port);
	}
}

uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}
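
/*
 * Lookup hierarchy (a sketch tying the accessors above to the helper below;
 * the variables are illustrative): a physical port is found by its handle,
 * an nport by (port_hdl, nport_hdl), and a remote port by the FC addresses
 * carried in each frame:
 *
 *	struct spdk_nvmf_fc_nport *nport;
 *	struct spdk_nvmf_fc_remote_port_info *rport;
 *
 *	nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
 *	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
 *
 * The helper below matches the frame's D_ID against an nport on the hwqp's
 * port and its S_ID against the remote ports logged into that nport.
 */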

static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}

/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}

bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}

static inline bool
nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
{
	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		return true;
	default:
		return false;
	}
}

static inline bool
nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_request *tmp = NULL;

	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
		if (tmp == &fc_req->req) {
			return true;
		}
	}
	return false;
}

static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) runs
	 * to completion, so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}
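
/*
 * Abort-completion callbacks (usage sketch for the two functions below;
 * my_abort_cb/my_ctx are illustrative names, and the callback signature is
 * the spdk_nvmf_fc_caller_cb type declared in nvmf_fc.h): a caller that must
 * know when a request is fully aborted registers a callback, which fires
 * from nvmf_fc_request_abort_complete() just before the request is freed:
 *
 *	static void
 *	my_abort_cb(void *hwqp, int32_t status, void *cb_args)
 *	{
 *		... // the request is now fully aborted
 *	}
 *
 *	nvmf_fc_request_abort(fc_req, true, my_abort_cb, my_ctx);
 */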

void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;

	/* Request abort completed. Notify all the callbacks. */
	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);
}

void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to the list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted = true;
	}

	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(nvmf_fc, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}
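
/*
 * Request execution flow (a summary of nvmf_fc_request_execute() below): a
 * command first claims an FC exchange (unless it can be answered with a
 * send-frame) and, if it carries data, IO buffers; when either resource is
 * exhausted the request parks in SPDK_NVMF_FC_REQ_PENDING and is retried
 * from nvmf_fc_hwqp_process_pending_reqs(). Writes then issue a transfer to
 * pull data from the host; reads and no-data commands go straight to the
 * backend via spdk_nvmf_request_exec().
 */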

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send-frame for this command. */
	if (!nvmf_fc_use_send_frame(&fc_req->req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			SPDK_ERRLOG("NO XCHGs!\n");
			goto pending;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			goto pending;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Request dropped; return success to the caller. */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;

pending:
	if (fc_req->xchg) {
		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);

	return -EAGAIN;
}

static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen, since fc_reqs == number of RQ buffers. */
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}

/*
 * These functions are called from the FC LLD
 */

void
_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_fc_hwqp *hwqp;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_transport *transport;

	/* Check the pointer before dereferencing it for the hwqp. */
	if (!fc_req) {
		return;
	}

	hwqp = fc_req->hwqp;
	fgroup = hwqp->fgroup;
	group = &fgroup->group;
	transport = group->transport;

	if (fc_req->xchg) {
		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
	}
	fc_req->req.data = NULL;
	fc_req->req.iovcnt = 0;

	/* Release Q buffer */
	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);

	/* Free Fc request */
	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
}

void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
		fc_req_state_strs[state] : unk_str);
}
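
/*
 * FC addressing note for the frame parsers below (background added for
 * clarity, assuming the LLD hands us the raw big-endian FC header): S_ID and
 * D_ID are 24-bit fields, so the code widens them to 32 bits, byte-swaps
 * with from_be32() and shifts right by 8 to recover the 24-bit address in
 * host order:
 *
 *	s_id = (uint32_t)frame->s_id;
 *	s_id = from_be32(&s_id) >> 8;
 */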

int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In the tracelog below, we directly do the endian conversion on
	 * ox_id and rx_id. Since these are bitfields, we can't pass their
	 * addresses to from_be16(). And since ox_id and rx_id are only needed
	 * for the tracelog, assigning to local variables and converting there
	 * would be wasted work in non-debug builds.
	 */
	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}

void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* The LS queue is tied to the acceptor poll group, and LS
		 * pending requests are staged and processed using
		 * hwqp->ls_pending_queue.
		 */
		return;
	}

	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
		if (!nvmf_fc_request_execute(fc_req)) {
			/* Successfully posted; delete from pending. */
			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
		}

		if (budget) {
			budget--;
		} else {
			return;
		}
	}
}

void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
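
/*
 * Response coalescing note for nvmf_fc_handle_rsp() and
 * nvmf_fc_send_ersp_required() below: FC-NVMe lets the target complete most
 * commands with a lightweight RSP frame and only periodically send the full
 * Extended RSP (ERSP) that carries SQ-head/RSN bookkeeping. With an
 * esrp_ratio of, say, 8, every eighth response on a connection is an ERSP
 * (rsp_cnt % 8 == 0); failed status, Fabric commands, a non-zero cdw0/rsvd1,
 * or a transfer length that differs from the command length also force one.
 */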

int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transfered_len)) {
		/* Fill ERSP Len */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}

bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}

static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we don't call io cleanup in the same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}

struct spdk_nvmf_tgt *
nvmf_fc_get_tgt(void)
{
	if (g_nvmf_ftransport) {
		return g_nvmf_ftransport->transport.tgt;
	}
	return NULL;
}

/*
 * FC Transport Public API begins here
 */

#define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
#define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
#define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
#define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
#define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
#define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
#define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
#define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)

static void
nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
{
	opts->max_queue_depth = SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
	opts->max_io_size = SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
	opts->io_unit_size = SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
	opts->max_aq_depth = SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
	opts->num_shared_buffers = SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
}
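
/*
 * Usage sketch (illustrative, not part of this file): with the defaults
 * above, an application normally instantiates this transport through the
 * generic nvmf API, which dispatches to nvmf_fc_opts_init()/nvmf_fc_create()
 * via the ops table at the bottom of this file. Roughly:
 *
 *	struct spdk_nvmf_transport_opts opts;
 *	struct spdk_nvmf_transport *transport;
 *
 *	spdk_nvmf_transport_opts_init("FC", &opts);
 *	transport = spdk_nvmf_transport_create("FC", &opts);
 *	// then add it to a target with spdk_nvmf_tgt_add_transport()
 *
 * Exact helper signatures vary between SPDK releases; see spdk/nvmf.h.
 */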

static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}

static int
nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
{
	if (transport) {
		struct spdk_nvmf_fc_transport *ftransport;
		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;

		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);

		free(ftransport);

		/* clean up any FC poll groups still around */
		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
			free(fgroup);
		}
		g_nvmf_fgroup_count = 0;

		/* low level FC driver clean up */
		nvmf_fc_lld_fini();

		nvmf_fc_port_cleanup();
	}

	return 0;
}

static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}

static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}

static uint32_t
nvmf_fc_accept(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	uint32_t count = 0;
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			count += nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}

	return count;
}

static void
nvmf_fc_discover(struct spdk_nvmf_transport *transport,
		 struct spdk_nvme_transport_id *trid,
		 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
	entry->adrfam = trid->adrfam;
	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;

	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
}

static struct spdk_nvmf_transport_poll_group *
nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_transport *ftransport =
		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);

	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
	if (!fgroup) {
		SPDK_ERRLOG("Unable to alloc FC poll group\n");
		return NULL;
	}

	TAILQ_INIT(&fgroup->hwqp_list);

	pthread_mutex_lock(&ftransport->lock);
	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
	g_nvmf_fgroup_count++;
	pthread_mutex_unlock(&ftransport->lock);

	return &fgroup->group;
}
TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link); 1980 g_nvmf_fgroup_count--; 1981 pthread_mutex_unlock(&ftransport->lock); 1982 1983 free(fgroup); 1984 } 1985 1986 static int 1987 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 1988 struct spdk_nvmf_qpair *qpair) 1989 { 1990 struct spdk_nvmf_fc_poll_group *fgroup; 1991 struct spdk_nvmf_fc_conn *fc_conn; 1992 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 1993 struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL; 1994 bool hwqp_found = false; 1995 1996 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 1997 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 1998 1999 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2000 if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) { 2001 hwqp_found = true; 2002 break; 2003 } 2004 } 2005 2006 if (!hwqp_found) { 2007 SPDK_ERRLOG("No valid hwqp found for new QP.\n"); 2008 goto err; 2009 } 2010 2011 if (!nvmf_fc_assign_conn_to_hwqp(hwqp, 2012 &fc_conn->conn_id, 2013 fc_conn->max_queue_depth)) { 2014 SPDK_ERRLOG("Failed to get a connection id for new QP.\n"); 2015 goto err; 2016 } 2017 2018 fc_conn->hwqp = hwqp; 2019 2020 /* If this is for the admin connection, then update the assoc ID. */ 2021 if (fc_conn->qpair.qid == 0) { 2022 fc_conn->fc_assoc->assoc_id = fc_conn->conn_id; 2023 } 2024 2025 api_data = &fc_conn->create_opd->u.add_conn; 2026 nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args); 2027 return 0; 2028 err: 2029 return -1; 2030 } 2031 2032 static int 2033 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 2034 { 2035 uint32_t count = 0; 2036 struct spdk_nvmf_fc_poll_group *fgroup; 2037 struct spdk_nvmf_fc_hwqp *hwqp; 2038 2039 fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group); 2040 2041 TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) { 2042 if (hwqp->state == SPDK_FC_HWQP_ONLINE) { 2043 count += nvmf_fc_process_queue(hwqp); 2044 } 2045 } 2046 2047 return (int) count; 2048 } 2049 2050 static int 2051 nvmf_fc_request_free(struct spdk_nvmf_request *req) 2052 { 2053 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req); 2054 2055 if (!fc_req->is_aborted) { 2056 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED); 2057 nvmf_fc_request_abort(fc_req, true, NULL, NULL); 2058 } else { 2059 nvmf_fc_request_abort_complete(fc_req); 2060 } 2061 return 0; 2062 } 2063 2064 2065 static void 2066 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair) 2067 { 2068 struct spdk_nvmf_fc_conn *fc_conn; 2069 2070 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2071 2072 if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) { 2073 /* QP creation failure in FC transport. Cleanup.
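* conn_id is assigned in nvmf_fc_poll_group_add() via nvmf_fc_assign_conn_to_hwqp(), so an invalid id here means the connection was never handed to a poller.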
*/ 2074 spdk_thread_send_msg(nvmf_fc_get_master_thread(), 2075 nvmf_fc_handle_connection_failure, fc_conn); 2076 } else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id && 2077 fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 2078 /* Admin connection */ 2079 spdk_thread_send_msg(nvmf_fc_get_master_thread(), 2080 nvmf_fc_handle_assoc_deletion, fc_conn); 2081 } 2082 } 2083 2084 static int 2085 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 2086 struct spdk_nvme_transport_id *trid) 2087 { 2088 struct spdk_nvmf_fc_conn *fc_conn; 2089 2090 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2091 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2092 return 0; 2093 } 2094 2095 static int 2096 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 2097 struct spdk_nvme_transport_id *trid) 2098 { 2099 struct spdk_nvmf_fc_conn *fc_conn; 2100 2101 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2102 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2103 return 0; 2104 } 2105 2106 static int 2107 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 2108 struct spdk_nvme_transport_id *trid) 2109 { 2110 struct spdk_nvmf_fc_conn *fc_conn; 2111 2112 fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair); 2113 memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id)); 2114 return 0; 2115 } 2116 2117 static void 2118 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 2119 struct spdk_nvmf_request *req) 2120 { 2121 spdk_nvmf_request_complete(req); 2122 } 2123 2124 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = { 2125 .name = "FC", 2126 .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC, 2127 .opts_init = nvmf_fc_opts_init, 2128 .create = nvmf_fc_create, 2129 .destroy = nvmf_fc_destroy, 2130 2131 .listen = nvmf_fc_listen, 2132 .stop_listen = nvmf_fc_stop_listen, 2133 .accept = nvmf_fc_accept, 2134 2135 .listener_discover = nvmf_fc_discover, 2136 2137 .poll_group_create = nvmf_fc_poll_group_create, 2138 .poll_group_destroy = nvmf_fc_poll_group_destroy, 2139 .poll_group_add = nvmf_fc_poll_group_add, 2140 .poll_group_poll = nvmf_fc_poll_group_poll, 2141 2142 .req_complete = nvmf_fc_request_complete, 2143 .req_free = nvmf_fc_request_free, 2144 .qpair_fini = nvmf_fc_close_qpair, 2145 .qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid, 2146 .qpair_get_local_trid = nvmf_fc_qpair_get_local_trid, 2147 .qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid, 2148 .qpair_abort_request = nvmf_fc_qpair_abort_request, 2149 }; 2150 2151 /* 2152 * Re-initialize the FC-Port after an offline event. 2153 * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains 2154 * unchanged after the first initialization. 
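* The caller is expected to pass the same io_queue_cnt as at first init; only the queue buffers and fcp_rq_id from args are re-applied here.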
2155 * 2156 */ 2157 static int 2158 nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port, 2159 struct spdk_nvmf_fc_hw_port_init_args *args) 2160 { 2161 uint32_t i; 2162 2163 /* Verify that the port was previously in offline or quiesced state */ 2164 if (nvmf_fc_port_is_online(fc_port)) { 2165 SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle); 2166 return -EINVAL; 2167 } 2168 2169 /* Reinit information in new LS queue from previous queue */ 2170 nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue); 2171 2172 fc_port->fcp_rq_id = args->fcp_rq_id; 2173 2174 /* Initialize the LS queue */ 2175 fc_port->ls_queue.queues = args->ls_queue; 2176 nvmf_fc_init_poller_queues(fc_port->ls_queue.queues); 2177 2178 for (i = 0; i < fc_port->num_io_queues; i++) { 2179 /* Reinit information in new IO queue from previous queue */ 2180 nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i], 2181 args->io_queues[i]); 2182 fc_port->io_queues[i].queues = args->io_queues[i]; 2183 /* Initialize the IO queues */ 2184 nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues); 2185 } 2186 2187 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2188 2189 /* Validate the port information */ 2190 DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list)); 2191 DEV_VERIFY(fc_port->num_nports == 0); 2192 if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) { 2193 return -EINVAL; 2194 } 2195 2196 return 0; 2197 } 2198 2199 /* Initializes the data for the creation of an FC-Port object in the SPDK 2200 * library. The spdk_nvmf_fc_port is a well-defined structure that is part of 2201 * the API to the library. The contents added to this well-defined structure 2202 * are private to each vendor's implementation. 2203 */ 2204 static int 2205 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port, 2206 struct spdk_nvmf_fc_hw_port_init_args *args) 2207 { 2208 /* Use a high number for the LS HWQP so that it does not clash with the 2209 * IO HWQPs and is immediately recognizable as the LS queue during tracing. 2210 */ 2211 uint32_t i; 2212 2213 fc_port->port_hdl = args->port_handle; 2214 fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE; 2215 fc_port->fcp_rq_id = args->fcp_rq_id; 2216 fc_port->num_io_queues = args->io_queue_cnt; 2217 2218 /* 2219 * Set port context from init args. Used for FCP port stats. 2220 */ 2221 fc_port->port_ctx = args->port_ctx; 2222 2223 /* 2224 * Initialize the LS queue wherever needed. 2225 */ 2226 fc_port->ls_queue.queues = args->ls_queue; 2227 fc_port->ls_queue.thread = nvmf_fc_get_master_thread(); 2228 fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues; 2229 2230 /* 2231 * Initialize the LS queue. 2232 */ 2233 nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue); 2234 2235 /* 2236 * Initialize the IO queues. 2237 */ 2238 for (i = 0; i < args->io_queue_cnt; i++) { 2239 struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i]; 2240 hwqp->hwqp_id = i; 2241 hwqp->queues = args->io_queues[i]; 2242 hwqp->rq_size = args->io_queue_size; 2243 nvmf_fc_init_hwqp(fc_port, hwqp); 2244 } 2245 2246 /* 2247 * Initialize LS processing for the port. 2248 */ 2249 nvmf_fc_ls_init(fc_port); 2250 2251 /* 2252 * Initialize the list of nports on this HW port.
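* A freshly initialized port carries no nports; they are added later through SPDK_FC_NPORT_CREATE events (see nvmf_fc_adm_evnt_nport_create below).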
*/ 2254 TAILQ_INIT(&fc_port->nport_list); 2255 fc_port->num_nports = 0; 2256 2257 return 0; 2258 } 2259 2260 static void 2261 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port) 2262 { 2263 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2264 int i = 0; 2265 2266 hwqp = &fc_port->ls_queue; 2267 (void)nvmf_fc_hwqp_set_offline(hwqp); 2268 2269 /* Remove poller for all the io queues. */ 2270 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2271 hwqp = &fc_port->io_queues[i]; 2272 (void)nvmf_fc_hwqp_set_offline(hwqp); 2273 nvmf_fc_poll_group_remove_hwqp(hwqp); 2274 } 2275 } 2276 2277 /* 2278 * Callback function for HW port link break operation. 2279 * 2280 * Notice that this callback is triggered when spdk_fc_nport_delete() 2281 * completes, when that spdk_fc_nport_delete() call was issued by 2282 * nvmf_fc_adm_evnt_hw_port_link_break(). 2283 * 2284 * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple 2285 * times (one per nport in the HW port's nport_list), a single call to 2286 * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function. 2287 * 2288 * As a result, this function invokes the callback to the caller of 2289 * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty. 2290 */ 2291 static void 2292 nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle, 2293 enum spdk_fc_event event_type, void *cb_args, int spdk_err) 2294 { 2295 ASSERT_SPDK_FC_MASTER_THREAD(); 2296 struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args; 2297 struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL; 2298 spdk_nvmf_fc_callback cb_func = NULL; 2299 int err = 0; 2300 struct spdk_nvmf_fc_port *fc_port = NULL; 2301 int num_nports = 0; 2302 char log_str[256]; 2303 2304 if (0 != spdk_err) { 2305 DEV_VERIFY(!"port link break cb: spdk_err not success."); 2306 SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err); 2307 goto out; 2308 } 2309 2310 if (!offline_cb_args) { 2311 DEV_VERIFY(!"port link break cb: port_offline_args is NULL."); 2312 err = -EINVAL; 2313 goto out; 2314 } 2315 2316 offline_args = offline_cb_args->args; 2317 if (!offline_args) { 2318 DEV_VERIFY(!"port link break cb: offline_args is NULL."); 2319 err = -EINVAL; 2320 goto out; 2321 } 2322 2323 if (port_handle != offline_args->port_handle) { 2324 DEV_VERIFY(!"port link break cb: port_handle mismatch."); 2325 err = -EINVAL; 2326 goto out; 2327 } 2328 2329 cb_func = offline_cb_args->cb_func; 2330 if (!cb_func) { 2331 DEV_VERIFY(!"port link break cb: cb_func is NULL."); 2332 err = -EINVAL; 2333 goto out; 2334 } 2335 2336 fc_port = nvmf_fc_port_lookup(port_handle); 2337 if (!fc_port) { 2338 DEV_VERIFY(!"port link break cb: fc_port is NULL."); 2339 SPDK_ERRLOG("port link break cb: Unable to find port:%d\n", 2340 offline_args->port_handle); 2341 err = -EINVAL; 2342 goto out; 2343 } 2344 2345 num_nports = fc_port->num_nports; 2346 if (!TAILQ_EMPTY(&fc_port->nport_list)) { 2347 /* 2348 * Don't call the callback unless all nports have been deleted. 2349 */ 2350 goto out; 2351 } 2352 2353 if (num_nports != 0) { 2354 DEV_VERIFY(!"port link break cb: num_nports is non-zero."); 2355 SPDK_ERRLOG("port link break cb: # of nports should be 0. Instead, num_nports:%d\n", 2356 num_nports); 2357 err = -EINVAL; 2358 } 2359 2360 /* 2361 * Mark the hwqps as offline and unregister the pollers.
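* nvmf_fc_adm_port_hwqp_offline_del_poller() (defined above) offlines the LS queue and every IO queue, undoing the per-queue setup performed when the port went online.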
2362 */ 2363 (void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port); 2364 2365 /* 2366 * Since there are no more nports, execute the callback(s). 2367 */ 2368 (void)cb_func(port_handle, SPDK_FC_LINK_BREAK, 2369 (void *)offline_args->cb_ctx, spdk_err); 2370 2371 out: 2372 free(offline_cb_args); 2373 2374 snprintf(log_str, sizeof(log_str), 2375 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n", 2376 port_handle, event_type, num_nports, err, spdk_err); 2377 2378 if (err != 0) { 2379 SPDK_ERRLOG("%s", log_str); 2380 } else { 2381 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2382 } 2383 return; 2384 } 2385 2386 /* 2387 * FC port must have all its nports deleted before transitioning to offline state. 2388 */ 2389 static void 2390 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port) 2391 { 2392 struct spdk_nvmf_fc_nport *nport = NULL; 2393 /* All nports must have been deleted at this point for this fc port */ 2394 DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list)); 2395 DEV_VERIFY(fc_port->num_nports == 0); 2396 /* Mark the nport states to be zombie, if they exist */ 2397 if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) { 2398 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 2399 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2400 } 2401 } 2402 } 2403 2404 static void 2405 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err) 2406 { 2407 ASSERT_SPDK_FC_MASTER_THREAD(); 2408 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args; 2409 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2410 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2411 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 2412 int spdk_err = 0; 2413 uint8_t port_handle = cb_data->port_handle; 2414 uint32_t s_id = rport->s_id; 2415 uint32_t rpi = rport->rpi; 2416 uint32_t assoc_count = rport->assoc_count; 2417 uint32_t nport_hdl = nport->nport_hdl; 2418 uint32_t d_id = nport->d_id; 2419 char log_str[256]; 2420 2421 /* 2422 * Assert on any delete failure. 2423 */ 2424 if (0 != err) { 2425 DEV_VERIFY(!"Error in IT Delete callback."); 2426 goto out; 2427 } 2428 2429 if (cb_func != NULL) { 2430 (void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err); 2431 } 2432 2433 out: 2434 free(cb_data); 2435 2436 snprintf(log_str, sizeof(log_str), 2437 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n", 2438 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err); 2439 2440 if (err != 0) { 2441 SPDK_ERRLOG("%s", log_str); 2442 } else { 2443 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2444 } 2445 } 2446 2447 static void 2448 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err) 2449 { 2450 ASSERT_SPDK_FC_MASTER_THREAD(); 2451 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args; 2452 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 2453 struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport; 2454 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func; 2455 uint32_t s_id = rport->s_id; 2456 uint32_t rpi = rport->rpi; 2457 uint32_t assoc_count = rport->assoc_count; 2458 uint32_t nport_hdl = nport->nport_hdl; 2459 uint32_t d_id = nport->d_id; 2460 char log_str[256]; 2461 2462 /* 2463 * Assert on any association delete failure. We continue to delete other 2464 * associations in promoted builds. 
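* (DEV_VERIFY is an assert in debug builds and typically compiles out in promoted builds, so the count adjustments below keep the bookkeeping consistent when the assert is disabled.)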
*/ 2466 if (0 != err) { 2467 DEV_VERIFY(!"Nport's association delete callback returned error"); 2468 if (nport->assoc_count > 0) { 2469 nport->assoc_count--; 2470 } 2471 if (rport->assoc_count > 0) { 2472 rport->assoc_count--; 2473 } 2474 } 2475 2476 /* 2477 * If this is the last association being deleted for the ITN, 2478 * execute the callback(s). 2479 */ 2480 if (0 == rport->assoc_count) { 2481 /* Remove the rport from the remote port list. */ 2482 if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) { 2483 SPDK_ERRLOG("Error while removing rport from list.\n"); 2484 DEV_VERIFY(!"Error while removing rport from list."); 2485 } 2486 2487 if (cb_func != NULL) { 2488 /* 2489 * Callback function is provided by the caller 2490 * of nvmf_fc_adm_i_t_delete_assoc(). 2491 */ 2492 (void)cb_func(cb_data->cb_ctx, 0); 2493 } 2494 free(rport); 2495 free(args); 2496 } 2497 2498 snprintf(log_str, sizeof(log_str), 2499 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n", 2500 nport_hdl, s_id, d_id, rpi, assoc_count, err); 2501 2502 if (err != 0) { 2503 SPDK_ERRLOG("%s", log_str); 2504 } else { 2505 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2506 } 2507 } 2508 2509 /** 2510 * Process an IT delete: tear down all associations for this I-T nexus. 2511 */ 2512 static void 2513 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport, 2514 struct spdk_nvmf_fc_remote_port_info *rport, 2515 spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func, 2516 void *cb_ctx) 2517 { 2518 int err = 0; 2519 struct spdk_nvmf_fc_association *assoc = NULL; 2520 int assoc_err = 0; 2521 uint32_t num_assoc = 0; 2522 uint32_t num_assoc_del_scheduled = 0; 2523 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL; 2524 uint8_t port_hdl = nport->port_hdl; 2525 uint32_t s_id = rport->s_id; 2526 uint32_t rpi = rport->rpi; 2527 uint32_t assoc_count = rport->assoc_count; 2528 char log_str[256]; 2529 2530 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n", 2531 nport->nport_hdl); 2532 2533 /* 2534 * Allocate memory for callback data. 2535 * This memory will be freed by the callback function. 2536 */ 2537 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data)); 2538 if (NULL == cb_data) { 2539 SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl); 2540 err = -ENOMEM; 2541 goto out; 2542 } 2543 cb_data->nport = nport; 2544 cb_data->rport = rport; 2545 cb_data->port_handle = port_hdl; 2546 cb_data->cb_func = cb_func; 2547 cb_data->cb_ctx = cb_ctx; 2548 2549 /* 2550 * Delete all associations, if any, related to this ITN/remote_port. 2551 */ 2552 TAILQ_FOREACH(assoc, &nport->fc_associations, link) { 2553 num_assoc++; 2554 if (assoc->s_id == s_id) { 2555 assoc_err = nvmf_fc_delete_association(nport, 2556 assoc->assoc_id, 2557 false /* send abts */, false, 2558 nvmf_fc_adm_i_t_delete_assoc_cb, cb_data); 2559 if (0 != assoc_err) { 2560 /* 2561 * Mark this association as zombie. 2562 */ 2563 err = -EINVAL; 2564 DEV_VERIFY(!"Error while deleting association"); 2565 (void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE); 2566 } else { 2567 num_assoc_del_scheduled++; 2568 } 2569 } 2570 } 2571 2572 out: 2573 if ((cb_data) && (num_assoc_del_scheduled == 0)) { 2574 /* 2575 * Since there are no association_delete calls 2576 * successfully scheduled, the association_delete 2577 * callback function will never be called. 2578 * In this case, call the callback function now.
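* Invoking nvmf_fc_adm_i_t_delete_assoc_cb() directly takes the same path as an asynchronous completion: once the rport's association count is zero it frees both the rport and cb_data after running the caller's callback.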
*/ 2580 nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0); 2581 } 2582 2583 snprintf(log_str, sizeof(log_str), 2584 "IT delete associations on nport:%d end. " 2585 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n", 2586 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err); 2587 2588 if (err == 0) { 2589 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 2590 } else { 2591 SPDK_ERRLOG("%s", log_str); 2592 } 2593 } 2594 2595 static void 2596 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret) 2597 { 2598 ASSERT_SPDK_FC_MASTER_THREAD(); 2599 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL; 2600 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2601 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2602 struct spdk_nvmf_fc_port *fc_port = NULL; 2603 int err = 0; 2604 2605 quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data; 2606 hwqp = quiesce_api_data->hwqp; 2607 fc_port = hwqp->fc_port; 2608 port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx; 2609 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func; 2610 2611 /* 2612 * Decrement the callback/quiesced queue count. 2613 */ 2614 port_quiesce_ctx->quiesce_count--; 2615 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id); 2616 2617 free(quiesce_api_data); 2618 /* 2619 * Wait for all the callbacks, i.e. one per IO queue plus the LS queue. 2620 */ 2621 if (port_quiesce_ctx->quiesce_count > 0) { 2622 return; 2623 } 2624 2625 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2626 SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl); 2627 } else { 2628 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl); 2629 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2630 } 2631 2632 if (cb_func) { 2633 /* 2634 * Invoke the callback provided by the caller of the quiesce. 2635 */ 2636 cb_func(port_quiesce_ctx->ctx, err); 2637 } 2638 2639 /* 2640 * Free the context structure.
2641 */ 2642 free(port_quiesce_ctx); 2643 2644 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl, 2645 err); 2646 } 2647 2648 static int 2649 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx, 2650 spdk_nvmf_fc_poller_api_cb cb_func) 2651 { 2652 struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args; 2653 enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS; 2654 int err = 0; 2655 2656 args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args)); 2657 2658 if (args == NULL) { 2659 err = -ENOMEM; 2660 SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id); 2661 goto done; 2662 } 2663 args->hwqp = fc_hwqp; 2664 args->ctx = ctx; 2665 args->cb_info.cb_func = cb_func; 2666 args->cb_info.cb_data = args; 2667 args->cb_info.cb_thread = spdk_get_thread(); 2668 2669 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id); 2670 rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args); 2671 if (rc) { 2672 free(args); 2673 err = -EINVAL; 2674 } 2675 2676 done: 2677 return err; 2678 } 2679 2680 /* 2681 * Hw port Quiesce 2682 */ 2683 static int 2684 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx, 2685 spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func) 2686 { 2687 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL; 2688 uint32_t i = 0; 2689 int err = 0; 2690 2691 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl); 2692 2693 /* 2694 * If the port is in an OFFLINE state, set the state to QUIESCED 2695 * and execute the callback. 2696 */ 2697 if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) { 2698 fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED; 2699 } 2700 2701 if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) { 2702 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n", 2703 fc_port->port_hdl); 2704 /* 2705 * Execute the callback function directly. 2706 */ 2707 cb_func(ctx, err); 2708 goto out; 2709 } 2710 2711 port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx)); 2712 2713 if (port_quiesce_ctx == NULL) { 2714 err = -ENOMEM; 2715 SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n", 2716 fc_port->port_hdl); 2717 goto out; 2718 } 2719 2720 port_quiesce_ctx->quiesce_count = 0; 2721 port_quiesce_ctx->ctx = ctx; 2722 port_quiesce_ctx->cb_func = cb_func; 2723 2724 /* 2725 * Quiesce the LS queue. 2726 */ 2727 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx, 2728 nvmf_fc_adm_queue_quiesce_cb); 2729 if (err != 0) { 2730 SPDK_ERRLOG("Failed to quiesce the LS queue.\n"); 2731 goto out; 2732 } 2733 port_quiesce_ctx->quiesce_count++; 2734 2735 /* 2736 * Quiesce the IO queues. 2737 */ 2738 for (i = 0; i < fc_port->num_io_queues; i++) { 2739 err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i], 2740 port_quiesce_ctx, 2741 nvmf_fc_adm_queue_quiesce_cb); 2742 if (err != 0) { 2743 DEV_VERIFY(0); 2744 SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id); 2745 } 2746 port_quiesce_ctx->quiesce_count++; 2747 } 2748 2749 out: 2750 if (port_quiesce_ctx && err != 0) { 2751 free(port_quiesce_ctx); 2752 } 2753 return err; 2754 } 2755 2756 /* 2757 * Initialize and add a HW port entry to the global 2758 * HW port list. 
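 *
 * Illustrative driver-side sketch (hypothetical names: my_ls_queue_buf,
 * my_io_queue_buf, my_fcp_rq_id, my_port_init_done_cb; the fields shown are
 * the ones consumed by the init path below):
 *
 *   struct spdk_nvmf_fc_hw_port_init_args *args = calloc(1, sizeof(*args));
 *   args->port_handle = 0;
 *   args->io_queue_cnt = 1;
 *   args->io_queue_size = 1024;
 *   args->ls_queue = my_ls_queue_buf;        (driver-owned queue memory)
 *   args->io_queues[0] = my_io_queue_buf;    (one entry per IO queue)
 *   args->fcp_rq_id = my_fcp_rq_id;
 *   nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, args,
 *                                my_port_init_done_cb);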
*/ 2760 static void 2761 nvmf_fc_adm_evnt_hw_port_init(void *arg) 2762 { 2763 ASSERT_SPDK_FC_MASTER_THREAD(); 2764 struct spdk_nvmf_fc_port *fc_port = NULL; 2765 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2766 struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *) 2767 api_data->api_args; 2768 int err = 0; 2769 2770 if (args->io_queue_cnt > spdk_env_get_core_count()) { 2771 SPDK_ERRLOG("IO queue count greater than core count for port %d.\n", args->port_handle); 2772 err = -EINVAL; 2773 goto abort_port_init; 2774 } 2775 2776 /* 2777 * 1. Check for duplicate initialization. 2778 */ 2779 fc_port = nvmf_fc_port_lookup(args->port_handle); 2780 if (fc_port != NULL) { 2781 /* Port already exists, check if it has to be re-initialized */ 2782 err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args); 2783 if (err) { 2784 /* 2785 * In case of an error we do not want to free the fc_port 2786 * so we set that pointer to NULL. 2787 */ 2788 fc_port = NULL; 2789 } 2790 goto abort_port_init; 2791 } 2792 2793 /* 2794 * 2. Get the memory to instantiate an fc port. 2795 */ 2796 fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) + 2797 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp))); 2798 if (fc_port == NULL) { 2799 SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle); 2800 err = -ENOMEM; 2801 goto abort_port_init; 2802 } 2803 2804 /* assign the io_queues array */ 2805 fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof( 2806 struct spdk_nvmf_fc_port)); 2807 2808 /* 2809 * 3. Initialize the contents for the FC-port 2810 */ 2811 err = nvmf_fc_adm_hw_port_data_init(fc_port, args); 2812 2813 if (err != 0) { 2814 SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle); 2815 DEV_VERIFY(!"Data initialization failed for fc_port"); 2816 goto abort_port_init; 2817 } 2818 2819 /* 2820 * 4. Add this port to the global fc port list in the library. 2821 */ 2822 nvmf_fc_port_add(fc_port); 2823 2824 abort_port_init: 2825 if (err && fc_port) { 2826 free(fc_port); 2827 } 2828 if (api_data->cb_func != NULL) { 2829 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err); 2830 } 2831 2832 free(arg); 2833 2834 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n", 2835 args->port_handle, err); 2836 } 2837 2838 /* 2839 * Online a HW port. 2840 */ 2841 static void 2842 nvmf_fc_adm_evnt_hw_port_online(void *arg) 2843 { 2844 ASSERT_SPDK_FC_MASTER_THREAD(); 2845 struct spdk_nvmf_fc_port *fc_port = NULL; 2846 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2847 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2848 struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *) 2849 api_data->api_args; 2850 int i = 0; 2851 int err = 0; 2852 2853 fc_port = nvmf_fc_port_lookup(args->port_handle); 2854 if (fc_port) { 2855 /* Set the port state to online */ 2856 err = nvmf_fc_port_set_online(fc_port); 2857 if (err != 0) { 2858 SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err); 2859 DEV_VERIFY(!"Hw port online failed"); 2860 goto out; 2861 } 2862 2863 hwqp = &fc_port->ls_queue; 2864 hwqp->context = NULL; 2865 (void)nvmf_fc_hwqp_set_online(hwqp); 2866 2867 /* Cycle through all the io queues and setup a hwqp poller for each.
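* Each hwqp is marked online before nvmf_fc_poll_group_add_hwqp() attaches it to a poll group, so the poller finds the queue ready on its first pass.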
*/ 2868 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2869 hwqp = &fc_port->io_queues[i]; 2870 hwqp->context = NULL; 2871 (void)nvmf_fc_hwqp_set_online(hwqp); 2872 nvmf_fc_poll_group_add_hwqp(hwqp); 2873 } 2874 } else { 2875 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2876 err = -EINVAL; 2877 } 2878 2879 out: 2880 if (api_data->cb_func != NULL) { 2881 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err); 2882 } 2883 2884 free(arg); 2885 2886 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle, 2887 err); 2888 } 2889 2890 /* 2891 * Offline a HW port. 2892 */ 2893 static void 2894 nvmf_fc_adm_evnt_hw_port_offline(void *arg) 2895 { 2896 ASSERT_SPDK_FC_MASTER_THREAD(); 2897 struct spdk_nvmf_fc_port *fc_port = NULL; 2898 struct spdk_nvmf_fc_hwqp *hwqp = NULL; 2899 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 2900 struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *) 2901 api_data->api_args; 2902 int i = 0; 2903 int err = 0; 2904 2905 fc_port = nvmf_fc_port_lookup(args->port_handle); 2906 if (fc_port) { 2907 /* Set the port state to offline, if it is not already. */ 2908 err = nvmf_fc_port_set_offline(fc_port); 2909 if (err != 0) { 2910 SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err); 2911 err = 0; 2912 goto out; 2913 } 2914 2915 hwqp = &fc_port->ls_queue; 2916 (void)nvmf_fc_hwqp_set_offline(hwqp); 2917 2918 /* Remove poller for all the io queues. */ 2919 for (i = 0; i < (int)fc_port->num_io_queues; i++) { 2920 hwqp = &fc_port->io_queues[i]; 2921 (void)nvmf_fc_hwqp_set_offline(hwqp); 2922 nvmf_fc_poll_group_remove_hwqp(hwqp); 2923 } 2924 2925 /* 2926 * Delete all the nports. Ideally, the nports should have been purged 2927 * before the offline event, in which case, only a validation is required. 
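* nvmf_fc_adm_hw_port_offline_nport_delete() asserts that the list is already empty and, failing that, marks any straggler nports as zombies.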
2928 */ 2929 nvmf_fc_adm_hw_port_offline_nport_delete(fc_port); 2930 } else { 2931 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 2932 err = -EINVAL; 2933 } 2934 out: 2935 if (api_data->cb_func != NULL) { 2936 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err); 2937 } 2938 2939 free(arg); 2940 2941 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle, 2942 err); 2943 } 2944 2945 struct nvmf_fc_add_rem_listener_ctx { 2946 struct spdk_nvmf_subsystem *subsystem; 2947 bool add_listener; 2948 struct spdk_nvme_transport_id trid; 2949 }; 2950 2951 static void 2952 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 2953 { 2954 ASSERT_SPDK_FC_MASTER_THREAD(); 2955 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 2956 free(ctx); 2957 } 2958 2959 static void 2960 nvmf_fc_adm_listen_done(void *cb_arg, int status) 2961 { 2962 ASSERT_SPDK_FC_MASTER_THREAD(); 2963 struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg; 2964 2965 if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) { 2966 SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn); 2967 free(ctx); 2968 } 2969 } 2970 2971 static void 2972 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status) 2973 { 2974 ASSERT_SPDK_FC_MASTER_THREAD(); 2975 struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg; 2976 2977 if (ctx->add_listener) { 2978 spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx); 2979 } else { 2980 spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid); 2981 nvmf_fc_adm_listen_done(ctx, 0); 2982 } 2983 } 2984 2985 static int 2986 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add) 2987 { 2988 struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt(); 2989 struct spdk_nvmf_subsystem *subsystem; 2990 2991 if (!tgt) { 2992 SPDK_ERRLOG("No nvmf target defined\n"); 2993 return -EINVAL; 2994 } 2995 2996 subsystem = spdk_nvmf_subsystem_get_first(tgt); 2997 while (subsystem) { 2998 struct nvmf_fc_add_rem_listener_ctx *ctx; 2999 3000 if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) { 3001 ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx)); 3002 if (ctx) { 3003 ctx->add_listener = add; 3004 ctx->subsystem = subsystem; 3005 nvmf_fc_create_trid(&ctx->trid, 3006 nport->fc_nodename.u.wwn, 3007 nport->fc_portname.u.wwn); 3008 3009 if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) { 3010 SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n", 3011 ctx->trid.traddr); 3012 free(ctx); 3013 } else if (spdk_nvmf_subsystem_pause(subsystem, 3014 nvmf_fc_adm_subsystem_paused_cb, 3015 ctx)) { 3016 SPDK_ERRLOG("Failed to pause subsystem: %s\n", 3017 subsystem->subnqn); 3018 free(ctx); 3019 } 3020 } 3021 } 3022 3023 subsystem = spdk_nvmf_subsystem_get_next(subsystem); 3024 } 3025 3026 return 0; 3027 } 3028 3029 /* 3030 * Create a Nport. 
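* Driven by an SPDK_FC_NPORT_CREATE event; the WWNN/WWPN carried in the args are later advertised as an FC listen address to the nvmf subsystems.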
3031 */ 3032 static void 3033 nvmf_fc_adm_evnt_nport_create(void *arg) 3034 { 3035 ASSERT_SPDK_FC_MASTER_THREAD(); 3036 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3037 struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *) 3038 api_data->api_args; 3039 struct spdk_nvmf_fc_nport *nport = NULL; 3040 struct spdk_nvmf_fc_port *fc_port = NULL; 3041 int err = 0; 3042 3043 /* 3044 * Get the physical port. 3045 */ 3046 fc_port = nvmf_fc_port_lookup(args->port_handle); 3047 if (fc_port == NULL) { 3048 err = -EINVAL; 3049 goto out; 3050 } 3051 3052 /* 3053 * Check for duplicate initialization. 3054 */ 3055 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3056 if (nport != NULL) { 3057 SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle, 3058 args->port_handle); 3059 err = -EINVAL; 3060 goto out; 3061 } 3062 3063 /* 3064 * Get the memory to instantiate a fc nport. 3065 */ 3066 nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport)); 3067 if (nport == NULL) { 3068 SPDK_ERRLOG("Failed to allocate memory for nport %d.\n", 3069 args->nport_handle); 3070 err = -ENOMEM; 3071 goto out; 3072 } 3073 3074 /* 3075 * Initialize the contents for the nport 3076 */ 3077 nport->nport_hdl = args->nport_handle; 3078 nport->port_hdl = args->port_handle; 3079 nport->nport_state = SPDK_NVMF_FC_OBJECT_CREATED; 3080 nport->fc_nodename = args->fc_nodename; 3081 nport->fc_portname = args->fc_portname; 3082 nport->d_id = args->d_id; 3083 nport->fc_port = nvmf_fc_port_lookup(args->port_handle); 3084 3085 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED); 3086 TAILQ_INIT(&nport->rem_port_list); 3087 nport->rport_count = 0; 3088 TAILQ_INIT(&nport->fc_associations); 3089 nport->assoc_count = 0; 3090 3091 /* 3092 * Populate the nport address (as listening address) to the nvmf subsystems. 3093 */ 3094 err = nvmf_fc_adm_add_rem_nport_listener(nport, true); 3095 3096 (void)nvmf_fc_port_add_nport(fc_port, nport); 3097 out: 3098 if (err && nport) { 3099 free(nport); 3100 } 3101 3102 if (api_data->cb_func != NULL) { 3103 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err); 3104 } 3105 3106 free(arg); 3107 } 3108 3109 static void 3110 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type, 3111 void *cb_args, int spdk_err) 3112 { 3113 ASSERT_SPDK_FC_MASTER_THREAD(); 3114 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args; 3115 struct spdk_nvmf_fc_nport *nport = cb_data->nport; 3116 spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func; 3117 int err = 0; 3118 uint16_t nport_hdl = 0; 3119 char log_str[256]; 3120 3121 /* 3122 * Assert on any delete failure. 3123 */ 3124 if (nport == NULL) { 3125 SPDK_ERRLOG("Nport delete callback returned null nport"); 3126 DEV_VERIFY(!"nport is null."); 3127 goto out; 3128 } 3129 3130 nport_hdl = nport->nport_hdl; 3131 if (0 != spdk_err) { 3132 SPDK_ERRLOG("Nport delete callback returned error. FC Port: " 3133 "%d, Nport: %d\n", 3134 nport->port_hdl, nport->nport_hdl); 3135 DEV_VERIFY(!"nport delete callback error."); 3136 } 3137 3138 /* 3139 * Free the nport if this is the last rport being deleted and 3140 * execute the callback(s). 
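* Every SPDK_FC_IT_DELETE issued by nvmf_fc_adm_evnt_nport_delete() funnels into this callback; only the final one, which leaves the rport list empty, releases the nport and cb_data.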
3141 */ 3142 if (nvmf_fc_nport_has_no_rport(nport)) { 3143 if (0 != nport->assoc_count) { 3144 SPDK_ERRLOG("association count != 0\n"); 3145 DEV_VERIFY(!"association count != 0"); 3146 } 3147 3148 err = nvmf_fc_port_remove_nport(nport->fc_port, nport); 3149 if (0 != err) { 3150 SPDK_ERRLOG("Nport delete callback: Failed to remove " 3151 "nport from nport list. FC Port:%d Nport:%d\n", 3152 nport->port_hdl, nport->nport_hdl); 3153 } 3154 /* Free the nport */ 3155 free(nport); 3156 3157 if (cb_func != NULL) { 3158 (void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err); 3159 } 3160 free(cb_data); 3161 } 3162 out: 3163 snprintf(log_str, sizeof(log_str), 3164 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n", 3165 port_handle, nport_hdl, event_type, spdk_err); 3166 3167 if (err != 0) { 3168 SPDK_ERRLOG("%s", log_str); 3169 } else { 3170 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3171 } 3172 } 3173 3174 /* 3175 * Delete Nport. 3176 */ 3177 static void 3178 nvmf_fc_adm_evnt_nport_delete(void *arg) 3179 { 3180 ASSERT_SPDK_FC_MASTER_THREAD(); 3181 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3182 struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *) 3183 api_data->api_args; 3184 struct spdk_nvmf_fc_nport *nport = NULL; 3185 struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL; 3186 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3187 int err = 0; 3188 uint32_t rport_cnt = 0; 3189 int rc = 0; 3190 3191 /* 3192 * Make sure that the nport exists. 3193 */ 3194 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3195 if (nport == NULL) { 3196 SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle, 3197 args->port_handle); 3198 err = -EINVAL; 3199 goto out; 3200 } 3201 3202 /* 3203 * Allocate memory for callback data. 3204 */ 3205 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data)); 3206 if (NULL == cb_data) { 3207 SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle); 3208 err = -ENOMEM; 3209 goto out; 3210 } 3211 3212 cb_data->nport = nport; 3213 cb_data->port_handle = args->port_handle; 3214 cb_data->fc_cb_func = api_data->cb_func; 3215 cb_data->fc_cb_ctx = args->cb_ctx; 3216 3217 /* 3218 * Begin nport tear down 3219 */ 3220 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3221 (void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3222 } else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3223 /* 3224 * Deletion of this nport already in progress. Register callback 3225 * and return. 3226 */ 3227 /* TODO: Register callback in callback vector. For now, set the error and return. 
*/ 3228 err = -ENODEV; 3229 goto out; 3230 } else { 3231 /* nport partially created/deleted */ 3232 DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3233 DEV_VERIFY(!"Nport in zombie state"); 3234 err = -ENODEV; 3235 goto out; 3236 } 3237 3238 /* 3239 * Remove this nport from listening addresses across subsystems 3240 */ 3241 rc = nvmf_fc_adm_add_rem_nport_listener(nport, false); 3242 3243 if (0 != rc) { 3244 err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE); 3245 SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n", 3246 nport->nport_hdl); 3247 goto out; 3248 } 3249 3250 /* 3251 * Delete all the remote ports (if any) for the nport 3252 */ 3253 /* TODO - Need to do this with a "first" and a "next" accessor function 3254 * for completeness. Look at app-subsystem as examples. 3255 */ 3256 if (nvmf_fc_nport_has_no_rport(nport)) { 3257 /* No rports to delete. Complete the nport deletion. */ 3258 nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0); 3259 goto out; 3260 } 3261 3262 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3263 struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc( 3264 1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args)); 3265 3266 if (it_del_args == NULL) { 3267 err = -ENOMEM; 3268 SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n", 3269 rport_iter->rpi, rport_iter->s_id); 3270 DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory"); 3271 goto out; 3272 } 3273 3274 rport_cnt++; 3275 it_del_args->port_handle = nport->port_hdl; 3276 it_del_args->nport_handle = nport->nport_hdl; 3277 it_del_args->cb_ctx = (void *)cb_data; 3278 it_del_args->rpi = rport_iter->rpi; 3279 it_del_args->s_id = rport_iter->s_id; 3280 3281 nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args, 3282 nvmf_fc_adm_delete_nport_cb); 3283 } 3284 3285 out: 3286 /* On failure, execute the callback function now */ 3287 if ((err != 0) || (rc != 0)) { 3288 SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, " 3289 "rport_cnt:%d rc:%d.\n", 3290 args->nport_handle, err, args->port_handle, 3291 rport_cnt, rc); 3292 if (cb_data) { 3293 free(cb_data); 3294 } 3295 if (api_data->cb_func != NULL) { 3296 (void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err); 3297 } 3298 3299 } else { 3300 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3301 "NPort %d delete done successfully, fc port:%d. " 3302 "rport_cnt:%d\n", 3303 args->nport_handle, args->port_handle, rport_cnt); 3304 } 3305 3306 free(arg); 3307 } 3308 3309 /* 3310 * Process a PRLI/IT add. 3311 */ 3312 static void 3313 nvmf_fc_adm_evnt_i_t_add(void *arg) 3314 { 3315 ASSERT_SPDK_FC_MASTER_THREAD(); 3316 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3317 struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *) 3318 api_data->api_args; 3319 struct spdk_nvmf_fc_nport *nport = NULL; 3320 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3321 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3322 int err = 0; 3323 3324 /* 3325 * Make sure the nport exists. 3326 */ 3327 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3328 if (nport == NULL) { 3329 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3330 err = -EINVAL; 3331 goto out; 3332 } 3333 3334 /* 3335 * Check for duplicate i_t_add.
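* An I-T nexus is identified by the (s_id, rpi) pair; both must match an existing rport for the add to be rejected as a duplicate.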
*/ 3337 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3338 if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) { 3339 SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n", 3340 args->nport_handle, rport_iter->s_id, rport_iter->rpi); 3341 err = -EEXIST; 3342 goto out; 3343 } 3344 } 3345 3346 /* 3347 * Get the memory to instantiate the remote port 3348 */ 3349 rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info)); 3350 if (rport == NULL) { 3351 SPDK_ERRLOG("Memory allocation for rem port failed.\n"); 3352 err = -ENOMEM; 3353 goto out; 3354 } 3355 3356 /* 3357 * Initialize the contents for the rport 3358 */ 3359 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED); 3360 rport->s_id = args->s_id; 3361 rport->rpi = args->rpi; 3362 rport->fc_nodename = args->fc_nodename; 3363 rport->fc_portname = args->fc_portname; 3364 3365 /* 3366 * Add remote port to nport 3367 */ 3368 if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) { 3369 DEV_VERIFY(!"Error while adding rport to list"); 3370 } 3371 3372 /* 3373 * TODO: Do we validate the initiator's service parameters? 3374 */ 3375 3376 /* 3377 * Get the target's service parameters from the library 3378 * to return to the driver. 3379 */ 3380 args->target_prli_info = nvmf_fc_get_prli_service_params(); 3381 3382 out: 3383 if (api_data->cb_func != NULL) { 3384 /* 3385 * Passing pointer to the args struct as the first argument. 3386 * The cb_func should handle this appropriately. 3387 */ 3388 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err); 3389 } 3390 3391 free(arg); 3392 3393 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3394 "IT add on nport %d done, rc = %d.\n", 3395 args->nport_handle, err); 3396 } 3397 3398 /** 3399 * Process an IT delete. 3400 */ 3401 static void 3402 nvmf_fc_adm_evnt_i_t_delete(void *arg) 3403 { 3404 ASSERT_SPDK_FC_MASTER_THREAD(); 3405 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3406 struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *) 3407 api_data->api_args; 3408 int rc = 0; 3409 struct spdk_nvmf_fc_nport *nport = NULL; 3410 struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL; 3411 struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL; 3412 struct spdk_nvmf_fc_remote_port_info *rport = NULL; 3413 uint32_t num_rport = 0; 3414 char log_str[256]; 3415 3416 SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle); 3417 3418 /* 3419 * Make sure the nport exists. If it does not, error out. 3420 */ 3421 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3422 if (nport == NULL) { 3423 SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle); 3424 rc = -EINVAL; 3425 goto out; 3426 } 3427 3428 /* 3429 * Find this ITN / rport (remote port). 3430 */ 3431 TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) { 3432 num_rport++; 3433 if ((rport_iter->s_id == args->s_id) && 3434 (rport_iter->rpi == args->rpi) && 3435 (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) { 3436 rport = rport_iter; 3437 break; 3438 } 3439 } 3440 3441 /* 3442 * We should find either zero or exactly one rport. 3443 * 3444 * If we find zero rports, that means that a previous request has 3445 * removed the rport by the time we reached here. In this case, 3446 * simply return out. 3447 */ 3448 if (rport == NULL) { 3449 rc = -ENODEV; 3450 goto out; 3451 } 3452 3453 /* 3454 * We have found exactly one rport.
Allocate memory for callback data. 3455 */ 3456 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data)); 3457 if (NULL == cb_data) { 3458 SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle); 3459 rc = -ENOMEM; 3460 goto out; 3461 } 3462 3463 cb_data->nport = nport; 3464 cb_data->rport = rport; 3465 cb_data->port_handle = args->port_handle; 3466 cb_data->fc_cb_func = api_data->cb_func; 3467 cb_data->fc_cb_ctx = args->cb_ctx; 3468 3469 /* 3470 * Validate rport object state. 3471 */ 3472 if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) { 3473 (void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED); 3474 } else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3475 /* 3476 * Deletion of this rport is already in progress. Register callback 3477 * and return. 3478 */ 3479 /* TODO: Register callback in callback vector. For now, set the error and return. */ 3480 rc = -ENODEV; 3481 goto out; 3482 } else { 3483 /* rport partially created/deleted */ 3484 DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE); 3485 DEV_VERIFY(!"Invalid rport_state"); 3486 rc = -ENODEV; 3487 goto out; 3488 } 3489 3490 /* 3491 * We have successfully found a rport to delete. Call 3492 * nvmf_fc_adm_i_t_delete_assoc(), which will perform further 3493 * IT-delete processing as well as free the cb_data. 3494 */ 3495 nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb, 3496 (void *)cb_data); 3497 3498 out: 3499 if (rc != 0) { 3500 /* 3501 * We have entered here because either we encountered an 3502 * error, or we did not find a rport to delete. 3503 * As a result, we will not call the function 3504 * nvmf_fc_adm_i_t_delete_assoc() for further IT-delete 3505 * processing. Therefore, execute the callback function now. 3506 */ 3507 if (cb_data) { 3508 free(cb_data); 3509 } 3510 if (api_data->cb_func != NULL) { 3511 (void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc); 3512 } 3513 } 3514 3515 snprintf(log_str, sizeof(log_str), 3516 "IT delete on nport:%d end. num_rport:%d rc = %d.\n", 3517 args->nport_handle, num_rport, rc); 3518 3519 if (rc != 0) { 3520 SPDK_ERRLOG("%s", log_str); 3521 } else { 3522 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3523 } 3524 3525 free(arg); 3526 } 3527 3528 /* 3529 * Process ABTS received 3530 */ 3531 static void 3532 nvmf_fc_adm_evnt_abts_recv(void *arg) 3533 { 3534 ASSERT_SPDK_FC_MASTER_THREAD(); 3535 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3536 struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args; 3537 struct spdk_nvmf_fc_nport *nport = NULL; 3538 int err = 0; 3539 3540 SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi, 3541 args->oxid, args->rxid); 3542 3543 /* 3544 * 1. Make sure the nport exists. 3545 */ 3546 nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle); 3547 if (nport == NULL) { 3548 SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle); 3549 err = -EINVAL; 3550 goto out; 3551 } 3552 3553 /* 3554 * 2. If the nport is in the process of being deleted, drop the ABTS. 3555 */ 3556 if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) { 3557 SPDK_DEBUGLOG(nvmf_fc_adm_api, 3558 "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n", 3559 args->rpi, args->oxid, args->rxid); 3560 err = 0; 3561 goto out; 3562 3563 } 3564 3565 /* 3566 * 3.
Pass the received ABTS-LS to the library for handling. 3567 */ 3568 nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid); 3569 3570 out: 3571 if (api_data->cb_func != NULL) { 3572 /* 3573 * Passing pointer to the args struct as the first argument. 3574 * The cb_func should handle this appropriately. 3575 */ 3576 (void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err); 3577 } else { 3578 /* No callback set, free the args */ 3579 free(args); 3580 } 3581 3582 free(arg); 3583 } 3584 3585 /* 3586 * Callback function for hw port quiesce. 3587 */ 3588 static void 3589 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err) 3590 { 3591 ASSERT_SPDK_FC_MASTER_THREAD(); 3592 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx = 3593 (struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx; 3594 struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args; 3595 spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func; 3596 struct spdk_nvmf_fc_queue_dump_info dump_info; 3597 struct spdk_nvmf_fc_port *fc_port = NULL; 3598 char *dump_buf = NULL; 3599 uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE; 3600 3601 /* 3602 * Free the callback context struct. 3603 */ 3604 free(ctx); 3605 3606 if (err != 0) { 3607 SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle); 3608 goto out; 3609 } 3610 3611 if (args->dump_queues == false) { 3612 /* 3613 * Queues need not be dumped. 3614 */ 3615 goto out; 3616 } 3617 3618 SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle); 3619 3620 /* 3621 * Get the fc port. 3622 */ 3623 fc_port = nvmf_fc_port_lookup(args->port_handle); 3624 if (fc_port == NULL) { 3625 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3626 err = -EINVAL; 3627 goto out; 3628 } 3629 3630 /* 3631 * Allocate memory for the dump buffer. 3632 * This memory will be freed by FCT. 3633 */ 3634 dump_buf = (char *)calloc(1, dump_buf_size); 3635 if (dump_buf == NULL) { 3636 err = -ENOMEM; 3637 SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle); 3638 goto out; 3639 } 3640 *args->dump_buf = (uint32_t *)dump_buf; 3641 dump_info.buffer = dump_buf; 3642 dump_info.offset = 0; 3643 3644 /* 3645 * Add the dump reason to the top of the buffer. 3646 */ 3647 nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason); 3648 3649 /* 3650 * Dump the hwqp. 3651 */ 3652 nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues, 3653 fc_port->num_io_queues, &dump_info); 3654 3655 out: 3656 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n", 3657 args->port_handle, args->dump_queues, err); 3658 3659 if (cb_func != NULL) { 3660 (void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3661 } 3662 } 3663 3664 /* 3665 * HW port reset 3666 3667 */ 3668 static void 3669 nvmf_fc_adm_evnt_hw_port_reset(void *arg) 3670 { 3671 ASSERT_SPDK_FC_MASTER_THREAD(); 3672 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3673 struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *) 3674 api_data->api_args; 3675 struct spdk_nvmf_fc_port *fc_port = NULL; 3676 struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL; 3677 int err = 0; 3678 3679 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle); 3680 3681 /* 3682 * Make sure the physical port exists. 
3683 */ 3684 fc_port = nvmf_fc_port_lookup(args->port_handle); 3685 if (fc_port == NULL) { 3686 SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle); 3687 err = -EINVAL; 3688 goto out; 3689 } 3690 3691 /* 3692 * Save the reset event args and the callback in a context struct. 3693 */ 3694 ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx)); 3695 3696 if (ctx == NULL) { 3697 err = -ENOMEM; 3698 SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle); 3699 goto fail; 3700 } 3701 3702 ctx->reset_args = args; 3703 ctx->reset_cb_func = api_data->cb_func; 3704 3705 /* 3706 * Quiesce the hw port. 3707 */ 3708 err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb); 3709 if (err != 0) { 3710 goto fail; 3711 } 3712 3713 /* 3714 * Once the port is successfully quiesced, the reset processing 3715 * continues in the callback function: nvmf_fc_adm_hw_port_quiesce_reset_cb 3716 */ 3717 return; 3718 fail: 3719 free(ctx); 3720 3721 out: 3722 SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle, 3723 err); 3724 3725 if (api_data->cb_func != NULL) { 3726 (void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err); 3727 } 3728 3729 free(arg); 3730 } 3731 3732 /* 3733 * Process a link break event on a HW port. 3734 */ 3735 static void 3736 nvmf_fc_adm_evnt_hw_port_link_break(void *arg) 3737 { 3738 ASSERT_SPDK_FC_MASTER_THREAD(); 3739 struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg; 3740 struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *) 3741 api_data->api_args; 3742 struct spdk_nvmf_fc_port *fc_port = NULL; 3743 int err = 0; 3744 struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL; 3745 struct spdk_nvmf_fc_nport *nport = NULL; 3746 uint32_t nport_deletes_sent = 0; 3747 uint32_t nport_deletes_skipped = 0; 3748 struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL; 3749 char log_str[256]; 3750 3751 /* 3752 * Get the fc port using the port handle. 3753 */ 3754 fc_port = nvmf_fc_port_lookup(args->port_handle); 3755 if (!fc_port) { 3756 SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n", 3757 args->port_handle); 3758 err = -EINVAL; 3759 goto out; 3760 } 3761 3762 /* 3763 * Set the port state to offline, if it is not already. 3764 */ 3765 err = nvmf_fc_port_set_offline(fc_port); 3766 if (err != 0) { 3767 SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n", 3768 fc_port->port_hdl, err); 3769 err = 0; 3770 goto out; 3771 } 3772 3773 /* 3774 * Delete all the nports, if any. 3775 */ 3776 if (!TAILQ_EMPTY(&fc_port->nport_list)) { 3777 TAILQ_FOREACH(nport, &fc_port->nport_list, link) { 3778 /* Skip the nports that are not in CREATED state */ 3779 if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) { 3780 nport_deletes_skipped++; 3781 continue; 3782 } 3783 3784 /* Allocate memory for callback data.
*/ 3785 cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data)); 3786 if (NULL == cb_data) { 3787 SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n", 3788 args->port_handle); 3789 err = -ENOMEM; 3790 goto out; 3791 } 3792 cb_data->args = args; 3793 cb_data->cb_func = api_data->cb_func; 3794 nport_del_args = &cb_data->nport_del_args; 3795 nport_del_args->port_handle = args->port_handle; 3796 nport_del_args->nport_handle = nport->nport_hdl; 3797 nport_del_args->cb_ctx = cb_data; 3798 3799 nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE, 3800 (void *)nport_del_args, 3801 nvmf_fc_adm_hw_port_link_break_cb); 3802 3803 nport_deletes_sent++; 3804 } 3805 } 3806 3807 if (nport_deletes_sent == 0 && err == 0) { 3808 /* 3809 * Mark the hwqps as offline and unregister the pollers. 3810 */ 3811 (void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port); 3812 } 3813 3814 out: 3815 snprintf(log_str, sizeof(log_str), 3816 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n", 3817 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err); 3818 3819 if (err != 0) { 3820 SPDK_ERRLOG("%s", log_str); 3821 } else { 3822 SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str); 3823 } 3824 3825 if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) { 3826 /* 3827 * No nport deletes were sent, so nothing else will eventually 3828 * invoke the port_link_break callback. Therefore, call the 3829 * port_link_break callback here. 3830 */ 3831 (void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err); 3832 } 3833 3834 free(arg); 3835 } 3836 3837 static inline void 3838 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args) 3839 { 3840 if (nvmf_fc_get_master_thread()) { 3841 spdk_thread_send_msg(nvmf_fc_get_master_thread(), fn, args); 3842 } 3843 } 3844 3845 /* 3846 * Queue up an event in the SPDK master's event queue. 3847 * Used by the FC driver to notify the SPDK master of FC-related events.
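 *
 * Illustrative usage from the low-level driver (my_online_done_cb is a
 * placeholder; args must remain valid until the event handler runs on the
 * master thread):
 *
 *   rc = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, online_args,
 *                                     my_online_done_cb);
 *   if (rc != 0) {
 *           handle the error - args were not consumed and remain
 *           owned by the caller
 *   }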
3848 */ 3849 int 3850 nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args, 3851 spdk_nvmf_fc_callback cb_func) 3852 { 3853 int err = 0; 3854 struct spdk_nvmf_fc_adm_api_data *api_data = NULL; 3855 spdk_msg_fn event_fn = NULL; 3856 3857 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type); 3858 3859 if (event_type >= SPDK_FC_EVENT_MAX) { 3860 SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type); 3861 err = -EINVAL; 3862 goto done; 3863 } 3864 3865 if (args == NULL) { 3866 SPDK_ERRLOG("Null args for event %d.\n", event_type); 3867 err = -EINVAL; 3868 goto done; 3869 } 3870 3871 api_data = calloc(1, sizeof(*api_data)); 3872 3873 if (api_data == NULL) { 3874 SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type); 3875 err = -ENOMEM; 3876 goto done; 3877 } 3878 3879 api_data->api_args = args; 3880 api_data->cb_func = cb_func; 3881 3882 switch (event_type) { 3883 case SPDK_FC_HW_PORT_INIT: 3884 event_fn = nvmf_fc_adm_evnt_hw_port_init; 3885 break; 3886 3887 case SPDK_FC_HW_PORT_ONLINE: 3888 event_fn = nvmf_fc_adm_evnt_hw_port_online; 3889 break; 3890 3891 case SPDK_FC_HW_PORT_OFFLINE: 3892 event_fn = nvmf_fc_adm_evnt_hw_port_offline; 3893 break; 3894 3895 case SPDK_FC_NPORT_CREATE: 3896 event_fn = nvmf_fc_adm_evnt_nport_create; 3897 break; 3898 3899 case SPDK_FC_NPORT_DELETE: 3900 event_fn = nvmf_fc_adm_evnt_nport_delete; 3901 break; 3902 3903 case SPDK_FC_IT_ADD: 3904 event_fn = nvmf_fc_adm_evnt_i_t_add; 3905 break; 3906 3907 case SPDK_FC_IT_DELETE: 3908 event_fn = nvmf_fc_adm_evnt_i_t_delete; 3909 break; 3910 3911 case SPDK_FC_ABTS_RECV: 3912 event_fn = nvmf_fc_adm_evnt_abts_recv; 3913 break; 3914 3915 case SPDK_FC_LINK_BREAK: 3916 event_fn = nvmf_fc_adm_evnt_hw_port_link_break; 3917 break; 3918 3919 case SPDK_FC_HW_PORT_RESET: 3920 event_fn = nvmf_fc_adm_evnt_hw_port_reset; 3921 break; 3922 3923 case SPDK_FC_UNRECOVERABLE_ERR: 3924 default: 3925 SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type); 3926 err = -EINVAL; 3927 break; 3928 } 3929 3930 done: 3931 3932 if (err == 0) { 3933 assert(event_fn != NULL); 3934 nvmf_fc_adm_run_on_master_thread(event_fn, (void *)api_data); 3935 SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type); 3936 } else { 3937 SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err); 3938 if (api_data) { 3939 free(api_data); 3940 } 3941 } 3942 3943 return err; 3944 } 3945 3946 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc); 3947 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api) 3948 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc) 3949