xref: /spdk/lib/nvmf/fc.c (revision ba23cec1820104cc710ad776f0127e1cf82033aa)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "spdk_internal/log.h"
50 
51 #include "nvmf_fc.h"
52 #include "fc_lld.h"
53 
/* DEV_VERIFY defaults to assert(); an LLD may predefine it with its own checker. */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/* Verify the caller is running on the FC master thread (admin-path invariant). */
#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_master_thread());
#endif
62 
/*
 * PRLI service parameters
 * Bit flags advertised/parsed in the FC-NVMe PRLI service parameter page.
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
73 
/*
 * Printable names for the FC request states; indexed by
 * enum spdk_nvmf_fc_request_state, so the order here must match that enum.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
91 
/* Trace object id used for FC I/O requests in the SPDK trace framework. */
#define OBJECT_NVMF_FC_IO				0xA0

/* Trace group for this transport; one tracepoint per FC request state below. */
#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
110 
/*
 * Register the FC I/O trace object and one human-readable description per
 * request-state tracepoint with the SPDK trace framework.
 */
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
160 
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;			/* opaque, API-specific argument block */
	spdk_nvmf_fc_callback cb_func;	/* completion callback for the adm op */
};
168 
/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport being deleted */
	uint8_t port_handle;			/* owning HW port handle */
	spdk_nvmf_fc_callback fc_cb_func;	/* caller's completion callback */
	void *fc_cb_ctx;			/* caller's callback context */
};
178 
/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the I_T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote port of the I_T nexus */
	uint8_t port_handle;				/* owning HW port handle */
	spdk_nvmf_fc_callback fc_cb_func;		/* caller's completion callback */
	void *fc_cb_ctx;				/* caller's callback context */
};


/* Completion callback signature for it-delete-assoc; err is 0 on success. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);
192 
/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the I_T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote port of the I_T nexus */
	uint8_t port_handle;				/* owning HW port handle */
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func; /* invoked when all assocs are gone */
	void *cb_ctx;					/* caller's callback context */
};
203 
/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;				/* queues still pending quiesce */
	void *ctx;					/* caller's callback context */
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;	/* invoked when count drains */
};
217 
/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;			/* opaque reset arguments */
	spdk_nvmf_fc_callback reset_cb_func;	/* completion callback for the reset */
};
225 
/**
 * The callback structure for HW port link break event
 */
struct spdk_nvmf_fc_adm_port_link_break_cb_data {
	struct spdk_nvmf_hw_port_link_break_args *args;		/* original event args */
	struct spdk_nvmf_fc_nport_delete_args nport_del_args;	/* args for nport teardown */
	spdk_nvmf_fc_callback cb_func;				/* caller's completion callback */
};
234 
/* FC transport instance; wraps the generic transport plus a module lock. */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;
};

/* Singleton FC transport, created at transport init. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* All registered FC HW ports. */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which all FC admin-path work runs. */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* Poll groups available for hwqp assignment, plus their count. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
250 
251 struct spdk_thread *
252 nvmf_fc_get_master_thread(void)
253 {
254 	return g_nvmf_fc_master_thread;
255 }
256 
257 static inline void
258 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
259 			       enum spdk_nvmf_fc_request_state state)
260 {
261 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
262 
263 	switch (state) {
264 	case SPDK_NVMF_FC_REQ_INIT:
265 		/* Start IO tracing */
266 		tpoint_id = TRACE_FC_REQ_INIT;
267 		break;
268 	case SPDK_NVMF_FC_REQ_READ_BDEV:
269 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
270 		break;
271 	case SPDK_NVMF_FC_REQ_READ_XFER:
272 		tpoint_id = TRACE_FC_REQ_READ_XFER;
273 		break;
274 	case SPDK_NVMF_FC_REQ_READ_RSP:
275 		tpoint_id = TRACE_FC_REQ_READ_RSP;
276 		break;
277 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
278 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
279 		break;
280 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
281 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
282 		break;
283 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
284 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
285 		break;
286 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
287 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
288 		break;
289 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
290 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
291 		break;
292 	case SPDK_NVMF_FC_REQ_NONE_RSP:
293 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
294 		break;
295 	case SPDK_NVMF_FC_REQ_SUCCESS:
296 		tpoint_id = TRACE_FC_REQ_SUCCESS;
297 		break;
298 	case SPDK_NVMF_FC_REQ_FAILED:
299 		tpoint_id = TRACE_FC_REQ_FAILED;
300 		break;
301 	case SPDK_NVMF_FC_REQ_ABORTED:
302 		tpoint_id = TRACE_FC_REQ_ABORTED;
303 		break;
304 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
305 		tpoint_id = TRACE_FC_REQ_ABORTED;
306 		break;
307 	case SPDK_NVMF_FC_REQ_PENDING:
308 		tpoint_id = TRACE_FC_REQ_PENDING;
309 		break;
310 	default:
311 		assert(0);
312 		break;
313 	}
314 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
315 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
316 				  (uint64_t)(&fc_req->req), 0);
317 	}
318 }
319 
320 static void
321 nvmf_fc_handle_connection_failure(void *arg)
322 {
323 	struct spdk_nvmf_fc_conn *fc_conn = arg;
324 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
325 
326 	if (!fc_conn->create_opd) {
327 		return;
328 	}
329 	api_data = &fc_conn->create_opd->u.add_conn;
330 
331 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
332 				    api_data->args.fc_conn, api_data->aq_conn);
333 }
334 
335 static void
336 nvmf_fc_handle_assoc_deletion(void *arg)
337 {
338 	struct spdk_nvmf_fc_conn *fc_conn = arg;
339 
340 	nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
341 				   fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
342 }
343 
344 static int
345 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
346 {
347 	uint32_t i;
348 	struct spdk_nvmf_fc_request *fc_req;
349 
350 	TAILQ_INIT(&hwqp->free_reqs);
351 	TAILQ_INIT(&hwqp->in_use_reqs);
352 
353 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
354 	if (hwqp->fc_reqs_buf == NULL) {
355 		SPDK_ERRLOG("create fc request pool failed\n");
356 		return -ENOMEM;
357 	}
358 
359 	for (i = 0; i < hwqp->rq_size; i++) {
360 		fc_req = hwqp->fc_reqs_buf + i;
361 
362 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
363 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
364 	}
365 
366 	return 0;
367 }
368 
369 static inline struct spdk_nvmf_fc_request *
370 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
371 {
372 	struct spdk_nvmf_fc_request *fc_req;
373 
374 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
375 		SPDK_ERRLOG("Alloc request buffer failed\n");
376 		return NULL;
377 	}
378 
379 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
380 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
381 
382 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
383 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
384 	TAILQ_INIT(&fc_req->abort_cbs);
385 	return fc_req;
386 }
387 
/*
 * Return an FC request object to the hwqp free list. Any request that did
 * not finish in SUCCESS is forced to FAILED so the state history records
 * the failure; the magic field is poisoned to catch use-after-free.
 */
static inline void
nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Log an error for debug purpose. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* set the magic to mark req as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
	/* Head insertion keeps recently-used (cache-warm) entries first. */
	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
}
402 
403 static inline bool
404 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
405 {
406 	switch (fc_req->state) {
407 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
408 		return true;
409 	default:
410 		return false;
411 	}
412 }
413 
/* Initialize the hwqp's receive-queue buffers via the LLD helper. */
void
nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	/* Thin wrapper kept for the public transport API. */
	nvmf_fc_init_rqpair_buffers(hwqp);
}
419 
420 struct spdk_nvmf_fc_conn *
421 nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
422 {
423 	struct spdk_nvmf_fc_conn *fc_conn;
424 
425 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
426 		if (fc_conn->conn_id == conn_id) {
427 			return fc_conn;
428 		}
429 	}
430 
431 	return NULL;
432 }
433 
/*
 * Re-initialize a hwqp's queues after a port reset/dump.
 *
 * Any queue-sync callbacks still parked on this hwqp can never fire once the
 * queues are rebuilt, so they are drained here. Each pending arg shares an
 * abts ctx with the other hwqps involved; the ctx (and both poller-arg
 * arrays) is freed only by whichever hwqp bumps hwqps_responded to
 * num_hwqps — the same refcount scheme the live callbacks use.
 */
void
nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			/* Last responder owns freeing the shared context. */
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	/* Rebuild the queues from the LLD-provided current-state snapshot. */
	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}
455 
456 void
457 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
458 {
459 	hwqp->fc_port = fc_port;
460 
461 	/* clear counters */
462 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
463 
464 	nvmf_fc_init_poller_queues(hwqp);
465 	if (&fc_port->ls_queue != hwqp) {
466 		nvmf_fc_create_req_mempool(hwqp);
467 	}
468 
469 	nvmf_fc_init_q(hwqp);
470 	TAILQ_INIT(&hwqp->connection_list);
471 	TAILQ_INIT(&hwqp->sync_cbs);
472 	TAILQ_INIT(&hwqp->ls_pending_queue);
473 }
474 
475 static struct spdk_nvmf_fc_poll_group *
476 nvmf_fc_get_idlest_poll_group(void)
477 {
478 	uint32_t max_count = UINT32_MAX;
479 	struct spdk_nvmf_fc_poll_group *fgroup;
480 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
481 
482 	/* find poll group with least number of hwqp's assigned to it */
483 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
484 		if (fgroup->hwqp_count < max_count) {
485 			ret_fgroup = fgroup;
486 			max_count = fgroup->hwqp_count;
487 		}
488 	}
489 
490 	return ret_fgroup;
491 }
492 
493 void
494 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
495 {
496 	struct spdk_nvmf_fc_poll_group *fgroup = NULL;
497 
498 	assert(hwqp);
499 	if (hwqp == NULL) {
500 		SPDK_ERRLOG("Error: hwqp is NULL\n");
501 		return;
502 	}
503 
504 	assert(g_nvmf_fgroup_count);
505 
506 	fgroup = nvmf_fc_get_idlest_poll_group();
507 	if (!fgroup) {
508 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
509 		return;
510 	}
511 
512 	hwqp->thread = fgroup->group.group->thread;
513 	hwqp->fgroup = fgroup;
514 	fgroup->hwqp_count++;
515 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
516 }
517 
518 void
519 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
520 {
521 	assert(hwqp);
522 
523 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
524 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
525 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
526 
527 	if (!hwqp->fgroup) {
528 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
529 	} else {
530 		hwqp->fgroup->hwqp_count--;
531 		nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
532 	}
533 }
534 
/*
 * Hand out a monotonically increasing non-zero id for ABTS tracking.
 * Note: This needs to be used only on master poller (the counter is an
 * unsynchronized static).
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	u_id++;
	return (uint64_t)u_id;
}
545 
/*
 * Poller-API completion for a queue-sync request issued by
 * nvmf_fc_handle_abts_notfound().
 *
 * Called once per hwqp involved in the ABTS; only the final responder
 * proceeds. It frees the (now fully consumed) sync poller args, marks the
 * ctx as synced so the retry cannot loop, resets the shared counters, and
 * re-dispatches the original ABTS to every involved hwqp for one more
 * lookup attempt.
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}
582 
/*
 * The exchange for a received ABTS was not found on any hwqp; before
 * rejecting, sync the queues so any in-flight command for the exchange
 * drains, then retry the ABTS (see nvmf_fc_queue_synced_cb).
 *
 * Sends one QUEUE_SYNC poller message per hwqp that was involved in the
 * ABTS, then posts a sync marker on the LS queue tagged with ctx->u_id.
 *
 * Returns 0 on success, -EPERM if the LLD lacks queue-sync support,
 * -EINVAL on NULL ctx, -ENOMEM if the poller args cannot be allocated.
 * ctx->sync_poller_args is owned by the ctx and freed by the sync callback.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* One sync message per hwqp, mirroring the original ABTS fan-out. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
637 
/*
 * Poller-API completion for an ABTS delivered to the hwqps.
 *
 * Runs once per hwqp; only the final responder acts. If any poller found
 * the exchange (ret != OXID_NOT_FOUND) a BLS accept is sent. Otherwise a
 * queue sync + retry is attempted once (ctx->queue_synced guards the
 * retry); if that is unavailable or already done, a BLS reject goes out.
 * The last responder frees the ctx and its abts poller args.
 */
static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport  = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Re-look up the nport; it may have been deleted meanwhile. */
	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				/* ctx stays alive; the sync callback re-drives the ABTS. */
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	/* NOTE(review): this log dereferences ctx->nport even on the
	 * nport-deleted path above — looks like a possible stale-pointer
	 * read; confirm nport lifetime before relying on it. */
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}
690 
/*
 * Entry point for a received ABTS frame.
 *
 * Collects the distinct hwqps that carry connections for the sender's rpi,
 * builds a shared abts ctx plus one poller arg per hwqp, and fans the ABTS
 * out to those pollers; nvmf_fc_abts_handled_cb() aggregates the results
 * and sends the BLS response. Ownership of ctx and args transfers to the
 * callbacks on successful dispatch. If no hwqp matches or any allocation
 * fails, a BLS reject (NOINFO) is sent immediately.
 */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate: each hwqp appears once regardless of connection count. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	/* Fan out; ctx/args are freed by the last nvmf_fc_abts_handled_cb(). */
	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	/* Only the temporary dedup array is ours to free here. */
	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
791 
792 /*** Accessor functions for the FC structures - BEGIN */
793 /*
794  * Returns true if the port is in offline state.
795  */
796 bool
797 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
798 {
799 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
800 		return true;
801 	}
802 
803 	return false;
804 }
805 
806 /*
807  * Returns true if the port is in online state.
808  */
809 bool
810 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
811 {
812 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
813 		return true;
814 	}
815 
816 	return false;
817 }
818 
819 int
820 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
821 {
822 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
823 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
824 		return 0;
825 	}
826 
827 	return -EPERM;
828 }
829 
830 int
831 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
832 {
833 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
834 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
835 		return 0;
836 	}
837 
838 	return -EPERM;
839 }
840 
841 int
842 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
843 {
844 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
845 		hwqp->state = SPDK_FC_HWQP_ONLINE;
846 		/* reset some queue counters */
847 		hwqp->num_conns = 0;
848 		return nvmf_fc_set_q_online_state(hwqp, true);
849 	}
850 
851 	return -EPERM;
852 }
853 
854 int
855 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
856 {
857 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
858 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
859 		return nvmf_fc_set_q_online_state(hwqp, false);
860 	}
861 
862 	return -EPERM;
863 }
864 
865 void
866 nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
867 {
868 	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
869 }
870 
871 struct spdk_nvmf_fc_port *
872 nvmf_fc_port_lookup(uint8_t port_hdl)
873 {
874 	struct spdk_nvmf_fc_port *fc_port = NULL;
875 
876 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
877 		if (fc_port->port_hdl == port_hdl) {
878 			return fc_port;
879 		}
880 	}
881 	return NULL;
882 }
883 
884 static void
885 nvmf_fc_port_cleanup(void)
886 {
887 	struct spdk_nvmf_fc_port *fc_port, *tmp;
888 	struct spdk_nvmf_fc_hwqp *hwqp;
889 	uint32_t i;
890 
891 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
892 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
893 		for (i = 0; i < fc_port->num_io_queues; i++) {
894 			hwqp = &fc_port->io_queues[i];
895 			if (hwqp->fc_reqs_buf) {
896 				free(hwqp->fc_reqs_buf);
897 			}
898 		}
899 		free(fc_port);
900 	}
901 }
902 
903 uint32_t
904 nvmf_fc_get_prli_service_params(void)
905 {
906 	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
907 }
908 
909 int
910 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
911 		       struct spdk_nvmf_fc_nport *nport)
912 {
913 	if (fc_port) {
914 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
915 		fc_port->num_nports++;
916 		return 0;
917 	}
918 
919 	return -EINVAL;
920 }
921 
922 int
923 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
924 			  struct spdk_nvmf_fc_nport *nport)
925 {
926 	if (fc_port && nport) {
927 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
928 		fc_port->num_nports--;
929 		return 0;
930 	}
931 
932 	return -EINVAL;
933 }
934 
935 static struct spdk_nvmf_fc_nport *
936 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
937 {
938 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
939 
940 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
941 		if (fc_nport->nport_hdl == nport_hdl) {
942 			return fc_nport;
943 		}
944 	}
945 
946 	return NULL;
947 }
948 
/*
 * Resolve (port handle, nport handle) to an nport.
 * Returns NULL if either lookup fails.
 */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	return fc_port ? nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl) : NULL;
}
961 
962 static inline int
963 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
964 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
965 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
966 {
967 	struct spdk_nvmf_fc_nport *n_port;
968 	struct spdk_nvmf_fc_remote_port_info *r_port;
969 
970 	assert(hwqp);
971 	if (hwqp == NULL) {
972 		SPDK_ERRLOG("Error: hwqp is NULL\n");
973 		return -EINVAL;
974 	}
975 	assert(nport);
976 	if (nport == NULL) {
977 		SPDK_ERRLOG("Error: nport is NULL\n");
978 		return -EINVAL;
979 	}
980 	assert(rport);
981 	if (rport == NULL) {
982 		SPDK_ERRLOG("Error: rport is NULL\n");
983 		return -EINVAL;
984 	}
985 
986 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
987 		if (n_port->d_id == d_id) {
988 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
989 				if (r_port->s_id == s_id) {
990 					*nport = n_port;
991 					*rport = r_port;
992 					return 0;
993 				}
994 			}
995 			break;
996 		}
997 	}
998 
999 	return -ENOENT;
1000 }
1001 
1002 /* Returns true if the Nport is empty of all rem_ports */
1003 bool
1004 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1005 {
1006 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1007 		assert(nport->rport_count == 0);
1008 		return true;
1009 	} else {
1010 		return false;
1011 	}
1012 }
1013 
1014 int
1015 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1016 			enum spdk_nvmf_fc_object_state state)
1017 {
1018 	if (nport) {
1019 		nport->nport_state = state;
1020 		return 0;
1021 	} else {
1022 		return -EINVAL;
1023 	}
1024 }
1025 
/*
 * Attach a remote port to the nport's list and bump the count.
 *
 * NOTE(review): declared bool but returns 0 on success and -EINVAL on
 * failure (so failure reads as "true"). Callers appear to depend on the
 * 0/-EINVAL convention; confirm call sites before changing the type.
 */
bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1038 
1039 bool
1040 nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
1041 			      struct spdk_nvmf_fc_remote_port_info *rem_port)
1042 {
1043 	if (nport && rem_port) {
1044 		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
1045 		nport->rport_count--;
1046 		return 0;
1047 	} else {
1048 		return -EINVAL;
1049 	}
1050 }
1051 
1052 int
1053 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1054 			enum spdk_nvmf_fc_object_state state)
1055 {
1056 	if (rport) {
1057 		rport->rport_state = state;
1058 		return 0;
1059 	} else {
1060 		return -EINVAL;
1061 	}
1062 }
1063 int
1064 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1065 			enum spdk_nvmf_fc_object_state state)
1066 {
1067 	if (assoc) {
1068 		assoc->assoc_state = state;
1069 		return 0;
1070 	} else {
1071 		return -EINVAL;
1072 	}
1073 }
1074 
1075 static struct spdk_nvmf_fc_association *
1076 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1077 {
1078 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1079 	struct spdk_nvmf_fc_conn *fc_conn;
1080 
1081 	if (!qpair) {
1082 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1083 		return NULL;
1084 	}
1085 
1086 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1087 
1088 	return fc_conn->fc_assoc;
1089 }
1090 
1091 bool
1092 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1093 		       struct spdk_nvmf_ctrlr *ctrlr)
1094 {
1095 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1096 	struct spdk_nvmf_fc_association *assoc = NULL;
1097 
1098 	if (!ctrlr) {
1099 		return false;
1100 	}
1101 
1102 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1103 	if (!fc_nport) {
1104 		return false;
1105 	}
1106 
1107 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1108 	if (assoc && assoc->tgtport == fc_nport) {
1109 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
1110 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1111 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1112 			      nport_hdl);
1113 		return true;
1114 	}
1115 	return false;
1116 }
1117 
1118 static inline bool
1119 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1120 {
1121 	switch (fc_req->state) {
1122 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1123 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1124 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1125 		return true;
1126 	default:
1127 		return false;
1128 	}
1129 }
1130 
1131 static inline bool
1132 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1133 {
1134 	struct spdk_nvmf_request *tmp = NULL;
1135 
1136 	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
1137 		if (tmp == &fc_req->req) {
1138 			return true;
1139 		}
1140 	}
1141 	return false;
1142 }
1143 
1144 static void
1145 nvmf_fc_req_bdev_abort(void *arg1)
1146 {
1147 	struct spdk_nvmf_fc_request *fc_req = arg1;
1148 	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1149 
1150 	/* Initial release - we don't have to abort Admin Queue or
1151 	 * Fabric commands. The AQ commands supported at this time are
1152 	 * Get-Log-Page,
1153 	 * Identify
1154 	 * Set Features
1155 	 * Get Features
1156 	 * AER -> Special case and handled differently.
1157 	 * Every one of the above Admin commands (except AER) run
1158 	 * to completion and so an Abort of such commands doesn't
1159 	 * make sense.
1160 	 */
1161 	/* The Fabric commands supported are
1162 	 * Property Set
1163 	 * Property Get
1164 	 * Connect -> Special case (async. handling). Not sure how to
1165 	 * handle at this point. Let it run to completion.
1166 	 */
1167 	if (ctrlr->aer_req == &fc_req->req) {
1168 		SPDK_NOTICELOG("Abort AER request\n");
1169 		nvmf_qpair_free_aer(fc_req->req.qpair);
1170 	}
1171 }
1172 
1173 void
1174 nvmf_fc_request_abort_complete(void *arg1)
1175 {
1176 	struct spdk_nvmf_fc_request *fc_req =
1177 		(struct spdk_nvmf_fc_request *)arg1;
1178 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1179 
1180 	/* Request abort completed. Notify all the callbacks */
1181 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1182 		/* Notify */
1183 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1184 		/* Remove */
1185 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1186 		/* free */
1187 		free(ctx);
1188 	}
1189 
1190 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1191 		       fc_req_state_strs[fc_req->state]);
1192 
1193 	_nvmf_fc_request_free(fc_req);
1194 }
1195 
/* Abort an outstanding FC request.
 *
 * May be called several times for the same request: each caller can
 * register a (cb, cb_args) pair, queued on fc_req->abort_cbs and invoked
 * from nvmf_fc_request_abort_complete() once the abort finishes. Only the
 * first call performs the state-dependent abort action; later calls just
 * queue their callback and return.
 *
 * send_abts - if an exchange is allocated, whether an ABTS should go out
 *             when the exchange is torn down.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			/* NOTE: the caller's cb is silently dropped on OOM. */
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch on the request's current state; exactly one path below is
	 * responsible for eventually reaching the "complete" handling. */
	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1272 
1273 static int
1274 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1275 {
1276 	uint32_t length = fc_req->req.length;
1277 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1278 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1279 	struct spdk_nvmf_transport *transport = group->transport;
1280 
1281 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1282 		return -ENOMEM;
1283 	}
1284 
1285 	return 0;
1286 }
1287 
1288 static int
1289 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1290 {
1291 	/* Allocate an XCHG if we dont use send frame for this command. */
1292 	if (!nvmf_fc_use_send_frame(&fc_req->req)) {
1293 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1294 		if (!fc_req->xchg) {
1295 			fc_req->hwqp->counters.no_xchg++;
1296 			printf("NO XCHGs!\n");
1297 			goto pending;
1298 		}
1299 	}
1300 
1301 	if (fc_req->req.length) {
1302 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1303 			fc_req->hwqp->counters.buf_alloc_err++;
1304 			goto pending;
1305 		}
1306 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1307 	}
1308 
1309 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1310 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "WRITE CMD.\n");
1311 
1312 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1313 
1314 		if (nvmf_fc_recv_data(fc_req)) {
1315 			/* Dropped return success to caller */
1316 			fc_req->hwqp->counters.unexpected_err++;
1317 			_nvmf_fc_request_free(fc_req);
1318 		}
1319 	} else {
1320 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "READ/NONE CMD\n");
1321 
1322 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1323 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1324 		} else {
1325 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1326 		}
1327 		spdk_nvmf_request_exec(&fc_req->req);
1328 	}
1329 
1330 	return 0;
1331 
1332 pending:
1333 	if (fc_req->xchg) {
1334 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1335 		fc_req->xchg = NULL;
1336 	}
1337 
1338 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1339 
1340 	return -EAGAIN;
1341 }
1342 
/* Validate and start execution of an NVMe command IU received on an hwqp.
 *
 * Returns 0 once the request has been handed to the transport (including
 * the case where it is queued as pending), or a negative errno when the
 * frame is dropped: -ENXIO (malformed IU), -EPERM (bidirectional xfer),
 * -ENODEV (unknown connection), -EACCES (association/qpair not usable),
 * -EINVAL (length > MDTS), -ENOMEM (no free request slot).
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	/* The RQ buffer holds the FC-NVMe command IU (big-endian wire format). */
	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	/* Populate the request from the IU and the frame header. The s_id and
	 * d_id fields are 24-bit big-endian values, hence the >> 8 after the
	 * 32-bit byte swap. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	/* -EAGAIN (resource exhaustion) parks the request for a later retry
	 * by nvmf_fc_hwqp_process_pending_reqs(); still success to caller. */
	if (nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}
1433 
1434 /*
1435  * These functions are called from the FC LLD
1436  */
1437 
1438 void
1439 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1440 {
1441 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1442 	struct spdk_nvmf_fc_poll_group *fgroup = hwqp->fgroup;
1443 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1444 	struct spdk_nvmf_transport *transport = group->transport;
1445 
1446 	if (!fc_req) {
1447 		return;
1448 	}
1449 
1450 	if (fc_req->xchg) {
1451 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1452 		fc_req->xchg = NULL;
1453 	}
1454 
1455 	/* Release IO buffers */
1456 	if (fc_req->req.data_from_pool) {
1457 		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
1458 	}
1459 	fc_req->req.data = NULL;
1460 	fc_req->req.iovcnt  = 0;
1461 
1462 	/* Release Q buffer */
1463 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1464 
1465 	/* Free Fc request */
1466 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1467 }
1468 
/* Transition a request to a new state, recording a trace point.
 * The magic check catches use-after-free; 0xDEADBEEF appears to be the
 * poison value written on free — TODO confirm at the free site.
 */
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1482 
1483 char *
1484 nvmf_fc_request_get_state_str(int state)
1485 {
1486 	static char *unk_str = "unknown";
1487 
1488 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1489 		fc_req_state_strs[state] : unk_str);
1490 }
1491 
/* Entry point for a raw FC frame received on an hwqp.
 *
 * Resolves the (nport, rport) pair from the frame's d_id/s_id, validates
 * their state, then dispatches: LS (link service) requests are handed to
 * the LS module (or queued if no exchange is free), NVMe command frames go
 * to nvmf_fc_hwqp_handle_request(), anything else is dropped.
 *
 * Returns 0 on success or a negative errno when the frame is dropped.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* s_id/d_id are 24-bit big-endian fields: swap then drop the pad byte. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	/* Both ports must be fully created (not mid-teardown) to accept frames. */
	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1596 
1597 void
1598 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1599 {
1600 	struct spdk_nvmf_request *req = NULL, *tmp;
1601 	struct spdk_nvmf_fc_request *fc_req;
1602 	int budget = 64;
1603 
1604 	if (!hwqp->fgroup) {
1605 		/* LS queue is tied to acceptor_poll group and LS pending requests
1606 		 * are stagged and processed using hwqp->ls_pending_queue.
1607 		 */
1608 		return;
1609 	}
1610 
1611 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1612 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1613 		if (!nvmf_fc_request_execute(fc_req)) {
1614 			/* Succesfuly posted, Delete from pending. */
1615 			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
1616 		}
1617 
1618 		if (budget) {
1619 			budget--;
1620 		} else {
1621 			return;
1622 		}
1623 	}
1624 }
1625 
/* Drain the hwqp's pending LS request queue.
 *
 * Each entry's nport/rport is re-validated (they may have gone away while
 * the request waited); invalid entries are dropped and their RQ buffer is
 * returned to the chip. Valid entries are handed to the LS module once an
 * exchange can be allocated; when exchanges run out, processing stops and
 * the remaining entries stay queued for the next invocation.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1675 
1676 int
1677 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1678 {
1679 	int rc = 0;
1680 	struct spdk_nvmf_request *req = &fc_req->req;
1681 	struct spdk_nvmf_qpair *qpair = req->qpair;
1682 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1683 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1684 	uint16_t ersp_len = 0;
1685 
1686 	/* set sq head value in resp */
1687 	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);
1688 
1689 	/* Increment connection responses */
1690 	fc_conn->rsp_count++;
1691 
1692 	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1693 				       fc_req->transfered_len)) {
1694 		/* Fill ERSP Len */
1695 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1696 				    sizeof(uint32_t)));
1697 		fc_req->ersp.ersp_len = ersp_len;
1698 
1699 		/* Fill RSN */
1700 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1701 		fc_conn->rsn++;
1702 
1703 		/* Fill transfer length */
1704 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);
1705 
1706 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting ERSP.\n");
1707 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1708 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1709 	} else {
1710 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting RSP.\n");
1711 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1712 	}
1713 
1714 	return rc;
1715 }
1716 
1717 bool
1718 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1719 			   uint32_t rsp_cnt, uint32_t xfer_len)
1720 {
1721 	struct spdk_nvmf_request *req = &fc_req->req;
1722 	struct spdk_nvmf_qpair *qpair = req->qpair;
1723 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1724 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1725 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1726 	uint16_t status = *((uint16_t *)&rsp->status);
1727 
1728 	/*
1729 	 * Check if we need to send ERSP
1730 	 * 1) For every N responses where N == ersp_ratio
1731 	 * 2) Fabric commands.
1732 	 * 3) Completion status failed or Completion dw0 or dw1 valid.
1733 	 * 4) SQ == 90% full.
1734 	 * 5) Transfer length not equal to CMD IU length
1735 	 */
1736 
1737 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1738 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1739 	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
1740 	    (req->length != xfer_len)) {
1741 		return true;
1742 	}
1743 	return false;
1744 }
1745 
1746 static int
1747 nvmf_fc_request_complete(struct spdk_nvmf_request *req)
1748 {
1749 	int rc = 0;
1750 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1751 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1752 
1753 	if (fc_req->is_aborted) {
1754 		/* Defer this to make sure we dont call io cleanup in same context. */
1755 		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1756 					(void *)fc_req);
1757 	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1758 		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1759 
1760 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1761 
1762 		rc = nvmf_fc_send_data(fc_req);
1763 	} else {
1764 		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1765 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1766 		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1767 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1768 		} else {
1769 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1770 		}
1771 
1772 		rc = nvmf_fc_handle_rsp(fc_req);
1773 	}
1774 
1775 	if (rc) {
1776 		SPDK_ERRLOG("Error in request complete.\n");
1777 		_nvmf_fc_request_free(fc_req);
1778 	}
1779 	return 0;
1780 }
1781 
1782 struct spdk_nvmf_tgt *
1783 nvmf_fc_get_tgt(void)
1784 {
1785 	if (g_nvmf_ftransport) {
1786 		return g_nvmf_ftransport->transport.tgt;
1787 	}
1788 	return NULL;
1789 }
1790 
1791 /*
1792  * FC Transport Public API begins here
1793  */
1794 
/* Default transport option values, consumed by nvmf_fc_opts_init(). */
#define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
#define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
#define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
#define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
#define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
#define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
#define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
/* Max scatter/gather entries implied by the IO size / unit size ratio;
 * nvmf_fc_create() rejects opts that would exceed this. */
#define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1804 
/* Populate the FC transport's default option values. */
static void
nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
{
	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
}
1816 
/* Create the (singleton) FC transport.
 *
 * Validates the options, allocates the global transport object, records
 * the master thread, and initializes the low level FC driver. Returns the
 * embedded generic transport, or NULL on any failure (duplicate create,
 * too few cores, unsupported io_unit_size, allocation/mutex failure).
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only one FC transport instance may exist per process. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	/* Need more than one core: last core index 0 means a single core. */
	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* The per-request iovec is sized for SPDK_NVMF_FC_DEFAULT_MAX_SGE. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
1870 
1871 static int
1872 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1873 {
1874 	if (transport) {
1875 		struct spdk_nvmf_fc_transport *ftransport;
1876 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
1877 
1878 		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1879 
1880 		free(ftransport);
1881 
1882 		/* clean up any FC poll groups still around */
1883 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
1884 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1885 			free(fgroup);
1886 		}
1887 		g_nvmf_fgroup_count = 0;
1888 
1889 		/* low level FC driver clean up */
1890 		nvmf_fc_lld_fini();
1891 
1892 		nvmf_fc_port_cleanup();
1893 	}
1894 
1895 	return 0;
1896 }
1897 
/* No-op: the FC transport has nothing to set up per listen address; it
 * always reports success. Presumably listeners are implied by the FC ports
 * themselves — TODO confirm against the port add path.
 */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}
1904 
/* No-op counterpart of nvmf_fc_listen(). */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
1910 
/* Transport accept poller: lazily starts the low level driver on the first
 * invocation, then services the LS (link services) queue of every online
 * FC port, recording cb_fn as the new-qpair callback for each.
 */
static void
nvmf_fc_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	/* One-shot latch: the LLD is started exactly once, on first poll. */
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld  = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			fc_port->new_qp_cb = cb_fn;
			nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}
}
1930 
1931 static void
1932 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
1933 		 struct spdk_nvme_transport_id *trid,
1934 		 struct spdk_nvmf_discovery_log_page_entry *entry)
1935 {
1936 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
1937 	entry->adrfam = trid->adrfam;
1938 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
1939 
1940 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
1941 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
1942 }
1943 
/* Allocate an FC poll group and register it on the global list (under the
 * transport lock). Returns the embedded generic poll group, or NULL on
 * allocation failure.
 */
static struct spdk_nvmf_transport_poll_group *
nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_transport *ftransport =
		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);

	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
	if (!fgroup) {
		SPDK_ERRLOG("Unable to alloc FC poll group\n");
		return NULL;
	}

	TAILQ_INIT(&fgroup->hwqp_list);

	pthread_mutex_lock(&ftransport->lock);
	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
	g_nvmf_fgroup_count++;
	pthread_mutex_unlock(&ftransport->lock);

	return &fgroup->group;
}
1966 
/* Unregister an FC poll group from the global list (under the transport
 * lock) and free it.
 */
static void
nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_transport *ftransport =
		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);

	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
	pthread_mutex_lock(&ftransport->lock);
	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
	g_nvmf_fgroup_count--;
	pthread_mutex_unlock(&ftransport->lock);

	free(fgroup);
}
1982 
1983 static int
1984 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
1985 		       struct spdk_nvmf_qpair *qpair)
1986 {
1987 	struct spdk_nvmf_fc_poll_group *fgroup;
1988 	struct spdk_nvmf_fc_conn *fc_conn;
1989 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
1990 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
1991 	bool hwqp_found = false;
1992 
1993 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
1994 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1995 
1996 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
1997 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
1998 			hwqp_found = true;
1999 			break;
2000 		}
2001 	}
2002 
2003 	if (!hwqp_found) {
2004 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2005 		goto err;
2006 	}
2007 
2008 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2009 					 &fc_conn->conn_id,
2010 					 fc_conn->max_queue_depth)) {
2011 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2012 		goto err;
2013 	}
2014 
2015 	fc_conn->hwqp = hwqp;
2016 
2017 	/* If this is for ADMIN connection, then update assoc ID. */
2018 	if (fc_conn->qpair.qid == 0) {
2019 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2020 	}
2021 
2022 	api_data = &fc_conn->create_opd->u.add_conn;
2023 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2024 	return 0;
2025 err:
2026 	return -1;
2027 }
2028 
2029 static int
2030 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2031 {
2032 	uint32_t count = 0;
2033 	struct spdk_nvmf_fc_poll_group *fgroup;
2034 	struct spdk_nvmf_fc_hwqp *hwqp;
2035 
2036 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2037 
2038 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2039 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2040 			count += nvmf_fc_process_queue(hwqp);
2041 		}
2042 	}
2043 
2044 	return (int) count;
2045 }
2046 
2047 static int
2048 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2049 {
2050 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2051 
2052 	if (!fc_req->is_aborted) {
2053 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2054 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2055 	} else {
2056 		nvmf_fc_request_abort_complete(fc_req);
2057 	}
2058 	return 0;
2059 }
2060 
2061 
/*
 * qpair_fini handler: schedule connection teardown on the FC master
 * thread when a qpair is being closed.
 */
static void
nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		/* QP creation failure in FC transport. Cleanup. */
		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_connection_failure, fc_conn);
	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/* Admin connection (assoc_id == conn_id): closing it tears down
		 * the whole association, unless a deletion is already in flight. */
		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_assoc_deletion, fc_conn);
	}
}
2080 
2081 static int
2082 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2083 			    struct spdk_nvme_transport_id *trid)
2084 {
2085 	struct spdk_nvmf_fc_conn *fc_conn;
2086 
2087 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2088 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2089 	return 0;
2090 }
2091 
2092 static int
2093 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2094 			     struct spdk_nvme_transport_id *trid)
2095 {
2096 	struct spdk_nvmf_fc_conn *fc_conn;
2097 
2098 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2099 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2100 	return 0;
2101 }
2102 
2103 static int
2104 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2105 			      struct spdk_nvme_transport_id *trid)
2106 {
2107 	struct spdk_nvmf_fc_conn *fc_conn;
2108 
2109 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2110 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2111 	return 0;
2112 }
2113 
/* Function table registering FC as an NVMe-oF transport with the generic
 * nvmf layer. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	/* The NVMe-oF trtype is cast to the generic transport-type enum
	 * used by the ops table. */
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* Listener management */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	/* Discovery log support */
	.listener_discover = nvmf_fc_discover,

	/* Poll group lifecycle */
	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	/* Request and qpair handling */
	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
};
2139 
2140 /*
2141  * Re-initialize the FC-Port after an offline event.
2142  * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains
2143  * unchanged after the first initialization.
2144  *
2145  */
static int
nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port,
				    struct spdk_nvmf_fc_hw_port_init_args *args)
{
	uint32_t i;

	/* Verify that the port was previously in offline or quiesced state */
	if (nvmf_fc_port_is_online(fc_port)) {
		SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle);
		return -EINVAL;
	}

	/* Reinit information in new LS queue from previous queue */
	nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue);

	fc_port->fcp_rq_id = args->fcp_rq_id;

	/* Initialize the LS queue */
	/* NOTE(review): reinit above carries state into the new queue memory,
	 * then init below resets it — presumably intentional; confirm against
	 * the hwqp poller-queue implementation. */
	fc_port->ls_queue.queues = args->ls_queue;
	nvmf_fc_init_poller_queues(fc_port->ls_queue.queues);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		/* Reinit information in new IO queue from previous queue */
		nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i],
						  args->io_queues[i]);
		fc_port->io_queues[i].queues = args->io_queues[i];
		/* Initialize the IO queues */
		nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues);
	}

	/* Port starts out offline after a reinit; a separate online event
	 * brings it back up. */
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;

	/* Validate the port information */
	DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) {
		/* All nports must have been purged before the port went offline. */
		return -EINVAL;
	}

	return 0;
}
2187 
2188 /* Initializes the data for the creation of a FC-Port object in the SPDK
2189  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2190  * the API to the library. The contents added to this well defined structure
2191  * is private to each vendors implementation.
2192  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	fc_port->port_hdl       = args->port_handle;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	/* LS traffic is always handled on the FC master thread. */
	fc_port->ls_queue.thread = nvmf_fc_get_master_thread();
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;

	/*
	 * Initialize the LS queue.
	 */
	nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);

	/*
	 * Initialize the IO queues.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->rq_size = args->io_queue_size;
		nvmf_fc_init_hwqp(fc_port, hwqp);
	}

	/*
	 * Initialize the LS processing for port
	 */
	nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	/* Always succeeds; the int return exists for interface symmetry with
	 * the other port-init helpers. */
	return 0;
}
2248 
2249 static void
2250 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port)
2251 {
2252 	struct spdk_nvmf_fc_hwqp *hwqp    = NULL;
2253 	int i = 0;
2254 
2255 	hwqp = &fc_port->ls_queue;
2256 	(void)nvmf_fc_hwqp_set_offline(hwqp);
2257 
2258 	/*  Remove poller for all the io queues. */
2259 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2260 		hwqp = &fc_port->io_queues[i];
2261 		(void)nvmf_fc_hwqp_set_offline(hwqp);
2262 		nvmf_fc_poll_group_remove_hwqp(hwqp);
2263 	}
2264 }
2265 
2266 /*
2267  * Callback function for HW port link break operation.
2268  *
2269  * Notice that this callback is being triggered when spdk_fc_nport_delete()
2270  * completes, if that spdk_fc_nport_delete() called is issued by
2271  * nvmf_fc_adm_evnt_hw_port_link_break().
2272  *
2273  * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple
2274  * times (one per nport in the HW port's nport_list), a single call to
2275  * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function.
2276  *
2277  * As a result, this function only invokes a callback to the caller of
2278  * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty.
2279  */
static void
nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle,
				  enum spdk_fc_event event_type, void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args;
	struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL;
	spdk_nvmf_fc_callback cb_func = NULL;
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int num_nports = 0;
	char log_str[256];

	/* Validate every piece of the callback context before acting on it;
	 * any inconsistency aborts with a DEV_VERIFY in debug builds. */
	if (0 != spdk_err) {
		DEV_VERIFY(!"port link break cb: spdk_err not success.");
		SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err);
		goto out;
	}

	if (!offline_cb_args) {
		DEV_VERIFY(!"port link break cb: port_offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	offline_args = offline_cb_args->args;
	if (!offline_args) {
		DEV_VERIFY(!"port link break cb: offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	if (port_handle != offline_args->port_handle) {
		DEV_VERIFY(!"port link break cb: port_handle mismatch.");
		err = -EINVAL;
		goto out;
	}

	cb_func = offline_cb_args->cb_func;
	if (!cb_func) {
		DEV_VERIFY(!"port link break cb: cb_func is NULL.");
		err = -EINVAL;
		goto out;
	}

	fc_port = nvmf_fc_port_lookup(port_handle);
	if (!fc_port) {
		DEV_VERIFY(!"port link break cb: fc_port is NULL.");
		SPDK_ERRLOG("port link break cb: Unable to find port:%d\n",
			    offline_args->port_handle);
		err = -EINVAL;
		goto out;
	}

	num_nports = fc_port->num_nports;
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		/*
		 * Don't call the callback unless all nports have been deleted.
		 */
		goto out;
	}

	/* List is empty but the counter disagrees — flag the inconsistency,
	 * then proceed with teardown anyway. */
	if (num_nports != 0) {
		DEV_VERIFY(!"port link break cb: num_nports in non-zero.");
		SPDK_ERRLOG("port link break cb: # of ports should be 0. Instead, num_nports:%d\n",
			    num_nports);
		err = -EINVAL;
	}

	/*
	 * Mark the hwqps as offline and unregister the pollers.
	 */
	(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);

	/*
	 * Since there are no more nports, execute the callback(s).
	 */
	(void)cb_func(port_handle, SPDK_FC_LINK_BREAK,
		      (void *)offline_args->cb_ctx, spdk_err);

out:
	/* This callback owns offline_cb_args and frees it on every path. */
	free(offline_cb_args);

	snprintf(log_str, sizeof(log_str),
		 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n",
		 port_handle, event_type, num_nports, err, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
	return;
}
2374 
2375 /*
2376  * FC port must have all its nports deleted before transitioning to offline state.
2377  */
2378 static void
2379 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2380 {
2381 	struct spdk_nvmf_fc_nport *nport = NULL;
2382 	/* All nports must have been deleted at this point for this fc port */
2383 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2384 	DEV_VERIFY(fc_port->num_nports == 0);
2385 	/* Mark the nport states to be zombie, if they exist */
2386 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2387 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2388 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2389 		}
2390 	}
2391 }
2392 
/*
 * Completion callback for an IT (initiator-target) delete. Snapshots
 * the identifying fields for logging, forwards the result to the
 * caller's callback (if any) and frees the callback context.
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	/* Capture log fields now; cb_data is freed before the log is written. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	/* This callback owns cb_data and frees it on every path. */
	free(cb_data);

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2435 
/*
 * Per-association completion callback used by nvmf_fc_adm_i_t_delete_assoc().
 * The same cb_data is shared by every association delete scheduled for one
 * ITN; only the invocation that drops rport->assoc_count to zero runs the
 * final callback and frees rport and cb_data.
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Capture log fields now; rport/cb_data may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* On failure the counts are decremented here, since the normal
		 * decrement in the delete path presumably did not happen —
		 * TODO confirm against nvmf_fc_delete_association(). */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		/* Last reference: release the rport and the shared cb_data. */
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2497 
2498 /**
2499  * Process a IT delete.
2500  */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 * One shared cb_data serves all scheduled deletes; the per-delete
	 * callback frees it once the last one completes.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = nvmf_fc_delete_association(nport,
							       assoc->assoc_id,
							       false /* send abts */, false,
							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2583 
/*
 * Poller-API completion for a single hwqp quiesce. Decrements the
 * outstanding-queue count in the shared port quiesce context; the last
 * completion marks the port QUIESCED, runs the caller's callback and
 * frees the context.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* The per-queue args were allocated in nvmf_fc_adm_hw_queue_quiesce(). */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the caller of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2636 
2637 static int
2638 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2639 			     spdk_nvmf_fc_poller_api_cb cb_func)
2640 {
2641 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2642 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2643 	int err = 0;
2644 
2645 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2646 
2647 	if (args == NULL) {
2648 		err = -ENOMEM;
2649 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2650 		goto done;
2651 	}
2652 	args->hwqp = fc_hwqp;
2653 	args->ctx = ctx;
2654 	args->cb_info.cb_func = cb_func;
2655 	args->cb_info.cb_data = args;
2656 	args->cb_info.cb_thread = spdk_get_thread();
2657 
2658 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2659 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2660 	if (rc) {
2661 		free(args);
2662 		err = -EINVAL;
2663 	}
2664 
2665 done:
2666 	return err;
2667 }
2668 
2669 /*
2670  * Hw port Quiesce
2671  */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	/* Shared context counts outstanding quiesce callbacks; freed by the
	 * last callback (nvmf_fc_adm_queue_quiesce_cb). */
	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if an IO-queue quiesce fails after the LS queue's was
	 * successfully scheduled, the ctx is freed here while the pending
	 * callback still references it — looks like a use-after-free window;
	 * verify against the poller-API failure semantics. */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2744 
2745 /*
2746  * Initialize and add a HW port entry to the global
2747  * HW port list.
2748  */
2749 static void
2750 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2751 {
2752 	ASSERT_SPDK_FC_MASTER_THREAD();
2753 	struct spdk_nvmf_fc_port *fc_port = NULL;
2754 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2755 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2756 			api_data->api_args;
2757 	int err = 0;
2758 
2759 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2760 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2761 		err = EINVAL;
2762 		goto abort_port_init;
2763 	}
2764 
2765 	/*
2766 	 * 1. Check for duplicate initialization.
2767 	 */
2768 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2769 	if (fc_port != NULL) {
2770 		/* Port already exists, check if it has to be re-initialized */
2771 		err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args);
2772 		if (err) {
2773 			/*
2774 			 * In case of an error we do not want to free the fc_port
2775 			 * so we set that pointer to NULL.
2776 			 */
2777 			fc_port = NULL;
2778 		}
2779 		goto abort_port_init;
2780 	}
2781 
2782 	/*
2783 	 * 2. Get the memory to instantiate a fc port.
2784 	 */
2785 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2786 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2787 	if (fc_port == NULL) {
2788 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2789 		err = -ENOMEM;
2790 		goto abort_port_init;
2791 	}
2792 
2793 	/* assign the io_queues array */
2794 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2795 				     struct spdk_nvmf_fc_port));
2796 
2797 	/*
2798 	 * 3. Initialize the contents for the FC-port
2799 	 */
2800 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2801 
2802 	if (err != 0) {
2803 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2804 		DEV_VERIFY(!"Data initialization failed for fc_port");
2805 		goto abort_port_init;
2806 	}
2807 
2808 	/*
2809 	 * 4. Add this port to the global fc port list in the library.
2810 	 */
2811 	nvmf_fc_port_add(fc_port);
2812 
2813 abort_port_init:
2814 	if (err && fc_port) {
2815 		free(fc_port);
2816 	}
2817 	if (api_data->cb_func != NULL) {
2818 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2819 	}
2820 
2821 	free(arg);
2822 
2823 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d initialize done, rc = %d.\n",
2824 		      args->port_handle, err);
2825 }
2826 
2827 /*
2828  * Online a HW port.
2829  */
static void
nvmf_fc_adm_evnt_hw_port_online(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to online */
		err = nvmf_fc_port_set_online(fc_port);
		if (err != 0) {
			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
			DEV_VERIFY(!"Hw port online failed");
			goto out;
		}

		/* LS queue goes online but stays on the master thread (no poll
		 * group registration). */
		hwqp = &fc_port->ls_queue;
		hwqp->context = NULL;
		(void)nvmf_fc_hwqp_set_online(hwqp);

		/* Cycle through all the io queues and setup a hwqp poller for each. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			hwqp->context = NULL;
			(void)nvmf_fc_hwqp_set_online(hwqp);
			nvmf_fc_poll_group_add_hwqp(hwqp);
		}
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}

out:
	/* Report the result to the caller and release the event argument. */
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d online done, rc = %d.\n", args->port_handle,
		      err);
}
2878 
2879 /*
2880  * Offline a HW port.
2881  */
static void
nvmf_fc_adm_evnt_hw_port_offline(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to offline, if it is not already. */
		err = nvmf_fc_port_set_offline(fc_port);
		if (err != 0) {
			/* Already offline is treated as success (err reset to 0). */
			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
			err = 0;
			goto out;
		}

		hwqp = &fc_port->ls_queue;
		(void)nvmf_fc_hwqp_set_offline(hwqp);

		/* Remove poller for all the io queues. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			(void)nvmf_fc_hwqp_set_offline(hwqp);
			nvmf_fc_poll_group_remove_hwqp(hwqp);
		}

		/*
		 * Delete all the nports. Ideally, the nports should have been purged
		 * before the offline event, in which case, only a validation is required.
		 */
		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}
out:
	/* Report the result to the caller and release the event argument. */
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d offline done, rc = %d.\n", args->port_handle,
		      err);
}
2933 
/* Context carried through the per-subsystem pause -> add/remove
 * listener -> resume sequence when an nport comes or goes. Freed by the
 * resume callback (or on error in the listen-done path). */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being updated */
	bool add_listener;			/* true = add listener, false = remove */
	struct spdk_nvme_transport_id trid;	/* FC listen address being added/removed */
};
2939 
/*
 * Subsystem resume completed; the pause/update/resume cycle for this
 * listener context is finished, so release it.
 */
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();

	free(cb_arg);
}
2947 
2948 static void
2949 nvmf_fc_adm_listen_done(void *cb_arg, int status)
2950 {
2951 	ASSERT_SPDK_FC_MASTER_THREAD();
2952 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
2953 
2954 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
2955 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
2956 		free(ctx);
2957 	}
2958 }
2959 
2960 static void
2961 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
2962 {
2963 	ASSERT_SPDK_FC_MASTER_THREAD();
2964 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
2965 
2966 	if (ctx->add_listener) {
2967 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
2968 	} else {
2969 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
2970 		nvmf_fc_adm_listen_done(ctx, 0);
2971 	}
2972 }
2973 
2974 static int
2975 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
2976 {
2977 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
2978 	struct spdk_nvmf_subsystem *subsystem;
2979 
2980 	if (!tgt) {
2981 		SPDK_ERRLOG("No nvmf target defined\n");
2982 		return -EINVAL;
2983 	}
2984 
2985 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
2986 	while (subsystem) {
2987 		struct nvmf_fc_add_rem_listener_ctx *ctx;
2988 
2989 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
2990 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
2991 			if (ctx) {
2992 				ctx->add_listener = add;
2993 				ctx->subsystem = subsystem;
2994 				nvmf_fc_create_trid(&ctx->trid,
2995 						    nport->fc_nodename.u.wwn,
2996 						    nport->fc_portname.u.wwn);
2997 
2998 				if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) {
2999 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3000 						    ctx->trid.traddr);
3001 					free(ctx);
3002 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3003 								     nvmf_fc_adm_subsystem_paused_cb,
3004 								     ctx)) {
3005 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3006 						    subsystem->subnqn);
3007 					free(ctx);
3008 				}
3009 			}
3010 		}
3011 
3012 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3013 	}
3014 
3015 	return 0;
3016 }
3017 
3018 /*
3019  * Create a Nport.
3020  */
3021 static void
3022 nvmf_fc_adm_evnt_nport_create(void *arg)
3023 {
3024 	ASSERT_SPDK_FC_MASTER_THREAD();
3025 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3026 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3027 			api_data->api_args;
3028 	struct spdk_nvmf_fc_nport *nport = NULL;
3029 	struct spdk_nvmf_fc_port *fc_port = NULL;
3030 	int err = 0;
3031 
3032 	/*
3033 	 * Get the physical port.
3034 	 */
3035 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3036 	if (fc_port == NULL) {
3037 		err = -EINVAL;
3038 		goto out;
3039 	}
3040 
3041 	/*
3042 	 * Check for duplicate initialization.
3043 	 */
3044 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3045 	if (nport != NULL) {
3046 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3047 			    args->port_handle);
3048 		err = -EINVAL;
3049 		goto out;
3050 	}
3051 
3052 	/*
3053 	 * Get the memory to instantiate a fc nport.
3054 	 */
3055 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3056 	if (nport == NULL) {
3057 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3058 			    args->nport_handle);
3059 		err = -ENOMEM;
3060 		goto out;
3061 	}
3062 
3063 	/*
3064 	 * Initialize the contents for the nport
3065 	 */
3066 	nport->nport_hdl    = args->nport_handle;
3067 	nport->port_hdl     = args->port_handle;
3068 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3069 	nport->fc_nodename  = args->fc_nodename;
3070 	nport->fc_portname  = args->fc_portname;
3071 	nport->d_id         = args->d_id;
3072 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3073 
3074 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3075 	TAILQ_INIT(&nport->rem_port_list);
3076 	nport->rport_count = 0;
3077 	TAILQ_INIT(&nport->fc_associations);
3078 	nport->assoc_count = 0;
3079 
3080 	/*
3081 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3082 	 */
3083 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3084 
3085 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3086 out:
3087 	if (err && nport) {
3088 		free(nport);
3089 	}
3090 
3091 	if (api_data->cb_func != NULL) {
3092 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3093 	}
3094 
3095 	free(arg);
3096 }
3097 
/*
 * Completion callback used for each SPDK_FC_IT_DELETE event issued while
 * deleting an nport. Once the last remote port has been removed, this
 * unlinks the nport from its fc_port, frees it, frees the shared cb_data,
 * and invokes the original caller's completion callback.
 *
 * cb_args is a struct spdk_nvmf_fc_adm_nport_del_cb_data shared across all
 * rport deletions for this nport; it must only be freed on the final call.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	/* Capture the handle before the nport may be freed below. */
	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All rports gone; no associations should remain either. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		/* Report completion to the original requester, then drop the shared ctx. */
		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
3162 
3163 /*
3164  * Delete Nport.
3165  */
static void
nvmf_fc_adm_evnt_nport_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	int err = 0;		/* allocation / state-machine errors */
	uint32_t rport_cnt = 0;	/* number of IT-delete events enqueued */
	int rc = 0;		/* listener-removal status, tracked separately from err */

	/*
	 * Make sure that the nport exists.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
		err = -ENOMEM;
		goto out;
	}

	/* cb_data is shared by every IT-delete issued below; it is freed by
	 * nvmf_fc_adm_delete_nport_cb once the last rport is gone. */
	cb_data->nport = nport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Begin nport tear down
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this nport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		err = -ENODEV;
		goto out;
	} else {
		/* nport partially created/deleted */
		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(0 != "Nport in zombie state");
		err = -ENODEV;
		goto out;
	}

	/*
	 * Remove this nport from listening addresses across subsystems
	 */
	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);

	if (0 != rc) {
		/* Listener removal failed; park the nport in zombie state rather
		 * than continuing a partial teardown. */
		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
			    nport->nport_hdl);
		goto out;
	}

	/*
	 * Delete all the remote ports (if any) for the nport
	 */
	/* TODO - Need to do this with a "first" and a "next" accessor function
	 * for completeness. Look at app-subsystem as examples.
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* No rports to delete. Complete the nport deletion. */
		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
		goto out;
	}

	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));

		if (it_del_args == NULL) {
			err = -ENOMEM;
			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
				    rport_iter->rpi, rport_iter->s_id);
			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
			goto out;
		}

		rport_cnt++;
		it_del_args->port_handle = nport->port_hdl;
		it_del_args->nport_handle = nport->nport_hdl;
		it_del_args->cb_ctx = (void *)cb_data;
		it_del_args->rpi = rport_iter->rpi;
		it_del_args->s_id = rport_iter->s_id;

		/* Each IT-delete completion funnels into delete_nport_cb, which
		 * finishes the nport teardown after the last one. */
		nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
					     nvmf_fc_adm_delete_nport_cb);
	}

out:
	/* On failure, execute the callback function now */
	if ((err != 0) || (rc != 0)) {
		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
			    "rport_cnt:%d rc:%d.\n",
			    args->nport_handle, err, args->port_handle,
			    rport_cnt, rc);
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
		}

	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
			      "NPort %d delete done succesfully, fc port:%d. "
			      "rport_cnt:%d\n",
			      args->nport_handle, args->port_handle, rport_cnt);
	}

	free(arg);
}
3297 
3298 /*
3299  * Process an PRLI/IT add.
3300  */
3301 static void
3302 nvmf_fc_adm_evnt_i_t_add(void *arg)
3303 {
3304 	ASSERT_SPDK_FC_MASTER_THREAD();
3305 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3306 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3307 			api_data->api_args;
3308 	struct spdk_nvmf_fc_nport *nport = NULL;
3309 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3310 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3311 	int err = 0;
3312 
3313 	/*
3314 	 * Make sure the nport port exists.
3315 	 */
3316 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3317 	if (nport == NULL) {
3318 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3319 		err = -EINVAL;
3320 		goto out;
3321 	}
3322 
3323 	/*
3324 	 * Check for duplicate i_t_add.
3325 	 */
3326 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3327 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3328 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3329 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3330 			err = -EEXIST;
3331 			goto out;
3332 		}
3333 	}
3334 
3335 	/*
3336 	 * Get the memory to instantiate the remote port
3337 	 */
3338 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3339 	if (rport == NULL) {
3340 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3341 		err = -ENOMEM;
3342 		goto out;
3343 	}
3344 
3345 	/*
3346 	 * Initialize the contents for the rport
3347 	 */
3348 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3349 	rport->s_id = args->s_id;
3350 	rport->rpi = args->rpi;
3351 	rport->fc_nodename = args->fc_nodename;
3352 	rport->fc_portname = args->fc_portname;
3353 
3354 	/*
3355 	 * Add remote port to nport
3356 	 */
3357 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3358 		DEV_VERIFY(!"Error while adding rport to list");
3359 	};
3360 
3361 	/*
3362 	 * TODO: Do we validate the initiators service parameters?
3363 	 */
3364 
3365 	/*
3366 	 * Get the targets service parameters from the library
3367 	 * to return back to the driver.
3368 	 */
3369 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3370 
3371 out:
3372 	if (api_data->cb_func != NULL) {
3373 		/*
3374 		 * Passing pointer to the args struct as the first argument.
3375 		 * The cb_func should handle this appropriately.
3376 		 */
3377 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3378 	}
3379 
3380 	free(arg);
3381 
3382 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3383 		      "IT add on nport %d done, rc = %d.\n",
3384 		      args->nport_handle, err);
3385 }
3386 
3387 /**
3388  * Process a IT delete.
3389  */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;	/* rports scanned, for the exit log only */
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port).
	 * Match on s_id + rpi, but only consider rports still in CREATED state.
	 */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 * Ownership of cb_data transfers here; do not touch it afterwards.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	free(arg);
}
3516 
3517 /*
3518  * Process ABTS received
3519  */
3520 static void
3521 nvmf_fc_adm_evnt_abts_recv(void *arg)
3522 {
3523 	ASSERT_SPDK_FC_MASTER_THREAD();
3524 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3525 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3526 	struct spdk_nvmf_fc_nport *nport = NULL;
3527 	int err = 0;
3528 
3529 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3530 		      args->oxid, args->rxid);
3531 
3532 	/*
3533 	 * 1. Make sure the nport port exists.
3534 	 */
3535 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3536 	if (nport == NULL) {
3537 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3538 		err = -EINVAL;
3539 		goto out;
3540 	}
3541 
3542 	/*
3543 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3544 	 */
3545 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3546 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3547 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3548 			      args->rpi, args->oxid, args->rxid);
3549 		err = 0;
3550 		goto out;
3551 
3552 	}
3553 
3554 	/*
3555 	 * 3. Pass the received ABTS-LS to the library for handling.
3556 	 */
3557 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3558 
3559 out:
3560 	if (api_data->cb_func != NULL) {
3561 		/*
3562 		 * Passing pointer to the args struct as the first argument.
3563 		 * The cb_func should handle this appropriately.
3564 		 */
3565 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3566 	} else {
3567 		/* No callback set, free the args */
3568 		free(args);
3569 	}
3570 
3571 	free(arg);
3572 }
3573 
3574 /*
3575  * Callback function for hw port quiesce.
3576  */
/*
 * Runs after the HW port has been quiesced for a reset. If the reset
 * request asked for a queue dump, allocates a dump buffer, writes the
 * reason plus all LS/IO queue state into it, and hands it back through
 * args->dump_buf. Finally invokes the reset completion callback.
 */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	/* Hand the buffer to the caller; ownership transfers to the FC driver. */
	*args->dump_buf  = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqp.
	 */
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, &dump_info);

out:
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}
3652 
3653 /*
3654  * HW port reset
3655 
3656  */
3657 static void
3658 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3659 {
3660 	ASSERT_SPDK_FC_MASTER_THREAD();
3661 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3662 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3663 			api_data->api_args;
3664 	struct spdk_nvmf_fc_port *fc_port = NULL;
3665 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3666 	int err = 0;
3667 
3668 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump\n", args->port_handle);
3669 
3670 	/*
3671 	 * Make sure the physical port exists.
3672 	 */
3673 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3674 	if (fc_port == NULL) {
3675 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3676 		err = -EINVAL;
3677 		goto out;
3678 	}
3679 
3680 	/*
3681 	 * Save the reset event args and the callback in a context struct.
3682 	 */
3683 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3684 
3685 	if (ctx == NULL) {
3686 		err = -ENOMEM;
3687 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3688 		goto fail;
3689 	}
3690 
3691 	ctx->reset_args = arg;
3692 	ctx->reset_cb_func = api_data->cb_func;
3693 
3694 	/*
3695 	 * Quiesce the hw port.
3696 	 */
3697 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3698 	if (err != 0) {
3699 		goto fail;
3700 	}
3701 
3702 	/*
3703 	 * Once the ports are successfully quiesced the reset processing
3704 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3705 	 */
3706 	return;
3707 fail:
3708 	free(ctx);
3709 
3710 out:
3711 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump done, rc = %d.\n", args->port_handle,
3712 		      err);
3713 
3714 	if (api_data->cb_func != NULL) {
3715 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3716 	}
3717 
3718 	free(arg);
3719 }
3720 
3721 /*
3722  * Process a link break event on a HW port.
3723  */
static void
nvmf_fc_adm_evnt_hw_port_link_break(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_nport *nport = NULL;
	uint32_t nport_deletes_sent = 0;	/* NPORT_DELETE events enqueued */
	uint32_t nport_deletes_skipped = 0;	/* nports not in CREATED state */
	struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL;
	char log_str[256];

	/*
	 * Get the fc port using the port handle.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n",
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Set the port state to offline, if it is not already.
	 */
	err = nvmf_fc_port_set_offline(fc_port);
	if (err != 0) {
		SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n",
			    fc_port->port_hdl, err);
		/* Already offline is not a failure; clear err before reporting. */
		err = 0;
		goto out;
	}

	/*
	 * Delete all the nports, if any.
	 */
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			/* Skipped the nports that are not in CREATED state */
			if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
				nport_deletes_skipped++;
				continue;
			}

			/* Allocate memory for callback data.
			 * One cb_data per nport delete; freed by the delete callback. */
			cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data));
			if (NULL == cb_data) {
				SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n",
					    args->port_handle);
				err = -ENOMEM;
				goto out;
			}
			cb_data->args = args;
			cb_data->cb_func = api_data->cb_func;
			nport_del_args = &cb_data->nport_del_args;
			nport_del_args->port_handle = args->port_handle;
			nport_del_args->nport_handle = nport->nport_hdl;
			nport_del_args->cb_ctx = cb_data;

			/* The link-break callback fires once the nport delete completes. */
			nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE,
						     (void *)nport_del_args,
						     nvmf_fc_adm_hw_port_link_break_cb);

			nport_deletes_sent++;
		}
	}

	if (nport_deletes_sent == 0 && err == 0) {
		/*
		 * Mark the hwqps as offline and unregister the pollers.
		 */
		(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);
	}

out:
	snprintf(log_str, sizeof(log_str),
		 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n",
		 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) {
		/*
		 * No nport_deletes are sent, which would have eventually
		 * called the port_link_break callback. Therefore, call the
		 * port_link_break callback here.
		 */
		(void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err);
	}

	free(arg);
}
3825 
3826 static inline void
3827 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3828 {
3829 	if (nvmf_fc_get_master_thread()) {
3830 		spdk_thread_send_msg(nvmf_fc_get_master_thread(), fn, args);
3831 	}
3832 }
3833 
3834 /*
3835  * Queue up an event in the SPDK masters event queue.
3836  * Used by the FC driver to notify the SPDK master of FC related events.
3837  */
3838 int
3839 nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3840 			     spdk_nvmf_fc_callback cb_func)
3841 {
3842 	int err = 0;
3843 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3844 
3845 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d.\n", event_type);
3846 
3847 	if (event_type >= SPDK_FC_EVENT_MAX) {
3848 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3849 		err = -EINVAL;
3850 		goto done;
3851 	}
3852 
3853 	if (args == NULL) {
3854 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3855 		err = -EINVAL;
3856 		goto done;
3857 	}
3858 
3859 	api_data = calloc(1, sizeof(*api_data));
3860 
3861 	if (api_data == NULL) {
3862 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3863 		err = -ENOMEM;
3864 		goto done;
3865 	}
3866 
3867 	api_data->api_args = args;
3868 	api_data->cb_func = cb_func;
3869 
3870 	switch (event_type) {
3871 	case SPDK_FC_HW_PORT_INIT:
3872 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_init,
3873 						 (void *)api_data);
3874 		break;
3875 
3876 	case SPDK_FC_HW_PORT_ONLINE:
3877 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_online,
3878 						 (void *)api_data);
3879 		break;
3880 
3881 	case SPDK_FC_HW_PORT_OFFLINE:
3882 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_offline,
3883 						 (void *)api_data);
3884 		break;
3885 
3886 	case SPDK_FC_NPORT_CREATE:
3887 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_create,
3888 						 (void *)api_data);
3889 		break;
3890 
3891 	case SPDK_FC_NPORT_DELETE:
3892 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_delete,
3893 						 (void *)api_data);
3894 		break;
3895 
3896 	case SPDK_FC_IT_ADD:
3897 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_add,
3898 						 (void *)api_data);
3899 		break;
3900 
3901 	case SPDK_FC_IT_DELETE:
3902 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_delete,
3903 						 (void *)api_data);
3904 		break;
3905 
3906 	case SPDK_FC_ABTS_RECV:
3907 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_abts_recv,
3908 						 (void *)api_data);
3909 		break;
3910 
3911 	case SPDK_FC_LINK_BREAK:
3912 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_link_break,
3913 						 (void *)api_data);
3914 		break;
3915 
3916 	case SPDK_FC_HW_PORT_RESET:
3917 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_reset,
3918 						 (void *)api_data);
3919 		break;
3920 
3921 	case SPDK_FC_UNRECOVERABLE_ERR:
3922 	default:
3923 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3924 		err = -EINVAL;
3925 		break;
3926 	}
3927 
3928 done:
3929 
3930 	if (err == 0) {
3931 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d done successfully\n", event_type);
3932 	} else {
3933 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3934 		if (api_data) {
3935 			free(api_data);
3936 		}
3937 	}
3938 
3939 	return err;
3940 }
3941 
/* Register the FC transport ops with the nvmf layer and the log flags used above. */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc_adm_api", SPDK_LOG_NVMF_FC_ADM_API);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc", SPDK_LOG_NVMF_FC)
3945