xref: /spdk/lib/nvmf/fc.c (revision 91a594ad8b3c0d00f25c9a20dfc8f1f2dfce81ce)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/event.h"
45 #include "spdk/likely.h"
46 #include "spdk/endian.h"
47 #include "spdk/log.h"
48 #include "spdk/thread.h"
49 
50 #include "spdk_internal/log.h"
51 
52 #include "nvmf_fc.h"
53 #include "fc_lld.h"
54 
/* Debug-build sanity check; defaults to assert() unless overridden by the LLD. */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/*
 * Verify the caller is running on the FC master thread.
 * NOTE(review): the expansion ends with ';', so invocations written as
 * "ASSERT_SPDK_FC_MASTER_THREAD();" expand to an extra empty statement.
 */
#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
        DEV_VERIFY(spdk_get_thread() == spdk_nvmf_fc_get_master_thread());
#endif
63 
/*
 * PRLI service parameters
 *
 * Bit flags carried in the FC-NVMe PRLI service-parameter page; the
 * flags this target advertises are assembled in
 * spdk_nvmf_fc_get_prli_service_params().
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
74 
/*
 * Printable names for the FC request states, indexed by state value.
 * NOTE(review): must stay in sync with enum spdk_nvmf_fc_request_state
 * (declared in nvmf_fc.h) — order and count are assumed to match.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
92 
/* Trace object type used to tag FC IO requests in the trace stream. */
#define OBJECT_NVMF_FC_IO				0xA0

/* Trace point ids (group 0x8): one per FC request state transition. */
#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
111 
112 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
113 {
114 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
115 	spdk_trace_register_description("FC_REQ_NEW",
116 					TRACE_FC_REQ_INIT,
117 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
118 	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
119 					TRACE_FC_REQ_READ_BDEV,
120 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
121 	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
122 					TRACE_FC_REQ_READ_XFER,
123 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
124 	spdk_trace_register_description("FC_REQ_READ_RSP",
125 					TRACE_FC_REQ_READ_RSP,
126 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
127 	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
128 					TRACE_FC_REQ_WRITE_BUFFS,
129 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
130 	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
131 					TRACE_FC_REQ_WRITE_XFER,
132 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
133 	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
134 					TRACE_FC_REQ_WRITE_BDEV,
135 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
136 	spdk_trace_register_description("FC_REQ_WRITE_RSP",
137 					TRACE_FC_REQ_WRITE_RSP,
138 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
139 	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
140 					TRACE_FC_REQ_NONE_BDEV,
141 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
142 	spdk_trace_register_description("FC_REQ_NONE_RSP",
143 					TRACE_FC_REQ_NONE_RSP,
144 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
145 	spdk_trace_register_description("FC_REQ_SUCCESS",
146 					TRACE_FC_REQ_SUCCESS,
147 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
148 	spdk_trace_register_description("FC_REQ_FAILED",
149 					TRACE_FC_REQ_FAILED,
150 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
151 	spdk_trace_register_description("FC_REQ_ABORTED",
152 					TRACE_FC_REQ_ABORTED,
153 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
154 	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
155 					TRACE_FC_REQ_BDEV_ABORTED,
156 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
157 	spdk_trace_register_description("FC_REQ_PENDING",
158 					TRACE_FC_REQ_PENDING,
159 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
160 }
161 
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;			/* operation-specific argument block */
	spdk_nvmf_fc_callback cb_func;	/* caller's completion callback */
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport being torn down */
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};


/* Completion callback invoked when an it-delete-assoc operation finishes. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};
204 
/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;	/* NOTE(review): presumably hwqps still pending — confirm in usage */
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

/**
 * The callback structure for HW port link break event
 */
struct spdk_nvmf_fc_adm_port_link_break_cb_data {
	struct spdk_nvmf_hw_port_link_break_args *args;
	struct spdk_nvmf_fc_nport_delete_args nport_del_args;
	spdk_nvmf_fc_callback cb_func;
};

/* FC transport instance: the generic nvmf transport plus a transport lock. */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;
};
240 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* All FC ports known to the transport (see spdk_nvmf_fc_port_add/lookup). */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread that owns FC administrative work (see ASSERT_SPDK_FC_MASTER_THREAD). */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* Poll groups available for hwqp assignment, and how many there are. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
251 
/* Return the thread designated as the FC master (admin) thread. */
struct spdk_thread *
spdk_nvmf_fc_get_master_thread(void)
{
	return g_nvmf_fc_master_thread;
}
257 
258 static inline void
259 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
260 			       enum spdk_nvmf_fc_request_state state)
261 {
262 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
263 
264 	switch (state) {
265 	case SPDK_NVMF_FC_REQ_INIT:
266 		/* Start IO tracing */
267 		tpoint_id = TRACE_FC_REQ_INIT;
268 		break;
269 	case SPDK_NVMF_FC_REQ_READ_BDEV:
270 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
271 		break;
272 	case SPDK_NVMF_FC_REQ_READ_XFER:
273 		tpoint_id = TRACE_FC_REQ_READ_XFER;
274 		break;
275 	case SPDK_NVMF_FC_REQ_READ_RSP:
276 		tpoint_id = TRACE_FC_REQ_READ_RSP;
277 		break;
278 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
279 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
280 		break;
281 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
282 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
283 		break;
284 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
285 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
286 		break;
287 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
288 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
289 		break;
290 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
291 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
292 		break;
293 	case SPDK_NVMF_FC_REQ_NONE_RSP:
294 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
295 		break;
296 	case SPDK_NVMF_FC_REQ_SUCCESS:
297 		tpoint_id = TRACE_FC_REQ_SUCCESS;
298 		break;
299 	case SPDK_NVMF_FC_REQ_FAILED:
300 		tpoint_id = TRACE_FC_REQ_FAILED;
301 		break;
302 	case SPDK_NVMF_FC_REQ_ABORTED:
303 		tpoint_id = TRACE_FC_REQ_ABORTED;
304 		break;
305 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
306 		tpoint_id = TRACE_FC_REQ_ABORTED;
307 		break;
308 	case SPDK_NVMF_FC_REQ_PENDING:
309 		tpoint_id = TRACE_FC_REQ_PENDING;
310 		break;
311 	default:
312 		assert(0);
313 		break;
314 	}
315 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
316 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
317 				  (uint64_t)(&fc_req->req), 0);
318 	}
319 }
320 
321 static void
322 nvmf_fc_handle_connection_failure(void *arg)
323 {
324 	struct spdk_nvmf_fc_conn *fc_conn = arg;
325 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
326 
327 	if (!fc_conn->create_opd) {
328 		return;
329 	}
330 	api_data = &fc_conn->create_opd->u.add_conn;
331 
332 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
333 				    api_data->args.fc_conn, api_data->aq_conn);
334 }
335 
/*
 * Deferred handler: delete the association that owns this connection.
 * NOTE(review): the two bool args to spdk_nvmf_fc_delete_association are
 * passed as (false, true); confirm their meaning against its prototype
 * before changing.
 */
static void
nvmf_fc_handle_assoc_deletion(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;

	spdk_nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
					fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
}
344 
345 static int
346 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
347 {
348 	uint32_t i;
349 	struct spdk_nvmf_fc_request *fc_req;
350 
351 	TAILQ_INIT(&hwqp->free_reqs);
352 	TAILQ_INIT(&hwqp->in_use_reqs);
353 
354 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
355 	if (hwqp->fc_reqs_buf == NULL) {
356 		SPDK_ERRLOG("create fc request pool failed\n");
357 		return -ENOMEM;
358 	}
359 
360 	for (i = 0; i < hwqp->rq_size; i++) {
361 		fc_req = hwqp->fc_reqs_buf + i;
362 
363 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
364 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
365 	}
366 
367 	return 0;
368 }
369 
370 static inline struct spdk_nvmf_fc_request *
371 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
372 {
373 	struct spdk_nvmf_fc_request *fc_req;
374 
375 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
376 		SPDK_ERRLOG("Alloc request buffer failed\n");
377 		return NULL;
378 	}
379 
380 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
381 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
382 
383 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
384 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
385 	TAILQ_INIT(&fc_req->abort_cbs);
386 	return fc_req;
387 }
388 
389 static inline void
390 nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
391 {
392 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
393 		/* Log an error for debug purpose. */
394 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
395 	}
396 
397 	/* set the magic to mark req as no longer valid. */
398 	fc_req->magic = 0xDEADBEEF;
399 
400 	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
401 	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
402 }
403 
404 static inline bool
405 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
406 {
407 	switch (fc_req->state) {
408 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
409 		return true;
410 	default:
411 		return false;
412 	}
413 }
414 
/* Initialize the hwqp's receive-queue buffers (thin wrapper over the LLD). */
void
spdk_nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	nvmf_fc_init_rqpair_buffers(hwqp);
}
420 
421 struct spdk_nvmf_fc_conn *
422 spdk_nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
423 {
424 	struct spdk_nvmf_fc_conn *fc_conn;
425 
426 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
427 		if (fc_conn->conn_id == conn_id) {
428 			return fc_conn;
429 		}
430 	}
431 
432 	return NULL;
433 }
434 
/*
 * Re-initialize a hwqp's queues (queues_curr supplies the new queue
 * memory), first discarding any queue-sync callbacks still outstanding
 * on this hwqp.
 */
void
spdk_nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			/* The ABTS ctx is shared by all involved hwqps; only
			 * the last hwqp to respond may free it. */
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}
456 
457 void
458 spdk_nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
459 {
460 	hwqp->fc_port = fc_port;
461 
462 	/* clear counters */
463 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
464 
465 	spdk_nvmf_fc_init_poller_queues(hwqp);
466 	if (&fc_port->ls_queue != hwqp) {
467 		nvmf_fc_create_req_mempool(hwqp);
468 	}
469 
470 	nvmf_fc_init_q(hwqp);
471 	TAILQ_INIT(&hwqp->connection_list);
472 	TAILQ_INIT(&hwqp->sync_cbs);
473 	TAILQ_INIT(&hwqp->ls_pending_queue);
474 }
475 
476 static struct spdk_nvmf_fc_poll_group *
477 nvmf_fc_get_idlest_poll_group(void)
478 {
479 	uint32_t max_count = UINT32_MAX;
480 	struct spdk_nvmf_fc_poll_group *fgroup;
481 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
482 
483 	/* find poll group with least number of hwqp's assigned to it */
484 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
485 		if (fgroup->hwqp_count < max_count) {
486 			ret_fgroup = fgroup;
487 			max_count = fgroup->hwqp_count;
488 		}
489 	}
490 
491 	return ret_fgroup;
492 }
493 
494 void
495 spdk_nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
496 {
497 	struct spdk_nvmf_fc_poll_group *fgroup = NULL;
498 
499 	assert(hwqp);
500 	if (hwqp == NULL) {
501 		SPDK_ERRLOG("Error: hwqp is NULL\n");
502 		return;
503 	}
504 
505 	assert(g_nvmf_fgroup_count);
506 
507 	fgroup = nvmf_fc_get_idlest_poll_group();
508 	if (!fgroup) {
509 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
510 		return;
511 	}
512 
513 	hwqp->thread = fgroup->group.group->thread;
514 	hwqp->fgroup = fgroup;
515 	fgroup->hwqp_count++;
516 	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
517 }
518 
519 void
520 spdk_nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
521 {
522 	assert(hwqp);
523 
524 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
525 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
526 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
527 
528 	if (!hwqp->fgroup) {
529 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
530 	} else {
531 		hwqp->fgroup->hwqp_count--;
532 		spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
533 	}
534 }
535 
536 /*
537  * Note: This needs to be used only on master poller.
538  */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	/* Monotonic id, 1-based; unsynchronized static counter, hence the
	 * master-poller-only restriction noted above. Wraps at UINT32_MAX. */
	static uint32_t u_id = 0;

	u_id++;
	return (uint64_t)u_id;
}
546 
/*
 * Poller-api callback invoked as each hwqp finishes its queue sync.
 * When the last hwqp responds, the ctx is reset and the original ABTS
 * is re-delivered to all involved pollers (second and final attempt —
 * queue_synced prevents another sync round).
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}
}
583 
/*
 * No poller recognized the exchange named by an ABTS: ask every involved
 * hwqp to sync its queues so any in-flight request surfaces, then retry
 * the ABTS from nvmf_fc_queue_synced_cb(). Returns -EPERM if the LLD
 * cannot sync, -EINVAL on a NULL ctx, -ENOMEM on allocation failure,
 * 0 when the sync round was dispatched.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Build one queue-sync arg per hwqp, mirroring the ABTS args. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					     poller_arg);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
638 
639 static void
640 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
641 {
642 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
643 	struct spdk_nvmf_fc_nport *nport  = NULL;
644 
645 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
646 		ctx->handled = true;
647 	}
648 
649 	ctx->hwqps_responded++;
650 
651 	if (ctx->hwqps_responded < ctx->num_hwqps) {
652 		/* Wait for all pollers to complete. */
653 		return;
654 	}
655 
656 	nport = spdk_nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
657 
658 	if (ctx->nport != nport) {
659 		/* Nport can be deleted while this abort is being
660 		 * processed by the pollers.
661 		 */
662 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
663 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
664 	} else {
665 		if (!ctx->handled) {
666 			/* Try syncing the queues and try one more time */
667 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
668 				SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
669 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
670 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
671 				return;
672 			} else {
673 				/* Send Reject */
674 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
675 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
676 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
677 			}
678 		} else {
679 			/* Send Accept */
680 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
681 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
682 					    0, NULL, NULL);
683 		}
684 	}
685 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
686 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
687 
688 	free(ctx->abts_poller_args);
689 	free(ctx);
690 }
691 
/*
 * Entry point for an inbound ABTS frame. Fans the abort out to every
 * hwqp that carries at least one connection for the given rpi; responses
 * are collected in nvmf_fc_abts_handled_cb(), which sends the final BLS
 * accept/reject. Sends an immediate BLS reject when no connection
 * matches or resources cannot be allocated.
 */
void
spdk_nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			       uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Collect the distinct hwqps hosting connections for this rpi. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	/* No connection for this rpi -> nothing to abort, reject. */
	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	/* Hand one args slot to each involved hwqp; ctx and args are freed
	 * by the last responder in nvmf_fc_abts_handled_cb(). */
	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
792 
793 /*** Accessor functions for the FC structures - BEGIN */
794 /*
795  * Returns true if the port is in offline state.
796  */
797 bool
798 spdk_nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
799 {
800 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
801 		return true;
802 	}
803 
804 	return false;
805 }
806 
807 /*
808  * Returns true if the port is in online state.
809  */
810 bool
811 spdk_nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
812 {
813 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
814 		return true;
815 	}
816 
817 	return false;
818 }
819 
820 int
821 spdk_nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
822 {
823 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
824 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
825 		return 0;
826 	}
827 
828 	return -EPERM;
829 }
830 
831 int
832 spdk_nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
833 {
834 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
835 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
836 		return 0;
837 	}
838 
839 	return -EPERM;
840 }
841 
842 int
843 spdk_nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
844 {
845 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
846 		hwqp->state = SPDK_FC_HWQP_ONLINE;
847 		/* reset some queue counters */
848 		hwqp->num_conns = 0;
849 		return nvmf_fc_set_q_online_state(hwqp, true);
850 	}
851 
852 	return -EPERM;
853 }
854 
855 int
856 spdk_nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
857 {
858 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
859 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
860 		return nvmf_fc_set_q_online_state(hwqp, false);
861 	}
862 
863 	return -EPERM;
864 }
865 
/* Register a newly-created FC port on the global port list. */
void
spdk_nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}
871 
872 struct spdk_nvmf_fc_port *
873 spdk_nvmf_fc_port_lookup(uint8_t port_hdl)
874 {
875 	struct spdk_nvmf_fc_port *fc_port = NULL;
876 
877 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
878 		if (fc_port->port_hdl == port_hdl) {
879 			return fc_port;
880 		}
881 	}
882 	return NULL;
883 }
884 
885 static void
886 nvmf_fc_port_cleanup(void)
887 {
888 	struct spdk_nvmf_fc_port *fc_port, *tmp;
889 	struct spdk_nvmf_fc_hwqp *hwqp;
890 	uint32_t i;
891 
892 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
893 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
894 		for (i = 0; i < fc_port->num_io_queues; i++) {
895 			hwqp = &fc_port->io_queues[i];
896 			if (hwqp->fc_reqs_buf) {
897 				free(hwqp->fc_reqs_buf);
898 			}
899 		}
900 		free(fc_port);
901 	}
902 }
903 
904 uint32_t
905 spdk_nvmf_fc_get_prli_service_params(void)
906 {
907 	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
908 }
909 
910 int
911 spdk_nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
912 			    struct spdk_nvmf_fc_nport *nport)
913 {
914 	if (fc_port) {
915 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
916 		fc_port->num_nports++;
917 		return 0;
918 	}
919 
920 	return -EINVAL;
921 }
922 
923 int
924 spdk_nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
925 			       struct spdk_nvmf_fc_nport *nport)
926 {
927 	if (fc_port && nport) {
928 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
929 		fc_port->num_nports--;
930 		return 0;
931 	}
932 
933 	return -EINVAL;
934 }
935 
936 static struct spdk_nvmf_fc_nport *
937 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
938 {
939 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
940 
941 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
942 		if (fc_nport->nport_hdl == nport_hdl) {
943 			return fc_nport;
944 		}
945 	}
946 
947 	return NULL;
948 }
949 
/*
 * Resolve (port handle, nport handle) to an nport. Returns NULL when the
 * physical port or the nport does not exist.
 */
struct spdk_nvmf_fc_nport *
spdk_nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = spdk_nvmf_fc_port_lookup(port_hdl);

	if (fc_port == NULL) {
		return NULL;
	}

	return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
}
962 
963 static inline int
964 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
965 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
966 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
967 {
968 	struct spdk_nvmf_fc_nport *n_port;
969 	struct spdk_nvmf_fc_remote_port_info *r_port;
970 
971 	assert(hwqp);
972 	if (hwqp == NULL) {
973 		SPDK_ERRLOG("Error: hwqp is NULL\n");
974 		return -EINVAL;
975 	}
976 	assert(nport);
977 	if (nport == NULL) {
978 		SPDK_ERRLOG("Error: nport is NULL\n");
979 		return -EINVAL;
980 	}
981 	assert(rport);
982 	if (rport == NULL) {
983 		SPDK_ERRLOG("Error: rport is NULL\n");
984 		return -EINVAL;
985 	}
986 
987 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
988 		if (n_port->d_id == d_id) {
989 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
990 				if (r_port->s_id == s_id) {
991 					*nport = n_port;
992 					*rport = r_port;
993 					return 0;
994 				}
995 			}
996 			break;
997 		}
998 	}
999 
1000 	return -ENOENT;
1001 }
1002 
1003 /* Returns true if the Nport is empty of all rem_ports */
1004 bool
1005 spdk_nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1006 {
1007 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1008 		assert(nport->rport_count == 0);
1009 		return true;
1010 	} else {
1011 		return false;
1012 	}
1013 }
1014 
1015 int
1016 spdk_nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1017 			     enum spdk_nvmf_fc_object_state state)
1018 {
1019 	if (nport) {
1020 		nport->nport_state = state;
1021 		return 0;
1022 	} else {
1023 		return -EINVAL;
1024 	}
1025 }
1026 
/*
 * Add a remote port to the nport's list and bump the count.
 *
 * NOTE(review): declared bool but returns 0 on success and -EINVAL on
 * failure; -EINVAL converts to true, so a caller testing the bool sees
 * "true" on the error path. The return type should likely be int —
 * audit callers before changing.
 */
bool
spdk_nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
				struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1039 
/*
 * Unlink rem_port from nport's remote-port list and decrement rport_count.
 * The caller is responsible for freeing rem_port.
 *
 * NOTE(review): same bool-vs-int mismatch as the add variant — returns
 * 0 on success and -EINVAL on failure despite the bool return type, so
 * boolean tests of the result are inverted. Treat as an int rc.
 */
bool
spdk_nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
				   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1052 
1053 int
1054 spdk_nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1055 			     enum spdk_nvmf_fc_object_state state)
1056 {
1057 	if (rport) {
1058 		rport->rport_state = state;
1059 		return 0;
1060 	} else {
1061 		return -EINVAL;
1062 	}
1063 }
1064 int
1065 spdk_nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1066 			     enum spdk_nvmf_fc_object_state state)
1067 {
1068 	if (assoc) {
1069 		assoc->assoc_state = state;
1070 		return 0;
1071 	} else {
1072 		return -EINVAL;
1073 	}
1074 }
1075 
1076 static struct spdk_nvmf_fc_association *
1077 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1078 {
1079 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1080 	struct spdk_nvmf_fc_conn *fc_conn;
1081 
1082 	if (!qpair) {
1083 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1084 		return NULL;
1085 	}
1086 
1087 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1088 
1089 	return fc_conn->fc_assoc;
1090 }
1091 
1092 bool
1093 spdk_nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1094 			    struct spdk_nvmf_ctrlr *ctrlr)
1095 {
1096 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1097 	struct spdk_nvmf_fc_association *assoc = NULL;
1098 
1099 	if (!ctrlr) {
1100 		return false;
1101 	}
1102 
1103 	fc_nport = spdk_nvmf_fc_nport_find(port_hdl, nport_hdl);
1104 	if (!fc_nport) {
1105 		return false;
1106 	}
1107 
1108 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1109 	if (assoc && assoc->tgtport == fc_nport) {
1110 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
1111 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1112 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1113 			      nport_hdl);
1114 		return true;
1115 	}
1116 	return false;
1117 }
1118 
1119 static inline bool
1120 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1121 {
1122 	switch (fc_req->state) {
1123 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1124 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1125 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1126 		return true;
1127 	default:
1128 		return false;
1129 	}
1130 }
1131 
1132 static inline bool
1133 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1134 {
1135 	struct spdk_nvmf_request *tmp = NULL;
1136 
1137 	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
1138 		if (tmp == &fc_req->req) {
1139 			return true;
1140 		}
1141 	}
1142 	return false;
1143 }
1144 
1145 static void
1146 nvmf_fc_req_bdev_abort(void *arg1)
1147 {
1148 	struct spdk_nvmf_fc_request *fc_req = arg1;
1149 	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1150 
1151 	/* Initial release - we don't have to abort Admin Queue or
1152 	 * Fabric commands. The AQ commands supported at this time are
1153 	 * Get-Log-Page,
1154 	 * Identify
1155 	 * Set Features
1156 	 * Get Features
1157 	 * AER -> Special case and handled differently.
1158 	 * Every one of the above Admin commands (except AER) run
1159 	 * to completion and so an Abort of such commands doesn't
1160 	 * make sense.
1161 	 */
1162 	/* The Fabric commands supported are
1163 	 * Property Set
1164 	 * Property Get
1165 	 * Connect -> Special case (async. handling). Not sure how to
1166 	 * handle at this point. Let it run to completion.
1167 	 */
1168 	if (ctrlr->aer_req == &fc_req->req) {
1169 		SPDK_NOTICELOG("Abort AER request\n");
1170 		nvmf_qpair_free_aer(fc_req->req.qpair);
1171 	}
1172 }
1173 
1174 void
1175 spdk_nvmf_fc_request_abort_complete(void *arg1)
1176 {
1177 	struct spdk_nvmf_fc_request *fc_req =
1178 		(struct spdk_nvmf_fc_request *)arg1;
1179 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1180 
1181 	/* Request abort completed. Notify all the callbacks */
1182 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1183 		/* Notify */
1184 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1185 		/* Remove */
1186 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1187 		/* free */
1188 		free(ctx);
1189 	}
1190 
1191 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1192 		       fc_req_state_strs[fc_req->state]);
1193 
1194 	spdk_nvmf_fc_request_free(fc_req);
1195 }
1196 
/*
 * Abort an outstanding FC request, optionally registering a callback that
 * is invoked once the abort completes (from
 * spdk_nvmf_fc_request_abort_complete()).
 *
 * fc_req    - request to abort.
 * send_abts - whether an ABTS should be sent for the exchange (recorded on
 *             the xchg, if one is allocated).
 * cb/cb_args - optional completion notification; queued on fc_req->abort_cbs.
 *
 * Depending on the request's current state the abort is either completed
 * immediately via the poller API, or deferred to the bdev / HBA / buffer
 * completion path that currently owns the request.
 */
void
spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
			   spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = spdk_nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && spdk_nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		/* A previous abort is in flight; the callback added above
		 * will still be invoked when it completes. */
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch on the request's current owner. */
	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (spdk_nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				     (void *)fc_req);
}
1273 
1274 static int
1275 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1276 {
1277 	uint32_t length = fc_req->req.length;
1278 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1279 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1280 	struct spdk_nvmf_transport *transport = group->transport;
1281 
1282 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1283 		return -ENOMEM;
1284 	}
1285 
1286 	return 0;
1287 }
1288 
1289 static int
1290 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1291 {
1292 	/* Allocate an XCHG if we dont use send frame for this command. */
1293 	if (!spdk_nvmf_fc_use_send_frame(&fc_req->req)) {
1294 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1295 		if (!fc_req->xchg) {
1296 			fc_req->hwqp->counters.no_xchg++;
1297 			printf("NO XCHGs!\n");
1298 			goto pending;
1299 		}
1300 	}
1301 
1302 	if (fc_req->req.length) {
1303 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1304 			fc_req->hwqp->counters.buf_alloc_err++;
1305 			goto pending;
1306 		}
1307 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1308 	}
1309 
1310 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1311 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "WRITE CMD.\n");
1312 
1313 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1314 
1315 		if (nvmf_fc_recv_data(fc_req)) {
1316 			/* Dropped return success to caller */
1317 			fc_req->hwqp->counters.unexpected_err++;
1318 			spdk_nvmf_fc_request_free(fc_req);
1319 		}
1320 	} else {
1321 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "READ/NONE CMD\n");
1322 
1323 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1324 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1325 		} else {
1326 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1327 		}
1328 		spdk_nvmf_request_exec(&fc_req->req);
1329 	}
1330 
1331 	return 0;
1332 
1333 pending:
1334 	if (fc_req->xchg) {
1335 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1336 		fc_req->xchg = NULL;
1337 	}
1338 
1339 	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1340 
1341 	return -EAGAIN;
1342 }
1343 
/*
 * Validate a received NVMe command IU and turn it into an FC request.
 *
 * hwqp    - hardware queue the frame arrived on.
 * frame   - FC frame header (big-endian fields).
 * buf_idx - RQ buffer index, recorded so the buffer can be returned later.
 * buffer  - RQ buffer holding the command IU.
 * plen    - payload length (unused here beyond the IU's own length field).
 *
 * Returns 0 once the request has been accepted (posted immediately or
 * queued on the pending-buffer queue), or a negative errno when the IU,
 * connection, association, qpair state or transfer length is invalid.
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	/* cmnd_iu_len arrives big-endian; convert in place via a local. */
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = spdk_nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	/* Populate the request from the IU, frame header and connection. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	/* s_id/d_id are 24-bit addresses stored big-endian in the header;
	 * convert and drop the low byte. */
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		/* No resources right now - retry from the pending queue. */
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}
1434 
1435 /*
1436  * These functions are called from the FC LLD
1437  */
1438 
1439 void
1440 spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1441 {
1442 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1443 	struct spdk_nvmf_fc_poll_group *fgroup = hwqp->fgroup;
1444 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1445 	struct spdk_nvmf_transport *transport = group->transport;
1446 
1447 	if (!fc_req) {
1448 		return;
1449 	}
1450 
1451 	if (fc_req->xchg) {
1452 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1453 		fc_req->xchg = NULL;
1454 	}
1455 
1456 	/* Release IO buffers */
1457 	if (fc_req->req.data_from_pool) {
1458 		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
1459 	}
1460 	fc_req->req.data = NULL;
1461 	fc_req->req.iovcnt  = 0;
1462 
1463 	/* Release Q buffer */
1464 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1465 
1466 	/* Free Fc request */
1467 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1468 }
1469 
/*
 * Transition an FC request to a new state, logging the old/new states and
 * recording a trace point for the transition.
 */
void
spdk_nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	/* 0xDEADBEEF marks a freed request - catch use-after-free in debug. */
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      spdk_nvmf_fc_request_get_state_str(fc_req->state),
		      spdk_nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1483 
1484 char *
1485 spdk_nvmf_fc_request_get_state_str(int state)
1486 {
1487 	static char *unk_str = "unknown";
1488 
1489 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1490 		fc_req_state_strs[state] : unk_str);
1491 }
1492 
/*
 * Entry point for a received FC frame: resolve the nport/rport from the
 * frame's addresses, validate their state, then dispatch by frame type -
 * LS requests go to the LS module (or the LS pending queue when no
 * exchange is available), NVMe command frames go to
 * nvmf_fc_hwqp_handle_request(). Returns 0 on success, negative errno
 * when the frame is dropped.
 */
int
spdk_nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
				uint32_t buff_idx,
				struct spdk_nvmf_fc_frame_hdr *frame,
				struct spdk_nvmf_fc_buffer_desc *buffer,
				uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* 24-bit big-endian addresses: convert and drop the low byte. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	/* Both ports must be fully created (not deleting) to accept frames. */
	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1597 
1598 void
1599 spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1600 {
1601 	struct spdk_nvmf_request *req = NULL, *tmp;
1602 	struct spdk_nvmf_fc_request *fc_req;
1603 	int budget = 64;
1604 
1605 	if (!hwqp->fgroup) {
1606 		/* LS queue is tied to acceptor_poll group and LS pending requests
1607 		 * are stagged and processed using hwqp->ls_pending_queue.
1608 		 */
1609 		return;
1610 	}
1611 
1612 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1613 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1614 		if (!nvmf_fc_request_execute(fc_req)) {
1615 			/* Succesfuly posted, Delete from pending. */
1616 			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
1617 		}
1618 
1619 		if (budget) {
1620 			budget--;
1621 		} else {
1622 			return;
1623 		}
1624 	}
1625 }
1626 
/*
 * Retry LS requests that were parked because no exchange was available.
 * For each queued LS request, re-validate the nport/rport (they may have
 * gone away or started deletion since the frame arrived, in which case
 * the request is dropped and its RQ buffer returned), then try to grab
 * an exchange. Processing stops at the first exchange-pool exhaustion
 * since later entries would fail the same way.
 */
void
spdk_nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1676 
/*
 * Transmit the completion for a finished request. Sends a full extended
 * response (ERSP) when spdk_nvmf_fc_send_ersp_required() says one is
 * needed, otherwise an empty response frame. Returns the rc of the
 * underlying transmit call.
 */
int
spdk_nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = spdk_nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	/* (transfered_len is the field's spelling in the request struct.) */
	if (spdk_nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
					    fc_req->transfered_len)) {
		/* Fill ERSP Len (big-endian, in 32-bit words) */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1717 
/* Public wrapper: transmit an LS response on the given target port.
 * Forwards directly to the low-level driver implementation. */
int
spdk_nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *tgtport,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	return nvmf_fc_xmt_ls_rsp(tgtport, ls_rqst);
}
1724 
/* Public wrapper: transmit a send-request/receive-response (SRSR)
 * exchange on the hwqp; cb/cb_args are invoked on completion.
 * Forwards directly to the low-level driver implementation. */
int
spdk_nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
			  spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	return nvmf_fc_xmt_srsr_req(hwqp, srsr_bufs, cb, cb_args);
}
1732 
1733 bool
1734 spdk_nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1735 				uint32_t rsp_cnt, uint32_t xfer_len)
1736 {
1737 	struct spdk_nvmf_request *req = &fc_req->req;
1738 	struct spdk_nvmf_qpair *qpair = req->qpair;
1739 	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
1740 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1741 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1742 	uint16_t status = *((uint16_t *)&rsp->status);
1743 
1744 	/*
1745 	 * Check if we need to send ERSP
1746 	 * 1) For every N responses where N == ersp_ratio
1747 	 * 2) Fabric commands.
1748 	 * 3) Completion status failed or Completion dw0 or dw1 valid.
1749 	 * 4) SQ == 90% full.
1750 	 * 5) Transfer length not equal to CMD IU length
1751 	 */
1752 
1753 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1754 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1755 	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
1756 	    (req->length != xfer_len)) {
1757 		return true;
1758 	}
1759 	return false;
1760 }
1761 
/* Public wrapper: dump the LS queue and all IO queues of a port into
 * dump_info. Forwards to the low-level driver implementation. */
void
spdk_nvmf_fc_dump_all_queues(struct spdk_nvmf_fc_port *fc_port,
			     struct spdk_nvmf_fc_queue_dump_info *dump_info)
{
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, dump_info);
}
1769 
/*
 * Transport request-complete callback, invoked by the NVMf core once the
 * backend has finished the request. Aborted requests are torn down via
 * the poller API (deferred so cleanup never runs in this context);
 * successful reads move to data transfer; everything else goes straight
 * to response transmission. Always returns 0 - transmit failures free
 * the request here.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					     (void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
		/* Successful read: send the data to the host first; the
		 * response follows from the send-data completion path. */
		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		/* Pick the response state matching the transfer direction. */
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = spdk_nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		spdk_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1805 
1806 struct spdk_nvmf_tgt *
1807 spdk_nvmf_fc_get_tgt(void)
1808 {
1809 	if (g_nvmf_ftransport) {
1810 		return g_nvmf_ftransport->transport.tgt;
1811 	}
1812 	return NULL;
1813 }
1814 
1815 /*
1816  * FC Transport Public API begins here
1817  */
1818 
1819 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1820 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1821 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1822 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1823 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1824 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1825 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1826 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1827 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1828 
1829 static void
1830 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1831 {
1832 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1833 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1834 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1835 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1836 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1837 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1838 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1839 }
1840 
/*
 * Create the (singleton) FC transport: validate options and core count,
 * allocate the global transport object, initialize its lock, start the
 * low-level FC driver and fill in controller data. Returns the embedded
 * generic transport, or NULL on any failure.
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only one FC transport instance is supported. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	/* Need at least two cores: one master plus poller cores. */
	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* The max IO size must fit in the fixed per-request SGE budget. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	spdk_nvmf_ctrlr_data_init(opts, &g_nvmf_ftransport->transport.cdata);

	return &g_nvmf_ftransport->transport;
}
1896 
1897 static int
1898 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1899 {
1900 	if (transport) {
1901 		struct spdk_nvmf_fc_transport *ftransport;
1902 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
1903 
1904 		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1905 
1906 		free(ftransport);
1907 
1908 		/* clean up any FC poll groups still around */
1909 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
1910 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1911 			free(fgroup);
1912 		}
1913 		g_nvmf_fgroup_count = 0;
1914 
1915 		/* low level FC driver clean up */
1916 		nvmf_fc_lld_fini();
1917 
1918 		nvmf_fc_port_cleanup();
1919 	}
1920 
1921 	return 0;
1922 }
1923 
/* Transport listen callback. FC "listening" is driven by physical port
 * discovery in the low-level driver, so this is a no-op that reports
 * success. */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}
1930 
/* Transport stop-listen callback. No-op for FC (see nvmf_fc_listen). */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
1936 
/*
 * Transport accept poller: starts the low-level driver on the first
 * invocation (one-shot via the function-static flag), then polls the LS
 * queue of every online FC port, recording cb_fn as the new-qpair
 * callback for connections created from LS traffic.
 */
static void
nvmf_fc_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	/* One-shot latch: the LLD is started on the first accept poll. */
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld  = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			fc_port->new_qp_cb = cb_fn;
			nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}
}
1956 
/* Fill a discovery log page entry for an FC listener from the transport
 * ID: FC transport type, address family, and space-padded traddr/trsvcid
 * as required by the discovery log format. */
static void
nvmf_fc_discover(struct spdk_nvmf_transport *transport,
		 struct spdk_nvme_transport_id *trid,
		 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
	entry->adrfam = trid->adrfam;
	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;

	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
}
1969 
1970 static struct spdk_nvmf_transport_poll_group *
1971 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
1972 {
1973 	struct spdk_nvmf_fc_poll_group *fgroup;
1974 	struct spdk_nvmf_fc_transport *ftransport =
1975 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1976 
1977 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
1978 	if (!fgroup) {
1979 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
1980 		return NULL;
1981 	}
1982 
1983 	TAILQ_INIT(&fgroup->hwqp_list);
1984 
1985 	pthread_mutex_lock(&ftransport->lock);
1986 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
1987 	g_nvmf_fgroup_count++;
1988 	pthread_mutex_unlock(&ftransport->lock);
1989 
1990 	return &fgroup->group;
1991 }
1992 
1993 static void
1994 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1995 {
1996 	struct spdk_nvmf_fc_poll_group *fgroup;
1997 	struct spdk_nvmf_fc_transport *ftransport =
1998 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
1999 
2000 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2001 	pthread_mutex_lock(&ftransport->lock);
2002 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2003 	g_nvmf_fgroup_count--;
2004 	pthread_mutex_unlock(&ftransport->lock);
2005 
2006 	free(fgroup);
2007 }
2008 
2009 static int
2010 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2011 		       struct spdk_nvmf_qpair *qpair)
2012 {
2013 	struct spdk_nvmf_fc_poll_group *fgroup;
2014 	struct spdk_nvmf_fc_conn *fc_conn;
2015 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2016 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2017 	bool hwqp_found = false;
2018 
2019 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2020 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2021 
2022 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2023 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2024 			hwqp_found = true;
2025 			break;
2026 		}
2027 	}
2028 
2029 	if (!hwqp_found) {
2030 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2031 		goto err;
2032 	}
2033 
2034 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2035 					 &fc_conn->conn_id,
2036 					 fc_conn->max_queue_depth)) {
2037 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2038 		goto err;
2039 	}
2040 
2041 	fc_conn->hwqp = hwqp;
2042 
2043 	/* If this is for ADMIN connection, then update assoc ID. */
2044 	if (fc_conn->qpair.qid == 0) {
2045 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2046 	}
2047 
2048 	api_data = &fc_conn->create_opd->u.add_conn;
2049 	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2050 	return 0;
2051 err:
2052 	return -1;
2053 }
2054 
2055 static int
2056 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2057 {
2058 	uint32_t count = 0;
2059 	struct spdk_nvmf_fc_poll_group *fgroup;
2060 	struct spdk_nvmf_fc_hwqp *hwqp;
2061 
2062 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2063 
2064 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2065 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2066 			count += nvmf_fc_process_queue(hwqp);
2067 		}
2068 	}
2069 
2070 	return (int) count;
2071 }
2072 
2073 static int
2074 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2075 {
2076 	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
2077 
2078 	if (!fc_req->is_aborted) {
2079 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2080 		spdk_nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2081 	} else {
2082 		spdk_nvmf_fc_request_abort_complete(fc_req);
2083 	}
2084 	return 0;
2085 }
2086 
2087 
/* Tear down a qpair. Work is deferred to the master thread via message. */
static void
nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		/* QP creation failure in FC transport. Cleanup. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_connection_failure, fc_conn);
	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/* Admin connection (assoc_id == conn_id): closing it deletes
		 * the whole association, unless deletion is already pending. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_assoc_deletion, fc_conn);
	}
}
2106 
2107 static int
2108 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2109 			    struct spdk_nvme_transport_id *trid)
2110 {
2111 	struct spdk_nvmf_fc_conn *fc_conn;
2112 
2113 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2114 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2115 	return 0;
2116 }
2117 
2118 static int
2119 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2120 			     struct spdk_nvme_transport_id *trid)
2121 {
2122 	struct spdk_nvmf_fc_conn *fc_conn;
2123 
2124 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2125 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2126 	return 0;
2127 }
2128 
2129 static int
2130 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2131 			      struct spdk_nvme_transport_id *trid)
2132 {
2133 	struct spdk_nvmf_fc_conn *fc_conn;
2134 
2135 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2136 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2137 	return 0;
2138 }
2139 
/*
 * FC transport ops vector registered with the generic NVMe-oF layer.
 * Any ops not listed here are zero-initialized (NULL) per C designated
 * initializer semantics.
 */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* listen/stop_listen are no-ops for FC; accept drives the LS queues. */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
};
2165 
2166 /*
2167  * Re-initialize the FC-Port after an offline event.
2168  * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains
2169  * unchanged after the first initialization.
2170  *
2171  */
2172 static int
2173 nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port,
2174 				    struct spdk_nvmf_fc_hw_port_init_args *args)
2175 {
2176 	uint32_t i;
2177 
2178 	/* Verify that the port was previously in offline or quiesced state */
2179 	if (spdk_nvmf_fc_port_is_online(fc_port)) {
2180 		SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle);
2181 		return -EINVAL;
2182 	}
2183 
2184 	/* Reinit information in new LS queue from previous queue */
2185 	spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue);
2186 
2187 	fc_port->fcp_rq_id = args->fcp_rq_id;
2188 
2189 	/* Initialize the LS queue */
2190 	fc_port->ls_queue.queues = args->ls_queue;
2191 	spdk_nvmf_fc_init_poller_queues(fc_port->ls_queue.queues);
2192 
2193 	for (i = 0; i < fc_port->num_io_queues; i++) {
2194 		/* Reinit information in new IO queue from previous queue */
2195 		spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i],
2196 						       args->io_queues[i]);
2197 		fc_port->io_queues[i].queues = args->io_queues[i];
2198 		/* Initialize the IO queues */
2199 		spdk_nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues);
2200 	}
2201 
2202 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2203 
2204 	/* Validate the port information */
2205 	DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list));
2206 	DEV_VERIFY(fc_port->num_nports == 0);
2207 	if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) {
2208 		return -EINVAL;
2209 	}
2210 
2211 	return 0;
2212 }
2213 
2214 /* Initializes the data for the creation of a FC-Port object in the SPDK
2215  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2216  * the API to the library. The contents added to this well defined structure
2217  * is private to each vendors implementation.
2218  */
2219 static int
2220 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2221 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2222 {
2223 	/* Used a high number for the LS HWQP so that it does not clash with the
2224 	 * IO HWQP's and immediately shows a LS queue during tracing.
2225 	 */
2226 	uint32_t i;
2227 
2228 	fc_port->port_hdl       = args->port_handle;
2229 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2230 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2231 	fc_port->num_io_queues  = args->io_queue_cnt;
2232 
2233 	/*
2234 	 * Set port context from init args. Used for FCP port stats.
2235 	 */
2236 	fc_port->port_ctx = args->port_ctx;
2237 
2238 	/*
2239 	 * Initialize the LS queue wherever needed.
2240 	 */
2241 	fc_port->ls_queue.queues = args->ls_queue;
2242 	fc_port->ls_queue.thread = spdk_nvmf_fc_get_master_thread();
2243 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2244 
2245 	/*
2246 	 * Initialize the LS queue.
2247 	 */
2248 	spdk_nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2249 
2250 	/*
2251 	 * Initialize the IO queues.
2252 	 */
2253 	for (i = 0; i < args->io_queue_cnt; i++) {
2254 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2255 		hwqp->hwqp_id = i;
2256 		hwqp->queues = args->io_queues[i];
2257 		hwqp->rq_size = args->io_queue_size;
2258 		spdk_nvmf_fc_init_hwqp(fc_port, hwqp);
2259 	}
2260 
2261 	/*
2262 	 * Initialize the LS processing for port
2263 	 */
2264 	spdk_nvmf_fc_ls_init(fc_port);
2265 
2266 	/*
2267 	 * Initialize the list of nport on this HW port.
2268 	 */
2269 	TAILQ_INIT(&fc_port->nport_list);
2270 	fc_port->num_nports = 0;
2271 
2272 	return 0;
2273 }
2274 
2275 static void
2276 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port)
2277 {
2278 	struct spdk_nvmf_fc_hwqp *hwqp    = NULL;
2279 	int i = 0;
2280 
2281 	hwqp = &fc_port->ls_queue;
2282 	(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2283 
2284 	/*  Remove poller for all the io queues. */
2285 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2286 		hwqp = &fc_port->io_queues[i];
2287 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2288 		spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
2289 	}
2290 }
2291 
2292 /*
2293  * Callback function for HW port link break operation.
2294  *
2295  * Notice that this callback is being triggered when spdk_fc_nport_delete()
2296  * completes, if that spdk_fc_nport_delete() called is issued by
2297  * nvmf_fc_adm_evnt_hw_port_link_break().
2298  *
2299  * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple
2300  * times (one per nport in the HW port's nport_list), a single call to
2301  * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function.
2302  *
2303  * As a result, this function only invokes a callback to the caller of
2304  * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty.
2305  */
static void
nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle,
				  enum spdk_fc_event event_type, void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args;
	struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL;
	spdk_nvmf_fc_callback cb_func = NULL;
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int num_nports = 0;
	char log_str[256];

	/* Sanity-check every input; inconsistencies trip DEV_VERIFY in debug builds. */
	if (0 != spdk_err) {
		DEV_VERIFY(!"port link break cb: spdk_err not success.");
		SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err);
		goto out;
	}

	if (!offline_cb_args) {
		DEV_VERIFY(!"port link break cb: port_offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	offline_args = offline_cb_args->args;
	if (!offline_args) {
		DEV_VERIFY(!"port link break cb: offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	if (port_handle != offline_args->port_handle) {
		DEV_VERIFY(!"port link break cb: port_handle mismatch.");
		err = -EINVAL;
		goto out;
	}

	cb_func = offline_cb_args->cb_func;
	if (!cb_func) {
		DEV_VERIFY(!"port link break cb: cb_func is NULL.");
		err = -EINVAL;
		goto out;
	}

	fc_port = spdk_nvmf_fc_port_lookup(port_handle);
	if (!fc_port) {
		DEV_VERIFY(!"port link break cb: fc_port is NULL.");
		SPDK_ERRLOG("port link break cb: Unable to find port:%d\n",
			    offline_args->port_handle);
		err = -EINVAL;
		goto out;
	}

	num_nports = fc_port->num_nports;
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		/*
		 * Don't call the callback unless all nports have been deleted.
		 */
		goto out;
	}

	/* The nport list is empty, so the count must already be zero too. */
	if (num_nports != 0) {
		DEV_VERIFY(!"port link break cb: num_nports in non-zero.");
		SPDK_ERRLOG("port link break cb: # of ports should be 0. Instead, num_nports:%d\n",
			    num_nports);
		err = -EINVAL;
	}

	/*
	 * Mark the hwqps as offline and unregister the pollers.
	 */
	(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);

	/*
	 * Since there are no more nports, execute the callback(s).
	 */
	(void)cb_func(port_handle, SPDK_FC_LINK_BREAK,
		      (void *)offline_args->cb_ctx, spdk_err);

out:
	/* cb_args is owned by this callback and released on every path. */
	free(offline_cb_args);

	snprintf(log_str, sizeof(log_str),
		 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n",
		 port_handle, event_type, num_nports, err, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
	return;
}
2400 
2401 /*
2402  * FC port must have all its nports deleted before transitioning to offline state.
2403  */
2404 static void
2405 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2406 {
2407 	struct spdk_nvmf_fc_nport *nport = NULL;
2408 	/* All nports must have been deleted at this point for this fc port */
2409 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2410 	DEV_VERIFY(fc_port->num_nports == 0);
2411 	/* Mark the nport states to be zombie, if they exist */
2412 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2413 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2414 			(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2415 		}
2416 	}
2417 }
2418 
2419 static void
2420 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2421 {
2422 	ASSERT_SPDK_FC_MASTER_THREAD();
2423 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2424 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2425 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2426 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2427 	int spdk_err = 0;
2428 	uint8_t port_handle = cb_data->port_handle;
2429 	uint32_t s_id = rport->s_id;
2430 	uint32_t rpi = rport->rpi;
2431 	uint32_t assoc_count = rport->assoc_count;
2432 	uint32_t nport_hdl = nport->nport_hdl;
2433 	uint32_t d_id = nport->d_id;
2434 	char log_str[256];
2435 
2436 	/*
2437 	 * Assert on any delete failure.
2438 	 */
2439 	if (0 != err) {
2440 		DEV_VERIFY(!"Error in IT Delete callback.");
2441 		goto out;
2442 	}
2443 
2444 	if (cb_func != NULL) {
2445 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2446 	}
2447 
2448 out:
2449 	free(cb_data);
2450 
2451 	snprintf(log_str, sizeof(log_str),
2452 		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2453 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2454 
2455 	if (err != 0) {
2456 		SPDK_ERRLOG("%s", log_str);
2457 	} else {
2458 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
2459 	}
2460 }
2461 
/* Per-association completion for an IT delete; fires once for every
 * association scheduled by nvmf_fc_adm_i_t_delete_assoc(). */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot fields for the log line; rport and args may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* Manually account for the failed association so the
		 * rport->assoc_count == 0 completion below can still fire. */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (spdk_nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		/* Last association gone: release the rport and the shared cb_data. */
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2523 
2524 /**
2525  * Process a IT delete.
2526  */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function
	 * (nvmf_fc_adm_i_t_delete_assoc_cb, once the last delete completes).
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 * NOTE(review): the list is walked with TAILQ_FOREACH while
	 * spdk_nvmf_fc_delete_association() is invoked on matching entries;
	 * this relies on deletion being deferred (not unlinking the entry
	 * synchronously) — can't confirm from this file, verify in the
	 * association-delete path.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = spdk_nvmf_fc_delete_association(nport,
					assoc->assoc_id,
					false /* send abts */, false,
					nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)spdk_nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2609 
/* Poller-API completion for quiescing one hwqp. Fires once per queue;
 * the port-level completion runs only after the last queue reports in. */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* Per-queue args were allocated in nvmf_fc_adm_hw_queue_quiesce(). */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	/* All queues are quiesced: update the port state. */
	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the caller of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2662 
2663 static int
2664 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2665 			     spdk_nvmf_fc_poller_api_cb cb_func)
2666 {
2667 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2668 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2669 	int err = 0;
2670 
2671 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2672 
2673 	if (args == NULL) {
2674 		err = -ENOMEM;
2675 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2676 		goto done;
2677 	}
2678 	args->hwqp = fc_hwqp;
2679 	args->ctx = ctx;
2680 	args->cb_info.cb_func = cb_func;
2681 	args->cb_info.cb_data = args;
2682 	args->cb_info.cb_thread = spdk_get_thread();
2683 
2684 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2685 	rc = spdk_nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2686 	if (rc) {
2687 		free(args);
2688 		err = -EINVAL;
2689 	}
2690 
2691 done:
2692 	return err;
2693 }
2694 
2695 /*
2696  * Hw port Quiesce
2697  */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		/* Counted even on failure so the completion accounting matches
		 * the number of queues the callback path expects. */
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if an IO-queue quiesce fails after the LS queue quiesce
	 * was successfully scheduled, err != 0 here frees port_quiesce_ctx while
	 * the already-scheduled callback(s) still hold a pointer to it — looks
	 * like a potential use-after-free; confirm against the poller API's
	 * failure semantics before relying on this path. */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2770 
2771 /*
2772  * Initialize and add a HW port entry to the global
2773  * HW port list.
2774  */
2775 static void
2776 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2777 {
2778 	ASSERT_SPDK_FC_MASTER_THREAD();
2779 	struct spdk_nvmf_fc_port *fc_port = NULL;
2780 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2781 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2782 			api_data->api_args;
2783 	int err = 0;
2784 
2785 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2786 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2787 		err = EINVAL;
2788 		goto abort_port_init;
2789 	}
2790 
2791 	/*
2792 	 * 1. Check for duplicate initialization.
2793 	 */
2794 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2795 	if (fc_port != NULL) {
2796 		/* Port already exists, check if it has to be re-initialized */
2797 		err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args);
2798 		if (err) {
2799 			/*
2800 			 * In case of an error we do not want to free the fc_port
2801 			 * so we set that pointer to NULL.
2802 			 */
2803 			fc_port = NULL;
2804 		}
2805 		goto abort_port_init;
2806 	}
2807 
2808 	/*
2809 	 * 2. Get the memory to instantiate a fc port.
2810 	 */
2811 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2812 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2813 	if (fc_port == NULL) {
2814 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2815 		err = -ENOMEM;
2816 		goto abort_port_init;
2817 	}
2818 
2819 	/* assign the io_queues array */
2820 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2821 				     struct spdk_nvmf_fc_port));
2822 
2823 	/*
2824 	 * 3. Initialize the contents for the FC-port
2825 	 */
2826 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2827 
2828 	if (err != 0) {
2829 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2830 		DEV_VERIFY(!"Data initialization failed for fc_port");
2831 		goto abort_port_init;
2832 	}
2833 
2834 	/*
2835 	 * 4. Add this port to the global fc port list in the library.
2836 	 */
2837 	spdk_nvmf_fc_port_add(fc_port);
2838 
2839 abort_port_init:
2840 	if (err && fc_port) {
2841 		free(fc_port);
2842 	}
2843 	if (api_data->cb_func != NULL) {
2844 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2845 	}
2846 
2847 	free(arg);
2848 
2849 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d initialize done, rc = %d.\n",
2850 		      args->port_handle, err);
2851 }
2852 
2853 /*
2854  * Online a HW port.
2855  */
2856 static void
2857 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2858 {
2859 	ASSERT_SPDK_FC_MASTER_THREAD();
2860 	struct spdk_nvmf_fc_port *fc_port = NULL;
2861 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2862 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2863 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2864 			api_data->api_args;
2865 	int i = 0;
2866 	int err = 0;
2867 
2868 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2869 	if (fc_port) {
2870 		/* Set the port state to online */
2871 		err = spdk_nvmf_fc_port_set_online(fc_port);
2872 		if (err != 0) {
2873 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2874 			DEV_VERIFY(!"Hw port online failed");
2875 			goto out;
2876 		}
2877 
2878 		hwqp = &fc_port->ls_queue;
2879 		hwqp->context = NULL;
2880 		(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2881 
2882 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2883 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2884 			hwqp = &fc_port->io_queues[i];
2885 			hwqp->context = NULL;
2886 			(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2887 			spdk_nvmf_fc_poll_group_add_hwqp(hwqp);
2888 		}
2889 	} else {
2890 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2891 		err = -EINVAL;
2892 	}
2893 
2894 out:
2895 	if (api_data->cb_func != NULL) {
2896 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2897 	}
2898 
2899 	free(arg);
2900 
2901 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d online done, rc = %d.\n", args->port_handle,
2902 		      err);
2903 }
2904 
2905 /*
2906  * Offline a HW port.
2907  */
2908 static void
2909 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
2910 {
2911 	ASSERT_SPDK_FC_MASTER_THREAD();
2912 	struct spdk_nvmf_fc_port *fc_port = NULL;
2913 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2914 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2915 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
2916 			api_data->api_args;
2917 	int i = 0;
2918 	int err = 0;
2919 
2920 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2921 	if (fc_port) {
2922 		/* Set the port state to offline, if it is not already. */
2923 		err = spdk_nvmf_fc_port_set_offline(fc_port);
2924 		if (err != 0) {
2925 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
2926 			err = 0;
2927 			goto out;
2928 		}
2929 
2930 		hwqp = &fc_port->ls_queue;
2931 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2932 
2933 		/* Remove poller for all the io queues. */
2934 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2935 			hwqp = &fc_port->io_queues[i];
2936 			(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2937 			spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
2938 		}
2939 
2940 		/*
2941 		 * Delete all the nports. Ideally, the nports should have been purged
2942 		 * before the offline event, in which case, only a validation is required.
2943 		 */
2944 		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
2945 	} else {
2946 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2947 		err = -EINVAL;
2948 	}
2949 out:
2950 	if (api_data->cb_func != NULL) {
2951 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
2952 	}
2953 
2954 	free(arg);
2955 
2956 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d offline done, rc = %d.\n", args->port_handle,
2957 		      err);
2958 }
2959 
/* Context carried through the pause -> add/remove listener -> resume
 * sequence performed per subsystem in nvmf_fc_adm_add_rem_nport_listener().
 * Freed by nvmf_fc_adm_subsystem_resume_cb() (or on error paths). */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being paused/resumed */
	bool add_listener;			/* true: add the listener, false: remove it */
	struct spdk_nvme_transport_id trid;	/* FC transport address of the nport */
};
2965 
/* Subsystem resume completed: the listener add/remove cycle is over,
 * release the context. */
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;

	free(ctx);
}
2973 
2974 static void
2975 nvmf_fc_adm_listen_done(void *cb_arg, int status)
2976 {
2977 	ASSERT_SPDK_FC_MASTER_THREAD();
2978 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
2979 
2980 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
2981 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
2982 		free(ctx);
2983 	}
2984 }
2985 
2986 static void
2987 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
2988 {
2989 	ASSERT_SPDK_FC_MASTER_THREAD();
2990 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
2991 
2992 	if (ctx->add_listener) {
2993 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
2994 	} else {
2995 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
2996 		nvmf_fc_adm_listen_done(ctx, 0);
2997 	}
2998 }
2999 
3000 static int
3001 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3002 {
3003 	struct spdk_nvmf_tgt *tgt = spdk_nvmf_fc_get_tgt();
3004 	struct spdk_nvmf_subsystem *subsystem;
3005 
3006 	if (!tgt) {
3007 		SPDK_ERRLOG("No nvmf target defined\n");
3008 		return -EINVAL;
3009 	}
3010 
3011 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3012 	while (subsystem) {
3013 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3014 
3015 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3016 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3017 			if (ctx) {
3018 				ctx->add_listener = add;
3019 				ctx->subsystem = subsystem;
3020 				spdk_nvmf_fc_create_trid(&ctx->trid,
3021 							 nport->fc_nodename.u.wwn,
3022 							 nport->fc_portname.u.wwn);
3023 
3024 				if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) {
3025 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3026 						    ctx->trid.traddr);
3027 					free(ctx);
3028 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3029 								     nvmf_fc_adm_subsystem_paused_cb,
3030 								     ctx)) {
3031 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3032 						    subsystem->subnqn);
3033 					free(ctx);
3034 				}
3035 			}
3036 		}
3037 
3038 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3039 	}
3040 
3041 	return 0;
3042 }
3043 
3044 /*
3045  * Create a Nport.
3046  */
3047 static void
3048 nvmf_fc_adm_evnt_nport_create(void *arg)
3049 {
3050 	ASSERT_SPDK_FC_MASTER_THREAD();
3051 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3052 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3053 			api_data->api_args;
3054 	struct spdk_nvmf_fc_nport *nport = NULL;
3055 	struct spdk_nvmf_fc_port *fc_port = NULL;
3056 	int err = 0;
3057 
3058 	/*
3059 	 * Get the physical port.
3060 	 */
3061 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3062 	if (fc_port == NULL) {
3063 		err = -EINVAL;
3064 		goto out;
3065 	}
3066 
3067 	/*
3068 	 * Check for duplicate initialization.
3069 	 */
3070 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3071 	if (nport != NULL) {
3072 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3073 			    args->port_handle);
3074 		err = -EINVAL;
3075 		goto out;
3076 	}
3077 
3078 	/*
3079 	 * Get the memory to instantiate a fc nport.
3080 	 */
3081 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3082 	if (nport == NULL) {
3083 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3084 			    args->nport_handle);
3085 		err = -ENOMEM;
3086 		goto out;
3087 	}
3088 
3089 	/*
3090 	 * Initialize the contents for the nport
3091 	 */
3092 	nport->nport_hdl    = args->nport_handle;
3093 	nport->port_hdl     = args->port_handle;
3094 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3095 	nport->fc_nodename  = args->fc_nodename;
3096 	nport->fc_portname  = args->fc_portname;
3097 	nport->d_id         = args->d_id;
3098 	nport->fc_port      = spdk_nvmf_fc_port_lookup(args->port_handle);
3099 
3100 	(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3101 	TAILQ_INIT(&nport->rem_port_list);
3102 	nport->rport_count = 0;
3103 	TAILQ_INIT(&nport->fc_associations);
3104 	nport->assoc_count = 0;
3105 
3106 	/*
3107 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3108 	 */
3109 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3110 
3111 	(void)spdk_nvmf_fc_port_add_nport(fc_port, nport);
3112 out:
3113 	if (err && nport) {
3114 		free(nport);
3115 	}
3116 
3117 	if (api_data->cb_func != NULL) {
3118 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3119 	}
3120 
3121 	free(arg);
3122 }
3123 
/*
 * Completion callback for the SPDK_FC_IT_DELETE events issued while an
 * nport is being deleted. Runs once per removed rport; when the last rport
 * is gone it unlinks the nport from its fc port, frees the nport, invokes
 * the caller's callback, and frees cb_args (a
 * struct spdk_nvmf_fc_adm_nport_del_cb_data) on that final invocation.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	/* Capture the handle before a possible free below, for the exit log. */
	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should already be torn down at this point. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = spdk_nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
3188 
/*
 * Delete Nport.
 *
 * Handler for the SPDK_FC_NPORT_DELETE event. Marks the nport as
 * to-be-deleted, removes its address from the subsystem listeners, and
 * then enqueues an SPDK_FC_IT_DELETE event per remote port. Final
 * teardown (unlink + free + user callback) happens in
 * nvmf_fc_adm_delete_nport_cb once the last rport is gone; if there are
 * no rports, that callback is invoked directly here.
 */
static void
nvmf_fc_adm_evnt_nport_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	int err = 0;
	uint32_t rport_cnt = 0;
	int rc = 0;

	/*
	 * Make sure that the nport exists.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for callback data.
	 * Ownership passes to nvmf_fc_adm_delete_nport_cb on success; freed
	 * below on the error path.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
		err = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Begin nport tear down
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this nport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		err = -ENODEV;
		goto out;
	} else {
		/* nport partially created/deleted */
		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(0 != "Nport in zombie state");
		err = -ENODEV;
		goto out;
	}

	/*
	 * Remove this nport from listening addresses across subsystems
	 */
	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);

	if (0 != rc) {
		/* Listener removal failed: park the nport in zombie state. */
		err = spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
			    nport->nport_hdl);
		goto out;
	}

	/*
	 * Delete all the remote ports (if any) for the nport
	 */
	/* TODO - Need to do this with a "first" and a "next" accessor function
	 * for completeness. Look at app-subsystem as examples.
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* No rports to delete. Complete the nport deletion. */
		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
		goto out;
	}

	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));

		if (it_del_args == NULL) {
			err = -ENOMEM;
			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
				    rport_iter->rpi, rport_iter->s_id);
			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
			goto out;
		}

		rport_cnt++;
		it_del_args->port_handle = nport->port_hdl;
		it_del_args->nport_handle = nport->nport_hdl;
		it_del_args->cb_ctx = (void *)cb_data;
		it_del_args->rpi = rport_iter->rpi;
		it_del_args->s_id = rport_iter->s_id;

		/* Each IT delete completion lands in nvmf_fc_adm_delete_nport_cb. */
		spdk_nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
						  nvmf_fc_adm_delete_nport_cb);
	}

out:
	/* On failure, execute the callback function now */
	if ((err != 0) || (rc != 0)) {
		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
			    "rport_cnt:%d rc:%d.\n",
			    args->nport_handle, err, args->port_handle,
			    rport_cnt, rc);
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
		}

	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
			      "NPort %d delete done succesfully, fc port:%d. "
			      "rport_cnt:%d\n",
			      args->nport_handle, args->port_handle, rport_cnt);
	}

	free(arg);
}
3323 
3324 /*
3325  * Process an PRLI/IT add.
3326  */
3327 static void
3328 nvmf_fc_adm_evnt_i_t_add(void *arg)
3329 {
3330 	ASSERT_SPDK_FC_MASTER_THREAD();
3331 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3332 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3333 			api_data->api_args;
3334 	struct spdk_nvmf_fc_nport *nport = NULL;
3335 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3336 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3337 	int err = 0;
3338 
3339 	/*
3340 	 * Make sure the nport port exists.
3341 	 */
3342 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3343 	if (nport == NULL) {
3344 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3345 		err = -EINVAL;
3346 		goto out;
3347 	}
3348 
3349 	/*
3350 	 * Check for duplicate i_t_add.
3351 	 */
3352 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3353 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3354 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3355 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3356 			err = -EEXIST;
3357 			goto out;
3358 		}
3359 	}
3360 
3361 	/*
3362 	 * Get the memory to instantiate the remote port
3363 	 */
3364 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3365 	if (rport == NULL) {
3366 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3367 		err = -ENOMEM;
3368 		goto out;
3369 	}
3370 
3371 	/*
3372 	 * Initialize the contents for the rport
3373 	 */
3374 	(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3375 	rport->s_id = args->s_id;
3376 	rport->rpi = args->rpi;
3377 	rport->fc_nodename = args->fc_nodename;
3378 	rport->fc_portname = args->fc_portname;
3379 
3380 	/*
3381 	 * Add remote port to nport
3382 	 */
3383 	if (spdk_nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3384 		DEV_VERIFY(!"Error while adding rport to list");
3385 	};
3386 
3387 	/*
3388 	 * TODO: Do we validate the initiators service parameters?
3389 	 */
3390 
3391 	/*
3392 	 * Get the targets service parameters from the library
3393 	 * to return back to the driver.
3394 	 */
3395 	args->target_prli_info = spdk_nvmf_fc_get_prli_service_params();
3396 
3397 out:
3398 	if (api_data->cb_func != NULL) {
3399 		/*
3400 		 * Passing pointer to the args struct as the first argument.
3401 		 * The cb_func should handle this appropriately.
3402 		 */
3403 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3404 	}
3405 
3406 	free(arg);
3407 
3408 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3409 		      "IT add on nport %d done, rc = %d.\n",
3410 		      args->nport_handle, err);
3411 }
3412 
/**
 * Process a IT delete.
 *
 * Handler for the SPDK_FC_IT_DELETE event: locates the (s_id, rpi) rport
 * on the nport, marks it to-be-deleted, and hands off to
 * nvmf_fc_adm_i_t_delete_assoc() which performs the association teardown
 * and frees the callback data. On any early exit the callback is invoked
 * directly here instead.
 */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port).
	 * Only rports still in CREATED state are candidates.
	 */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	free(arg);
}
3542 
3543 /*
3544  * Process ABTS received
3545  */
3546 static void
3547 nvmf_fc_adm_evnt_abts_recv(void *arg)
3548 {
3549 	ASSERT_SPDK_FC_MASTER_THREAD();
3550 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3551 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3552 	struct spdk_nvmf_fc_nport *nport = NULL;
3553 	int err = 0;
3554 
3555 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3556 		      args->oxid, args->rxid);
3557 
3558 	/*
3559 	 * 1. Make sure the nport port exists.
3560 	 */
3561 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3562 	if (nport == NULL) {
3563 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3564 		err = -EINVAL;
3565 		goto out;
3566 	}
3567 
3568 	/*
3569 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3570 	 */
3571 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3572 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3573 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3574 			      args->rpi, args->oxid, args->rxid);
3575 		err = 0;
3576 		goto out;
3577 
3578 	}
3579 
3580 	/*
3581 	 * 3. Pass the received ABTS-LS to the library for handling.
3582 	 */
3583 	spdk_nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3584 
3585 out:
3586 	if (api_data->cb_func != NULL) {
3587 		/*
3588 		 * Passing pointer to the args struct as the first argument.
3589 		 * The cb_func should handle this appropriately.
3590 		 */
3591 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3592 	} else {
3593 		/* No callback set, free the args */
3594 		free(args);
3595 	}
3596 
3597 	free(arg);
3598 }
3599 
/*
 * Callback function for hw port quiesce.
 *
 * Invoked once the port has quiesced (or the quiesce failed). If queue
 * dumping was requested, allocates a dump buffer, returns it through
 * args->dump_buf, and dumps all queues into it. Always frees the quiesce
 * context and finishes by invoking the saved reset callback.
 */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	/* Hand ownership of the buffer to the caller via the args struct. */
	*args->dump_buf  = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	spdk_nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqp.
	 */
	spdk_nvmf_fc_dump_all_queues(fc_port, &dump_info);

out:
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}
3677 
3678 /*
3679  * HW port reset
3680 
3681  */
3682 static void
3683 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3684 {
3685 	ASSERT_SPDK_FC_MASTER_THREAD();
3686 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3687 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3688 			api_data->api_args;
3689 	struct spdk_nvmf_fc_port *fc_port = NULL;
3690 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3691 	int err = 0;
3692 
3693 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump\n", args->port_handle);
3694 
3695 	/*
3696 	 * Make sure the physical port exists.
3697 	 */
3698 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3699 	if (fc_port == NULL) {
3700 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3701 		err = -EINVAL;
3702 		goto out;
3703 	}
3704 
3705 	/*
3706 	 * Save the reset event args and the callback in a context struct.
3707 	 */
3708 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3709 
3710 	if (ctx == NULL) {
3711 		err = -ENOMEM;
3712 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3713 		goto fail;
3714 	}
3715 
3716 	ctx->reset_args = arg;
3717 	ctx->reset_cb_func = api_data->cb_func;
3718 
3719 	/*
3720 	 * Quiesce the hw port.
3721 	 */
3722 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3723 	if (err != 0) {
3724 		goto fail;
3725 	}
3726 
3727 	/*
3728 	 * Once the ports are successfully quiesced the reset processing
3729 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3730 	 */
3731 	return;
3732 fail:
3733 	free(ctx);
3734 
3735 out:
3736 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump done, rc = %d.\n", args->port_handle,
3737 		      err);
3738 
3739 	if (api_data->cb_func != NULL) {
3740 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3741 	}
3742 
3743 	free(arg);
3744 }
3745 
/*
 * Process a link break event on a HW port.
 *
 * Takes the port offline and enqueues an SPDK_FC_NPORT_DELETE event for
 * each nport still in CREATED state. If no deletes were sent, the hwqps
 * are taken offline here and the user callback is invoked directly;
 * otherwise completion flows through nvmf_fc_adm_hw_port_link_break_cb.
 */
static void
nvmf_fc_adm_evnt_hw_port_link_break(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_nport *nport = NULL;
	uint32_t nport_deletes_sent = 0;
	uint32_t nport_deletes_skipped = 0;
	struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL;
	char log_str[256];

	/*
	 * Get the fc port using the port handle.
	 */
	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n",
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Set the port state to offline, if it is not already.
	 */
	err = spdk_nvmf_fc_port_set_offline(fc_port);
	if (err != 0) {
		SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n",
			    fc_port->port_hdl, err);
		err = 0;
		goto out;
	}

	/*
	 * Delete all the nports, if any.
	 */
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			/* Skipped the nports that are not in CREATED state */
			if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
				nport_deletes_skipped++;
				continue;
			}

			/* Allocate memory for callback data.
			 * Ownership passes to the enqueued NPORT_DELETE event. */
			cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data));
			if (NULL == cb_data) {
				SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n",
					    args->port_handle);
				err = -ENOMEM;
				goto out;
			}
			cb_data->args = args;
			cb_data->cb_func = api_data->cb_func;
			nport_del_args = &cb_data->nport_del_args;
			nport_del_args->port_handle = args->port_handle;
			nport_del_args->nport_handle = nport->nport_hdl;
			nport_del_args->cb_ctx = cb_data;

			spdk_nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE,
							  (void *)nport_del_args,
							  nvmf_fc_adm_hw_port_link_break_cb);

			nport_deletes_sent++;
		}
	}

	if (nport_deletes_sent == 0 && err == 0) {
		/*
		 * Mark the hwqps as offline and unregister the pollers.
		 */
		(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);
	}

out:
	snprintf(log_str, sizeof(log_str),
		 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n",
		 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) {
		/*
		 * No nport_deletes are sent, which would have eventually
		 * called the port_link_break callback. Therefore, call the
		 * port_link_break callback here.
		 */
		(void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err);
	}

	free(arg);
}
3850 
3851 static inline void
3852 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3853 {
3854 	if (spdk_nvmf_fc_get_master_thread()) {
3855 		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(), fn, args);
3856 	}
3857 }
3858 
3859 /*
3860  * Queue up an event in the SPDK masters event queue.
3861  * Used by the FC driver to notify the SPDK master of FC related events.
3862  */
3863 int
3864 spdk_nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3865 				  spdk_nvmf_fc_callback cb_func)
3866 {
3867 	int err = 0;
3868 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3869 
3870 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d.\n", event_type);
3871 
3872 	if (event_type >= SPDK_FC_EVENT_MAX) {
3873 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3874 		err = -EINVAL;
3875 		goto done;
3876 	}
3877 
3878 	if (args == NULL) {
3879 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3880 		err = -EINVAL;
3881 		goto done;
3882 	}
3883 
3884 	api_data = calloc(1, sizeof(*api_data));
3885 
3886 	if (api_data == NULL) {
3887 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3888 		err = -ENOMEM;
3889 		goto done;
3890 	}
3891 
3892 	api_data->api_args = args;
3893 	api_data->cb_func = cb_func;
3894 
3895 	switch (event_type) {
3896 	case SPDK_FC_HW_PORT_INIT:
3897 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_init,
3898 						 (void *)api_data);
3899 		break;
3900 
3901 	case SPDK_FC_HW_PORT_ONLINE:
3902 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_online,
3903 						 (void *)api_data);
3904 		break;
3905 
3906 	case SPDK_FC_HW_PORT_OFFLINE:
3907 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_offline,
3908 						 (void *)api_data);
3909 		break;
3910 
3911 	case SPDK_FC_NPORT_CREATE:
3912 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_create,
3913 						 (void *)api_data);
3914 		break;
3915 
3916 	case SPDK_FC_NPORT_DELETE:
3917 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_delete,
3918 						 (void *)api_data);
3919 		break;
3920 
3921 	case SPDK_FC_IT_ADD:
3922 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_add,
3923 						 (void *)api_data);
3924 		break;
3925 
3926 	case SPDK_FC_IT_DELETE:
3927 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_delete,
3928 						 (void *)api_data);
3929 		break;
3930 
3931 	case SPDK_FC_ABTS_RECV:
3932 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_abts_recv,
3933 						 (void *)api_data);
3934 		break;
3935 
3936 	case SPDK_FC_LINK_BREAK:
3937 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_link_break,
3938 						 (void *)api_data);
3939 		break;
3940 
3941 	case SPDK_FC_HW_PORT_RESET:
3942 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_reset,
3943 						 (void *)api_data);
3944 		break;
3945 
3946 	case SPDK_FC_UNRECOVERABLE_ERR:
3947 	default:
3948 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3949 		err = -EINVAL;
3950 		break;
3951 	}
3952 
3953 done:
3954 
3955 	if (err == 0) {
3956 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d done successfully\n", event_type);
3957 	} else {
3958 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3959 		if (api_data) {
3960 			free(api_data);
3961 		}
3962 	}
3963 
3964 	return err;
3965 }
3966 
/* Register the FC transport and its log components with SPDK. */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc_adm_api", SPDK_LOG_NVMF_FC_ADM_API);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc", SPDK_LOG_NVMF_FC)
3970