xref: /spdk/lib/nvmf/fc.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "nvmf_fc.h"
50 #include "fc_lld.h"
51 
52 #include "spdk_internal/trace_defs.h"
53 
/* Development-time verification hook; defaults to assert() unless the build
 * overrides it. */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/* Verify the caller is running on the FC main thread (see
 * nvmf_fc_get_main_thread()). Compiles to assert() by default. */
#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif
62 
63 /*
64  * PRLI service parameters
65  */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,		/* first-burst data supported */
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,		/* discovery service supported */
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,			/* acts as NVMe target */
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,		/* acts as NVMe initiator */
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,	/* confirmed completions supported */
};
73 
/*
 * Printable names for FC request states.
 * NOTE(review): index order must stay in sync with the
 * spdk_nvmf_fc_request_state enum (declared outside this file) — confirm
 * when adding states.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};

/* Capacities (entry counts) of the per-hwqp connection and RPI hash tables
 * created in nvmf_fc_init_hwqp(). */
#define HWQP_CONN_TABLE_SIZE			8192
#define HWQP_RPI_TABLE_SIZE			4096
95 
96 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
97 {
98 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
99 	spdk_trace_register_description("FC_NEW",
100 					TRACE_FC_REQ_INIT,
101 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1,
102 					SPDK_TRACE_ARG_TYPE_INT, "");
103 	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
104 					TRACE_FC_REQ_READ_BDEV,
105 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
106 					SPDK_TRACE_ARG_TYPE_INT, "");
107 	spdk_trace_register_description("FC_READ_XFER_DATA",
108 					TRACE_FC_REQ_READ_XFER,
109 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
110 					SPDK_TRACE_ARG_TYPE_INT, "");
111 	spdk_trace_register_description("FC_READ_RSP",
112 					TRACE_FC_REQ_READ_RSP,
113 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
114 					SPDK_TRACE_ARG_TYPE_INT, "");
115 	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
116 					TRACE_FC_REQ_WRITE_BUFFS,
117 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
118 					SPDK_TRACE_ARG_TYPE_INT, "");
119 	spdk_trace_register_description("FC_WRITE_XFER_DATA",
120 					TRACE_FC_REQ_WRITE_XFER,
121 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
122 					SPDK_TRACE_ARG_TYPE_INT, "");
123 	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
124 					TRACE_FC_REQ_WRITE_BDEV,
125 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
126 					SPDK_TRACE_ARG_TYPE_INT, "");
127 	spdk_trace_register_description("FC_WRITE_RSP",
128 					TRACE_FC_REQ_WRITE_RSP,
129 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
130 					SPDK_TRACE_ARG_TYPE_INT, "");
131 	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
132 					TRACE_FC_REQ_NONE_BDEV,
133 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
134 					SPDK_TRACE_ARG_TYPE_INT, "");
135 	spdk_trace_register_description("FC_NONE_RSP",
136 					TRACE_FC_REQ_NONE_RSP,
137 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
138 					SPDK_TRACE_ARG_TYPE_INT, "");
139 	spdk_trace_register_description("FC_SUCCESS",
140 					TRACE_FC_REQ_SUCCESS,
141 					OWNER_NONE, OBJECT_NONE, 0,
142 					SPDK_TRACE_ARG_TYPE_INT, "");
143 	spdk_trace_register_description("FC_FAILED",
144 					TRACE_FC_REQ_FAILED,
145 					OWNER_NONE, OBJECT_NONE, 0,
146 					SPDK_TRACE_ARG_TYPE_INT, "");
147 	spdk_trace_register_description("FC_ABRT",
148 					TRACE_FC_REQ_ABORTED,
149 					OWNER_NONE, OBJECT_NONE, 0,
150 					SPDK_TRACE_ARG_TYPE_INT, "");
151 	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
152 					TRACE_FC_REQ_BDEV_ABORTED,
153 					OWNER_NONE, OBJECT_NONE, 0,
154 					SPDK_TRACE_ARG_TYPE_INT, "");
155 	spdk_trace_register_description("FC_PENDING",
156 					TRACE_FC_REQ_PENDING,
157 					OWNER_NONE, OBJECT_NONE, 0,
158 					SPDK_TRACE_ARG_TYPE_INT, "");
159 	spdk_trace_register_description("FC_FUSED_WAITING",
160 					TRACE_FC_REQ_FUSED_WAITING,
161 					OWNER_NONE, OBJECT_NONE, 0,
162 					SPDK_TRACE_ARG_TYPE_INT, "");
163 }
164 
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;			/* API-specific argument structure */
	spdk_nvmf_fc_callback cb_func;	/* completion callback for the admin call */
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport being deleted */
	uint8_t port_handle;			/* owning FC port handle */
	spdk_nvmf_fc_callback fc_cb_func;	/* caller's completion callback */
	void *fc_cb_ctx;			/* context passed to fc_cb_func */
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport of the I-T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote (initiator) port */
	uint8_t port_handle;			/* owning FC port handle */
	spdk_nvmf_fc_callback fc_cb_func;	/* caller's completion callback */
	void *fc_cb_ctx;			/* context passed to fc_cb_func */
};


/* Completion callback type for the it-delete-assoc path. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport of the I-T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote (initiator) port */
	uint8_t port_handle;			/* owning FC port handle */
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;	/* completion callback */
	void *cb_ctx;				/* context passed to cb_func */
};

/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;	/* outstanding quiesce operations */
	void *ctx;		/* caller context for cb_func */
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;	/* called when quiesce completes */
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;			/* reset-specific arguments */
	spdk_nvmf_fc_callback reset_cb_func;	/* called when the reset completes */
};

/* FC transport instance: generic transport plus a mutex guarding the global
 * poll group list (g_nvmf_fgroups). */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;
};
234 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* Saved completion callback for the asynchronous transport destroy. */
static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

/* All FC ports registered with the transport (see nvmf_fc_port_add/remove). */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which FC main/admin processing runs. */
static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

/* Poll groups available for hwqp assignment; guarded by
 * g_nvmf_ftransport->lock. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
247 
/* Return the thread designated as the FC main (admin) thread. */
struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}
253 
254 static inline void
255 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
256 			       enum spdk_nvmf_fc_request_state state)
257 {
258 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
259 
260 	switch (state) {
261 	case SPDK_NVMF_FC_REQ_INIT:
262 		/* Start IO tracing */
263 		tpoint_id = TRACE_FC_REQ_INIT;
264 		break;
265 	case SPDK_NVMF_FC_REQ_READ_BDEV:
266 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
267 		break;
268 	case SPDK_NVMF_FC_REQ_READ_XFER:
269 		tpoint_id = TRACE_FC_REQ_READ_XFER;
270 		break;
271 	case SPDK_NVMF_FC_REQ_READ_RSP:
272 		tpoint_id = TRACE_FC_REQ_READ_RSP;
273 		break;
274 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
275 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
276 		break;
277 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
278 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
279 		break;
280 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
281 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
282 		break;
283 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
284 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
285 		break;
286 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
287 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
288 		break;
289 	case SPDK_NVMF_FC_REQ_NONE_RSP:
290 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
291 		break;
292 	case SPDK_NVMF_FC_REQ_SUCCESS:
293 		tpoint_id = TRACE_FC_REQ_SUCCESS;
294 		break;
295 	case SPDK_NVMF_FC_REQ_FAILED:
296 		tpoint_id = TRACE_FC_REQ_FAILED;
297 		break;
298 	case SPDK_NVMF_FC_REQ_ABORTED:
299 		tpoint_id = TRACE_FC_REQ_ABORTED;
300 		break;
301 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
302 		tpoint_id = TRACE_FC_REQ_ABORTED;
303 		break;
304 	case SPDK_NVMF_FC_REQ_PENDING:
305 		tpoint_id = TRACE_FC_REQ_PENDING;
306 		break;
307 	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
308 		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
309 		break;
310 	default:
311 		assert(0);
312 		break;
313 	}
314 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
315 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
316 				  (uint64_t)(&fc_req->req));
317 	}
318 }
319 
320 static struct rte_hash *
321 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
322 {
323 	struct rte_hash_parameters hash_params = { 0 };
324 
325 	hash_params.entries = num_entries;
326 	hash_params.key_len = key_len;
327 	hash_params.name = name;
328 
329 	return rte_hash_create(&hash_params);
330 }
331 
332 void
333 nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
334 {
335 	free(fc_conn->pool_memory);
336 	fc_conn->pool_memory = NULL;
337 }
338 
339 int
340 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
341 {
342 	uint32_t i, qd;
343 	struct spdk_nvmf_fc_pooled_request *req;
344 
345 	/*
346 	 * Create number of fc-requests to be more than the actual SQ size.
347 	 * This is to handle race conditions where the target driver may send
348 	 * back a RSP and before the target driver gets to process the CQE
349 	 * for the RSP, the initiator may have sent a new command.
350 	 * Depending on the load on the HWQP, there is a slim possibility
351 	 * that the target reaps the RQE corresponding to the new
352 	 * command before processing the CQE corresponding to the RSP.
353 	 */
354 	qd = fc_conn->max_queue_depth * 2;
355 
356 	STAILQ_INIT(&fc_conn->pool_queue);
357 	fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),
358 				      sizeof(struct spdk_nvmf_fc_request));
359 	if (!fc_conn->pool_memory) {
360 		SPDK_ERRLOG("create fc req ring objects failed\n");
361 		goto error;
362 	}
363 	fc_conn->pool_size = qd;
364 	fc_conn->pool_free_elems = qd;
365 
366 	/* Initialise value in ring objects and link the objects */
367 	for (i = 0; i < qd; i++) {
368 		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
369 				i * sizeof(struct spdk_nvmf_fc_request));
370 
371 		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
372 	}
373 	return 0;
374 error:
375 	nvmf_fc_free_conn_reqpool(fc_conn);
376 	return -1;
377 }
378 
379 static inline struct spdk_nvmf_fc_request *
380 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
381 {
382 	struct spdk_nvmf_fc_request *fc_req;
383 	struct spdk_nvmf_fc_pooled_request *pooled_req;
384 	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;
385 
386 	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
387 	if (!pooled_req) {
388 		SPDK_ERRLOG("Alloc request buffer failed\n");
389 		return NULL;
390 	}
391 	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
392 	fc_conn->pool_free_elems -= 1;
393 
394 	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
395 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
396 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
397 
398 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
399 	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
400 	TAILQ_INIT(&fc_req->abort_cbs);
401 	return fc_req;
402 }
403 
404 static inline void
405 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
406 {
407 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
408 		/* Log an error for debug purpose. */
409 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
410 	}
411 
412 	/* set the magic to mark req as no longer valid. */
413 	fc_req->magic = 0xDEADBEEF;
414 
415 	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
416 	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
417 
418 	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
419 	fc_conn->pool_free_elems += 1;
420 }
421 
/*
 * Remove this request from the poll group's pending-buffer queue (requests
 * parked while waiting for data buffers).
 */
static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}
428 
429 int
430 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
431 {
432 	char name[64];
433 
434 	hwqp->fc_port = fc_port;
435 
436 	/* clear counters */
437 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
438 
439 	TAILQ_INIT(&hwqp->in_use_reqs);
440 	TAILQ_INIT(&hwqp->sync_cbs);
441 	TAILQ_INIT(&hwqp->ls_pending_queue);
442 
443 	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
444 	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
445 				     sizeof(uint64_t));
446 	if (!hwqp->connection_list_hash) {
447 		SPDK_ERRLOG("Failed to create connection hash table.\n");
448 		return -ENOMEM;
449 	}
450 
451 	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
452 	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
453 	if (!hwqp->rport_list_hash) {
454 		SPDK_ERRLOG("Failed to create rpi hash table.\n");
455 		rte_hash_free(hwqp->connection_list_hash);
456 		return -ENOMEM;
457 	}
458 
459 	/* Init low level driver queues */
460 	nvmf_fc_init_q(hwqp);
461 	return 0;
462 }
463 
464 static struct spdk_nvmf_fc_poll_group *
465 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
466 {
467 	uint32_t max_count = UINT32_MAX;
468 	struct spdk_nvmf_fc_poll_group *fgroup;
469 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
470 
471 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
472 	/* find poll group with least number of hwqp's assigned to it */
473 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
474 		if (fgroup->hwqp_count < max_count) {
475 			ret_fgroup = fgroup;
476 			max_count = fgroup->hwqp_count;
477 		}
478 	}
479 
480 	if (ret_fgroup) {
481 		ret_fgroup->hwqp_count++;
482 		hwqp->thread = ret_fgroup->group.group->thread;
483 		hwqp->fgroup = ret_fgroup;
484 	}
485 
486 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
487 
488 	return ret_fgroup;
489 }
490 
491 bool
492 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
493 {
494 	struct spdk_nvmf_fc_poll_group *tmp;
495 	bool rc = false;
496 
497 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
498 	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
499 		if (tmp == fgroup) {
500 			rc = true;
501 			break;
502 		}
503 	}
504 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
505 	return rc;
506 }
507 
508 void
509 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
510 {
511 	assert(hwqp);
512 	if (hwqp == NULL) {
513 		SPDK_ERRLOG("Error: hwqp is NULL\n");
514 		return;
515 	}
516 
517 	assert(g_nvmf_fgroup_count);
518 
519 	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
520 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
521 		return;
522 	}
523 
524 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
525 }
526 
527 static void
528 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
529 {
530 	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;
531 
532 	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
533 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
534 			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
535 	} else {
536 		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
537 	}
538 
539 	if (args->cb_fn) {
540 		args->cb_fn(args->cb_ctx, 0);
541 	}
542 
543 	free(args);
544 }
545 
/*
 * Detach a hwqp from its poll group. The group's hwqp_count is decremented
 * under the transport lock; the actual removal runs asynchronously on the
 * poller thread via SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, after which
 * nvmf_fc_poll_group_remove_hwqp_cb() invokes cb_fn. On any early failure
 * (no fgroup, group already gone, ENOMEM, poller API error) cb_fn is
 * called directly with the error code.
 */
void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		/* TAILQ_FOREACH leaves tmp == NULL when no match was found. */
		if (tmp != hwqp->fgroup) {
			/* Pollgroup was already removed. Dont bother. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		/* Success: completion (and cb_fn) arrives via the poller callback. */
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}
605 
606 /*
607  * Note: This needs to be used only on main poller.
608  */
/*
 * Hand out a monotonically increasing id for ABTS tracking. Not
 * thread-safe by design: must only be called on the main poller (see the
 * note above).
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t next_id;

	next_id++;
	return (uint64_t)next_id;
}
616 
/*
 * Poller API completion for a queue-sync started by
 * nvmf_fc_handle_abts_notfound(). Once every involved hwqp has responded,
 * the sync args are freed, the context is reset and the original ABTS is
 * replayed to the pollers one final time (queue_synced prevents a second
 * retry in nvmf_fc_abts_handled_cb()).
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}
653 
/*
 * ABTS target was not found on any hwqp: issue a queue-sync to every
 * involved hwqp so in-flight entries are flushed, after which
 * nvmf_fc_queue_synced_cb() retries the ABTS once. Returns 0 when the sync
 * was started, -EPERM when the LLD lacks queue-sync support, -EINVAL for a
 * NULL ctx and -ENOMEM on allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Mirror the existing per-hwqp ABTS args into queue-sync args. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
708 
709 static void
710 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
711 {
712 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
713 	struct spdk_nvmf_fc_nport *nport  = NULL;
714 
715 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
716 		ctx->handled = true;
717 	}
718 
719 	ctx->hwqps_responded++;
720 
721 	if (ctx->hwqps_responded < ctx->num_hwqps) {
722 		/* Wait for all pollers to complete. */
723 		return;
724 	}
725 
726 	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
727 
728 	if (ctx->nport != nport) {
729 		/* Nport can be deleted while this abort is being
730 		 * processed by the pollers.
731 		 */
732 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
733 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
734 	} else {
735 		if (!ctx->handled) {
736 			/* Try syncing the queues and try one more time */
737 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
738 				SPDK_DEBUGLOG(nvmf_fc,
739 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
740 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
741 				return;
742 			} else {
743 				/* Send Reject */
744 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
745 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
746 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
747 			}
748 		} else {
749 			/* Send Accept */
750 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
751 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
752 					    0, NULL, NULL);
753 		}
754 	}
755 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
756 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
757 
758 	free(ctx->abts_poller_args);
759 	free(ctx);
760 }
761 
/*
 * Handle a received ABTS for (rpi, oxid, rxid) on an nport: collect the
 * distinct hwqps that host at least one connection with the given rpi,
 * broadcast an ABTS_RECEIVED poller message to each, and let
 * nvmf_fc_abts_handled_cb() send the final BLS response once all respond.
 * If no candidate hwqp exists or any allocation fails, a BLS reject is
 * sent immediately.
 */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate: each hwqp is queued for the abort at most once. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	/* Fan the ABTS out to every involved hwqp; the shared ctx collects
	 * the responses in nvmf_fc_abts_handled_cb(). */
	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	/* ctx and args are now owned by the completion path. */
	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
862 
863 /*** Accessor functions for the FC structures - BEGIN */
864 /*
865  * Returns true if the port is in offline state.
866  */
867 bool
868 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
869 {
870 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
871 		return true;
872 	}
873 
874 	return false;
875 }
876 
877 /*
878  * Returns true if the port is in online state.
879  */
880 bool
881 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
882 {
883 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
884 		return true;
885 	}
886 
887 	return false;
888 }
889 
890 int
891 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
892 {
893 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
894 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
895 		return 0;
896 	}
897 
898 	return -EPERM;
899 }
900 
901 int
902 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
903 {
904 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
905 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
906 		return 0;
907 	}
908 
909 	return -EPERM;
910 }
911 
912 int
913 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
914 {
915 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
916 		hwqp->state = SPDK_FC_HWQP_ONLINE;
917 		/* reset some queue counters */
918 		hwqp->num_conns = 0;
919 		return nvmf_fc_set_q_online_state(hwqp, true);
920 	}
921 
922 	return -EPERM;
923 }
924 
925 int
926 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
927 {
928 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
929 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
930 		return nvmf_fc_set_q_online_state(hwqp, false);
931 	}
932 
933 	return -EPERM;
934 }
935 
936 void
937 nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
938 {
939 	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
940 
941 	/*
942 	 * Let LLD add the port to its list.
943 	 */
944 	nvmf_fc_lld_port_add(fc_port);
945 }
946 
947 static void
948 nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
949 {
950 	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);
951 
952 	/*
953 	 * Let LLD remove the port from its list.
954 	 */
955 	nvmf_fc_lld_port_remove(fc_port);
956 }
957 
958 struct spdk_nvmf_fc_port *
959 nvmf_fc_port_lookup(uint8_t port_hdl)
960 {
961 	struct spdk_nvmf_fc_port *fc_port = NULL;
962 
963 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
964 		if (fc_port->port_hdl == port_hdl) {
965 			return fc_port;
966 		}
967 	}
968 	return NULL;
969 }
970 
971 uint32_t
972 nvmf_fc_get_prli_service_params(void)
973 {
974 	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
975 }
976 
977 int
978 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
979 		       struct spdk_nvmf_fc_nport *nport)
980 {
981 	if (fc_port) {
982 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
983 		fc_port->num_nports++;
984 		return 0;
985 	}
986 
987 	return -EINVAL;
988 }
989 
990 int
991 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
992 			  struct spdk_nvmf_fc_nport *nport)
993 {
994 	if (fc_port && nport) {
995 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
996 		fc_port->num_nports--;
997 		return 0;
998 	}
999 
1000 	return -EINVAL;
1001 }
1002 
1003 static struct spdk_nvmf_fc_nport *
1004 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
1005 {
1006 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1007 
1008 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
1009 		if (fc_nport->nport_hdl == nport_hdl) {
1010 			return fc_nport;
1011 		}
1012 	}
1013 
1014 	return NULL;
1015 }
1016 
/*
 * Resolve (port_hdl, nport_hdl) to an nport. Returns NULL when either the
 * port or the nport is unknown.
 */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	if (fc_port == NULL) {
		return NULL;
	}

	return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
}
1029 
1030 static inline int
1031 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
1032 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
1033 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
1034 {
1035 	struct spdk_nvmf_fc_nport *n_port;
1036 	struct spdk_nvmf_fc_remote_port_info *r_port;
1037 
1038 	assert(hwqp);
1039 	if (hwqp == NULL) {
1040 		SPDK_ERRLOG("Error: hwqp is NULL\n");
1041 		return -EINVAL;
1042 	}
1043 	assert(nport);
1044 	if (nport == NULL) {
1045 		SPDK_ERRLOG("Error: nport is NULL\n");
1046 		return -EINVAL;
1047 	}
1048 	assert(rport);
1049 	if (rport == NULL) {
1050 		SPDK_ERRLOG("Error: rport is NULL\n");
1051 		return -EINVAL;
1052 	}
1053 
1054 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
1055 		if (n_port->d_id == d_id) {
1056 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
1057 				if (r_port->s_id == s_id) {
1058 					*nport = n_port;
1059 					*rport = r_port;
1060 					return 0;
1061 				}
1062 			}
1063 			break;
1064 		}
1065 	}
1066 
1067 	return -ENOENT;
1068 }
1069 
1070 /* Returns true if the Nport is empty of all rem_ports */
1071 bool
1072 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1073 {
1074 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1075 		assert(nport->rport_count == 0);
1076 		return true;
1077 	} else {
1078 		return false;
1079 	}
1080 }
1081 
1082 int
1083 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1084 			enum spdk_nvmf_fc_object_state state)
1085 {
1086 	if (nport) {
1087 		nport->nport_state = state;
1088 		return 0;
1089 	} else {
1090 		return -EINVAL;
1091 	}
1092 }
1093 
/* Append a remote port to the nport's remote-port list and bump the count.
 *
 * NOTE(review): the return type is bool but the body returns the int codes
 * 0 (success) and -EINVAL (failure), so a caller that tests the result as
 * a boolean sees "false" on success and "true" on failure. Should likely
 * return int like nvmf_fc_port_add_nport() — confirm against callers
 * before changing.
 */
bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1106 
/* Unlink a remote port from the nport's remote-port list and drop the count.
 *
 * NOTE(review): same bool-vs-int mismatch as nvmf_fc_nport_add_rem_port() —
 * returns 0/-EINVAL through a bool return type, inverting the truthiness
 * a boolean caller would expect. Confirm callers before fixing.
 */
bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1119 
1120 int
1121 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1122 			enum spdk_nvmf_fc_object_state state)
1123 {
1124 	if (rport) {
1125 		rport->rport_state = state;
1126 		return 0;
1127 	} else {
1128 		return -EINVAL;
1129 	}
1130 }
1131 int
1132 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1133 			enum spdk_nvmf_fc_object_state state)
1134 {
1135 	if (assoc) {
1136 		assoc->assoc_state = state;
1137 		return 0;
1138 	} else {
1139 		return -EINVAL;
1140 	}
1141 }
1142 
1143 static struct spdk_nvmf_fc_association *
1144 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1145 {
1146 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1147 	struct spdk_nvmf_fc_conn *fc_conn;
1148 
1149 	if (!qpair) {
1150 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1151 		return NULL;
1152 	}
1153 
1154 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1155 
1156 	return fc_conn->fc_assoc;
1157 }
1158 
1159 bool
1160 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1161 		       struct spdk_nvmf_ctrlr *ctrlr)
1162 {
1163 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1164 	struct spdk_nvmf_fc_association *assoc = NULL;
1165 
1166 	if (!ctrlr) {
1167 		return false;
1168 	}
1169 
1170 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1171 	if (!fc_nport) {
1172 		return false;
1173 	}
1174 
1175 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1176 	if (assoc && assoc->tgtport == fc_nport) {
1177 		SPDK_DEBUGLOG(nvmf_fc,
1178 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1179 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1180 			      nport_hdl);
1181 		return true;
1182 	}
1183 	return false;
1184 }
1185 
/* Drop a pending LS request: unlink it from the hwqp's pending queue and
 * hand its receive-queue buffer back to the hardware.
 */
static void
nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	assert(ls_rqst);

	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);

	/* Return buffer to chip */
	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
}
1197 
1198 static int
1199 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
1200 			  struct spdk_nvmf_fc_nport *nport,
1201 			  struct spdk_nvmf_fc_remote_port_info *rport)
1202 {
1203 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1204 	int num_deleted = 0;
1205 
1206 	assert(hwqp);
1207 	assert(nport);
1208 	assert(rport);
1209 
1210 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1211 		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
1212 			num_deleted++;
1213 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1214 		}
1215 	}
1216 	return num_deleted;
1217 }
1218 
1219 static void
1220 nvmf_fc_req_bdev_abort(void *arg1)
1221 {
1222 	struct spdk_nvmf_fc_request *fc_req = arg1;
1223 	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1224 	int i;
1225 
1226 	/* Initial release - we don't have to abort Admin Queue or
1227 	 * Fabric commands. The AQ commands supported at this time are
1228 	 * Get-Log-Page,
1229 	 * Identify
1230 	 * Set Features
1231 	 * Get Features
1232 	 * AER -> Special case and handled differently.
1233 	 * Every one of the above Admin commands (except AER) run
1234 	 * to completion and so an Abort of such commands doesn't
1235 	 * make sense.
1236 	 */
1237 	/* The Fabric commands supported are
1238 	 * Property Set
1239 	 * Property Get
1240 	 * Connect -> Special case (async. handling). Not sure how to
1241 	 * handle at this point. Let it run to completion.
1242 	 */
1243 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1244 		if (ctrlr->aer_req[i] == &fc_req->req) {
1245 			SPDK_NOTICELOG("Abort AER request\n");
1246 			nvmf_qpair_free_aer(fc_req->req.qpair);
1247 		}
1248 	}
1249 }
1250 
/* Final step of an FC request abort: free the request, then notify every
 * caller that registered an abort callback.
 *
 * The callback list is swapped out of fc_req BEFORE the request is freed
 * so the callbacks can still be walked afterwards; hwqp is likewise
 * captured up front because fc_req is invalid once _nvmf_fc_request_free()
 * returns.
 */
void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	/* fc_req must not be touched after this point. */
	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}
1279 
/* Begin aborting an FC request.
 *
 * If cb is non-NULL it is queued on the request's abort-callback list and
 * invoked from nvmf_fc_request_abort_complete() once the abort finishes.
 * send_abts selects whether an ABTS is sent on the wire for the exchange.
 * The action taken depends on the request's current state: bdev-owned
 * requests are aborted via the bdev layer, in-flight transfers via an
 * abort WQE to the HBA, and pending/waiting requests are simply dequeued
 * and completed. Calling this again on an already-aborted request only
 * adds the callback.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch on the request's life-cycle state. */
	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	/* Finish the abort on the hwqp's poller thread. */
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1366 
1367 static int
1368 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1369 {
1370 	uint32_t length = fc_req->req.length;
1371 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1372 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1373 	struct spdk_nvmf_transport *transport = group->transport;
1374 
1375 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1376 		return -ENOMEM;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
/* Start executing a parsed FC command.
 *
 * Acquires an exchange (unless send-frame suffices) and IO buffers, then
 * either kicks off the host-to-controller data transfer (writes) or hands
 * the request straight to the NVMf core (reads / no-data commands).
 * Returns 0 on success; -EAGAIN when an exchange or buffers are
 * unavailable, in which case the caller is expected to queue the request
 * as pending and retry later.
 */
static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we dont use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			/* Give back the exchange so a retry starts clean. */
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped return success to caller */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
1429 
/* Extract the VM identifier and CS_CTL priority from the FC frame header
 * into the request.
 *
 * When DF_CTL indicates a 16-byte device header is present, the VM header
 * follows the frame header plus any ESP and network optional headers, and
 * its source VM id is recorded as the request's app_id. When F_CTL has
 * the priority-enable bit set, CS_CTL carries the frame priority.
 * NOTE(review): assumes the optional headers appear in ESP-then-network
 * order ahead of the device header — confirm against the FC framing spec.
 */
static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}
1460 
/* Validate an inbound NVMe command frame and either execute it or queue
 * it as pending.
 *
 * Checks, in order: command IU format, data-transfer direction,
 * connection lookup by conn_id, frame s_id/d_id against the connection,
 * association/connection/qpair state, and the transfer length against the
 * transport's max IO size. On success a request object is populated from
 * the command IU and executed (or queued behind earlier pending requests).
 * Returns 0 on success or a negative errno describing the first failed
 * check; the caller releases the RQ buffer only on success.
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	/* s_id/d_id are 24-bit big-endian fields; normalize to host order. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Resolve the connection this command belongs to. */
	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	/* Populate the request from the command IU and frame header. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	/* Preserve ordering: if anything is already pending, queue behind it. */
	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}
1579 
1580 /*
1581  * These functions are called from the FC LLD
1582  */
1583 
1584 void
1585 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1586 {
1587 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1588 	struct spdk_nvmf_transport_poll_group *group;
1589 
1590 	if (!fc_req) {
1591 		return;
1592 	}
1593 
1594 	if (fc_req->xchg) {
1595 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1596 		fc_req->xchg = NULL;
1597 	}
1598 
1599 	/* Release IO buffers */
1600 	if (fc_req->req.data_from_pool) {
1601 		group = &hwqp->fgroup->group;
1602 		spdk_nvmf_request_free_buffers(&fc_req->req, group,
1603 					       group->transport);
1604 	}
1605 	fc_req->req.data = NULL;
1606 	fc_req->req.iovcnt  = 0;
1607 
1608 	/* Free Fc request */
1609 	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
1610 }
1611 
/* Transition an FC request to a new life-cycle state, recording a trace
 * point for the transition. The magic assert guards against acting on an
 * already-freed request.
 */
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1625 
1626 char *
1627 nvmf_fc_request_get_state_str(int state)
1628 {
1629 	static char *unk_str = "unknown";
1630 
1631 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1632 		fc_req_state_strs[state] : unk_str);
1633 }
1634 
/* Dispatch a received FC frame on a hardware queue pair.
 *
 * LS request frames (R_CTL LS request + NVMf data type) are validated
 * against the nport/rport tables, packed into an LS request structure
 * carved out of the RQ buffer, and handed to the LS module if an exchange
 * is available (otherwise queued on ls_pending_queue). Command frames
 * (R_CTL command + FC exchange type) go to nvmf_fc_hwqp_handle_request(),
 * and their RQ buffer is released on success. Anything else is counted
 * and dropped. Returns 0 on success or a negative errno.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* s_id/d_id are 24-bit big-endian fields; normalize to host order. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		/* Both ends must be fully created to accept LS traffic. */
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		/* Keep ordering: only grab an exchange if nothing is queued ahead. */
		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1743 
1744 void
1745 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1746 {
1747 	struct spdk_nvmf_request *req = NULL, *tmp;
1748 	struct spdk_nvmf_fc_request *fc_req;
1749 	int budget = 64;
1750 
1751 	if (!hwqp->fgroup) {
1752 		/* LS queue is tied to acceptor_poll group and LS pending requests
1753 		 * are stagged and processed using hwqp->ls_pending_queue.
1754 		 */
1755 		return;
1756 	}
1757 
1758 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1759 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1760 		if (!nvmf_fc_request_execute(fc_req)) {
1761 			/* Successfully posted, Delete from pending. */
1762 			nvmf_fc_request_remove_from_pending(fc_req);
1763 		}
1764 
1765 		if (budget) {
1766 			budget--;
1767 		} else {
1768 			return;
1769 		}
1770 	}
1771 }
1772 
/* Retry LS requests queued on the hwqp waiting for an exchange.
 *
 * Each request's nport/rport are re-validated (they may have gone away
 * since the frame arrived); invalid ones are released back to the chip.
 * Valid requests are handed to the LS module as long as exchanges remain;
 * processing stops at the first exchange-allocation failure to preserve
 * FIFO ordering.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1818 
/* Transmit the response for a completed FC request.
 *
 * Updates the connection's SQ head in the completion, then sends either
 * an extended response (ERSP) IU — when nvmf_fc_send_ersp_required()
 * says one is needed — or a bare response. ERSP fields are written in
 * big-endian per the FC-NVMe spec. Returns the transmit result.
 */
int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transferred_len)) {
		/* Fill ERSP Len (in 32-bit words, big-endian) */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1859 
/* Decide whether this completion needs an extended response (ERSP) IU
 * rather than a bare response.
 *
 * NOTE(review): "esrp_ratio" is the field's spelling in the connection
 * struct (likely a transposition of "ersp"); it cannot be corrected here.
 */
bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	/* 0xFFFE masks off the phase bit so only real status bits count. */
	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1888 
/* NVMf transport completion callback for FC requests.
 *
 * Aborted requests are finished on the poller thread (deferred so cleanup
 * does not run in this context). Successful controller-to-host requests
 * start their data transfer; everything else transitions to the matching
 * RSP state and sends the response. On any transmit error the request is
 * freed. Always returns 0 per the transport-ops contract.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1924 
1925 struct spdk_nvmf_tgt *
1926 nvmf_fc_get_tgt(void)
1927 {
1928 	if (g_nvmf_ftransport) {
1929 		return g_nvmf_ftransport->transport.tgt;
1930 	}
1931 	return NULL;
1932 }
1933 
1934 /*
1935  * FC Transport Public API begins here
1936  */
1937 
1938 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1939 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1940 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1941 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1942 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1943 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1944 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1945 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1946 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1947 
1948 static void
1949 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1950 {
1951 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1952 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1953 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1954 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1955 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1956 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1957 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1958 }
1959 
/* Create the (singleton) FC transport.
 *
 * Validates that no FC transport already exists, that at least one core
 * is available, and that max_io_size/io_unit_size fits the SGE limit;
 * then allocates the transport object, initializes its lock, and brings
 * up the low-level FC driver. Returns the embedded generic transport, or
 * NULL on any failure.
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	/* The creating thread becomes the FC main thread. */
	g_nvmf_fc_main_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
2013 
2014 static void
2015 nvmf_fc_destroy_done_cb(void *cb_arg)
2016 {
2017 	free(g_nvmf_ftransport);
2018 	if (g_transport_destroy_done_cb) {
2019 		g_transport_destroy_done_cb(cb_arg);
2020 		g_transport_destroy_done_cb = NULL;
2021 	}
2022 }
2023 
/*
 * Destroy the FC transport.
 *
 * Frees any remaining FC poll groups, stashes cb_fn in a global, and hands
 * the rest of the teardown to the low level driver; the callback fires later
 * from nvmf_fc_destroy_done_cb when the LLD completes. Always returns 0.
 */
static int
nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	if (transport) {
		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;

		/* clean up any FC poll groups still around */
		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
			free(fgroup);
		}
		g_nvmf_fgroup_count = 0;
		g_transport_destroy_done_cb = cb_fn;

		/* low level FC driver clean up */
		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
	}

	return 0;
}
2045 
/* No-op: the FC transport requires no per-trid listener setup here. */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
	       struct spdk_nvmf_listen_opts *listen_opts)
{
	return 0;
}
2052 
/* No-op counterpart of nvmf_fc_listen: nothing to tear down per trid. */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
2058 
/*
 * Transport accept poller.
 *
 * On the very first invocation (function-local one-shot latch) this starts
 * the low level FC driver. Every invocation then polls the LS queue of each
 * online FC port. Returns the number of events processed.
 */
static uint32_t
nvmf_fc_accept(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	uint32_t count = 0;
	static bool start_lld = false;

	/* One-time LLD start, deferred to the first accept poll. */
	if (spdk_unlikely(!start_lld)) {
		start_lld  = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			count += nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}

	return count;
}
2080 
2081 static void
2082 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2083 		 struct spdk_nvme_transport_id *trid,
2084 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2085 {
2086 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2087 	entry->adrfam = trid->adrfam;
2088 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2089 
2090 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2091 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2092 }
2093 
2094 static struct spdk_nvmf_transport_poll_group *
2095 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
2096 {
2097 	struct spdk_nvmf_fc_poll_group *fgroup;
2098 	struct spdk_nvmf_fc_transport *ftransport =
2099 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2100 
2101 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2102 	if (!fgroup) {
2103 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2104 		return NULL;
2105 	}
2106 
2107 	TAILQ_INIT(&fgroup->hwqp_list);
2108 
2109 	pthread_mutex_lock(&ftransport->lock);
2110 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2111 	g_nvmf_fgroup_count++;
2112 	pthread_mutex_unlock(&ftransport->lock);
2113 
2114 	return &fgroup->group;
2115 }
2116 
2117 static void
2118 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2119 {
2120 	struct spdk_nvmf_fc_poll_group *fgroup;
2121 	struct spdk_nvmf_fc_transport *ftransport =
2122 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2123 
2124 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2125 	pthread_mutex_lock(&ftransport->lock);
2126 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2127 	g_nvmf_fgroup_count--;
2128 	pthread_mutex_unlock(&ftransport->lock);
2129 
2130 	free(fgroup);
2131 }
2132 
/*
 * Attach a new qpair (FC connection) to this poll group.
 *
 * Finds the hwqp in this group that belongs to the same FC port as the
 * connection's association, assigns a connection id on that hwqp, then
 * hands the connection to the hwqp's poller via the poller API.
 * Returns 0 on success, -1 on failure.
 */
static int
nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
		       struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_conn *fc_conn;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
	bool hwqp_found = false;

	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	/* Pick the hwqp in this group that lives on the connection's FC port. */
	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
			hwqp_found = true;
			break;
		}
	}

	if (!hwqp_found) {
		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
		goto err;
	}

	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
					 &fc_conn->conn_id,
					 fc_conn->max_queue_depth)) {
		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
		goto err;
	}

	fc_conn->hwqp = hwqp;

	/* If this is for ADMIN connection, then update assoc ID. */
	if (fc_conn->qpair.qid == 0) {
		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
	}

	/* Let the hwqp's poller thread finish adding the connection. */
	api_data = &fc_conn->create_opd->u.add_conn;
	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
	return 0;
err:
	return -1;
}
2178 
2179 static int
2180 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2181 {
2182 	uint32_t count = 0;
2183 	struct spdk_nvmf_fc_poll_group *fgroup;
2184 	struct spdk_nvmf_fc_hwqp *hwqp;
2185 
2186 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2187 
2188 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2189 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2190 			count += nvmf_fc_process_queue(hwqp);
2191 		}
2192 	}
2193 
2194 	return (int) count;
2195 }
2196 
/*
 * Release an FC request by driving it through the abort machinery:
 * a request not yet aborted is marked BDEV_ABORTED and aborted now;
 * one already aborted just has its abort completion finished.
 * Always returns 0.
 */
static int
nvmf_fc_request_free(struct spdk_nvmf_request *req)
{
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);

	if (!fc_req->is_aborted) {
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
	} else {
		nvmf_fc_request_abort_complete(fc_req);
	}

	return 0;
}
2211 
2212 static void
2213 nvmf_fc_connection_delete_done_cb(void *arg)
2214 {
2215 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2216 
2217 	if (fc_ctx->cb_fn) {
2218 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2219 	}
2220 	free(fc_ctx);
2221 }
2222 
/*
 * Main-thread half of qpair teardown (posted from nvmf_fc_close_qpair).
 *
 * Three outcomes:
 *  - no connection id was ever assigned: fail any pending LS add-connection
 *    request, then complete synchronously;
 *  - connection fully created: start an async delete and return; completion
 *    runs nvmf_fc_connection_delete_done_cb;
 *  - otherwise, or if the async delete could not start: complete
 *    synchronously via nvmf_fc_connection_delete_done_cb.
 */
static void
_nvmf_fc_close_qpair(void *arg)
{
	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
	struct spdk_nvmf_fc_conn *fc_conn;
	int rc;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

		/* Connection was never established; fail the in-flight LS
		 * add-connection request, if any. */
		if (fc_conn->create_opd) {
			api_data = &fc_conn->create_opd->u.add_conn;

			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
						    api_data->args.fc_conn, api_data->aq_conn);
		}
	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		rc = nvmf_fc_delete_connection(fc_conn, false, true,
					       nvmf_fc_connection_delete_done_cb, fc_ctx);
		if (!rc) {
			/* Wait for transport to complete its work. */
			return;
		}

		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
	}

	/* Synchronous completion path. */
	nvmf_fc_connection_delete_done_cb(fc_ctx);
}
2254 
2255 static void
2256 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2257 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2258 {
2259 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2260 
2261 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2262 	if (!fc_ctx) {
2263 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.");
2264 		if (cb_fn) {
2265 			cb_fn(cb_arg);
2266 		}
2267 		return;
2268 	}
2269 	fc_ctx->qpair = qpair;
2270 	fc_ctx->cb_fn = cb_fn;
2271 	fc_ctx->cb_ctx = cb_arg;
2272 	fc_ctx->qpair_thread = spdk_get_thread();
2273 
2274 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2275 }
2276 
2277 static int
2278 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2279 			    struct spdk_nvme_transport_id *trid)
2280 {
2281 	struct spdk_nvmf_fc_conn *fc_conn;
2282 
2283 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2284 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2285 	return 0;
2286 }
2287 
2288 static int
2289 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2290 			     struct spdk_nvme_transport_id *trid)
2291 {
2292 	struct spdk_nvmf_fc_conn *fc_conn;
2293 
2294 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2295 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2296 	return 0;
2297 }
2298 
2299 static int
2300 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2301 			      struct spdk_nvme_transport_id *trid)
2302 {
2303 	struct spdk_nvmf_fc_conn *fc_conn;
2304 
2305 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2306 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2307 	return 0;
2308 }
2309 
/*
 * Abort handler for a qpair: the abort request itself is completed right
 * away; no transport-level abort of the target command is attempted here.
 */
static void
nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
			    struct spdk_nvmf_request *req)
{
	spdk_nvmf_request_complete(req);
}
2316 
/* FC transport ops table registered with the generic NVMe-oF layer. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* Listener management (no-ops for FC; ports are driven by admin APIs). */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	/* Poll group lifecycle. */
	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	/* Request and qpair lifecycle. */
	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
	.qpair_abort_request = nvmf_fc_qpair_abort_request,
};
2343 
2344 /* Initializes the data for the creation of a FC-Port object in the SPDK
2345  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2346  * the API to the library. The contents added to this well defined structure
2347  * is private to each vendors implementation.
2348  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	int rc = 0;
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	fc_port->port_hdl       = args->port_handle;
	fc_port->lld_fc_port	= args->lld_fc_port;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
	fc_port->ls_queue.is_ls_queue = true;

	/*
	 * Initialize the LS queue.
	 */
	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
	if (rc) {
		return rc;
	}

	/*
	 * Initialize the IO queues.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->is_ls_queue = false;
		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
		if (rc) {
			/* Unwind: free the hash tables of every IO queue
			 * initialized so far (indices i-1 down to 0), then
			 * those of the already-initialized LS queue. */
			for (; i > 0; --i) {
				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
			}
			rte_hash_free(fc_port->ls_queue.connection_list_hash);
			rte_hash_free(fc_port->ls_queue.rport_list_hash);
			return rc;
		}
	}

	/*
	 * Initialize the LS processing for port
	 */
	nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	return 0;
}
2419 
2420 /*
2421  * FC port must have all its nports deleted before transitioning to offline state.
2422  */
static void
nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
{
	struct spdk_nvmf_fc_nport *nport = NULL;
	/* All nports must have been deleted at this point for this fc port */
	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	/* Mark the nport states to be zombie, if they exist */
	/* NOTE(review): defensive path — presumably DEV_VERIFY is non-fatal in
	 * release builds, so leftover nports are zombied instead; confirm. */
	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		}
	}
}
2437 
/*
 * Completion callback for an IT delete: asserts on failure, invokes the
 * admin callback (SPDK_FC_IT_DELETE), frees the callback data and logs the
 * outcome. Identifying fields are snapshotted up front because cb_data is
 * freed before the final log.
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	free(cb_data);

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2480 
/*
 * Per-association completion for an IT delete. Decrements the rport's
 * association count (force-decrementing on error so teardown still makes
 * progress) and, once the last association finishes, removes the rport,
 * invokes the caller's callback and frees both rport and cb_data (args).
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot identifiers for the final log; cb_data/rport may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2542 
2543 /**
2544  * Process a IT delete.
2545  */
/*
 * Schedule deletion of every association on this nport whose s_id matches
 * the given rport. Each scheduled delete reports through
 * nvmf_fc_adm_i_t_delete_assoc_cb, which owns (and eventually frees)
 * cb_data. If nothing could be scheduled, the callback is invoked
 * synchronously so cb_func still fires.
 *
 * NOTE(review): if the cb_data allocation fails, cb_func is never invoked
 * (only logged) — confirm callers tolerate a silently dropped callback.
 */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = nvmf_fc_delete_association(nport,
							       assoc->assoc_id,
							       false /* send abts */, false,
							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2628 
/*
 * Per-queue quiesce completion. Counts down the shared quiesce_count;
 * once every queue (all IO queues + the LS queue) has responded, marks
 * the port QUIESCED, runs the port-level callback and frees the shared
 * quiesce context.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* The per-queue args were allocated in nvmf_fc_adm_hw_queue_quiesce. */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the called of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2681 
2682 static int
2683 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2684 			     spdk_nvmf_fc_poller_api_cb cb_func)
2685 {
2686 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2687 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2688 	int err = 0;
2689 
2690 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2691 
2692 	if (args == NULL) {
2693 		err = -ENOMEM;
2694 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2695 		goto done;
2696 	}
2697 	args->hwqp = fc_hwqp;
2698 	args->ctx = ctx;
2699 	args->cb_info.cb_func = cb_func;
2700 	args->cb_info.cb_data = args;
2701 	args->cb_info.cb_thread = spdk_get_thread();
2702 
2703 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2704 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2705 	if (rc) {
2706 		free(args);
2707 		err = -EINVAL;
2708 	}
2709 
2710 done:
2711 	return err;
2712 }
2713 
2714 /*
2715  * Hw port Quiesce
2716  */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	/* Shared countdown context, freed by the last queue's quiesce callback. */
	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		/* Count is bumped even on failure so the countdown math stays
		 * consistent with the loop; the failed queue never calls back. */
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if an IO-queue quiesce fails after the LS queue was
	 * successfully submitted, this frees port_quiesce_ctx while earlier
	 * callbacks may still reference it — confirm this path is unreachable
	 * or fix the ownership. */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2789 
2790 /*
2791  * Initialize and add a HW port entry to the global
2792  * HW port list.
2793  */
2794 static void
2795 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2796 {
2797 	ASSERT_SPDK_FC_MAIN_THREAD();
2798 	struct spdk_nvmf_fc_port *fc_port = NULL;
2799 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2800 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2801 			api_data->api_args;
2802 	int err = 0;
2803 
2804 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2805 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2806 		err = EINVAL;
2807 		goto abort_port_init;
2808 	}
2809 
2810 	/*
2811 	 * 1. Check for duplicate initialization.
2812 	 */
2813 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2814 	if (fc_port != NULL) {
2815 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2816 		goto abort_port_init;
2817 	}
2818 
2819 	/*
2820 	 * 2. Get the memory to instantiate a fc port.
2821 	 */
2822 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2823 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2824 	if (fc_port == NULL) {
2825 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2826 		err = -ENOMEM;
2827 		goto abort_port_init;
2828 	}
2829 
2830 	/* assign the io_queues array */
2831 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2832 				     struct spdk_nvmf_fc_port));
2833 
2834 	/*
2835 	 * 3. Initialize the contents for the FC-port
2836 	 */
2837 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2838 
2839 	if (err != 0) {
2840 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2841 		DEV_VERIFY(!"Data initialization failed for fc_port");
2842 		goto abort_port_init;
2843 	}
2844 
2845 	/*
2846 	 * 4. Add this port to the global fc port list in the library.
2847 	 */
2848 	nvmf_fc_port_add(fc_port);
2849 
2850 abort_port_init:
2851 	if (err && fc_port) {
2852 		free(fc_port);
2853 	}
2854 	if (api_data->cb_func != NULL) {
2855 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2856 	}
2857 
2858 	free(arg);
2859 
2860 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2861 		      args->port_handle, err);
2862 }
2863 
2864 static void
2865 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2866 {
2867 	struct spdk_nvmf_fc_abts_ctx *ctx;
2868 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2869 
2870 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2871 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2872 		ctx = args->cb_info.cb_data;
2873 		if (ctx) {
2874 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2875 				free(ctx->sync_poller_args);
2876 				free(ctx->abts_poller_args);
2877 				spdk_free(ctx);
2878 			}
2879 		}
2880 	}
2881 }
2882 
/*
 * Free a HW port: requires the port to exist and to have no nports left.
 * Drains pending sync callbacks and frees the hash tables of the LS queue
 * and every IO queue, then removes the port from the global list and frees
 * it. Outcome is reported through api_data->cb_func.
 */
static void
nvmf_fc_adm_evnt_hw_port_free(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	int err = 0, i;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
			api_data->api_args;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle);
		err = -EIO;
		goto out;
	}

	/* Clean up and free fc_port */
	hwqp = &fc_port->ls_queue;
	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
	rte_hash_free(hwqp->connection_list_hash);
	rte_hash_free(hwqp->rport_list_hash);

	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
		hwqp = &fc_port->io_queues[i];

		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
		rte_hash_free(hwqp->connection_list_hash);
		rte_hash_free(hwqp->rport_list_hash);
	}

	nvmf_fc_port_remove(fc_port);
	free(fc_port);
out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
		      args->port_handle, err);
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
	}

	free(arg);
}
2932 
2933 /*
2934  * Online a HW port.
2935  */
/*
 * Bring a HW port online: flips the port state, brings the LS queue and
 * every IO queue online, and attaches each IO hwqp to a poll group.
 * Outcome is reported through api_data->cb_func.
 */
static void
nvmf_fc_adm_evnt_hw_port_online(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to online */
		err = nvmf_fc_port_set_online(fc_port);
		if (err != 0) {
			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
			DEV_VERIFY(!"Hw port online failed");
			goto out;
		}

		hwqp = &fc_port->ls_queue;
		hwqp->context = NULL;
		(void)nvmf_fc_hwqp_set_online(hwqp);

		/* Cycle through all the io queues and setup a hwqp poller for each. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			hwqp->context = NULL;
			(void)nvmf_fc_hwqp_set_online(hwqp);
			nvmf_fc_poll_group_add_hwqp(hwqp);
		}
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}

out:
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
		      err);
}
2984 
/*
 * Completion callback for hwqp removal during port offline. Counts down
 * pending_remove_hwqp; when the last hwqp has been removed, validates the
 * port (nports should already be gone) and invokes the offline callback.
 */
static void
nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
{
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;

	/* Wait for all hwqp removals to finish before completing. */
	if (--remove_hwqp_args->pending_remove_hwqp) {
		return;
	}

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		err = -EINVAL;
		SPDK_ERRLOG("fc_port not found.\n");
		goto out;
	}

	/*
	 * Delete all the nports. Ideally, the nports should have been purged
	 * before the offline event, in which case, only a validation is required.
	 */
	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
out:
	if (remove_hwqp_args->cb_fn) {
		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
	}

	free(remove_hwqp_args);
}
3016 
3017 /*
3018  * Offline a HW port.
3019  */
3020 static void
3021 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3022 {
3023 	ASSERT_SPDK_FC_MAIN_THREAD();
3024 	struct spdk_nvmf_fc_port *fc_port = NULL;
3025 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3026 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3027 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3028 			api_data->api_args;
3029 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3030 	int i = 0;
3031 	int err = 0;
3032 
3033 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3034 	if (fc_port) {
3035 		/* Set the port state to offline, if it is not already. */
3036 		err = nvmf_fc_port_set_offline(fc_port);
3037 		if (err != 0) {
3038 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3039 			err = 0;
3040 			goto out;
3041 		}
3042 
3043 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3044 		if (!remove_hwqp_args) {
3045 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3046 			err = -ENOMEM;
3047 			goto out;
3048 		}
3049 		remove_hwqp_args->cb_fn = api_data->cb_func;
3050 		remove_hwqp_args->cb_args = api_data->api_args;
3051 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
3052 
3053 		hwqp = &fc_port->ls_queue;
3054 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3055 
3056 		/* Remove poller for all the io queues. */
3057 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3058 			hwqp = &fc_port->io_queues[i];
3059 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3060 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3061 						       remove_hwqp_args);
3062 		}
3063 
3064 		free(arg);
3065 
3066 		/* Wait untill all the hwqps are removed from poll groups. */
3067 		return;
3068 	} else {
3069 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3070 		err = -EINVAL;
3071 	}
3072 out:
3073 	if (api_data->cb_func != NULL) {
3074 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3075 	}
3076 
3077 	free(arg);
3078 
3079 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3080 		      err);
3081 }
3082 
/* Per-subsystem context for adding/removing an nport listener. Allocated in
 * nvmf_fc_adm_add_rem_nport_listener() and freed after the subsystem has been
 * resumed (or on an intermediate failure). */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being paused/resumed */
	bool add_listener;			/* true = add listener, false = remove */
	struct spdk_nvme_transport_id trid;	/* FC transport ID built from nport WWNs */
};
3088 
/*
 * Resume-completion callback for the nport listener add/remove flow.
 * The pause/listen/resume cycle is finished, so release the context.
 */
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	free(cb_arg);
}
3096 
3097 static void
3098 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3099 {
3100 	ASSERT_SPDK_FC_MAIN_THREAD();
3101 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3102 
3103 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3104 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3105 		free(ctx);
3106 	}
3107 }
3108 
3109 static void
3110 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3111 {
3112 	ASSERT_SPDK_FC_MAIN_THREAD();
3113 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3114 
3115 	if (ctx->add_listener) {
3116 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3117 	} else {
3118 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3119 		nvmf_fc_adm_listen_done(ctx, 0);
3120 	}
3121 }
3122 
3123 static int
3124 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3125 {
3126 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3127 	struct spdk_nvmf_subsystem *subsystem;
3128 	struct spdk_nvmf_listen_opts opts;
3129 
3130 	if (!tgt) {
3131 		SPDK_ERRLOG("No nvmf target defined\n");
3132 		return -EINVAL;
3133 	}
3134 
3135 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3136 
3137 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3138 	while (subsystem) {
3139 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3140 
3141 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3142 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3143 			if (ctx) {
3144 				ctx->add_listener = add;
3145 				ctx->subsystem = subsystem;
3146 				nvmf_fc_create_trid(&ctx->trid,
3147 						    nport->fc_nodename.u.wwn,
3148 						    nport->fc_portname.u.wwn);
3149 
3150 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3151 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3152 						    ctx->trid.traddr);
3153 					free(ctx);
3154 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3155 								     0,
3156 								     nvmf_fc_adm_subsystem_paused_cb,
3157 								     ctx)) {
3158 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3159 						    subsystem->subnqn);
3160 					free(ctx);
3161 				}
3162 			}
3163 		}
3164 
3165 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3166 	}
3167 
3168 	return 0;
3169 }
3170 
3171 /*
3172  * Create a Nport.
3173  */
3174 static void
3175 nvmf_fc_adm_evnt_nport_create(void *arg)
3176 {
3177 	ASSERT_SPDK_FC_MAIN_THREAD();
3178 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3179 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3180 			api_data->api_args;
3181 	struct spdk_nvmf_fc_nport *nport = NULL;
3182 	struct spdk_nvmf_fc_port *fc_port = NULL;
3183 	int err = 0;
3184 
3185 	/*
3186 	 * Get the physical port.
3187 	 */
3188 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3189 	if (fc_port == NULL) {
3190 		err = -EINVAL;
3191 		goto out;
3192 	}
3193 
3194 	/*
3195 	 * Check for duplicate initialization.
3196 	 */
3197 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3198 	if (nport != NULL) {
3199 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3200 			    args->port_handle);
3201 		err = -EINVAL;
3202 		goto out;
3203 	}
3204 
3205 	/*
3206 	 * Get the memory to instantiate a fc nport.
3207 	 */
3208 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3209 	if (nport == NULL) {
3210 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3211 			    args->nport_handle);
3212 		err = -ENOMEM;
3213 		goto out;
3214 	}
3215 
3216 	/*
3217 	 * Initialize the contents for the nport
3218 	 */
3219 	nport->nport_hdl    = args->nport_handle;
3220 	nport->port_hdl     = args->port_handle;
3221 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3222 	nport->fc_nodename  = args->fc_nodename;
3223 	nport->fc_portname  = args->fc_portname;
3224 	nport->d_id         = args->d_id;
3225 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3226 
3227 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3228 	TAILQ_INIT(&nport->rem_port_list);
3229 	nport->rport_count = 0;
3230 	TAILQ_INIT(&nport->fc_associations);
3231 	nport->assoc_count = 0;
3232 
3233 	/*
3234 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3235 	 */
3236 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3237 
3238 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3239 out:
3240 	if (err && nport) {
3241 		free(nport);
3242 	}
3243 
3244 	if (api_data->cb_func != NULL) {
3245 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3246 	}
3247 
3248 	free(arg);
3249 }
3250 
/*
 * Completion callback for the per-rport IT-delete events issued during
 * nport deletion. Invoked once per rport; the nport itself (and the shared
 * cb_data) is freed, and the user's callback executed, only when the last
 * rport has been removed from the nport.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should be gone before the last rport is. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	/* Single log string so the message goes to exactly one sink below. */
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
3315 
3316 /*
3317  * Delete Nport.
3318  */
3319 static void
3320 nvmf_fc_adm_evnt_nport_delete(void *arg)
3321 {
3322 	ASSERT_SPDK_FC_MAIN_THREAD();
3323 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3324 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3325 			api_data->api_args;
3326 	struct spdk_nvmf_fc_nport *nport = NULL;
3327 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3328 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3329 	int err = 0;
3330 	uint32_t rport_cnt = 0;
3331 	int rc = 0;
3332 
3333 	/*
3334 	 * Make sure that the nport exists.
3335 	 */
3336 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3337 	if (nport == NULL) {
3338 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3339 			    args->port_handle);
3340 		err = -EINVAL;
3341 		goto out;
3342 	}
3343 
3344 	/*
3345 	 * Allocate memory for callback data.
3346 	 */
3347 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3348 	if (NULL == cb_data) {
3349 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3350 		err = -ENOMEM;
3351 		goto out;
3352 	}
3353 
3354 	cb_data->nport = nport;
3355 	cb_data->port_handle = args->port_handle;
3356 	cb_data->fc_cb_func = api_data->cb_func;
3357 	cb_data->fc_cb_ctx = args->cb_ctx;
3358 
3359 	/*
3360 	 * Begin nport tear down
3361 	 */
3362 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3363 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3364 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3365 		/*
3366 		 * Deletion of this nport already in progress. Register callback
3367 		 * and return.
3368 		 */
3369 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3370 		err = -ENODEV;
3371 		goto out;
3372 	} else {
3373 		/* nport partially created/deleted */
3374 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3375 		DEV_VERIFY(0 != "Nport in zombie state");
3376 		err = -ENODEV;
3377 		goto out;
3378 	}
3379 
3380 	/*
3381 	 * Remove this nport from listening addresses across subsystems
3382 	 */
3383 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3384 
3385 	if (0 != rc) {
3386 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3387 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3388 			    nport->nport_hdl);
3389 		goto out;
3390 	}
3391 
3392 	/*
3393 	 * Delete all the remote ports (if any) for the nport
3394 	 */
3395 	/* TODO - Need to do this with a "first" and a "next" accessor function
3396 	 * for completeness. Look at app-subsystem as examples.
3397 	 */
3398 	if (nvmf_fc_nport_has_no_rport(nport)) {
3399 		/* No rports to delete. Complete the nport deletion. */
3400 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3401 		goto out;
3402 	}
3403 
3404 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3405 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3406 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3407 
3408 		if (it_del_args == NULL) {
3409 			err = -ENOMEM;
3410 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3411 				    rport_iter->rpi, rport_iter->s_id);
3412 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3413 			goto out;
3414 		}
3415 
3416 		rport_cnt++;
3417 		it_del_args->port_handle = nport->port_hdl;
3418 		it_del_args->nport_handle = nport->nport_hdl;
3419 		it_del_args->cb_ctx = (void *)cb_data;
3420 		it_del_args->rpi = rport_iter->rpi;
3421 		it_del_args->s_id = rport_iter->s_id;
3422 
3423 		nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3424 					   nvmf_fc_adm_delete_nport_cb);
3425 	}
3426 
3427 out:
3428 	/* On failure, execute the callback function now */
3429 	if ((err != 0) || (rc != 0)) {
3430 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3431 			    "rport_cnt:%d rc:%d.\n",
3432 			    args->nport_handle, err, args->port_handle,
3433 			    rport_cnt, rc);
3434 		if (cb_data) {
3435 			free(cb_data);
3436 		}
3437 		if (api_data->cb_func != NULL) {
3438 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3439 		}
3440 
3441 	} else {
3442 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3443 			      "NPort %d delete done successfully, fc port:%d. "
3444 			      "rport_cnt:%d\n",
3445 			      args->nport_handle, args->port_handle, rport_cnt);
3446 	}
3447 
3448 	free(arg);
3449 }
3450 
3451 /*
3452  * Process an PRLI/IT add.
3453  */
3454 static void
3455 nvmf_fc_adm_evnt_i_t_add(void *arg)
3456 {
3457 	ASSERT_SPDK_FC_MAIN_THREAD();
3458 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3459 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3460 			api_data->api_args;
3461 	struct spdk_nvmf_fc_nport *nport = NULL;
3462 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3463 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3464 	int err = 0;
3465 
3466 	/*
3467 	 * Make sure the nport port exists.
3468 	 */
3469 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3470 	if (nport == NULL) {
3471 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3472 		err = -EINVAL;
3473 		goto out;
3474 	}
3475 
3476 	/*
3477 	 * Check for duplicate i_t_add.
3478 	 */
3479 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3480 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3481 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3482 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3483 			err = -EEXIST;
3484 			goto out;
3485 		}
3486 	}
3487 
3488 	/*
3489 	 * Get the memory to instantiate the remote port
3490 	 */
3491 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3492 	if (rport == NULL) {
3493 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3494 		err = -ENOMEM;
3495 		goto out;
3496 	}
3497 
3498 	/*
3499 	 * Initialize the contents for the rport
3500 	 */
3501 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3502 	rport->s_id = args->s_id;
3503 	rport->rpi = args->rpi;
3504 	rport->fc_nodename = args->fc_nodename;
3505 	rport->fc_portname = args->fc_portname;
3506 
3507 	/*
3508 	 * Add remote port to nport
3509 	 */
3510 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3511 		DEV_VERIFY(!"Error while adding rport to list");
3512 	};
3513 
3514 	/*
3515 	 * TODO: Do we validate the initiators service parameters?
3516 	 */
3517 
3518 	/*
3519 	 * Get the targets service parameters from the library
3520 	 * to return back to the driver.
3521 	 */
3522 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3523 
3524 out:
3525 	if (api_data->cb_func != NULL) {
3526 		/*
3527 		 * Passing pointer to the args struct as the first argument.
3528 		 * The cb_func should handle this appropriately.
3529 		 */
3530 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3531 	}
3532 
3533 	free(arg);
3534 
3535 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3536 		      "IT add on nport %d done, rc = %d.\n",
3537 		      args->nport_handle, err);
3538 }
3539 
3540 /**
3541  * Process a IT delete.
3542  */
3543 static void
3544 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3545 {
3546 	ASSERT_SPDK_FC_MAIN_THREAD();
3547 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3548 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3549 			api_data->api_args;
3550 	int rc = 0;
3551 	struct spdk_nvmf_fc_nport *nport = NULL;
3552 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3553 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3554 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3555 	uint32_t num_rport = 0;
3556 	char log_str[256];
3557 
3558 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3559 
3560 	/*
3561 	 * Make sure the nport port exists. If it does not, error out.
3562 	 */
3563 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3564 	if (nport == NULL) {
3565 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3566 		rc = -EINVAL;
3567 		goto out;
3568 	}
3569 
3570 	/*
3571 	 * Find this ITN / rport (remote port).
3572 	 */
3573 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3574 		num_rport++;
3575 		if ((rport_iter->s_id == args->s_id) &&
3576 		    (rport_iter->rpi == args->rpi) &&
3577 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3578 			rport = rport_iter;
3579 			break;
3580 		}
3581 	}
3582 
3583 	/*
3584 	 * We should find either zero or exactly one rport.
3585 	 *
3586 	 * If we find zero rports, that means that a previous request has
3587 	 * removed the rport by the time we reached here. In this case,
3588 	 * simply return out.
3589 	 */
3590 	if (rport == NULL) {
3591 		rc = -ENODEV;
3592 		goto out;
3593 	}
3594 
3595 	/*
3596 	 * We have the rport slated for deletion. At this point clean up
3597 	 * any LS requests that are sitting in the pending list. Do this
3598 	 * first, then, set the states of the rport so that new LS requests
3599 	 * are not accepted. Then start the cleanup.
3600 	 */
3601 	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);
3602 
3603 	/*
3604 	 * We have found exactly one rport. Allocate memory for callback data.
3605 	 */
3606 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3607 	if (NULL == cb_data) {
3608 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3609 		rc = -ENOMEM;
3610 		goto out;
3611 	}
3612 
3613 	cb_data->nport = nport;
3614 	cb_data->rport = rport;
3615 	cb_data->port_handle = args->port_handle;
3616 	cb_data->fc_cb_func = api_data->cb_func;
3617 	cb_data->fc_cb_ctx = args->cb_ctx;
3618 
3619 	/*
3620 	 * Validate rport object state.
3621 	 */
3622 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3623 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3624 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3625 		/*
3626 		 * Deletion of this rport already in progress. Register callback
3627 		 * and return.
3628 		 */
3629 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3630 		rc = -ENODEV;
3631 		goto out;
3632 	} else {
3633 		/* rport partially created/deleted */
3634 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3635 		DEV_VERIFY(!"Invalid rport_state");
3636 		rc = -ENODEV;
3637 		goto out;
3638 	}
3639 
3640 	/*
3641 	 * We have successfully found a rport to delete. Call
3642 	 * nvmf_fc_i_t_delete_assoc(), which will perform further
3643 	 * IT-delete processing as well as free the cb_data.
3644 	 */
3645 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3646 				     (void *)cb_data);
3647 
3648 out:
3649 	if (rc != 0) {
3650 		/*
3651 		 * We have entered here because either we encountered an
3652 		 * error, or we did not find a rport to delete.
3653 		 * As a result, we will not call the function
3654 		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
3655 		 * processing. Therefore, execute the callback function now.
3656 		 */
3657 		if (cb_data) {
3658 			free(cb_data);
3659 		}
3660 		if (api_data->cb_func != NULL) {
3661 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3662 		}
3663 	}
3664 
3665 	snprintf(log_str, sizeof(log_str),
3666 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3667 		 args->nport_handle, num_rport, rc);
3668 
3669 	if (rc != 0) {
3670 		SPDK_ERRLOG("%s", log_str);
3671 	} else {
3672 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3673 	}
3674 
3675 	free(arg);
3676 }
3677 
3678 /*
3679  * Process ABTS received
3680  */
3681 static void
3682 nvmf_fc_adm_evnt_abts_recv(void *arg)
3683 {
3684 	ASSERT_SPDK_FC_MAIN_THREAD();
3685 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3686 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3687 	struct spdk_nvmf_fc_nport *nport = NULL;
3688 	int err = 0;
3689 
3690 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3691 		      args->oxid, args->rxid);
3692 
3693 	/*
3694 	 * 1. Make sure the nport port exists.
3695 	 */
3696 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3697 	if (nport == NULL) {
3698 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3699 		err = -EINVAL;
3700 		goto out;
3701 	}
3702 
3703 	/*
3704 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3705 	 */
3706 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3707 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3708 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3709 			      args->rpi, args->oxid, args->rxid);
3710 		err = 0;
3711 		goto out;
3712 
3713 	}
3714 
3715 	/*
3716 	 * 3. Pass the received ABTS-LS to the library for handling.
3717 	 */
3718 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3719 
3720 out:
3721 	if (api_data->cb_func != NULL) {
3722 		/*
3723 		 * Passing pointer to the args struct as the first argument.
3724 		 * The cb_func should handle this appropriately.
3725 		 */
3726 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3727 	} else {
3728 		/* No callback set, free the args */
3729 		free(args);
3730 	}
3731 
3732 	free(arg);
3733 }
3734 
3735 /*
3736  * Callback function for hw port quiesce.
3737  */
3738 static void
3739 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3740 {
3741 	ASSERT_SPDK_FC_MAIN_THREAD();
3742 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3743 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3744 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3745 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3746 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3747 	struct spdk_nvmf_fc_port *fc_port = NULL;
3748 	char *dump_buf = NULL;
3749 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3750 
3751 	/*
3752 	 * Free the callback context struct.
3753 	 */
3754 	free(ctx);
3755 
3756 	if (err != 0) {
3757 		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
3758 		goto out;
3759 	}
3760 
3761 	if (args->dump_queues == false) {
3762 		/*
3763 		 * Queues need not be dumped.
3764 		 */
3765 		goto out;
3766 	}
3767 
3768 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3769 
3770 	/*
3771 	 * Get the fc port.
3772 	 */
3773 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3774 	if (fc_port == NULL) {
3775 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3776 		err = -EINVAL;
3777 		goto out;
3778 	}
3779 
3780 	/*
3781 	 * Allocate memory for the dump buffer.
3782 	 * This memory will be freed by FCT.
3783 	 */
3784 	dump_buf = (char *)calloc(1, dump_buf_size);
3785 	if (dump_buf == NULL) {
3786 		err = -ENOMEM;
3787 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3788 		goto out;
3789 	}
3790 	*args->dump_buf  = (uint32_t *)dump_buf;
3791 	dump_info.buffer = dump_buf;
3792 	dump_info.offset = 0;
3793 
3794 	/*
3795 	 * Add the dump reason to the top of the buffer.
3796 	 */
3797 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3798 
3799 	/*
3800 	 * Dump the hwqp.
3801 	 */
3802 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3803 				fc_port->num_io_queues, &dump_info);
3804 
3805 out:
3806 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3807 		      args->port_handle, args->dump_queues, err);
3808 
3809 	if (cb_func != NULL) {
3810 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3811 	}
3812 }
3813 
3814 /*
3815  * HW port reset
3816 
3817  */
3818 static void
3819 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3820 {
3821 	ASSERT_SPDK_FC_MAIN_THREAD();
3822 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3823 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3824 			api_data->api_args;
3825 	struct spdk_nvmf_fc_port *fc_port = NULL;
3826 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3827 	int err = 0;
3828 
3829 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3830 
3831 	/*
3832 	 * Make sure the physical port exists.
3833 	 */
3834 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3835 	if (fc_port == NULL) {
3836 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3837 		err = -EINVAL;
3838 		goto out;
3839 	}
3840 
3841 	/*
3842 	 * Save the reset event args and the callback in a context struct.
3843 	 */
3844 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3845 
3846 	if (ctx == NULL) {
3847 		err = -ENOMEM;
3848 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3849 		goto fail;
3850 	}
3851 
3852 	ctx->reset_args = args;
3853 	ctx->reset_cb_func = api_data->cb_func;
3854 
3855 	/*
3856 	 * Quiesce the hw port.
3857 	 */
3858 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3859 	if (err != 0) {
3860 		goto fail;
3861 	}
3862 
3863 	/*
3864 	 * Once the ports are successfully quiesced the reset processing
3865 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3866 	 */
3867 	return;
3868 fail:
3869 	free(ctx);
3870 
3871 out:
3872 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3873 		      err);
3874 
3875 	if (api_data->cb_func != NULL) {
3876 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3877 	}
3878 
3879 	free(arg);
3880 }
3881 
3882 static inline void
3883 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3884 {
3885 	if (nvmf_fc_get_main_thread()) {
3886 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3887 	}
3888 }
3889 
3890 /*
3891  * Queue up an event in the SPDK main threads event queue.
3892  * Used by the FC driver to notify the SPDK main thread of FC related events.
3893  */
3894 int
3895 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3896 			   spdk_nvmf_fc_callback cb_func)
3897 {
3898 	int err = 0;
3899 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3900 	spdk_msg_fn event_fn = NULL;
3901 
3902 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3903 
3904 	if (event_type >= SPDK_FC_EVENT_MAX) {
3905 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3906 		err = -EINVAL;
3907 		goto done;
3908 	}
3909 
3910 	if (args == NULL) {
3911 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3912 		err = -EINVAL;
3913 		goto done;
3914 	}
3915 
3916 	api_data = calloc(1, sizeof(*api_data));
3917 
3918 	if (api_data == NULL) {
3919 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3920 		err = -ENOMEM;
3921 		goto done;
3922 	}
3923 
3924 	api_data->api_args = args;
3925 	api_data->cb_func = cb_func;
3926 
3927 	switch (event_type) {
3928 	case SPDK_FC_HW_PORT_INIT:
3929 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3930 		break;
3931 
3932 	case SPDK_FC_HW_PORT_FREE:
3933 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3934 		break;
3935 
3936 	case SPDK_FC_HW_PORT_ONLINE:
3937 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3938 		break;
3939 
3940 	case SPDK_FC_HW_PORT_OFFLINE:
3941 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3942 		break;
3943 
3944 	case SPDK_FC_NPORT_CREATE:
3945 		event_fn = nvmf_fc_adm_evnt_nport_create;
3946 		break;
3947 
3948 	case SPDK_FC_NPORT_DELETE:
3949 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3950 		break;
3951 
3952 	case SPDK_FC_IT_ADD:
3953 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3954 		break;
3955 
3956 	case SPDK_FC_IT_DELETE:
3957 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3958 		break;
3959 
3960 	case SPDK_FC_ABTS_RECV:
3961 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3962 		break;
3963 
3964 	case SPDK_FC_HW_PORT_RESET:
3965 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3966 		break;
3967 
3968 	case SPDK_FC_UNRECOVERABLE_ERR:
3969 	default:
3970 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3971 		err = -EINVAL;
3972 		break;
3973 	}
3974 
3975 done:
3976 
3977 	if (err == 0) {
3978 		assert(event_fn != NULL);
3979 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3980 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3981 	} else {
3982 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3983 		if (api_data) {
3984 			free(api_data);
3985 		}
3986 	}
3987 
3988 	return err;
3989 }
3990 
/* Register the FC transport with the generic nvmf transport layer and
 * declare the log components ("nvmf_fc_adm_api", "nvmf_fc") used by the
 * SPDK_DEBUGLOG calls throughout this file. */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3994