/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe_FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#include "spdk_internal/trace_defs.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif
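
/*
 * Note: ASSERT_SPDK_FC_MAIN_THREAD() is a debug-build guard (DEV_VERIFY
 * defaults to assert) used by the admin-path code to verify that it is
 * running on the FC main thread returned by nvmf_fc_get_main_thread().
 */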

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
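
/*
 * Example (illustrative): nvmf_fc_get_prli_service_params() below advertises
 * SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION, i.e. 0x0018,
 * identifying this side as a discovery-capable NVMe-oF target.
 */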

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};

#define HWQP_CONN_TABLE_SIZE			8192
#define HWQP_RPI_TABLE_SIZE			4096
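
/*
 * Each HWQP keeps two rte_hash tables (created in nvmf_fc_init_hwqp): a
 * connection table keyed by the 64-bit connection id and an rport table
 * keyed by the 16-bit RPI. The sizes above bound the number of entries.
 */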

SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 1,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT",
					TRACE_FC_REQ_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FUSED_WAITING",
					TRACE_FC_REQ_FUSED_WAITING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
}

/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	struct spdk_poller *accept_poller;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req));
	}
}

static struct rte_hash *
nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
{
	struct rte_hash_parameters hash_params = { 0 };

	hash_params.entries = num_entries;
	hash_params.key_len = key_len;
	hash_params.name = name;

	return rte_hash_create(&hash_params);
}
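
/*
 * This is a thin wrapper over DPDK's rte_hash. A sketch of how the tables
 * are consumed elsewhere in this file (see nvmf_fc_hwqp_handle_request):
 *
 *	struct spdk_nvmf_fc_conn *fc_conn;
 *	if (rte_hash_lookup_data(hwqp->connection_list_hash,
 *				 (void *)&conn_id, (void **)&fc_conn) < 0) {
 *		... connection id not found ...
 *	}
 */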

void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;
}

int
nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	uint32_t i, qd;
	struct spdk_nvmf_fc_pooled_request *req;

	/*
	 * Create more fc-requests than the actual SQ size.
	 * This handles the race where the target driver sends
	 * back an RSP and, before it gets to process the CQE
	 * for that RSP, the initiator sends a new command.
	 * Depending on the load on the HWQP, there is a slim possibility
	 * that the target reaps the RQE corresponding to the new
	 * command before processing the CQE corresponding to the RSP.
	 */
	qd = fc_conn->max_queue_depth * 2;

	STAILQ_INIT(&fc_conn->pool_queue);
	fc_conn->pool_memory = calloc(qd, sizeof(struct spdk_nvmf_fc_request));
	if (!fc_conn->pool_memory) {
		SPDK_ERRLOG("create fc req ring objects failed\n");
		goto error;
	}
	fc_conn->pool_size = qd;
	fc_conn->pool_free_elems = qd;

	/* Initialise the pool objects and link them */
	for (i = 0; i < qd; i++) {
		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
				i * sizeof(struct spdk_nvmf_fc_request));

		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
	}
	return 0;
error:
	nvmf_fc_free_conn_reqpool(fc_conn);
	return -1;
}
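
/*
 * Sizing example (illustrative): with a negotiated max_queue_depth of 128,
 * the pool holds 256 pre-allocated spdk_nvmf_fc_request objects, so a new
 * command that arrives before the RSP's CQE is reaped still finds a free slot.
 */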

static inline struct spdk_nvmf_fc_request *
nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
{
	struct spdk_nvmf_fc_request *fc_req;
	struct spdk_nvmf_fc_pooled_request *pooled_req;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;

	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
	if (!pooled_req) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
	fc_conn->pool_free_elems -= 1;

	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);

	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Log an error for debugging purposes. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* set the magic to mark req as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);

	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
	fc_conn->pool_free_elems += 1;
}

static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}

int
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	char name[64];

	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	TAILQ_INIT(&hwqp->in_use_reqs);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);

	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
				     sizeof(uint64_t));
	if (!hwqp->connection_list_hash) {
		SPDK_ERRLOG("Failed to create connection hash table.\n");
		return -ENOMEM;
	}

	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
	if (!hwqp->rport_list_hash) {
		SPDK_ERRLOG("Failed to create rpi hash table.\n");
		rte_hash_free(hwqp->connection_list_hash);
		return -ENOMEM;
	}

	/* Init low level driver queues */
	nvmf_fc_init_q(hwqp);
	return 0;
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t max_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	/* find poll group with least number of hwqp's assigned to it */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < max_count) {
			ret_fgroup = fgroup;
			max_count = fgroup->hwqp_count;
		}
	}

	if (ret_fgroup) {
		ret_fgroup->hwqp_count++;
		hwqp->thread = ret_fgroup->group.group->thread;
		hwqp->fgroup = ret_fgroup;
	}

	pthread_mutex_unlock(&g_nvmf_ftransport->lock);

	return ret_fgroup;
}

bool
nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
{
	struct spdk_nvmf_fc_poll_group *tmp;
	bool rc = false;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
		if (tmp == fgroup) {
			rc = true;
			break;
		}
	}
	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
	return rc;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

static void
nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;

	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
	} else {
		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
	}

	if (args->cb_fn) {
		args->cb_fn(args->cb_ctx, 0);
	}

	free(args);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		if (tmp != hwqp->fgroup) {
			/* Poll group was already removed. Don't bother. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}

/*
 * Note: This needs to be used only on main poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}
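
/*
 * Note: the id is a 32-bit counter widened to 64 bits; uniqueness relies on
 * all callers being serialized on the main poller (no locking is done here),
 * and ids repeat after 2^32 allocations.
 */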

static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

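/*
 * If none of the pollers owned the aborted exchange, the queues may simply
 * not have caught up yet. The recovery flow: post a queue-sync marker to
 * every interested HWQP, wait for all of them to respond
 * (nvmf_fc_queue_synced_cb above), then replay the ABTS once. Only one retry
 * is attempted (ctx->queue_synced gates it in nvmf_fc_abts_handled_cb).
 */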
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}

static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport  = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}

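/*
 * ABTS handling overview: collect the distinct HWQPs that carry at least one
 * connection for the ABTS's RPI, fan the ABTS out to each of them via the
 * poller API, and let nvmf_fc_abts_handled_cb send the final BLS accept or
 * reject once every poller has responded. If no candidate HWQP exists, a
 * BLS reject with "no information" is sent immediately.
 */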
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}

/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}

static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}

static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}

/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}

bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}

static void
nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	assert(ls_rqst);

	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);

	/* Return buffer to chip */
	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
}

static int
nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_nport *nport,
			  struct spdk_nvmf_fc_remote_port_info *rport)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	int num_deleted = 0;

	assert(hwqp);
	assert(nport);
	assert(rport);

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
			num_deleted++;
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
		}
	}
	return num_deleted;
}

static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Each of the above Admin commands (except AER) runs
	 * to completion, so aborting them makes no sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}

void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}

void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped. Return success to caller. */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
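
/*
 * Execution flow summary: writes are started with nvmf_fc_recv_data() and
 * re-enter the state machine when the data arrives; reads and no-data
 * commands go straight to spdk_nvmf_request_exec(). An -EAGAIN return
 * (exchange or buffer exhaustion) leaves the request on the pending queue
 * to be retried from nvmf_fc_hwqp_process_pending_reqs().
 */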

static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}

static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (!spdk_nvmf_qpair_is_active(&fc_conn->qpair)) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}
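
/*
 * Validation summary for the fast path above: the command IU format, the
 * connection id (rte_hash lookup), the frame's s_id/d_id against the
 * connection, the association/connection/qpair states, and the requested
 * transfer length against max_io_size are all checked before a request
 * object is allocated.
 */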

/*
 * These functions are called from the FC LLD
 */

void
_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_fc_hwqp *hwqp;
	struct spdk_nvmf_transport_poll_group *group;

	if (!fc_req) {
		return;
	}
	hwqp = fc_req->hwqp;

	if (fc_req->xchg) {
		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		group = &hwqp->fgroup->group;
		spdk_nvmf_request_free_buffers(&fc_req->req, group,
					       group->transport);
	}
	fc_req->req.iovcnt = 0;

	/* Free Fc request */
	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
}

void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
		fc_req_state_strs[state] : unk_str);
}

int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}

void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* LS queue is tied to acceptor_poll group and LS pending requests
		 * are staged and processed using hwqp->ls_pending_queue.
		 */
1729 		return;
1730 	}
1731 
1732 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1733 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1734 		if (!nvmf_fc_request_execute(fc_req)) {
1735 			/* Successfully posted, Delete from pending. */
1736 			nvmf_fc_request_remove_from_pending(fc_req);
1737 		}
1738 
1739 		if (budget) {
1740 			budget--;
1741 		} else {
1742 			return;
1743 		}
1744 	}
1745 }
1746 
1747 void
1748 nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
1749 {
1750 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1751 	struct spdk_nvmf_fc_nport *nport = NULL;
1752 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
1753 
1754 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1755 		/* lookup nport and rport again - make sure they are still valid */
1756 		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
1757 		if (rc) {
1758 			if (nport == NULL) {
1759 				SPDK_ERRLOG("Nport not found. Dropping\n");
1760 				/* increment invalid nport counter */
1761 				hwqp->counters.nport_invalid++;
1762 			} else if (rport == NULL) {
1763 				SPDK_ERRLOG("Rport not found. Dropping\n");
1764 				/* increment invalid rport counter */
1765 				hwqp->counters.rport_invalid++;
1766 			}
1767 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1768 			continue;
1769 		}
1770 		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
1771 		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1772 			SPDK_ERRLOG("%s state not created. Dropping\n",
1773 				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
1774 				    "Nport" : "Rport");
1775 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1776 			continue;
1777 		}
1778 
1779 		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
1780 		if (ls_rqst->xchg) {
1781 			/* Got an XCHG */
1782 			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1783 			/* Handover the request to LS module */
1784 			nvmf_fc_handle_ls_rqst(ls_rqst);
1785 		} else {
1786 			/* No more XCHGs. Stop processing. */
1787 			hwqp->counters.no_xchg++;
1788 			return;
1789 		}
1790 	}
1791 }
1792 
1793 int
1794 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1795 {
1796 	int rc = 0;
1797 	struct spdk_nvmf_request *req = &fc_req->req;
1798 	struct spdk_nvmf_qpair *qpair = req->qpair;
1799 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1800 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1801 	uint16_t ersp_len = 0;
1802 
1803 	/* set sq head value in resp */
1804 	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);
1805 
1806 	/* Increment connection responses */
1807 	fc_conn->rsp_count++;
1808 
1809 	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1810 				       fc_req->transferred_len)) {
1811 		/* Fill ERSP Len */
1812 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1813 				    sizeof(uint32_t)));
1814 		fc_req->ersp.ersp_len = ersp_len;
1815 
1816 		/* Fill RSN */
1817 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1818 		fc_conn->rsn++;
1819 
1820 		/* Fill transfer length */
1821 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);
1822 
1823 		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
1824 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1825 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1826 	} else {
1827 		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
1828 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1829 	}
1830 
1831 	return rc;
1832 }
1833 
1834 bool
1835 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1836 			   uint32_t rsp_cnt, uint32_t xfer_len)
1837 {
1838 	struct spdk_nvmf_request *req = &fc_req->req;
1839 	struct spdk_nvmf_qpair *qpair = req->qpair;
1840 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1841 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1842 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1843 	uint16_t status = *((uint16_t *)&rsp->status);
1844 
1845 	/*
1846 	 * Check if we need to send ERSP
1847 	 * 1) For every N responses where N == ersp_ratio
1848 	 * 2) Fabric commands.
1849 	 * 3) Completion status failed or Completion dw0 or dw1 valid.
1850 	 * 4) SQ == 90% full (listed here for completeness; not checked below).
1851 	 * 5) Transfer length not equal to CMD IU length
1852 	 */
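	/*
	 * Illustrative example with hypothetical values: assuming
	 * esrp_ratio == 8, responses 8, 16, 24, ... of a connection carry a
	 * full ERSP IU, while any other successful response with clean
	 * cdw0/cdw1 and a complete data transfer collapses to a zero-length
	 * RSP sequence.
	 */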
1853 
1854 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1855 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1856 	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
1857 	    (req->length != xfer_len)) {
1858 		return true;
1859 	}
1860 	return false;
1861 }
1862 
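/*
 * Completion hook called by the generic nvmf layer: aborted requests are
 * finished via the poller API (so cleanup never runs in this context),
 * successful controller-to-host transfers go through the READ_XFER state
 * first, and everything else proceeds straight to response transmission.
 */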
1863 static int
1864 nvmf_fc_request_complete(struct spdk_nvmf_request *req)
1865 {
1866 	int rc = 0;
1867 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1868 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1869 
1870 	if (fc_req->is_aborted) {
1871 		/* Defer this to make sure we don't call the I/O cleanup in the same context. */
1872 		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1873 					(void *)fc_req);
1874 	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1875 		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1876 
1877 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1878 
1879 		rc = nvmf_fc_send_data(fc_req);
1880 	} else {
1881 		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1882 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1883 		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1884 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1885 		} else {
1886 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1887 		}
1888 
1889 		rc = nvmf_fc_handle_rsp(fc_req);
1890 	}
1891 
1892 	if (rc) {
1893 		SPDK_ERRLOG("Error in request complete.\n");
1894 		_nvmf_fc_request_free(fc_req);
1895 	}
1896 	return 0;
1897 }
1898 
1899 struct spdk_nvmf_tgt *
1900 nvmf_fc_get_tgt(void)
1901 {
1902 	if (g_nvmf_ftransport) {
1903 		return g_nvmf_ftransport->transport.tgt;
1904 	}
1905 	return NULL;
1906 }
1907 
1908 /*
1909  * FC Transport Public API begins here
1910  */
1911 
1912 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1913 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1914 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1915 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1916 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1917 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1918 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1919 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1920 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
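/*
 * With the defaults above, SPDK_NVMF_FC_DEFAULT_MAX_SGE works out to
 * 65536 / 4096 = 16; nvmf_fc_create() rejects any opts whose
 * max_io_size / io_unit_size ratio exceeds this.
 */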
1921 
1922 static void
1923 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1924 {
1925 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1926 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1927 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1928 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1929 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1930 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1931 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1932 }
1933 
1934 static int nvmf_fc_accept(void *ctx);
1935 
1936 static struct spdk_nvmf_transport *
1937 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
1938 {
1939 	uint32_t sge_count;
1940 
1941 	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
1942 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1943 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1944 		     "  max_aq_depth=%d\n",
1945 		     opts->max_queue_depth,
1946 		     opts->max_io_size,
1947 		     opts->max_qpairs_per_ctrlr - 1,
1948 		     opts->io_unit_size,
1949 		     opts->max_aq_depth);
1950 
1951 	if (g_nvmf_ftransport) {
1952 		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
1953 		return NULL;
1954 	}
1955 
1956 	if (spdk_env_get_last_core() < 1) {
1957 		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
1958 			    spdk_env_get_last_core() + 1);
1959 		return NULL;
1960 	}
1961 
1962 	sge_count = opts->max_io_size / opts->io_unit_size;
1963 	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
1964 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
1965 		return NULL;
1966 	}
1967 
1968 	g_nvmf_fc_main_thread = spdk_get_thread();
1969 	g_nvmf_fgroup_count = 0;
1970 	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));
1971 
1972 	if (!g_nvmf_ftransport) {
1973 		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
1974 		return NULL;
1975 	}
1976 
1977 	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
1978 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
1979 		free(g_nvmf_ftransport);
1980 		g_nvmf_ftransport = NULL;
1981 		return NULL;
1982 	}
1983 
1984 	g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept,
1985 					   &g_nvmf_ftransport->transport, opts->acceptor_poll_rate);
1986 	if (!g_nvmf_ftransport->accept_poller) {
1987 		free(g_nvmf_ftransport);
1988 		g_nvmf_ftransport = NULL;
1989 		return NULL;
1990 	}
1991 
1992 	/* initialize the low level FC driver */
1993 	nvmf_fc_lld_init();
1994 
1995 	return &g_nvmf_ftransport->transport;
1996 }
1997 
1998 static void
1999 nvmf_fc_destroy_done_cb(void *cb_arg)
2000 {
2001 	free(g_nvmf_ftransport);
2002 	if (g_transport_destroy_done_cb) {
2003 		g_transport_destroy_done_cb(cb_arg);
2004 		g_transport_destroy_done_cb = NULL;
2005 	}
2006 }
2007 
2008 static int
2009 nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
2010 		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
2011 {
2012 	if (transport) {
2013 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
2014 
2015 		/* clean up any FC poll groups still around */
2016 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
2017 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2018 			free(fgroup);
2019 		}
2020 
2021 		spdk_poller_unregister(&g_nvmf_ftransport->accept_poller);
2022 		g_nvmf_fgroup_count = 0;
2023 		g_transport_destroy_done_cb = cb_fn;
2024 
2025 		/* low level FC driver clean up */
2026 		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
2027 	}
2028 
2029 	return 0;
2030 }
2031 
2032 static int
2033 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
2034 	       struct spdk_nvmf_listen_opts *listen_opts)
2035 {
2036 	return 0;
2037 }
2038 
2039 static void
2040 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
2041 		    const struct spdk_nvme_transport_id *_trid)
2042 {
2043 }
2044 
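/*
 * Acceptor poller, registered in nvmf_fc_create(): the first tick starts
 * the low level driver via nvmf_fc_lld_start(); every tick then services
 * the LS queue of each online FC port. New associations arrive through
 * these LS exchanges rather than through a listen socket, which is why
 * listen/stop_listen above are no-ops.
 */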
2045 static int
2046 nvmf_fc_accept(void *ctx)
2047 {
2048 	struct spdk_nvmf_fc_port *fc_port = NULL;
2049 	uint32_t count = 0;
2050 	static bool start_lld = false;
2051 
2052 	if (spdk_unlikely(!start_lld)) {
2053 		start_lld = true;
2054 		nvmf_fc_lld_start();
2055 	}
2056 
2057 	/* poll the LS queue on each port */
2058 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2059 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2060 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2061 		}
2062 	}
2063 
2064 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
2065 }
2066 
2067 static void
2068 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2069 		 struct spdk_nvme_transport_id *trid,
2070 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2071 {
2072 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2073 	entry->adrfam = trid->adrfam;
2074 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2075 
2076 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2077 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2078 }
2079 
2080 static struct spdk_nvmf_transport_poll_group *
2081 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport,
2082 			  struct spdk_nvmf_poll_group *group)
2083 {
2084 	struct spdk_nvmf_fc_poll_group *fgroup;
2085 	struct spdk_nvmf_fc_transport *ftransport =
2086 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2087 
2088 	if (spdk_interrupt_mode_is_enabled()) {
2089 		SPDK_ERRLOG("FC transport does not support interrupt mode\n");
2090 		return NULL;
2091 	}
2092 
2093 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2094 	if (!fgroup) {
2095 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2096 		return NULL;
2097 	}
2098 
2099 	TAILQ_INIT(&fgroup->hwqp_list);
2100 
2101 	pthread_mutex_lock(&ftransport->lock);
2102 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2103 	g_nvmf_fgroup_count++;
2104 	pthread_mutex_unlock(&ftransport->lock);
2105 
2106 	return &fgroup->group;
2107 }
2108 
2109 static void
2110 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2111 {
2112 	struct spdk_nvmf_fc_poll_group *fgroup;
2113 	struct spdk_nvmf_fc_transport *ftransport =
2114 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2115 
2116 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2117 	pthread_mutex_lock(&ftransport->lock);
2118 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2119 	g_nvmf_fgroup_count--;
2120 	pthread_mutex_unlock(&ftransport->lock);
2121 
2122 	free(fgroup);
2123 }
2124 
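/*
 * Attach a new qpair (FC connection) to this poll group: pick the hwqp
 * that belongs to the same physical FC port as the association, reserve
 * a connection id on it, then hand the add over to the hwqp's poller
 * thread via the poller API.
 */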
2125 static int
2126 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2127 		       struct spdk_nvmf_qpair *qpair)
2128 {
2129 	struct spdk_nvmf_fc_poll_group *fgroup;
2130 	struct spdk_nvmf_fc_conn *fc_conn;
2131 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2132 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2133 	bool hwqp_found = false;
2134 
2135 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2136 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2137 
2138 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2139 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2140 			hwqp_found = true;
2141 			break;
2142 		}
2143 	}
2144 
2145 	if (!hwqp_found) {
2146 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2147 		goto err;
2148 	}
2149 
2150 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2151 					 &fc_conn->conn_id,
2152 					 fc_conn->max_queue_depth)) {
2153 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2154 		goto err;
2155 	}
2156 
2157 	fc_conn->hwqp = hwqp;
2158 
2159 	/* If this is the admin connection (qid 0), record its conn_id as the assoc ID. */
2160 	if (fc_conn->qpair.qid == 0) {
2161 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2162 	}
2163 
2164 	api_data = &fc_conn->create_opd->u.add_conn;
2165 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2166 	return 0;
2167 err:
2168 	return -1;
2169 }
2170 
2171 static int
2172 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2173 {
2174 	uint32_t count = 0;
2175 	struct spdk_nvmf_fc_poll_group *fgroup;
2176 	struct spdk_nvmf_fc_hwqp *hwqp;
2177 
2178 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2179 
2180 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2181 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2182 			count += nvmf_fc_process_queue(hwqp);
2183 		}
2184 	}
2185 
2186 	return (int) count;
2187 }
2188 
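/*
 * req_free on FC is effectively an abort: if the request has not been
 * aborted yet, mark it BDEV_ABORTED and start the abort machinery;
 * otherwise just finish the abort bookkeeping.
 */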
2189 static int
2190 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2191 {
2192 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2193 
2194 	if (!fc_req->is_aborted) {
2195 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2196 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2197 	} else {
2198 		nvmf_fc_request_abort_complete(fc_req);
2199 	}
2200 
2201 	return 0;
2202 }
2203 
2204 static void
2205 nvmf_fc_connection_delete_done_cb(void *arg)
2206 {
2207 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2208 
2209 	if (fc_ctx->cb_fn) {
2210 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2211 	}
2212 	free(fc_ctx);
2213 }
2214 
2215 static void
2216 _nvmf_fc_close_qpair(void *arg)
2217 {
2218 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2219 	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
2220 	struct spdk_nvmf_fc_conn *fc_conn;
2221 	int rc;
2222 
2223 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2224 	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
2225 		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2226 
2227 		if (fc_conn->create_opd) {
2228 			api_data = &fc_conn->create_opd->u.add_conn;
2229 
2230 			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
2231 						    api_data->args.fc_conn, api_data->aq_conn);
2232 		}
2233 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
2234 		rc = nvmf_fc_delete_connection(fc_conn, false, true,
2235 					       nvmf_fc_connection_delete_done_cb, fc_ctx);
2236 		if (!rc) {
2237 			/* Wait for transport to complete its work. */
2238 			return;
2239 		}
2240 
2241 		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
2242 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
2243 		/* This is the case where deletion started from FC layer. */
2244 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_conn->qpair_disconnect_cb_fn,
2245 				     fc_conn->qpair_disconnect_ctx);
2246 	}
2247 
2248 	nvmf_fc_connection_delete_done_cb(fc_ctx);
2249 }
2250 
2251 static void
2252 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2253 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2254 {
2255 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2256 
2257 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2258 	if (!fc_ctx) {
2259 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.\n");
2260 		if (cb_fn) {
2261 			cb_fn(cb_arg);
2262 		}
2263 		return;
2264 	}
2265 	fc_ctx->qpair = qpair;
2266 	fc_ctx->cb_fn = cb_fn;
2267 	fc_ctx->cb_ctx = cb_arg;
2268 	fc_ctx->qpair_thread = spdk_get_thread();
2269 
2270 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2271 }
2272 
2273 static int
2274 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2275 			    struct spdk_nvme_transport_id *trid)
2276 {
2277 	struct spdk_nvmf_fc_conn *fc_conn;
2278 
2279 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2280 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2281 	return 0;
2282 }
2283 
2284 static int
2285 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2286 			     struct spdk_nvme_transport_id *trid)
2287 {
2288 	struct spdk_nvmf_fc_conn *fc_conn;
2289 
2290 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2291 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2292 	return 0;
2293 }
2294 
2295 static int
2296 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2297 			      struct spdk_nvme_transport_id *trid)
2298 {
2299 	struct spdk_nvmf_fc_conn *fc_conn;
2300 
2301 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2302 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2303 	return 0;
2304 }
2305 
2306 static void
2307 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
2308 			    struct spdk_nvmf_request *req)
2309 {
2310 	spdk_nvmf_request_complete(req);
2311 }
2312 
2313 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
2314 	.name = "FC",
2315 	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
2316 	.opts_init = nvmf_fc_opts_init,
2317 	.create = nvmf_fc_create,
2318 	.destroy = nvmf_fc_destroy,
2319 
2320 	.listen = nvmf_fc_listen,
2321 	.stop_listen = nvmf_fc_stop_listen,
2322 
2323 	.listener_discover = nvmf_fc_discover,
2324 
2325 	.poll_group_create = nvmf_fc_poll_group_create,
2326 	.poll_group_destroy = nvmf_fc_poll_group_destroy,
2327 	.poll_group_add = nvmf_fc_poll_group_add,
2328 	.poll_group_poll = nvmf_fc_poll_group_poll,
2329 
2330 	.req_complete = nvmf_fc_request_complete,
2331 	.req_free = nvmf_fc_request_free,
2332 	.qpair_fini = nvmf_fc_close_qpair,
2333 	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
2334 	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
2335 	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
2336 	.qpair_abort_request = nvmf_fc_qpair_abort_request,
2337 };
2338 
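/*
 * Hedged usage sketch (illustrative only, not code from this file): a
 * vendor low level driver drives port lifecycle by enqueueing admin
 * events to the FC main thread, e.g.
 *
 *	struct spdk_nvmf_fc_hw_port_init_args *args = ...; // filled in by the LLD
 *	nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, args, lld_done_cb);
 *
 * which is presumably dispatched to nvmf_fc_adm_evnt_hw_port_init()
 * below; lld_done_cb and the args setup are hypothetical placeholders.
 */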
2339 /* Initializes the data for the creation of an FC-port object in the SPDK
2340  * library. The spdk_nvmf_fc_port is a well-defined structure that is part of
2341  * the API to the library. The contents added to this well-defined structure
2342  * are private to each vendor's implementation.
2343  */
2344 static int
2345 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2346 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2347 {
2348 	int rc = 0;
2349 	/* Use a high hwqp_id for the LS HWQP so that it does not clash with the
2350 	 * IO HWQPs and is immediately recognizable as an LS queue during tracing.
2351 	 */
2352 	uint32_t i;
2353 
2354 	fc_port->port_hdl       = args->port_handle;
2355 	fc_port->lld_fc_port	= args->lld_fc_port;
2356 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2357 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2358 	fc_port->num_io_queues  = args->io_queue_cnt;
2359 
2360 	/*
2361 	 * Set port context from init args. Used for FCP port stats.
2362 	 */
2363 	fc_port->port_ctx = args->port_ctx;
2364 
2365 	/*
2366 	 * Set up the LS queue parameters.
2367 	 */
2368 	fc_port->ls_queue.queues = args->ls_queue;
2369 	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
2370 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2371 	fc_port->ls_queue.is_ls_queue = true;
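	/*
	 * Worked example (assuming SPDK_MAX_NUM_OF_FC_PORTS == 4 and four IO
	 * queues): the LS queue gets hwqp_id 4 * 4 = 16, safely above the
	 * per-port IO hwqp_ids 0..3 assigned in the loop below.
	 */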
2372 
2373 	/*
2374 	 * Initialize the LS queue.
2375 	 */
2376 	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2377 	if (rc) {
2378 		return rc;
2379 	}
2380 
2381 	/*
2382 	 * Initialize the IO queues.
2383 	 */
2384 	for (i = 0; i < args->io_queue_cnt; i++) {
2385 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2386 		hwqp->hwqp_id = i;
2387 		hwqp->queues = args->io_queues[i];
2388 		hwqp->is_ls_queue = false;
2389 		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
2390 		if (rc) {
2391 			for (; i > 0; --i) {
2392 				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
2393 				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
2394 			}
2395 			rte_hash_free(fc_port->ls_queue.connection_list_hash);
2396 			rte_hash_free(fc_port->ls_queue.rport_list_hash);
2397 			return rc;
2398 		}
2399 	}
2400 
2401 	/*
2402 	 * Initialize the LS processing for port
2403 	 */
2404 	nvmf_fc_ls_init(fc_port);
2405 
2406 	/*
2407 	 * Initialize the list of nport on this HW port.
2408 	 */
2409 	TAILQ_INIT(&fc_port->nport_list);
2410 	fc_port->num_nports = 0;
2411 
2412 	return 0;
2413 }
2414 
2415 /*
2416  * FC port must have all its nports deleted before transitioning to offline state.
2417  */
2418 static void
2419 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2420 {
2421 	struct spdk_nvmf_fc_nport *nport = NULL;
2422 	/* All nports must have been deleted at this point for this fc port */
2423 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2424 	DEV_VERIFY(fc_port->num_nports == 0);
2425 	/* Defensively mark any nports that do remain as zombies */
2426 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2427 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2428 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2429 		}
2430 	}
2431 }
2432 
2433 static void
2434 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2435 {
2436 	ASSERT_SPDK_FC_MAIN_THREAD();
2437 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2438 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2439 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2440 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2441 	int spdk_err = 0;
2442 	uint8_t port_handle = cb_data->port_handle;
2443 	uint32_t s_id = rport->s_id;
2444 	uint32_t rpi = rport->rpi;
2445 	uint32_t assoc_count = rport->assoc_count;
2446 	uint32_t nport_hdl = nport->nport_hdl;
2447 	uint32_t d_id = nport->d_id;
2448 	char log_str[256];
2449 
2450 	/*
2451 	 * Assert on any delete failure.
2452 	 */
2453 	if (0 != err) {
2454 		DEV_VERIFY(!"Error in IT Delete callback.");
2455 		goto out;
2456 	}
2457 
2458 	if (cb_func != NULL) {
2459 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2460 	}
2461 
2462 out:
2463 	free(cb_data);
2464 
2465 	snprintf(log_str, sizeof(log_str),
2466 		 "IT delete cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2467 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2468 
2469 	if (err != 0) {
2470 		SPDK_ERRLOG("%s", log_str);
2471 	} else {
2472 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2473 	}
2474 }
2475 
2476 static void
2477 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
2478 {
2479 	ASSERT_SPDK_FC_MAIN_THREAD();
2480 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
2481 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2482 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2483 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
2484 	uint32_t s_id = rport->s_id;
2485 	uint32_t rpi = rport->rpi;
2486 	uint32_t assoc_count = rport->assoc_count;
2487 	uint32_t nport_hdl = nport->nport_hdl;
2488 	uint32_t d_id = nport->d_id;
2489 	char log_str[256];
2490 
2491 	/*
2492 	 * Assert on any association delete failure. We continue to delete other
2493 	 * associations in promoted builds.
2494 	 */
2495 	if (0 != err) {
2496 		DEV_VERIFY(!"Nport's association delete callback returned error");
2497 		if (nport->assoc_count > 0) {
2498 			nport->assoc_count--;
2499 		}
2500 		if (rport->assoc_count > 0) {
2501 			rport->assoc_count--;
2502 		}
2503 	}
2504 
2505 	/*
2506 	 * If this is the last association being deleted for the ITN,
2507 	 * execute the callback(s).
2508 	 */
2509 	if (0 == rport->assoc_count) {
2510 		/* Remove the rport from the remote port list. */
2511 		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
2512 			SPDK_ERRLOG("Error while removing rport from list.\n");
2513 			DEV_VERIFY(!"Error while removing rport from list.");
2514 		}
2515 
2516 		if (cb_func != NULL) {
2517 			/*
2518 			 * Callback function is provided by the caller
2519 			 * of nvmf_fc_adm_i_t_delete_assoc().
2520 			 */
2521 			(void)cb_func(cb_data->cb_ctx, 0);
2522 		}
2523 		free(rport);
2524 		free(args);
2525 	}
2526 
2527 	snprintf(log_str, sizeof(log_str),
2528 		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
2529 		 nport_hdl, s_id, d_id, rpi, assoc_count, err);
2530 
2531 	if (err != 0) {
2532 		SPDK_ERRLOG("%s", log_str);
2533 	} else {
2534 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2535 	}
2536 }
2537 
2538 /**
2539  * Process an IT delete: tear down the associations on this I_T nexus.
2540  */
2541 static void
2542 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
2543 			     struct spdk_nvmf_fc_remote_port_info *rport,
2544 			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
2545 			     void *cb_ctx)
2546 {
2547 	int err = 0;
2548 	struct spdk_nvmf_fc_association *assoc = NULL;
2549 	int assoc_err = 0;
2550 	uint32_t num_assoc = 0;
2551 	uint32_t num_assoc_del_scheduled = 0;
2552 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
2553 	uint8_t port_hdl = nport->port_hdl;
2554 	uint32_t s_id = rport->s_id;
2555 	uint32_t rpi = rport->rpi;
2556 	uint32_t assoc_count = rport->assoc_count;
2557 	char log_str[256];
2558 
2559 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
2560 		      nport->nport_hdl);
2561 
2562 	/*
2563 	 * Allocate memory for callback data.
2564 	 * This memory will be freed by the callback function.
2565 	 */
2566 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
2567 	if (NULL == cb_data) {
2568 		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
2569 		err = -ENOMEM;
2570 		goto out;
2571 	}
2572 	cb_data->nport       = nport;
2573 	cb_data->rport       = rport;
2574 	cb_data->port_handle = port_hdl;
2575 	cb_data->cb_func     = cb_func;
2576 	cb_data->cb_ctx      = cb_ctx;
2577 
2578 	/*
2579 	 * Delete all associations, if any, related with this ITN/remote_port.
2580 	 */
2581 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
2582 		num_assoc++;
2583 		if (assoc->s_id == s_id) {
2584 			assoc_err = nvmf_fc_delete_association(nport,
2585 							       assoc->assoc_id,
2586 							       false /* send abts */, false,
2587 							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
2588 			if (0 != assoc_err) {
2589 				/*
2590 				 * Mark this association as zombie.
2591 				 */
2592 				err = -EINVAL;
2593 				DEV_VERIFY(!"Error while deleting association");
2594 				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2595 			} else {
2596 				num_assoc_del_scheduled++;
2597 			}
2598 		}
2599 	}
2600 
2601 out:
2602 	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
2603 		/*
2604 		 * Since there are no association_delete calls
2605 		 * successfully scheduled, the association_delete
2606 		 * callback function will never be called.
2607 		 * In this case, call the callback function now.
2608 		 */
2609 		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
2610 	}
2611 
2612 	snprintf(log_str, sizeof(log_str),
2613 		 "IT delete associations on nport:%d end. "
2614 		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
2615 		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);
2616 
2617 	if (err == 0) {
2618 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2619 	} else {
2620 		SPDK_ERRLOG("%s", log_str);
2621 	}
2622 }
2623 
2624 static void
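/*
 * Port quiesce bookkeeping: nvmf_fc_adm_hw_port_quiesce() schedules one
 * QUIESCE_QUEUE poller call per hwqp (the LS queue plus every IO queue)
 * and counts them in quiesce_count; this callback decrements the count,
 * and only its final invocation marks the port SPDK_FC_PORT_QUIESCED and
 * runs the caller's completion.
 */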
2625 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
2626 {
2627 	ASSERT_SPDK_FC_MAIN_THREAD();
2628 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
2629 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2630 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2631 	struct spdk_nvmf_fc_port *fc_port = NULL;
2632 	int err = 0;
2633 
2634 	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
2635 	hwqp = quiesce_api_data->hwqp;
2636 	fc_port = hwqp->fc_port;
2637 	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
2638 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;
2639 
2640 	/*
2641 	 * Decrement the callback/quiesced queue count.
2642 	 */
2643 	port_quiesce_ctx->quiesce_count--;
2644 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);
2645 
2646 	free(quiesce_api_data);
2647 	/*
2648 	 * Wait for all callbacks, i.e. one per IO queue plus the LS queue.
2649 	 */
2650 	if (port_quiesce_ctx->quiesce_count > 0) {
2651 		return;
2652 	}
2653 
2654 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2655 		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
2656 	} else {
2657 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
2658 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2659 	}
2660 
2661 	if (cb_func) {
2662 		/*
2663 		 * Callback function for the caller of quiesce.
2664 		 */
2665 		cb_func(port_quiesce_ctx->ctx, err);
2666 	}
2667 
2668 	/*
2669 	 * Free the context structure.
2670 	 */
2671 	free(port_quiesce_ctx);
2672 
2673 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
2674 		      err);
2675 }
2676 
2677 static int
2678 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2679 			     spdk_nvmf_fc_poller_api_cb cb_func)
2680 {
2681 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2682 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2683 	int err = 0;
2684 
2685 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2686 
2687 	if (args == NULL) {
2688 		err = -ENOMEM;
2689 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2690 		goto done;
2691 	}
2692 	args->hwqp = fc_hwqp;
2693 	args->ctx = ctx;
2694 	args->cb_info.cb_func = cb_func;
2695 	args->cb_info.cb_data = args;
2696 	args->cb_info.cb_thread = spdk_get_thread();
2697 
2698 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2699 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2700 	if (rc) {
2701 		free(args);
2702 		err = -EINVAL;
2703 	}
2704 
2705 done:
2706 	return err;
2707 }
2708 
2709 /*
2710  * Hw port Quiesce
2711  */
2712 static int
2713 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2714 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2715 {
2716 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2717 	uint32_t i = 0;
2718 	int err = 0;
2719 
2720 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2721 
2722 	/*
2723 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2724 	 * and execute the callback.
2725 	 */
2726 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2727 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2728 	}
2729 
2730 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2731 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
2732 			      fc_port->port_hdl);
2733 		/*
2734 		 * Execute the callback function directly.
2735 		 */
2736 		cb_func(ctx, err);
2737 		goto out;
2738 	}
2739 
2740 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2741 
2742 	if (port_quiesce_ctx == NULL) {
2743 		err = -ENOMEM;
2744 		SPDK_ERRLOG("Failed to allocate memory for port quiesce ctx, port:%d\n",
2745 			    fc_port->port_hdl);
2746 		goto out;
2747 	}
2748 
2749 	port_quiesce_ctx->quiesce_count = 0;
2750 	port_quiesce_ctx->ctx = ctx;
2751 	port_quiesce_ctx->cb_func = cb_func;
2752 
2753 	/*
2754 	 * Quiesce the LS queue.
2755 	 */
2756 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2757 					   nvmf_fc_adm_queue_quiesce_cb);
2758 	if (err != 0) {
2759 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2760 		goto out;
2761 	}
2762 	port_quiesce_ctx->quiesce_count++;
2763 
2764 	/*
2765 	 * Quiesce the IO queues.
2766 	 */
2767 	for (i = 0; i < fc_port->num_io_queues; i++) {
2768 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2769 						   port_quiesce_ctx,
2770 						   nvmf_fc_adm_queue_quiesce_cb);
2771 		if (err != 0) {
2772 			DEV_VERIFY(0);
2773 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2774 		}
2775 		port_quiesce_ctx->quiesce_count++;
2776 	}
2777 
2778 out:
2779 	if (port_quiesce_ctx && err != 0) {
2780 		free(port_quiesce_ctx);
2781 	}
2782 	return err;
2783 }
2784 
2785 /*
2786  * Initialize and add a HW port entry to the global
2787  * HW port list.
2788  */
2789 static void
2790 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2791 {
2792 	ASSERT_SPDK_FC_MAIN_THREAD();
2793 	struct spdk_nvmf_fc_port *fc_port = NULL;
2794 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2795 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2796 			api_data->api_args;
2797 	int err = 0;
2798 
2799 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2800 		SPDK_ERRLOG("IO queue count exceeds core count for port %d.\n", args->port_handle);
2801 		err = -EINVAL;
2802 		goto abort_port_init;
2803 	}
2804 
2805 	/*
2806 	 * 1. Check for duplicate initialization.
2807 	 */
2808 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2809 	if (fc_port != NULL) {
2810 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2811 		goto abort_port_init;
2812 	}
2813 
2814 	/*
2815 	 * 2. Get the memory to instantiate a fc port.
2816 	 */
2817 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2818 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2819 	if (fc_port == NULL) {
2820 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2821 		err = -ENOMEM;
2822 		goto abort_port_init;
2823 	}
2824 
2825 	/* assign the io_queues array */
2826 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2827 				     struct spdk_nvmf_fc_port));
2828 
2829 	/*
2830 	 * 3. Initialize the contents for the FC-port
2831 	 */
2832 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2833 
2834 	if (err != 0) {
2835 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2836 		DEV_VERIFY(!"Data initialization failed for fc_port");
2837 		goto abort_port_init;
2838 	}
2839 
2840 	/*
2841 	 * 4. Add this port to the global fc port list in the library.
2842 	 */
2843 	nvmf_fc_port_add(fc_port);
2844 
2845 abort_port_init:
2846 	if (err && fc_port) {
2847 		free(fc_port);
2848 	}
2849 	if (api_data->cb_func != NULL) {
2850 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2851 	}
2852 
2853 	free(arg);
2854 
2855 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2856 		      args->port_handle, err);
2857 }
2858 
2859 static void
2860 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2861 {
2862 	struct spdk_nvmf_fc_abts_ctx *ctx;
2863 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2864 
2865 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2866 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2867 		ctx = args->cb_info.cb_data;
2868 		if (ctx) {
2869 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2870 				free(ctx->sync_poller_args);
2871 				free(ctx->abts_poller_args);
2872 				free(ctx);
2873 			}
2874 		}
2875 	}
2876 }
2877 
2878 static void
2879 nvmf_fc_adm_evnt_hw_port_free(void *arg)
2880 {
2881 	ASSERT_SPDK_FC_MAIN_THREAD();
2882 	int err = 0, i;
2883 	struct spdk_nvmf_fc_port *fc_port = NULL;
2884 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2885 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2886 	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
2887 			api_data->api_args;
2888 
2889 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2890 	if (!fc_port) {
2891 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2892 		err = -EINVAL;
2893 		goto out;
2894 	}
2895 
2896 	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
2897 		SPDK_ERRLOG("Hw port %d: nports not cleaned up yet.\n", args->port_handle);
2898 		err = -EIO;
2899 		goto out;
2900 	}
2901 
2902 	/* Clean up and free fc_port */
2903 	hwqp = &fc_port->ls_queue;
2904 	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
2905 	rte_hash_free(hwqp->connection_list_hash);
2906 	rte_hash_free(hwqp->rport_list_hash);
2907 
2908 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2909 		hwqp = &fc_port->io_queues[i];
2910 
2911 		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
2912 		rte_hash_free(hwqp->connection_list_hash);
2913 		rte_hash_free(hwqp->rport_list_hash);
2914 	}
2915 
2916 	nvmf_fc_port_remove(fc_port);
2917 	free(fc_port);
2918 out:
2919 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
2920 		      args->port_handle, err);
2921 	if (api_data->cb_func != NULL) {
2922 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
2923 	}
2924 
2925 	free(arg);
2926 }
2927 
2928 /*
2929  * Online a HW port.
2930  */
2931 static void
2932 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2933 {
2934 	ASSERT_SPDK_FC_MAIN_THREAD();
2935 	struct spdk_nvmf_fc_port *fc_port = NULL;
2936 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2937 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2938 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2939 			api_data->api_args;
2940 	int i = 0;
2941 	int err = 0;
2942 
2943 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2944 	if (fc_port) {
2945 		/* Set the port state to online */
2946 		err = nvmf_fc_port_set_online(fc_port);
2947 		if (err != 0) {
2948 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2949 			DEV_VERIFY(!"Hw port online failed");
2950 			goto out;
2951 		}
2952 
2953 		hwqp = &fc_port->ls_queue;
2954 		hwqp->context = NULL;
2955 		(void)nvmf_fc_hwqp_set_online(hwqp);
2956 
2957 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2958 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2959 			hwqp = &fc_port->io_queues[i];
2960 			hwqp->context = NULL;
2961 			(void)nvmf_fc_hwqp_set_online(hwqp);
2962 			nvmf_fc_poll_group_add_hwqp(hwqp);
2963 		}
2964 	} else {
2965 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2966 		err = -EINVAL;
2967 	}
2968 
2969 out:
2970 	if (api_data->cb_func != NULL) {
2971 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2972 	}
2973 
2974 	free(arg);
2975 
2976 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
2977 		      err);
2978 }
2979 
2980 static void
2981 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
2982 {
2983 	int err = 0;
2984 	struct spdk_nvmf_fc_port *fc_port = NULL;
2985 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
2986 	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;
2987 
2988 	if (--remove_hwqp_args->pending_remove_hwqp) {
2989 		return;
2990 	}
2991 
2992 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2993 	if (!fc_port) {
2994 		err = -EINVAL;
2995 		SPDK_ERRLOG("fc_port not found.\n");
2996 		goto out;
2997 	}
2998 
2999 	/*
3000 	 * Delete all the nports. Ideally, the nports should have been purged
3001 	 * before the offline event, in which case, only a validation is required.
3002 	 */
3003 	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
3004 out:
3005 	if (remove_hwqp_args->cb_fn) {
3006 		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3007 	}
3008 
3009 	free(remove_hwqp_args);
3010 }
3011 
3012 /*
3013  * Offline a HW port.
3014  */
3015 static void
3016 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3017 {
3018 	ASSERT_SPDK_FC_MAIN_THREAD();
3019 	struct spdk_nvmf_fc_port *fc_port = NULL;
3020 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3021 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3022 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3023 			api_data->api_args;
3024 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3025 	int i = 0;
3026 	int err = 0;
3027 
3028 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3029 	if (fc_port) {
3030 		/* Set the port state to offline, if it is not already. */
3031 		err = nvmf_fc_port_set_offline(fc_port);
3032 		if (err != 0) {
3033 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
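			/* An already-offline port is treated as success. */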
3034 			err = 0;
3035 			goto out;
3036 		}
3037 
3038 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3039 		if (!remove_hwqp_args) {
3040 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3041 			err = -ENOMEM;
3042 			goto out;
3043 		}
3044 		remove_hwqp_args->cb_fn = api_data->cb_func;
3045 		remove_hwqp_args->cb_args = api_data->api_args;
3046 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
3047 
3048 		hwqp = &fc_port->ls_queue;
3049 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3050 
3051 		/* Remove poller for all the io queues. */
3052 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3053 			hwqp = &fc_port->io_queues[i];
3054 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3055 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3056 						       remove_hwqp_args);
3057 		}
3058 
3059 		free(arg);
3060 
3061 		/* Wait until all the hwqps are removed from poll groups. */
3062 		return;
3063 	} else {
3064 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3065 		err = -EINVAL;
3066 	}
3067 out:
3068 	if (api_data->cb_func != NULL) {
3069 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3070 	}
3071 
3072 	free(arg);
3073 
3074 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3075 		      err);
3076 }
3077 
3078 struct nvmf_fc_add_rem_listener_ctx {
3079 	struct spdk_nvmf_subsystem *subsystem;
3080 	bool add_listener;
3081 	struct spdk_nvme_transport_id trid;
3082 };
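/*
 * Adding or removing an nport listener follows the standard SPDK
 * subsystem sequence: pause the subsystem, add/remove the listener on
 * the paused subsystem, then resume it. The ctx above carries the trid
 * across these asynchronous steps.
 */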
3083 
3084 static void
3085 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3086 {
3087 	ASSERT_SPDK_FC_MAIN_THREAD();
3088 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3089 	free(ctx);
3090 }
3091 
3092 static void
3093 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3094 {
3095 	ASSERT_SPDK_FC_MAIN_THREAD();
3096 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3097 
3098 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3099 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3100 		free(ctx);
3101 	}
3102 }
3103 
3104 static void
3105 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3106 {
3107 	ASSERT_SPDK_FC_MAIN_THREAD();
3108 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3109 
3110 	if (ctx->add_listener) {
3111 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3112 	} else {
3113 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3114 		nvmf_fc_adm_listen_done(ctx, 0);
3115 	}
3116 }
3117 
3118 static int
3119 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3120 {
3121 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3122 	struct spdk_nvmf_subsystem *subsystem;
3123 	struct spdk_nvmf_listen_opts opts;
3124 
3125 	if (!tgt) {
3126 		SPDK_ERRLOG("No nvmf target defined\n");
3127 		return -EINVAL;
3128 	}
3129 
3130 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3131 
3132 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3133 	while (subsystem) {
3134 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3135 
3136 		if (spdk_nvmf_subsystem_any_listener_allowed(subsystem)) {
3137 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3138 			if (ctx) {
3139 				ctx->add_listener = add;
3140 				ctx->subsystem = subsystem;
3141 				nvmf_fc_create_trid(&ctx->trid,
3142 						    nport->fc_nodename.u.wwn,
3143 						    nport->fc_portname.u.wwn);
3144 
3145 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3146 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3147 						    ctx->trid.traddr);
3148 					free(ctx);
3149 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3150 								     0,
3151 								     nvmf_fc_adm_subsystem_paused_cb,
3152 								     ctx)) {
3153 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3154 						    subsystem->subnqn);
3155 					free(ctx);
3156 				}
3157 			}
3158 		}
3159 
3160 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3161 	}
3162 
3163 	return 0;
3164 }
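/*
 * nvmf_fc_create_trid() is assumed to encode the node and port WWNs into
 * the FC transport address (a traddr of the form "nn-0x...:pn-0x..." is
 * the convention assumed here); subsystems then match incoming
 * associations against that listener.
 */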
3165 
3166 /*
3167  * Create a Nport.
3168  */
3169 static void
3170 nvmf_fc_adm_evnt_nport_create(void *arg)
3171 {
3172 	ASSERT_SPDK_FC_MAIN_THREAD();
3173 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3174 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3175 			api_data->api_args;
3176 	struct spdk_nvmf_fc_nport *nport = NULL;
3177 	struct spdk_nvmf_fc_port *fc_port = NULL;
3178 	int err = 0;
3179 
3180 	/*
3181 	 * Get the physical port.
3182 	 */
3183 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3184 	if (fc_port == NULL) {
3185 		err = -EINVAL;
3186 		goto out;
3187 	}
3188 
3189 	/*
3190 	 * Check for duplicate initialization.
3191 	 */
3192 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3193 	if (nport != NULL) {
3194 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3195 			    args->port_handle);
3196 		err = -EINVAL;
3197 		goto out;
3198 	}
3199 
3200 	/*
3201 	 * Get the memory to instantiate a fc nport.
3202 	 */
3203 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3204 	if (nport == NULL) {
3205 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3206 			    args->nport_handle);
3207 		err = -ENOMEM;
3208 		goto out;
3209 	}
3210 
3211 	/*
3212 	 * Initialize the contents for the nport
3213 	 */
3214 	nport->nport_hdl    = args->nport_handle;
3215 	nport->port_hdl     = args->port_handle;
3216 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3217 	nport->fc_nodename  = args->fc_nodename;
3218 	nport->fc_portname  = args->fc_portname;
3219 	nport->d_id         = args->d_id;
3220 	nport->fc_port      = fc_port;
3221 
3222 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3223 	TAILQ_INIT(&nport->rem_port_list);
3224 	nport->rport_count = 0;
3225 	TAILQ_INIT(&nport->fc_associations);
3226 	nport->assoc_count = 0;
3227 
3228 	/*
3229 	 * Publish the nport address as a listening address to the nvmf subsystems.
3230 	 */
3231 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3232 
3233 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3234 out:
3235 	if (err && nport) {
3236 		free(nport);
3237 	}
3238 
3239 	if (api_data->cb_func != NULL) {
3240 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3241 	}
3242 
3243 	free(arg);
3244 }
3245 
3246 static void
3247 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
3248 			    void *cb_args, int spdk_err)
3249 {
3250 	ASSERT_SPDK_FC_MAIN_THREAD();
3251 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
3252 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
3253 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
3254 	int err = 0;
3255 	uint16_t nport_hdl = 0;
3256 	char log_str[256];
3257 
3258 	/*
3259 	 * Assert on any delete failure.
3260 	 */
3261 	if (nport == NULL) {
3262 		SPDK_ERRLOG("Nport delete callback returned null nport.\n");
3263 		DEV_VERIFY(!"nport is null.");
3264 		goto out;
3265 	}
3266 
3267 	nport_hdl = nport->nport_hdl;
3268 	if (0 != spdk_err) {
3269 		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
3270 			    "%d, Nport: %d\n",
3271 			    nport->port_hdl, nport->nport_hdl);
3272 		DEV_VERIFY(!"nport delete callback error.");
3273 	}
3274 
3275 	/*
3276 	 * Free the nport if this is the last rport being deleted and
3277 	 * execute the callback(s).
3278 	 */
3279 	if (nvmf_fc_nport_has_no_rport(nport)) {
3280 		if (0 != nport->assoc_count) {
3281 			SPDK_ERRLOG("association count != 0\n");
3282 			DEV_VERIFY(!"association count != 0");
3283 		}
3284 
3285 		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
3286 		if (0 != err) {
3287 			SPDK_ERRLOG("Nport delete callback: Failed to remove "
3288 				    "nport from nport list. FC Port:%d Nport:%d\n",
3289 				    nport->port_hdl, nport->nport_hdl);
3290 		}
3291 		/* Free the nport */
3292 		free(nport);
3293 
3294 		if (cb_func != NULL) {
3295 			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
3296 		}
3297 		free(cb_data);
3298 	}
3299 out:
3300 	snprintf(log_str, sizeof(log_str),
3301 		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
3302 		 port_handle, nport_hdl, event_type, spdk_err);
3303 
3304 	if (err != 0) {
3305 		SPDK_ERRLOG("%s", log_str);
3306 	} else {
3307 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3308 	}
3309 }
3310 
3311 /*
3312  * Delete Nport.
3313  */
3314 static void
3315 nvmf_fc_adm_evnt_nport_delete(void *arg)
3316 {
3317 	ASSERT_SPDK_FC_MAIN_THREAD();
3318 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3319 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3320 			api_data->api_args;
3321 	struct spdk_nvmf_fc_nport *nport = NULL;
3322 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3323 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3324 	int err = 0;
3325 	uint32_t rport_cnt = 0;
3326 	int rc = 0;
3327 
3328 	/*
3329 	 * Make sure that the nport exists.
3330 	 */
3331 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3332 	if (nport == NULL) {
3333 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3334 			    args->port_handle);
3335 		err = -EINVAL;
3336 		goto out;
3337 	}
3338 
3339 	/*
3340 	 * Allocate memory for callback data.
3341 	 */
3342 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3343 	if (NULL == cb_data) {
3344 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3345 		err = -ENOMEM;
3346 		goto out;
3347 	}
3348 
3349 	cb_data->nport = nport;
3350 	cb_data->port_handle = args->port_handle;
3351 	cb_data->fc_cb_func = api_data->cb_func;
3352 	cb_data->fc_cb_ctx = args->cb_ctx;
3353 
3354 	/*
3355 	 * Begin nport tear down
3356 	 */
3357 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3358 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3359 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3360 		/*
3361 		 * Deletion of this nport already in progress. Register callback
3362 		 * and return.
3363 		 */
3364 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3365 		err = -ENODEV;
3366 		goto out;
3367 	} else {
3368 		/* nport partially created/deleted */
3369 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3370 		DEV_VERIFY(!"Nport in zombie state");
3371 		err = -ENODEV;
3372 		goto out;
3373 	}
3374 
3375 	/*
3376 	 * Remove this nport from listening addresses across subsystems
3377 	 */
3378 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3379 
3380 	if (0 != rc) {
3381 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3382 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3383 			    nport->nport_hdl);
3384 		goto out;
3385 	}
3386 
3387 	/*
3388 	 * Delete all the remote ports (if any) for the nport
3389 	 */
3390 	/* TODO - Need to do this with a "first" and a "next" accessor function
3391 	 * for completeness. Look at app-subsystem as examples.
3392 	 */
3393 	if (nvmf_fc_nport_has_no_rport(nport)) {
3394 		/* No rports to delete. Complete the nport deletion. */
3395 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3396 		goto out;
3397 	}
3398 
3399 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3400 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3401 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3402 
3403 		if (it_del_args == NULL) {
3404 			err = -ENOMEM;
3405 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3406 				    rport_iter->rpi, rport_iter->s_id);
3407 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3408 			goto out;
3409 		}
3410 
3411 		rport_cnt++;
3412 		it_del_args->port_handle = nport->port_hdl;
3413 		it_del_args->nport_handle = nport->nport_hdl;
3414 		it_del_args->cb_ctx = (void *)cb_data;
3415 		it_del_args->rpi = rport_iter->rpi;
3416 		it_del_args->s_id = rport_iter->s_id;
3417 
3418 		err = nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3419 						 nvmf_fc_adm_delete_nport_cb);
3420 		if (err) {
3421 			free(it_del_args);
3422 		}
3423 	}
3424 
3425 out:
3426 	/* On failure, execute the callback function now */
3427 	if ((err != 0) || (rc != 0)) {
3428 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3429 			    "rport_cnt:%d rc:%d.\n",
3430 			    args->nport_handle, err, args->port_handle,
3431 			    rport_cnt, rc);
3432 		if (cb_data) {
3433 			free(cb_data);
3434 		}
3435 		if (api_data->cb_func != NULL) {
3436 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3437 		}
3438 
3439 	} else {
3440 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3441 			      "NPort %d delete done successfully, fc port:%d. "
3442 			      "rport_cnt:%d\n",
3443 			      args->nport_handle, args->port_handle, rport_cnt);
3444 	}
3445 
3446 	free(arg);
3447 }
3448 
3449 /*
3450  * Process a PRLI/IT add.
3451  */
3452 static void
3453 nvmf_fc_adm_evnt_i_t_add(void *arg)
3454 {
3455 	ASSERT_SPDK_FC_MAIN_THREAD();
3456 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3457 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3458 			api_data->api_args;
3459 	struct spdk_nvmf_fc_nport *nport = NULL;
3460 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3461 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3462 	int err = 0;
3463 
3464 	/*
3465 	 * Make sure the nport port exists.
3466 	 */
3467 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3468 	if (nport == NULL) {
3469 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3470 		err = -EINVAL;
3471 		goto out;
3472 	}
3473 
3474 	/*
3475 	 * Check for duplicate i_t_add.
3476 	 */
3477 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3478 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3479 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3480 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3481 			err = -EEXIST;
3482 			goto out;
3483 		}
3484 	}
3485 
3486 	/*
3487 	 * Get the memory to instantiate the remote port
3488 	 */
3489 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3490 	if (rport == NULL) {
3491 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3492 		err = -ENOMEM;
3493 		goto out;
3494 	}
3495 
3496 	/*
3497 	 * Initialize the contents for the rport
3498 	 */
3499 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3500 	rport->s_id = args->s_id;
3501 	rport->rpi = args->rpi;
3502 	rport->fc_nodename = args->fc_nodename;
3503 	rport->fc_portname = args->fc_portname;
3504 
3505 	/*
3506 	 * Add remote port to nport
3507 	 */
3508 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3509 		DEV_VERIFY(!"Error while adding rport to list");
3510 	}
3511 
3512 	/*
3513 	 * TODO: Do we validate the initiator's service parameters?
3514 	 */
3515 
3516 	/*
3517 	 * Get the target's service parameters from the library
3518 	 * to return to the driver.
3519 	 */
3520 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3521 
3522 out:
3523 	if (api_data->cb_func != NULL) {
3524 		/*
3525 		 * Passing pointer to the args struct as the first argument.
3526 		 * The cb_func should handle this appropriately.
3527 		 */
3528 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3529 	}
3530 
3531 	free(arg);
3532 
3533 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3534 		      "IT add on nport %d done, rc = %d.\n",
3535 		      args->nport_handle, err);
3536 }
3537 
3538 /**
3539  * Process an IT delete.
3540  */
3541 static void
3542 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3543 {
3544 	ASSERT_SPDK_FC_MAIN_THREAD();
3545 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3546 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3547 			api_data->api_args;
3548 	int rc = 0;
3549 	struct spdk_nvmf_fc_nport *nport = NULL;
3550 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3551 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3552 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3553 	uint32_t num_rport = 0;
3554 	char log_str[256];
3555 
3556 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3557 
3558 	/*
3559 	 * Make sure the nport exists. If it does not, error out.
3560 	 */
3561 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3562 	if (nport == NULL) {
3563 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3564 		rc = -EINVAL;
3565 		goto out;
3566 	}
3567 
3568 	/*
3569 	 * Find this ITN / rport (remote port).
3570 	 */
3571 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3572 		num_rport++;
3573 		if ((rport_iter->s_id == args->s_id) &&
3574 		    (rport_iter->rpi == args->rpi) &&
3575 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3576 			rport = rport_iter;
3577 			break;
3578 		}
3579 	}
3580 
3581 	/*
3582 	 * We should find either zero or exactly one rport.
3583 	 *
3584 	 * If we find zero rports, a previous request has already removed
3585 	 * the rport by the time we reached here. In this case, simply
3586 	 * return.
3587 	 */
3588 	if (rport == NULL) {
3589 		rc = -ENODEV;
3590 		goto out;
3591 	}
3592 
3593 	/*
3594 	 * We have the rport slated for deletion. At this point, first clean
3595 	 * up any LS requests sitting in the pending list. Then set the rport
3596 	 * state so that new LS requests are not accepted, and start the
3597 	 * cleanup.
3598 	 */
3599 	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);
3600 
3601 	/*
3602 	 * We have found exactly one rport. Allocate memory for callback data.
3603 	 */
3604 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3605 	if (cb_data == NULL) {
3606 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3607 		rc = -ENOMEM;
3608 		goto out;
3609 	}
3610 
3611 	cb_data->nport = nport;
3612 	cb_data->rport = rport;
3613 	cb_data->port_handle = args->port_handle;
3614 	cb_data->fc_cb_func = api_data->cb_func;
3615 	cb_data->fc_cb_ctx = args->cb_ctx;
3616 
3617 	/*
3618 	 * Validate rport object state.
3619 	 */
3620 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3621 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3622 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3623 		/*
3624 		 * Deletion of this rport already in progress. Register callback
3625 		 * and return.
3626 		 */
3627 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3628 		rc = -ENODEV;
3629 		goto out;
3630 	} else {
3631 		/* rport partially created/deleted */
3632 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3633 		DEV_VERIFY(!"Invalid rport_state");
3634 		rc = -ENODEV;
3635 		goto out;
3636 	}
3637 
3638 	/*
3639 	 * We have successfully found an rport to delete. Call
3640 	 * nvmf_fc_i_t_delete_assoc(), which will perform further
3641 	 * IT-delete processing as well as free the cb_data.
3642 	 */
3643 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3644 				     (void *)cb_data);
3645 
3646 out:
3647 	if (rc != 0) {
3648 		/*
3649 		 * We have entered here because either we encountered an
3650 		 * error, or we did not find an rport to delete.
3651 		 * As a result, we will not call the function
3652 		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
3653 		 * processing. Therefore, execute the callback function now.
3654 		 */
3655 		if (cb_data) {
3656 			free(cb_data);
3657 		}
3658 		if (api_data->cb_func != NULL) {
3659 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3660 		}
3661 	}
3662 
3663 	snprintf(log_str, sizeof(log_str),
3664 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3665 		 args->nport_handle, num_rport, rc);
3666 
3667 	if (rc != 0) {
3668 		SPDK_ERRLOG("%s", log_str);
3669 	} else {
3670 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3671 	}
3672 
3673 	free(arg);
3674 }
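/*
 * Note on the rport state machine exercised above: an rport normally
 * moves CREATED -> TO_BE_DELETED here; ZOMBIE indicates a partially
 * created/deleted object and trips DEV_VERIFY. Per the TODO above, an IT
 * delete that races with one already in progress currently fails with
 * -ENODEV rather than registering an additional completion callback.
 */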
3675 
3676 /*
3677  * Process ABTS received
3678  */
3679 static void
3680 nvmf_fc_adm_evnt_abts_recv(void *arg)
3681 {
3682 	ASSERT_SPDK_FC_MAIN_THREAD();
3683 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3684 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3685 	struct spdk_nvmf_fc_nport *nport = NULL;
3686 	int err = 0;
3687 
3688 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3689 		      args->oxid, args->rxid);
3690 
3691 	/*
3692 	 * 1. Make sure the nport exists.
3693 	 */
3694 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3695 	if (nport == NULL) {
3696 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3697 		err = -EINVAL;
3698 		goto out;
3699 	}
3700 
3701 	/*
3702 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3703 	 */
3704 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3705 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3706 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3707 			      args->rpi, args->oxid, args->rxid);
3708 		err = 0;
3709 		goto out;
3711 	}
3712 
3713 	/*
3714 	 * 3. Pass the received ABTS-LS to the library for handling.
3715 	 */
3716 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3717 
3718 out:
3719 	if (api_data->cb_func != NULL) {
3720 		/*
3721 		 * Pass the args struct itself as the callback context; the
3722 		 * cb_func is then responsible for freeing it.
3723 		 */
3724 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3725 	} else {
3726 		/* No callback set, free the args */
3727 		free(args);
3728 	}
3729 
3730 	free(arg);
3731 }
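/*
 * Note (stated for clarity): an ABTS that arrives while the nport is
 * being deleted is deliberately dropped, yet reported back with err = 0,
 * so the caller does not treat the drop as a failure.
 */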
3732 
3733 /*
3734  * Callback function for hw port quiesce.
3735  */
3736 static void
3737 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3738 {
3739 	ASSERT_SPDK_FC_MAIN_THREAD();
3740 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3741 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3742 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3743 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3744 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3745 	struct spdk_nvmf_fc_port *fc_port = NULL;
3746 	char *dump_buf = NULL;
3747 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3748 
3749 	/*
3750 	 * Free the callback context struct.
3751 	 */
3752 	free(ctx);
3753 
3754 	if (err != 0) {
3755 		SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle);
3756 		goto out;
3757 	}
3758 
3759 	if (args->dump_queues == false) {
3760 		/*
3761 		 * Queues need not be dumped.
3762 		 */
3763 		goto out;
3764 	}
3765 
3766 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3767 
3768 	/*
3769 	 * Get the fc port.
3770 	 */
3771 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3772 	if (fc_port == NULL) {
3773 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3774 		err = -EINVAL;
3775 		goto out;
3776 	}
3777 
3778 	/*
3779 	 * Allocate memory for the dump buffer.
3780 	 * This memory will be freed by FCT.
3781 	 */
3782 	dump_buf = (char *)calloc(1, dump_buf_size);
3783 	if (dump_buf == NULL) {
3784 		err = -ENOMEM;
3785 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3786 		goto out;
3787 	}
3788 	*args->dump_buf = (uint32_t *)dump_buf;
3789 	dump_info.buffer = dump_buf;
3790 	dump_info.offset = 0;
3791 
3792 	/*
3793 	 * Add the dump reason to the top of the buffer.
3794 	 */
3795 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3796 
3797 	/*
3798 	 * Dump the hwqp.
3799 	 */
3800 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3801 				fc_port->num_io_queues, &dump_info);
3802 
3803 out:
3804 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3805 		      args->port_handle, args->dump_queues, err);
3806 
3807 	if (cb_func != NULL) {
3808 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3809 	}
3810 }
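/*
 * Summary of the reset/dump flow above: the port is quiesced first;
 * then, if args->dump_queues is set, the LS queue and all IO queues are
 * dumped into a single SPDK_FC_HW_DUMP_BUF_SIZE buffer returned through
 * *args->dump_buf (freed by the caller); finally the reset callback is
 * invoked with the result.
 */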
3811 
3812 /*
3813  * HW port reset.
3814  */
3816 static void
3817 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3818 {
3819 	ASSERT_SPDK_FC_MAIN_THREAD();
3820 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3821 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3822 			api_data->api_args;
3823 	struct spdk_nvmf_fc_port *fc_port = NULL;
3824 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3825 	int err = 0;
3826 
3827 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset\n", args->port_handle);
3828 
3829 	/*
3830 	 * Make sure the physical port exists.
3831 	 */
3832 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3833 	if (fc_port == NULL) {
3834 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3835 		err = -EINVAL;
3836 		goto out;
3837 	}
3838 
3839 	/*
3840 	 * Save the reset event args and the callback in a context struct.
3841 	 */
3842 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3843 
3844 	if (ctx == NULL) {
3845 		err = -ENOMEM;
3846 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3847 		goto fail;
3848 	}
3849 
3850 	ctx->reset_args = args;
3851 	ctx->reset_cb_func = api_data->cb_func;
3852 
3853 	/*
3854 	 * Quiesce the hw port.
3855 	 */
3856 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3857 	if (err != 0) {
3858 		goto fail;
3859 	}
3860 
3861 	/*
3862 	 * Once the port is successfully quiesced, the reset processing
3863 	 * will continue in the callback function: nvmf_fc_adm_hw_port_quiesce_reset_cb
3864 	 */
3865 	return;
3866 fail:
3867 	free(ctx);
3868 
3869 out:
3870 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, rc = %d.\n", args->port_handle,
3871 		      err);
3872 
3873 	if (api_data->cb_func != NULL) {
3874 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3875 	}
3876 
3877 	free(arg);
3878 }
3879 
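/*
 * Send 'fn(args)' to the SPDK main thread. Note that if the main thread
 * is not available (e.g. the transport has not been created yet), the
 * message is silently dropped and 'args' is not freed.
 */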
3880 static inline void
3881 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3882 {
3883 	if (nvmf_fc_get_main_thread()) {
3884 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3885 	}
3886 }
3887 
3888 /*
3889  * Queue up an event in the SPDK main thread's event queue.
3890  * Used by the FC driver to notify the SPDK main thread of FC related events.
3891  */
3892 int
3893 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3894 			   spdk_nvmf_fc_callback cb_func)
3895 {
3896 	int err = 0;
3897 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3898 	spdk_msg_fn event_fn = NULL;
3899 
3900 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3901 
3902 	if (event_type >= SPDK_FC_EVENT_MAX) {
3903 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3904 		err = -EINVAL;
3905 		goto done;
3906 	}
3907 
3908 	if (args == NULL) {
3909 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3910 		err = -EINVAL;
3911 		goto done;
3912 	}
3913 
3914 	api_data = calloc(1, sizeof(*api_data));
3915 
3916 	if (api_data == NULL) {
3917 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3918 		err = -ENOMEM;
3919 		goto done;
3920 	}
3921 
3922 	api_data->api_args = args;
3923 	api_data->cb_func = cb_func;
3924 
3925 	switch (event_type) {
3926 	case SPDK_FC_HW_PORT_INIT:
3927 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3928 		break;
3929 
3930 	case SPDK_FC_HW_PORT_FREE:
3931 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3932 		break;
3933 
3934 	case SPDK_FC_HW_PORT_ONLINE:
3935 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3936 		break;
3937 
3938 	case SPDK_FC_HW_PORT_OFFLINE:
3939 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3940 		break;
3941 
3942 	case SPDK_FC_NPORT_CREATE:
3943 		event_fn = nvmf_fc_adm_evnt_nport_create;
3944 		break;
3945 
3946 	case SPDK_FC_NPORT_DELETE:
3947 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3948 		break;
3949 
3950 	case SPDK_FC_IT_ADD:
3951 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3952 		break;
3953 
3954 	case SPDK_FC_IT_DELETE:
3955 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3956 		break;
3957 
3958 	case SPDK_FC_ABTS_RECV:
3959 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3960 		break;
3961 
3962 	case SPDK_FC_HW_PORT_RESET:
3963 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3964 		break;
3965 
3966 	case SPDK_FC_UNRECOVERABLE_ERR:
3967 	default:
3968 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3969 		err = -EINVAL;
3970 		break;
3971 	}
3972 
3973 done:
3974 
3975 	if (err == 0) {
3976 		assert(event_fn != NULL);
3977 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3978 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3979 	} else {
3980 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3981 		if (api_data) {
3982 			free(api_data);
3983 		}
3984 	}
3985 
3986 	return err;
3987 }
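/*
 * Usage sketch (illustrative only, not part of the upstream file): an
 * LLD might report a newly discovered initiator as shown below. The args
 * struct must stay valid until the callback fires; 'my_cb' and the local
 * variables are hypothetical.
 *
 *	struct spdk_nvmf_fc_hw_i_t_add_args *args = calloc(1, sizeof(*args));
 *
 *	if (args != NULL) {
 *		args->port_handle = port_hdl;
 *		args->nport_handle = nport_hdl;
 *		args->rpi = rpi;
 *		args->s_id = s_id;
 *		if (nvmf_fc_main_enqueue_event(SPDK_FC_IT_ADD, args, my_cb) != 0) {
 *			free(args);	// enqueue failed; my_cb will not run
 *		}
 *	}
 */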
3988 
3989 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
3990 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
3991 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3992