/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe_FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#include "spdk_internal/trace_defs.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
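
/*
 * nvmf_fc_get_prli_service_params() below advertises
 * (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION),
 * i.e. 0x0018: a discovery-capable NVMe-oF target function.
 */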

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};
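
/*
 * Typical request state flow (see nvmf_fc_request_execute() and
 * nvmf_fc_request_complete() below):
 *   writes:  INIT -> WRITE_XFER -> WRITE_BDEV -> WRITE_RSP -> SUCCESS
 *   reads:   INIT -> READ_BDEV  -> READ_XFER  -> READ_RSP  -> SUCCESS
 *   no data: INIT -> NONE_BDEV  -> NONE_RSP   -> SUCCESS
 * Requests that cannot get an exchange or buffers wait in PENDING, fused
 * commands wait in FUSED_WAITING, and aborts lead to BDEV_ABORTED/ABORTED.
 */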

#define HWQP_CONN_TABLE_SIZE			8192
#define HWQP_RPI_TABLE_SIZE			4096

static void
nvmf_fc_trace(void)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 1,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT",
					TRACE_FC_REQ_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FUSED_WAITING",
					TRACE_FC_REQ_FUSED_WAITING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
}
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)

/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Callback function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	struct spdk_poller *accept_poller;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req));
	}
}

static struct rte_hash *
nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
{
	struct rte_hash_parameters hash_params = { 0 };

	hash_params.entries = num_entries;
	hash_params.key_len = key_len;
	hash_params.name = name;

	return rte_hash_create(&hash_params);
}
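
/*
 * nvmf_fc_init_hwqp() below creates two such tables per hwqp: a connection
 * table keyed by the 64-bit connection id carried in each command IU, and
 * an RPI table keyed by the 16-bit remote port indicator.
 */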

void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;
}

int
nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	uint32_t i, qd;
	struct spdk_nvmf_fc_pooled_request *req;

	/*
	 * Allocate more fc-requests than the actual SQ size. This handles a
	 * race condition where the target driver may send back an RSP and,
	 * before it gets to process the CQE for that RSP, the initiator may
	 * have already sent a new command. Depending on the load on the HWQP,
	 * there is a slim possibility that the target reaps the RQE for the
	 * new command before processing the CQE for the RSP.
	 */
	qd = fc_conn->max_queue_depth * 2;

	STAILQ_INIT(&fc_conn->pool_queue);
	fc_conn->pool_memory = calloc(qd, sizeof(struct spdk_nvmf_fc_request));
	if (!fc_conn->pool_memory) {
		SPDK_ERRLOG("create fc req ring objects failed\n");
		goto error;
	}
	fc_conn->pool_size = qd;
	fc_conn->pool_free_elems = qd;

	/* Initialize each request object and link it into the pool queue */
	for (i = 0; i < qd; i++) {
		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
				i * sizeof(struct spdk_nvmf_fc_request));

		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
	}
	return 0;
error:
	nvmf_fc_free_conn_reqpool(fc_conn);
	return -1;
}
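
/*
 * Example: with the default max_queue_depth of 128 (see
 * SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH below), each connection pre-allocates
 * 256 request objects, double-buffering the SQ against the race described
 * above.
 */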

static inline struct spdk_nvmf_fc_request *
nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
{
	struct spdk_nvmf_fc_request *fc_req;
	struct spdk_nvmf_fc_pooled_request *pooled_req;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;

	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
	if (!pooled_req) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
	fc_conn->pool_free_elems -= 1;

	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);

	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Mark as failed so the state transition gets logged for debugging. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* Set the magic to mark the request as no longer valid
	 * (checked by the assert in nvmf_fc_request_set_state()). */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);

	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
	fc_conn->pool_free_elems += 1;
}

static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}

int
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	char name[64];

	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	TAILQ_INIT(&hwqp->in_use_reqs);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);

	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
				     sizeof(uint64_t));
	if (!hwqp->connection_list_hash) {
		SPDK_ERRLOG("Failed to create connection hash table.\n");
		return -ENOMEM;
	}

	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
	if (!hwqp->rport_list_hash) {
		SPDK_ERRLOG("Failed to create rpi hash table.\n");
		rte_hash_free(hwqp->connection_list_hash);
		return -ENOMEM;
	}

	/* Init low level driver queues */
	nvmf_fc_init_q(hwqp);
	return 0;
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t max_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	/* Find the poll group with the fewest hwqps assigned to it. */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < max_count) {
			ret_fgroup = fgroup;
			max_count = fgroup->hwqp_count;
		}
	}

	if (ret_fgroup) {
		ret_fgroup->hwqp_count++;
		hwqp->thread = ret_fgroup->group.group->thread;
		hwqp->fgroup = ret_fgroup;
	}

	pthread_mutex_unlock(&g_nvmf_ftransport->lock);

	return ret_fgroup;
}

bool
nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
{
	struct spdk_nvmf_fc_poll_group *tmp;
	bool rc = false;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
		if (tmp == fgroup) {
			rc = true;
			break;
		}
	}
	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
	return rc;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

static void
nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;

	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
	} else {
		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
	}

	if (args->cb_fn) {
		args->cb_fn(args->cb_ctx, 0);
	}

	free(args);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		if (tmp != hwqp->fgroup) {
			/* Poll group was already removed. Don't bother. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}

/*
 * Note: This must be called only on the main poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}

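/*
 * ABTS handling overview:
 *   1. nvmf_fc_handle_abts_frame() fans the ABTS out to every hwqp that has
 *      a connection for the rpi.
 *   2. Each poller responds via nvmf_fc_abts_handled_cb(); once all have
 *      responded, a BLS accept or reject is transmitted.
 *   3. If no poller found the OXID, nvmf_fc_handle_abts_notfound() syncs the
 *      queues (nvmf_fc_queue_synced_cb()) and the ABTS is retried once.
 */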
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer\n");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}

static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport  = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	/* Use the cached nport_hdl here; ctx->nport may have been deleted above. */
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}

void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqps with at least one active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}

/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}

static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}

static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}

/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}

bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}

static void
nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	assert(ls_rqst);

	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);

	/* Return buffer to chip */
	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
}

static int
nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_nport *nport,
			  struct spdk_nvmf_fc_remote_port_info *rport)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	int num_deleted = 0;

	assert(hwqp);
	assert(nport);
	assert(rport);

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
			num_deleted++;
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
		}
	}
	return num_deleted;
}

static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are:
	 * Get Log Page, Identify, Set Features, Get Features, and
	 * AER -> special case, handled differently.
	 * Every one of the above Admin commands (except AER) runs
	 * to completion, so aborting such commands doesn't make sense.
	 */
	/* The Fabric commands supported are:
	 * Property Set, Property Get, and
	 * Connect -> special case (async handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	if (ctrlr) {
		for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
			if (ctrlr->aer_req[i] == &fc_req->req) {
				SPDK_NOTICELOG("Abort AER request\n");
				nvmf_qpair_free_aer(fc_req->req.qpair);
			}
		}
	}
}

void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}

void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send-frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped. Return success to the caller. */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}

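/*
 * DF_CTL indicates which optional headers precede the 16-byte device header
 * that carries the VM id; skip any ESP and network headers to reach it, and
 * pick up the CS_CTL priority field when F_CTL flags it as valid.
 */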
static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}

static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (!spdk_nvmf_qpair_is_active(&fc_conn->qpair)) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}

/*
 * These functions are called from the FC LLD
 */

void
_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_fc_hwqp *hwqp;
	struct spdk_nvmf_transport_poll_group *group;

	if (!fc_req) {
		return;
	}
	hwqp = fc_req->hwqp;

	if (fc_req->xchg) {
		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		group = &hwqp->fgroup->group;
		spdk_nvmf_request_free_buffers(&fc_req->req, group,
					       group->transport);
	}
	fc_req->req.iovcnt = 0;

	/* Free Fc request */
	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
}

void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
		fc_req_state_strs[state] : unk_str);
}

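/*
 * Frame dispatch: LS requests (R_CTL LS_REQUEST, type NVMF_DATA) are staged
 * for the LS module, NVMe command frames (R_CTL CMD_REQ, type FC_EXCHANGE)
 * go to nvmf_fc_hwqp_handle_request(), and anything else is dropped.
 */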
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {
		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {
		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}

void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* The LS queue is tied to the acceptor poll group; LS pending requests
		 * are staged and processed using hwqp->ls_pending_queue.
		 */
		return;
	}

	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
		if (!nvmf_fc_request_execute(fc_req)) {
			/* Successfully posted, Delete from pending. */
			nvmf_fc_request_remove_from_pending(fc_req);
		}

		if (budget) {
			budget--;
		} else {
			return;
		}
	}
}

void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}

int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transferred_len)) {
		/* Fill ERSP Len */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}

bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

1849 	/*
1850 	 * Check if we need to send an ERSP:
1851 	 * 1) For every N responses, where N == ersp_ratio.
1852 	 * 2) Fabric commands.
1853 	 * 3) Completion failed (status & 0xFFFE, i.e. ignoring the phase tag) or dw0/dw1 valid.
1854 	 * 4) SQ == 90% full (not evaluated in the check below).
1855 	 * 5) Transfer length not equal to the CMD IU length.
1856 	 */
1857 
1858 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1859 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1860 	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
1861 	    (req->length != xfer_len)) {
1862 		return true;
1863 	}
1864 	return false;
1865 }
1866 
1867 static int
1868 nvmf_fc_request_complete(struct spdk_nvmf_request *req)
1869 {
1870 	int rc = 0;
1871 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1872 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1873 
1874 	if (fc_req->is_aborted) {
1875 		/* Defer this to make sure we don't call IO cleanup in the same context. */
1876 		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1877 					(void *)fc_req);
1878 	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1879 		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1880 
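		/* Successful read: transfer the data to the host first; the
		 * response is posted from the transfer-complete path.
		 */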
1881 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1882 
1883 		rc = nvmf_fc_send_data(fc_req);
1884 	} else {
1885 		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1886 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1887 		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1888 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1889 		} else {
1890 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1891 		}
1892 
1893 		rc = nvmf_fc_handle_rsp(fc_req);
1894 	}
1895 
1896 	if (rc) {
1897 		SPDK_ERRLOG("Error in request complete.\n");
1898 		_nvmf_fc_request_free(fc_req);
1899 	}
1900 	return 0;
1901 }
1902 
1903 struct spdk_nvmf_tgt *
1904 nvmf_fc_get_tgt(void)
1905 {
1906 	if (g_nvmf_ftransport) {
1907 		return g_nvmf_ftransport->transport.tgt;
1908 	}
1909 	return NULL;
1910 }
1911 
1912 /*
1913  * FC Transport Public API begins here
1914  */
1915 
1916 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1917 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1918 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1919 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1920 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1921 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1922 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1923 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1924 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1925 
1926 static void
1927 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1928 {
1929 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1930 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1931 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1932 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1933 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1934 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1935 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1936 }
1937 
1938 static int nvmf_fc_accept(void *ctx);
1939 
1940 static struct spdk_nvmf_transport *
1941 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
1942 {
1943 	uint32_t sge_count;
1944 
1945 	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
1946 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1947 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1948 		     "  max_aq_depth=%d\n",
1949 		     opts->max_queue_depth,
1950 		     opts->max_io_size,
1951 		     opts->max_qpairs_per_ctrlr - 1,
1952 		     opts->io_unit_size,
1953 		     opts->max_aq_depth);
1954 
1955 	if (g_nvmf_ftransport) {
1956 		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
1957 		return NULL;
1958 	}
1959 
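	/* The transport needs at least two cores; reject single-core setups. */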
1960 	if (spdk_env_get_last_core() < 1) {
1961 		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
1962 			    spdk_env_get_last_core() + 1);
1963 		return NULL;
1964 	}
1965 
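	/* A max_io_size IO split into io_unit_size units consumes one SGE per
	 * unit, so the ratio must not exceed the transport's SGE limit.
	 */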
1966 	sge_count = opts->max_io_size / opts->io_unit_size;
1967 	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
1968 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
1969 		return NULL;
1970 	}
1971 
1972 	g_nvmf_fc_main_thread = spdk_get_thread();
1973 	g_nvmf_fgroup_count = 0;
1974 	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));
1975 
1976 	if (!g_nvmf_ftransport) {
1977 		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
1978 		return NULL;
1979 	}
1980 
1981 	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
1982 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
1983 		free(g_nvmf_ftransport);
1984 		g_nvmf_ftransport = NULL;
1985 		return NULL;
1986 	}
1987 
1988 	g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept,
1989 					   &g_nvmf_ftransport->transport, opts->acceptor_poll_rate);
1990 	if (!g_nvmf_ftransport->accept_poller) {
1991 		free(g_nvmf_ftransport);
1992 		g_nvmf_ftransport = NULL;
1993 		return NULL;
1994 	}
1995 
1996 	/* initialize the low level FC driver */
1997 	nvmf_fc_lld_init();
1998 
1999 	return &g_nvmf_ftransport->transport;
2000 }
2001 
2002 static void
2003 nvmf_fc_destroy_done_cb(void *cb_arg)
2004 {
2005 	free(g_nvmf_ftransport);
2006 	if (g_transport_destroy_done_cb) {
2007 		g_transport_destroy_done_cb(cb_arg);
2008 		g_transport_destroy_done_cb = NULL;
2009 	}
2010 }
2011 
2012 static int
2013 nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
2014 		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
2015 {
2016 	if (transport) {
2017 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
2018 
2019 		/* clean up any FC poll groups still around */
2020 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
2021 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2022 			free(fgroup);
2023 		}
2024 
2025 		spdk_poller_unregister(&g_nvmf_ftransport->accept_poller);
2026 		g_nvmf_fgroup_count = 0;
2027 		g_transport_destroy_done_cb = cb_fn;
2028 
2029 		/* low level FC driver clean up */
2030 		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
2031 	}
2032 
2033 	return 0;
2034 }
2035 
2036 static int
2037 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
2038 	       struct spdk_nvmf_listen_opts *listen_opts)
2039 {
2040 	return 0;
2041 }
2042 
2043 static void
2044 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
2045 		    const struct spdk_nvme_transport_id *_trid)
2046 {
2047 }
2048 
2049 static int
2050 nvmf_fc_accept(void *ctx)
2051 {
2052 	struct spdk_nvmf_fc_port *fc_port = NULL;
2053 	uint32_t count = 0;
2054 	static bool start_lld = false;
2055 
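	/* Kick off the low level driver exactly once, on the first acceptor poll. */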
2056 	if (spdk_unlikely(!start_lld)) {
2057 		start_lld  = true;
2058 		nvmf_fc_lld_start();
2059 	}
2060 
2061 	/* Poll the LS queue on each online port. */
2062 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2063 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2064 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2065 		}
2066 	}
2067 
2068 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
2069 }
2070 
2071 static void
2072 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2073 		 struct spdk_nvme_transport_id *trid,
2074 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2075 {
2076 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2077 	entry->adrfam = trid->adrfam;
2078 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2079 
2080 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2081 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2082 }
2083 
2084 static struct spdk_nvmf_transport_poll_group *
2085 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport,
2086 			  struct spdk_nvmf_poll_group *group)
2087 {
2088 	struct spdk_nvmf_fc_poll_group *fgroup;
2089 	struct spdk_nvmf_fc_transport *ftransport =
2090 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2091 
2092 	if (spdk_interrupt_mode_is_enabled()) {
2093 		SPDK_ERRLOG("FC transport does not support interrupt mode\n");
2094 		return NULL;
2095 	}
2096 
2097 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2098 	if (!fgroup) {
2099 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2100 		return NULL;
2101 	}
2102 
2103 	TAILQ_INIT(&fgroup->hwqp_list);
2104 
2105 	pthread_mutex_lock(&ftransport->lock);
2106 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2107 	g_nvmf_fgroup_count++;
2108 	pthread_mutex_unlock(&ftransport->lock);
2109 
2110 	return &fgroup->group;
2111 }
2112 
2113 static void
2114 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2115 {
2116 	struct spdk_nvmf_fc_poll_group *fgroup;
2117 	struct spdk_nvmf_fc_transport *ftransport =
2118 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2119 
2120 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2121 	pthread_mutex_lock(&ftransport->lock);
2122 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2123 	g_nvmf_fgroup_count--;
2124 	pthread_mutex_unlock(&ftransport->lock);
2125 
2126 	free(fgroup);
2127 }
2128 
2129 static int
2130 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2131 		       struct spdk_nvmf_qpair *qpair)
2132 {
2133 	struct spdk_nvmf_fc_poll_group *fgroup;
2134 	struct spdk_nvmf_fc_conn *fc_conn;
2135 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2136 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2137 	bool hwqp_found = false;
2138 
2139 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2140 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2141 
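	/* Find an hwqp in this poll group that belongs to the same FC port as
	 * the new connection's association.
	 */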
2142 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2143 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2144 			hwqp_found = true;
2145 			break;
2146 		}
2147 	}
2148 
2149 	if (!hwqp_found) {
2150 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2151 		goto err;
2152 	}
2153 
2154 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2155 					 &fc_conn->conn_id,
2156 					 fc_conn->max_queue_depth)) {
2157 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2158 		goto err;
2159 	}
2160 
2161 	fc_conn->hwqp = hwqp;
2162 
2163 	/* If this is the admin connection, update the association ID. */
2164 	if (fc_conn->qpair.qid == 0) {
2165 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2166 	}
2167 
2168 	api_data = &fc_conn->create_opd->u.add_conn;
2169 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2170 	return 0;
2171 err:
2172 	return -1;
2173 }
2174 
2175 static int
2176 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2177 {
2178 	uint32_t count = 0;
2179 	struct spdk_nvmf_fc_poll_group *fgroup;
2180 	struct spdk_nvmf_fc_hwqp *hwqp;
2181 
2182 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2183 
2184 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2185 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2186 			count += nvmf_fc_process_queue(hwqp);
2187 		}
2188 	}
2189 
2190 	return (int) count;
2191 }
2192 
2193 static int
2194 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2195 {
2196 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2197 
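	/* The first free of a live request goes through the abort path; a
	 * request that is already aborted only needs its abort completed.
	 */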
2198 	if (!fc_req->is_aborted) {
2199 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2200 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2201 	} else {
2202 		nvmf_fc_request_abort_complete(fc_req);
2203 	}
2204 
2205 	return 0;
2206 }
2207 
2208 static void
2209 nvmf_fc_connection_delete_done_cb(void *arg)
2210 {
2211 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2212 
2213 	if (fc_ctx->cb_fn) {
2214 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2215 	}
2216 	free(fc_ctx);
2217 }
2218 
2219 static void
2220 _nvmf_fc_close_qpair(void *arg)
2221 {
2222 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2223 	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
2224 	struct spdk_nvmf_fc_conn *fc_conn;
2225 	int rc;
2226 
2227 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
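	/* Three cases: the connection was never fully created (no conn_id),
	 * it is live and must be deleted, or an FC-layer delete is already in
	 * progress and only the qpair disconnect callback remains.
	 */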
2228 	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
2229 		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2230 
2231 		if (fc_conn->create_opd) {
2232 			api_data = &fc_conn->create_opd->u.add_conn;
2233 
2234 			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
2235 						    api_data->args.fc_conn, api_data->aq_conn);
2236 		}
2237 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
2238 		rc = nvmf_fc_delete_connection(fc_conn, false, true,
2239 					       nvmf_fc_connection_delete_done_cb, fc_ctx);
2240 		if (!rc) {
2241 			/* Wait for transport to complete its work. */
2242 			return;
2243 		}
2244 
2245 		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
2246 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
2247 		/* This is the case where deletion started from FC layer. */
2248 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_conn->qpair_disconnect_cb_fn,
2249 				     fc_conn->qpair_disconnect_ctx);
2250 	}
2251 
2252 	nvmf_fc_connection_delete_done_cb(fc_ctx);
2253 }
2254 
2255 static void
2256 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2257 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2258 {
2259 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2260 
2261 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2262 	if (!fc_ctx) {
2263 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.");
2264 		if (cb_fn) {
2265 			cb_fn(cb_arg);
2266 		}
2267 		return;
2268 	}
2269 	fc_ctx->qpair = qpair;
2270 	fc_ctx->cb_fn = cb_fn;
2271 	fc_ctx->cb_ctx = cb_arg;
2272 	fc_ctx->qpair_thread = spdk_get_thread();
2273 
2274 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2275 }
2276 
2277 static int
2278 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2279 			    struct spdk_nvme_transport_id *trid)
2280 {
2281 	struct spdk_nvmf_fc_conn *fc_conn;
2282 
2283 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2284 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2285 	return 0;
2286 }
2287 
2288 static int
2289 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2290 			     struct spdk_nvme_transport_id *trid)
2291 {
2292 	struct spdk_nvmf_fc_conn *fc_conn;
2293 
2294 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2295 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2296 	return 0;
2297 }
2298 
2299 static int
2300 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2301 			      struct spdk_nvme_transport_id *trid)
2302 {
2303 	struct spdk_nvmf_fc_conn *fc_conn;
2304 
2305 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2306 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2307 	return 0;
2308 }
2309 
2310 static void
2311 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
2312 			    struct spdk_nvmf_request *req)
2313 {
2314 	spdk_nvmf_request_complete(req);
2315 }
2316 
2317 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
2318 	.name = "FC",
2319 	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
2320 	.opts_init = nvmf_fc_opts_init,
2321 	.create = nvmf_fc_create,
2322 	.destroy = nvmf_fc_destroy,
2323 
2324 	.listen = nvmf_fc_listen,
2325 	.stop_listen = nvmf_fc_stop_listen,
2326 
2327 	.listener_discover = nvmf_fc_discover,
2328 
2329 	.poll_group_create = nvmf_fc_poll_group_create,
2330 	.poll_group_destroy = nvmf_fc_poll_group_destroy,
2331 	.poll_group_add = nvmf_fc_poll_group_add,
2332 	.poll_group_poll = nvmf_fc_poll_group_poll,
2333 
2334 	.req_complete = nvmf_fc_request_complete,
2335 	.req_free = nvmf_fc_request_free,
2336 	.qpair_fini = nvmf_fc_close_qpair,
2337 	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
2338 	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
2339 	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
2340 	.qpair_abort_request = nvmf_fc_qpair_abort_request,
2341 };
2342 
2343 /* Initializes the data for the creation of an FC-port object in the SPDK
2344  * library. The spdk_nvmf_fc_port is a well-defined structure that is part of
2345  * the API to the library. The contents added to this well-defined structure
2346  * are private to each vendor's implementation.
2347  */
2348 static int
2349 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2350 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2351 {
2352 	int rc = 0;
2353 	/* The LS HWQP is given a high id so that it does not clash with the
2354 	 * IO HWQPs' ids and immediately shows as the LS queue during tracing.
2355 	 */
2356 	uint32_t i;
2357 
2358 	fc_port->port_hdl       = args->port_handle;
2359 	fc_port->lld_fc_port	= args->lld_fc_port;
2360 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2361 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2362 	fc_port->num_io_queues  = args->io_queue_cnt;
2363 
2364 	/*
2365 	 * Set port context from init args. Used for FCP port stats.
2366 	 */
2367 	fc_port->port_ctx = args->port_ctx;
2368 
2369 	/*
2370 	 * Set up the LS queue fields from the init args.
2371 	 */
2372 	fc_port->ls_queue.queues = args->ls_queue;
2373 	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
2374 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2375 	fc_port->ls_queue.is_ls_queue = true;
2376 
2377 	/*
2378 	 * Initialize the LS queue.
2379 	 */
2380 	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2381 	if (rc) {
2382 		return rc;
2383 	}
2384 
2385 	/*
2386 	 * Initialize the IO queues.
2387 	 */
2388 	for (i = 0; i < args->io_queue_cnt; i++) {
2389 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2390 		hwqp->hwqp_id = i;
2391 		hwqp->queues = args->io_queues[i];
2392 		hwqp->is_ls_queue = false;
2393 		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
2394 		if (rc) {
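			/* Unwind: free the hash tables of every hwqp initialized
			 * so far, including the LS queue, before bailing out.
			 */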
2395 			for (; i > 0; --i) {
2396 				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
2397 				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
2398 			}
2399 			rte_hash_free(fc_port->ls_queue.connection_list_hash);
2400 			rte_hash_free(fc_port->ls_queue.rport_list_hash);
2401 			return rc;
2402 		}
2403 	}
2404 
2405 	/*
2406 	 * Initialize the LS processing for port
2407 	 */
2408 	nvmf_fc_ls_init(fc_port);
2409 
2410 	/*
2411 	 * Initialize the list of nports on this HW port.
2412 	 */
2413 	TAILQ_INIT(&fc_port->nport_list);
2414 	fc_port->num_nports = 0;
2415 
2416 	return 0;
2417 }
2418 
2419 /*
2420  * FC port must have all its nports deleted before transitioning to offline state.
2421  */
2422 static void
2423 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2424 {
2425 	struct spdk_nvmf_fc_nport *nport = NULL;
2426 	/* All nports must have been deleted at this point for this fc port */
2427 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2428 	DEV_VERIFY(fc_port->num_nports == 0);
2429 	/* If any nports still remain, mark their states as zombie. */
2430 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2431 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2432 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2433 		}
2434 	}
2435 }
2436 
2437 static void
2438 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2439 {
2440 	ASSERT_SPDK_FC_MAIN_THREAD();
2441 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2442 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2443 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2444 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2445 	int spdk_err = 0;
2446 	uint8_t port_handle = cb_data->port_handle;
2447 	uint32_t s_id = rport->s_id;
2448 	uint32_t rpi = rport->rpi;
2449 	uint32_t assoc_count = rport->assoc_count;
2450 	uint32_t nport_hdl = nport->nport_hdl;
2451 	uint32_t d_id = nport->d_id;
2452 	char log_str[256];
2453 
2454 	/*
2455 	 * Assert on any delete failure.
2456 	 */
2457 	if (0 != err) {
2458 		DEV_VERIFY(!"Error in IT Delete callback.");
2459 		goto out;
2460 	}
2461 
2462 	if (cb_func != NULL) {
2463 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2464 	}
2465 
2466 out:
2467 	free(cb_data);
2468 
2469 	snprintf(log_str, sizeof(log_str),
2470 		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2471 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2472 
2473 	if (err != 0) {
2474 		SPDK_ERRLOG("%s", log_str);
2475 	} else {
2476 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2477 	}
2478 }
2479 
2480 static void
2481 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
2482 {
2483 	ASSERT_SPDK_FC_MAIN_THREAD();
2484 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
2485 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2486 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2487 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
2488 	uint32_t s_id = rport->s_id;
2489 	uint32_t rpi = rport->rpi;
2490 	uint32_t assoc_count = rport->assoc_count;
2491 	uint32_t nport_hdl = nport->nport_hdl;
2492 	uint32_t d_id = nport->d_id;
2493 	char log_str[256];
2494 
2495 	/*
2496 	 * Assert on any association delete failure. We continue to delete other
2497 	 * associations in promoted builds.
2498 	 */
2499 	if (0 != err) {
2500 		DEV_VERIFY(!"Nport's association delete callback returned error");
2501 		if (nport->assoc_count > 0) {
2502 			nport->assoc_count--;
2503 		}
2504 		if (rport->assoc_count > 0) {
2505 			rport->assoc_count--;
2506 		}
2507 	}
2508 
2509 	/*
2510 	 * If this is the last association being deleted for the ITN,
2511 	 * execute the callback(s).
2512 	 */
2513 	if (0 == rport->assoc_count) {
2514 		/* Remove the rport from the remote port list. */
2515 		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
2516 			SPDK_ERRLOG("Error while removing rport from list.\n");
2517 			DEV_VERIFY(!"Error while removing rport from list.");
2518 		}
2519 
2520 		if (cb_func != NULL) {
2521 			/*
2522 			 * Callback function is provided by the caller
2523 			 * of nvmf_fc_adm_i_t_delete_assoc().
2524 			 */
2525 			(void)cb_func(cb_data->cb_ctx, 0);
2526 		}
2527 		free(rport);
2528 		free(args);
2529 	}
2530 
2531 	snprintf(log_str, sizeof(log_str),
2532 		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
2533 		 nport_hdl, s_id, d_id, rpi, assoc_count, err);
2534 
2535 	if (err != 0) {
2536 		SPDK_ERRLOG("%s", log_str);
2537 	} else {
2538 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2539 	}
2540 }
2541 
2542 /**
2543  * Process an IT delete.
2544  */
2545 static void
2546 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
2547 			     struct spdk_nvmf_fc_remote_port_info *rport,
2548 			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
2549 			     void *cb_ctx)
2550 {
2551 	int err = 0;
2552 	struct spdk_nvmf_fc_association *assoc = NULL;
2553 	int assoc_err = 0;
2554 	uint32_t num_assoc = 0;
2555 	uint32_t num_assoc_del_scheduled = 0;
2556 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
2557 	uint8_t port_hdl = nport->port_hdl;
2558 	uint32_t s_id = rport->s_id;
2559 	uint32_t rpi = rport->rpi;
2560 	uint32_t assoc_count = rport->assoc_count;
2561 	char log_str[256];
2562 
2563 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
2564 		      nport->nport_hdl);
2565 
2566 	/*
2567 	 * Allocate memory for callback data.
2568 	 * This memory will be freed by the callback function.
2569 	 */
2570 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
2571 	if (NULL == cb_data) {
2572 		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
2573 		err = -ENOMEM;
2574 		goto out;
2575 	}
2576 	cb_data->nport       = nport;
2577 	cb_data->rport       = rport;
2578 	cb_data->port_handle = port_hdl;
2579 	cb_data->cb_func     = cb_func;
2580 	cb_data->cb_ctx      = cb_ctx;
2581 
2582 	/*
2583 	 * Delete all associations, if any, related to this ITN/remote_port.
2584 	 */
2585 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
2586 		num_assoc++;
2587 		if (assoc->s_id == s_id) {
2588 			assoc_err = nvmf_fc_delete_association(nport,
2589 							       assoc->assoc_id,
2590 							       false /* send abts */, false,
2591 							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
2592 			if (0 != assoc_err) {
2593 				/*
2594 				 * Mark this association as zombie.
2595 				 */
2596 				err = -EINVAL;
2597 				DEV_VERIFY(!"Error while deleting association");
2598 				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2599 			} else {
2600 				num_assoc_del_scheduled++;
2601 			}
2602 		}
2603 	}
2604 
2605 out:
2606 	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
2607 		/*
2608 		 * Since there are no association_delete calls
2609 		 * successfully scheduled, the association_delete
2610 		 * callback function will never be called.
2611 		 * In this case, call the callback function now.
2612 		 */
2613 		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
2614 	}
2615 
2616 	snprintf(log_str, sizeof(log_str),
2617 		 "IT delete associations on nport:%d end. "
2618 		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
2619 		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);
2620 
2621 	if (err == 0) {
2622 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2623 	} else {
2624 		SPDK_ERRLOG("%s", log_str);
2625 	}
2626 }
2627 
2628 static void
2629 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
2630 {
2631 	ASSERT_SPDK_FC_MAIN_THREAD();
2632 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
2633 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2634 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2635 	struct spdk_nvmf_fc_port *fc_port = NULL;
2636 	int err = 0;
2637 
2638 	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
2639 	hwqp = quiesce_api_data->hwqp;
2640 	fc_port = hwqp->fc_port;
2641 	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
2642 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;
2643 
2644 	/*
2645 	 * Decrement the callback/quiesced queue count.
2646 	 */
2647 	port_quiesce_ctx->quiesce_count--;
2648 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);
2649 
2650 	free(quiesce_api_data);
2651 	/*
2652 	 * Wait for all callbacks, i.e. one per IO queue plus the LS queue.
2653 	 */
2654 	if (port_quiesce_ctx->quiesce_count > 0) {
2655 		return;
2656 	}
2657 
2658 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2659 		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
2660 	} else {
2661 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
2662 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2663 	}
2664 
2665 	if (cb_func) {
2666 		/*
2667 		 * Callback function for the caller of the quiesce.
2668 		 */
2669 		cb_func(port_quiesce_ctx->ctx, err);
2670 	}
2671 
2672 	/*
2673 	 * Free the context structure.
2674 	 */
2675 	free(port_quiesce_ctx);
2676 
2677 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
2678 		      err);
2679 }
2680 
2681 static int
2682 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2683 			     spdk_nvmf_fc_poller_api_cb cb_func)
2684 {
2685 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2686 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2687 	int err = 0;
2688 
2689 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2690 
2691 	if (args == NULL) {
2692 		err = -ENOMEM;
2693 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2694 		goto done;
2695 	}
2696 	args->hwqp = fc_hwqp;
2697 	args->ctx = ctx;
2698 	args->cb_info.cb_func = cb_func;
2699 	args->cb_info.cb_data = args;
2700 	args->cb_info.cb_thread = spdk_get_thread();
2701 
2702 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2703 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2704 	if (rc) {
2705 		free(args);
2706 		err = -EINVAL;
2707 	}
2708 
2709 done:
2710 	return err;
2711 }
2712 
2713 /*
2714  * Hw port Quiesce
2715  */
2716 static int
2717 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2718 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2719 {
2720 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2721 	uint32_t i = 0;
2722 	int err = 0;
2723 
2724 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2725 
2726 	/*
2727 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2728 	 * and execute the callback.
2729 	 */
2730 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2731 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2732 	}
2733 
2734 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2735 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
2736 			      fc_port->port_hdl);
2737 		/*
2738 		 * Execute the callback function directly.
2739 		 */
2740 		cb_func(ctx, err);
2741 		goto out;
2742 	}
2743 
2744 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2745 
2746 	if (port_quiesce_ctx == NULL) {
2747 		err = -ENOMEM;
2748 		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
2749 			    fc_port->port_hdl);
2750 		goto out;
2751 	}
2752 
2753 	port_quiesce_ctx->quiesce_count = 0;
2754 	port_quiesce_ctx->ctx = ctx;
2755 	port_quiesce_ctx->cb_func = cb_func;
2756 
2757 	/*
2758 	 * Quiesce the LS queue.
2759 	 */
2760 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2761 					   nvmf_fc_adm_queue_quiesce_cb);
2762 	if (err != 0) {
2763 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2764 		goto out;
2765 	}
2766 	port_quiesce_ctx->quiesce_count++;
2767 
2768 	/*
2769 	 * Quiesce the IO queues.
2770 	 */
2771 	for (i = 0; i < fc_port->num_io_queues; i++) {
2772 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2773 						   port_quiesce_ctx,
2774 						   nvmf_fc_adm_queue_quiesce_cb);
2775 		if (err != 0) {
2776 			DEV_VERIFY(0);
2777 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2778 		}
2779 		port_quiesce_ctx->quiesce_count++;
2780 	}
2781 
2782 out:
2783 	if (port_quiesce_ctx && err != 0 && port_quiesce_ctx->quiesce_count == 0) {
2784 		free(port_quiesce_ctx);
2785 	}
2786 	return err;
2787 }
2788 
2789 /*
2790  * Initialize and add a HW port entry to the global
2791  * HW port list.
2792  */
2793 static void
2794 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2795 {
2796 	ASSERT_SPDK_FC_MAIN_THREAD();
2797 	struct spdk_nvmf_fc_port *fc_port = NULL;
2798 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2799 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2800 			api_data->api_args;
2801 	int err = 0;
2802 
2803 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2804 		SPDK_ERRLOG("IO queue count greater than core count for port %d.\n", args->port_handle);
2805 		err = -EINVAL;
2806 		goto abort_port_init;
2807 	}
2808 
2809 	/*
2810 	 * 1. Check for duplicate initialization.
2811 	 */
2812 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2813 	if (fc_port != NULL) {
2814 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2815 		goto abort_port_init;
2816 	}
2817 
2818 	/*
2819 	 * 2. Get the memory to instantiate a fc port.
2820 	 */
2821 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2822 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2823 	if (fc_port == NULL) {
2824 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2825 		err = -ENOMEM;
2826 		goto abort_port_init;
2827 	}
2828 
2829 	/* assign the io_queues array */
2830 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2831 				     struct spdk_nvmf_fc_port));
2832 
2833 	/*
2834 	 * 3. Initialize the contents for the FC-port
2835 	 */
2836 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2837 
2838 	if (err != 0) {
2839 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2840 		DEV_VERIFY(!"Data initialization failed for fc_port");
2841 		goto abort_port_init;
2842 	}
2843 
2844 	/*
2845 	 * 4. Add this port to the global fc port list in the library.
2846 	 */
2847 	nvmf_fc_port_add(fc_port);
2848 
2849 abort_port_init:
2850 	if (err && fc_port) {
2851 		free(fc_port);
2852 	}
2853 	if (api_data->cb_func != NULL) {
2854 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2855 	}
2856 
2857 	free(arg);
2858 
2859 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2860 		      args->port_handle, err);
2861 }
2862 
2863 static void
2864 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2865 {
2866 	struct spdk_nvmf_fc_abts_ctx *ctx;
2867 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2868 
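	/* Drain any callbacks still registered for queue syncs; the shared
	 * ABTS context is freed once every involved hwqp has responded.
	 */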
2869 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2870 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2871 		ctx = args->cb_info.cb_data;
2872 		if (ctx) {
2873 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2874 				free(ctx->sync_poller_args);
2875 				free(ctx->abts_poller_args);
2876 				free(ctx);
2877 			}
2878 		}
2879 	}
2880 }
2881 
2882 static void
2883 nvmf_fc_adm_evnt_hw_port_free(void *arg)
2884 {
2885 	ASSERT_SPDK_FC_MAIN_THREAD();
2886 	int err = 0, i;
2887 	struct spdk_nvmf_fc_port *fc_port = NULL;
2888 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2889 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2890 	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
2891 			api_data->api_args;
2892 
2893 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2894 	if (!fc_port) {
2895 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2896 		err = -EINVAL;
2897 		goto out;
2898 	}
2899 
2900 	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
2901 		SPDK_ERRLOG("Hw port %d: nports not cleaned up yet.\n", args->port_handle);
2902 		err = -EIO;
2903 		goto out;
2904 	}
2905 
2906 	/* Clean up and free fc_port */
2907 	hwqp = &fc_port->ls_queue;
2908 	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
2909 	rte_hash_free(hwqp->connection_list_hash);
2910 	rte_hash_free(hwqp->rport_list_hash);
2911 
2912 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2913 		hwqp = &fc_port->io_queues[i];
2914 
2915 		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
2916 		rte_hash_free(hwqp->connection_list_hash);
2917 		rte_hash_free(hwqp->rport_list_hash);
2918 	}
2919 
2920 	nvmf_fc_port_remove(fc_port);
2921 	free(fc_port);
2922 out:
2923 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
2924 		      args->port_handle, err);
2925 	if (api_data->cb_func != NULL) {
2926 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
2927 	}
2928 
2929 	free(arg);
2930 }
2931 
2932 /*
2933  * Online a HW port.
2934  */
2935 static void
2936 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2937 {
2938 	ASSERT_SPDK_FC_MAIN_THREAD();
2939 	struct spdk_nvmf_fc_port *fc_port = NULL;
2940 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2941 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2942 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2943 			api_data->api_args;
2944 	int i = 0;
2945 	int err = 0;
2946 
2947 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2948 	if (fc_port) {
2949 		/* Set the port state to online */
2950 		err = nvmf_fc_port_set_online(fc_port);
2951 		if (err != 0) {
2952 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2953 			DEV_VERIFY(!"Hw port online failed");
2954 			goto out;
2955 		}
2956 
2957 		hwqp = &fc_port->ls_queue;
2958 		hwqp->context = NULL;
2959 		(void)nvmf_fc_hwqp_set_online(hwqp);
2960 
2961 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2962 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2963 			hwqp = &fc_port->io_queues[i];
2964 			hwqp->context = NULL;
2965 			(void)nvmf_fc_hwqp_set_online(hwqp);
2966 			nvmf_fc_poll_group_add_hwqp(hwqp);
2967 		}
2968 	} else {
2969 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2970 		err = -EINVAL;
2971 	}
2972 
2973 out:
2974 	if (api_data->cb_func != NULL) {
2975 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2976 	}
2977 
2978 	free(arg);
2979 
2980 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
2981 		      err);
2982 }
2983 
2984 static void
2985 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
2986 {
2987 	int err = 0;
2988 	struct spdk_nvmf_fc_port *fc_port = NULL;
2989 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
2990 	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;
2991 
2992 	if (--remove_hwqp_args->pending_remove_hwqp) {
2993 		return;
2994 	}
2995 
2996 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2997 	if (!fc_port) {
2998 		err = -EINVAL;
2999 		SPDK_ERRLOG("fc_port not found.\n");
3000 		goto out;
3001 	}
3002 
3003 	/*
3004 	 * Delete all the nports. Ideally, the nports should have been purged
3005 	 * before the offline event, in which case, only a validation is required.
3006 	 * before the offline event, in which case only a validation is required.
3007 	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
3008 out:
3009 	if (remove_hwqp_args->cb_fn) {
3010 		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3011 	}
3012 
3013 	free(remove_hwqp_args);
3014 }
3015 
3016 /*
3017  * Offline a HW port.
3018  */
3019 static void
3020 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3021 {
3022 	ASSERT_SPDK_FC_MAIN_THREAD();
3023 	struct spdk_nvmf_fc_port *fc_port = NULL;
3024 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3025 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3026 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3027 			api_data->api_args;
3028 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3029 	int i = 0;
3030 	int err = 0;
3031 
3032 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3033 	if (fc_port) {
3034 		/* Set the port state to offline, if it is not already. */
3035 		err = nvmf_fc_port_set_offline(fc_port);
3036 		if (err != 0) {
3037 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3038 			err = 0;
3039 			goto out;
3040 		}
3041 
3042 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3043 		if (!remove_hwqp_args) {
3044 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3045 			err = -ENOMEM;
3046 			goto out;
3047 		}
3048 		remove_hwqp_args->cb_fn = api_data->cb_func;
3049 		remove_hwqp_args->cb_args = api_data->api_args;
3050 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
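		/* The offline callback fires once per IO hwqp removed; the last
		 * invocation completes the port offline sequence.
		 */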
3051 
3052 		hwqp = &fc_port->ls_queue;
3053 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3054 
3055 		/* Remove poller for all the io queues. */
3056 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3057 			hwqp = &fc_port->io_queues[i];
3058 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3059 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3060 						       remove_hwqp_args);
3061 		}
3062 
3063 		free(arg);
3064 
3065 		/* Wait until all the hwqps are removed from poll groups. */
3066 		return;
3067 	} else {
3068 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3069 		err = -EINVAL;
3070 	}
3071 out:
3072 	if (api_data->cb_func != NULL) {
3073 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3074 	}
3075 
3076 	free(arg);
3077 
3078 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3079 		      err);
3080 }
3081 
3082 struct nvmf_fc_add_rem_listener_ctx {
3083 	struct spdk_nvmf_subsystem *subsystem;
3084 	bool add_listener;
3085 	struct spdk_nvme_transport_id trid;
3086 };
3087 
3088 static void
3089 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3090 {
3091 	ASSERT_SPDK_FC_MAIN_THREAD();
3092 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3093 	free(ctx);
3094 }
3095 
3096 static void
3097 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3098 {
3099 	ASSERT_SPDK_FC_MAIN_THREAD();
3100 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3101 
3102 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3103 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3104 		free(ctx);
3105 	}
3106 }
3107 
3108 static void
3109 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3110 {
3111 	ASSERT_SPDK_FC_MAIN_THREAD();
3112 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3113 
3114 	if (ctx->add_listener) {
3115 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3116 	} else {
3117 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3118 		nvmf_fc_adm_listen_done(ctx, 0);
3119 	}
3120 }
3121 
3122 static int
3123 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3124 {
3125 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3126 	struct spdk_nvmf_subsystem *subsystem;
3127 	struct spdk_nvmf_listen_opts opts;
3128 
3129 	if (!tgt) {
3130 		SPDK_ERRLOG("No nvmf target defined\n");
3131 		return -EINVAL;
3132 	}
3133 
3134 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3135 
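	/* For each subsystem that allows this listener: register the trid with
	 * the target, then pause the subsystem. The listener itself is added
	 * or removed in the paused callback and the subsystem resumed after.
	 */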
3136 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3137 	while (subsystem) {
3138 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3139 
3140 		if (spdk_nvmf_subsystem_any_listener_allowed(subsystem)) {
3141 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3142 			if (ctx) {
3143 				ctx->add_listener = add;
3144 				ctx->subsystem = subsystem;
3145 				nvmf_fc_create_trid(&ctx->trid,
3146 						    nport->fc_nodename.u.wwn,
3147 						    nport->fc_portname.u.wwn);
3148 
3149 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3150 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3151 						    ctx->trid.traddr);
3152 					free(ctx);
3153 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3154 								     0,
3155 								     nvmf_fc_adm_subsystem_paused_cb,
3156 								     ctx)) {
3157 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3158 						    subsystem->subnqn);
3159 					free(ctx);
3160 				}
3161 			}
3162 		}
3163 
3164 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3165 	}
3166 
3167 	return 0;
3168 }
3169 
3170 /*
3171  * Create a Nport.
3172  */
3173 static void
3174 nvmf_fc_adm_evnt_nport_create(void *arg)
3175 {
3176 	ASSERT_SPDK_FC_MAIN_THREAD();
3177 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3178 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3179 			api_data->api_args;
3180 	struct spdk_nvmf_fc_nport *nport = NULL;
3181 	struct spdk_nvmf_fc_port *fc_port = NULL;
3182 	int err = 0;
3183 
3184 	/*
3185 	 * Get the physical port.
3186 	 */
3187 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3188 	if (fc_port == NULL) {
3189 		err = -EINVAL;
3190 		goto out;
3191 	}
3192 
3193 	/*
3194 	 * Check for duplicate initialization.
3195 	 */
3196 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3197 	if (nport != NULL) {
3198 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3199 			    args->port_handle);
3200 		err = -EINVAL;
3201 		goto out;
3202 	}
3203 
3204 	/*
3205 	 * Get the memory to instantiate a fc nport.
3206 	 */
3207 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3208 	if (nport == NULL) {
3209 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3210 			    args->nport_handle);
3211 		err = -ENOMEM;
3212 		goto out;
3213 	}
3214 
3215 	/*
3216 	 * Initialize the contents for the nport
3217 	 */
3218 	nport->nport_hdl    = args->nport_handle;
3219 	nport->port_hdl     = args->port_handle;
3220 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3221 	nport->fc_nodename  = args->fc_nodename;
3222 	nport->fc_portname  = args->fc_portname;
3223 	nport->d_id         = args->d_id;
3224 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3225 
3226 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3227 	TAILQ_INIT(&nport->rem_port_list);
3228 	nport->rport_count = 0;
3229 	TAILQ_INIT(&nport->fc_associations);
3230 	nport->assoc_count = 0;
3231 
3232 	/*
3233 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3234 	 */
3235 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3236 
3237 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3238 out:
3239 	if (err && nport) {
3240 		free(nport);
3241 	}
3242 
3243 	if (api_data->cb_func != NULL) {
3244 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3245 	}
3246 
3247 	free(arg);
3248 }
3249 
3250 static void
3251 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
3252 			    void *cb_args, int spdk_err)
3253 {
3254 	ASSERT_SPDK_FC_MAIN_THREAD();
3255 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
3256 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
3257 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
3258 	int err = 0;
3259 	uint16_t nport_hdl = 0;
3260 	char log_str[256];
3261 
3262 	/*
3263 	 * Assert on any delete failure.
3264 	 */
3265 	if (nport == NULL) {
3266 		SPDK_ERRLOG("Nport delete callback returned null nport.\n");
3267 		DEV_VERIFY(!"nport is null.");
3268 		goto out;
3269 	}
3270 
3271 	nport_hdl = nport->nport_hdl;
3272 	if (0 != spdk_err) {
3273 		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
3274 			    "%d, Nport: %d\n",
3275 			    nport->port_hdl, nport->nport_hdl);
3276 		DEV_VERIFY(!"nport delete callback error.");
3277 	}
3278 
3279 	/*
3280 	 * Free the nport if this is the last rport being deleted and
3281 	 * execute the callback(s).
3282 	 */
3283 	if (nvmf_fc_nport_has_no_rport(nport)) {
3284 		if (0 != nport->assoc_count) {
3285 			SPDK_ERRLOG("association count != 0\n");
3286 			DEV_VERIFY(!"association count != 0");
3287 		}
3288 
3289 		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
3290 		if (0 != err) {
3291 			SPDK_ERRLOG("Nport delete callback: Failed to remove "
3292 				    "nport from nport list. FC Port:%d Nport:%d\n",
3293 				    nport->port_hdl, nport->nport_hdl);
3294 		}
3295 		/* Free the nport */
3296 		free(nport);
3297 
3298 		if (cb_func != NULL) {
3299 			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
3300 		}
3301 		free(cb_data);
3302 	}
3303 out:
3304 	snprintf(log_str, sizeof(log_str),
3305 		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
3306 		 port_handle, nport_hdl, event_type, spdk_err);
3307 
3308 	if (err != 0) {
3309 		SPDK_ERRLOG("%s", log_str);
3310 	} else {
3311 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3312 	}
3313 }
3314 
3315 /*
3316  * Delete Nport.
3317  */
3318 static void
3319 nvmf_fc_adm_evnt_nport_delete(void *arg)
3320 {
3321 	ASSERT_SPDK_FC_MAIN_THREAD();
3322 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3323 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3324 			api_data->api_args;
3325 	struct spdk_nvmf_fc_nport *nport = NULL;
3326 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3327 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3328 	int err = 0;
3329 	uint32_t rport_cnt = 0;
3330 	int rc = 0;
3331 
3332 	/*
3333 	 * Make sure that the nport exists.
3334 	 */
3335 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3336 	if (nport == NULL) {
3337 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3338 			    args->port_handle);
3339 		err = -EINVAL;
3340 		goto out;
3341 	}
3342 
3343 	/*
3344 	 * Allocate memory for callback data.
3345 	 */
3346 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3347 	if (NULL == cb_data) {
3348 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3349 		err = -ENOMEM;
3350 		goto out;
3351 	}
3352 
3353 	cb_data->nport = nport;
3354 	cb_data->port_handle = args->port_handle;
3355 	cb_data->fc_cb_func = api_data->cb_func;
3356 	cb_data->fc_cb_ctx = args->cb_ctx;
3357 
3358 	/*
3359 	 * Begin nport tear down
3360 	 */
3361 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3362 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3363 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3364 		/*
3365 		 * Deletion of this nport already in progress. Register callback
3366 		 * and return.
3367 		 */
3368 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3369 		err = -ENODEV;
3370 		goto out;
3371 	} else {
3372 		/* nport partially created/deleted */
3373 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3374 		DEV_VERIFY(!"Nport in zombie state");
3375 		err = -ENODEV;
3376 		goto out;
3377 	}
3378 
3379 	/*
3380 	 * Remove this nport from listening addresses across subsystems
3381 	 */
3382 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3383 
3384 	if (0 != rc) {
3385 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3386 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3387 			    nport->nport_hdl);
3388 		goto out;
3389 	}
3390 
3391 	/*
3392 	 * Delete all the remote ports (if any) for the nport
3393 	 */
3394 	/* TODO - Need to do this with a "first" and a "next" accessor function
3395 	 * for completeness. Look at app-subsystem as examples.
3396 	 */
3397 	if (nvmf_fc_nport_has_no_rport(nport)) {
3398 		/* No rports to delete. Complete the nport deletion. */
3399 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3400 		goto out;
3401 	}
3402 
3403 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3404 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3405 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3406 
3407 		if (it_del_args == NULL) {
3408 			err = -ENOMEM;
3409 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3410 				    rport_iter->rpi, rport_iter->s_id);
3411 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3412 			goto out;
3413 		}
3414 
3415 		rport_cnt++;
3416 		it_del_args->port_handle = nport->port_hdl;
3417 		it_del_args->nport_handle = nport->nport_hdl;
3418 		it_del_args->cb_ctx = (void *)cb_data;
3419 		it_del_args->rpi = rport_iter->rpi;
3420 		it_del_args->s_id = rport_iter->s_id;
3421 
3422 		err = nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3423 						 nvmf_fc_adm_delete_nport_cb);
3424 		if (err) {
3425 			free(it_del_args);
3426 		}
3427 	}
3428 
3429 out:
3430 	/* On failure, execute the callback function now */
3431 	if ((err != 0) || (rc != 0)) {
3432 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3433 			    "rport_cnt:%d rc:%d.\n",
3434 			    args->nport_handle, err, args->port_handle,
3435 			    rport_cnt, rc);
3436 		if (cb_data) {
3437 			free(cb_data);
3438 		}
3439 		if (api_data->cb_func != NULL) {
3440 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3441 		}
3442 
3443 	} else {
3444 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3445 			      "NPort %d delete done successfully, fc port:%d. "
3446 			      "rport_cnt:%d\n",
3447 			      args->nport_handle, args->port_handle, rport_cnt);
3448 	}
3449 
3450 	free(arg);
3451 }
3452 
3453 /*
3454  * Process a PRLI/IT add.
3455  */
3456 static void
3457 nvmf_fc_adm_evnt_i_t_add(void *arg)
3458 {
3459 	ASSERT_SPDK_FC_MAIN_THREAD();
3460 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3461 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3462 			api_data->api_args;
3463 	struct spdk_nvmf_fc_nport *nport = NULL;
3464 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3465 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3466 	int err = 0;
3467 
3468 	/*
3469 	 * Make sure the nport port exists.
3470 	 */
3471 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3472 	if (nport == NULL) {
3473 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3474 		err = -EINVAL;
3475 		goto out;
3476 	}
3477 
3478 	/*
3479 	 * Check for duplicate i_t_add.
3480 	 */
3481 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3482 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3483 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3484 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3485 			err = -EEXIST;
3486 			goto out;
3487 		}
3488 	}
3489 
3490 	/*
3491 	 * Get the memory to instantiate the remote port
3492 	 */
3493 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3494 	if (rport == NULL) {
3495 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3496 		err = -ENOMEM;
3497 		goto out;
3498 	}
3499 
3500 	/*
3501 	 * Initialize the contents for the rport
3502 	 */
3503 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3504 	rport->s_id = args->s_id;
3505 	rport->rpi = args->rpi;
3506 	rport->fc_nodename = args->fc_nodename;
3507 	rport->fc_portname = args->fc_portname;
3508 
3509 	/*
3510 	 * Add remote port to nport
3511 	 */
3512 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3513 		DEV_VERIFY(!"Error while adding rport to list");
3514 	}
3515 
3516 	/*
3517 	 * TODO: Do we validate the initiator's service parameters?
3518 	 */
3519 
3520 	/*
3521 	 * Get the target's service parameters from the library
3522 	 * to return to the driver.
3523 	 */
3524 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3525 
3526 out:
3527 	if (api_data->cb_func != NULL) {
3528 		/*
3529 		 * The updated args (e.g. target_prli_info) are returned to the
3530 		 * driver through the same args structure the caller passed in.
3531 		 */
3532 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3533 	}
3534 
3535 	free(arg);
3536 
3537 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3538 		      "IT add on nport %d done, rc = %d.\n",
3539 		      args->nport_handle, err);
3540 }
3541 
3542 /**
3543  * Process an IT delete.
3544  */
3545 static void
3546 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3547 {
3548 	ASSERT_SPDK_FC_MAIN_THREAD();
3549 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3550 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3551 			api_data->api_args;
3552 	int rc = 0;
3553 	struct spdk_nvmf_fc_nport *nport = NULL;
3554 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3555 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3556 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3557 	uint32_t num_rport = 0;
3558 	char log_str[256];
3559 
3560 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3561 
3562 	/*
3563 	 * Make sure the nport exists. If it does not, error out.
3564 	 */
3565 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3566 	if (nport == NULL) {
3567 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3568 		rc = -EINVAL;
3569 		goto out;
3570 	}
3571 
3572 	/*
3573 	 * Find this ITN / rport (remote port).
3574 	 */
3575 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3576 		num_rport++;
3577 		if ((rport_iter->s_id == args->s_id) &&
3578 		    (rport_iter->rpi == args->rpi) &&
3579 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3580 			rport = rport_iter;
3581 			break;
3582 		}
3583 	}
3584 
3585 	/*
3586 	 * We should find either zero or exactly one rport.
3587 	 *
3588 	 * If we find zero rports, that means that a previous request has
3589 	 * removed the rport by the time we reached here. In this case,
3590 	 * simply return.
3591 	 */
3592 	if (rport == NULL) {
3593 		rc = -ENODEV;
3594 		goto out;
3595 	}
3596 
3597 	/*
3598 	 * We have the rport slated for deletion. At this point, first clean
3599 	 * up any LS requests sitting in the pending list, then set the rport
3600 	 * state so that new LS requests are not accepted, and finally start
3601 	 * the cleanup.
3602 	 */
3603 	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);
3604 
3605 	/*
3606 	 * We have found exactly one rport. Allocate memory for callback data.
3607 	 */
3608 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3609 	if (cb_data == NULL) {
3610 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3611 		rc = -ENOMEM;
3612 		goto out;
3613 	}
3614 
3615 	cb_data->nport = nport;
3616 	cb_data->rport = rport;
3617 	cb_data->port_handle = args->port_handle;
3618 	cb_data->fc_cb_func = api_data->cb_func;
3619 	cb_data->fc_cb_ctx = args->cb_ctx;
3620 
3621 	/*
3622 	 * Validate rport object state.
3623 	 */
3624 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3625 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3626 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3627 		/*
3628 		 * Deletion of this rport is already in progress. Register a
3629 		 * callback and return.
3630 		 */
3631 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3632 		rc = -ENODEV;
3633 		goto out;
3634 	} else {
3635 		/* rport partially created/deleted */
3636 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3637 		DEV_VERIFY(!"Invalid rport_state");
3638 		rc = -ENODEV;
3639 		goto out;
3640 	}
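
	/*
	 * State handling above, in summary:
	 *
	 *   CREATED        -> move to TO_BE_DELETED and proceed below
	 *   TO_BE_DELETED  -> a delete is already in flight: -ENODEV
	 *                     (until callback chaining is implemented)
	 *   other (ZOMBIE) -> invalid state: DEV_VERIFY and -ENODEV
	 */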
3641 
3642 	/*
3643 	 * We have successfully found an rport to delete. Call
3644 	 * nvmf_fc_adm_i_t_delete_assoc(), which will perform further
3645 	 * IT-delete processing as well as free the cb_data.
3646 	 */
3647 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3648 				     (void *)cb_data);
3649 
3650 out:
3651 	if (rc != 0) {
3652 		/*
3653 		 * We get here because either we encountered an error or we
3654 		 * did not find an rport to delete. As a result,
3655 		 * nvmf_fc_adm_i_t_delete_assoc() will not be called for
3656 		 * further IT-delete processing, so execute the callback
3657 		 * function now.
3658 		 */
3659 		if (cb_data) {
3660 			free(cb_data);
3661 		}
3662 		if (api_data->cb_func != NULL) {
3663 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3664 		}
3665 	}
3666 
3667 	snprintf(log_str, sizeof(log_str),
3668 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3669 		 args->nport_handle, num_rport, rc);
3670 
3671 	if (rc != 0) {
3672 		SPDK_ERRLOG("%s", log_str);
3673 	} else {
3674 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3675 	}
3676 
3677 	free(arg);
3678 }
3679 
3680 /*
3681  * Process a received ABTS.
3682  */
3683 static void
3684 nvmf_fc_adm_evnt_abts_recv(void *arg)
3685 {
3686 	ASSERT_SPDK_FC_MAIN_THREAD();
3687 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3688 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3689 	struct spdk_nvmf_fc_nport *nport = NULL;
3690 	int err = 0;
3691 
3692 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3693 		      args->oxid, args->rxid);
3694 
3695 	/*
3696 	 * 1. Make sure the nport exists.
3697 	 */
3698 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3699 	if (nport == NULL) {
3700 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3701 		err = -EINVAL;
3702 		goto out;
3703 	}
3704 
3705 	/*
3706 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3707 	 */
3708 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3709 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3710 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3711 			      args->rpi, args->oxid, args->rxid);
3712 		err = 0;
3713 		goto out;
3715 	}
3716 
3717 	/*
3718 	 * 3. Pass the received ABTS-LS to the library for handling.
3719 	 */
3720 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3721 
3722 out:
3723 	if (api_data->cb_func != NULL) {
3724 		/*
3725 		 * Pass a pointer to the args struct itself to cb_func, which
3726 		 * takes ownership and is responsible for freeing it.
3727 		 */
3728 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3729 	} else {
3730 		/* No callback set, free the args */
3731 		free(args);
3732 	}
3733 
3734 	free(arg);
3735 }
3736 
3737 /*
3738  * Callback function for hw port quiesce.
3739  */
3740 static void
3741 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3742 {
3743 	ASSERT_SPDK_FC_MAIN_THREAD();
3744 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3745 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3746 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3747 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3748 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3749 	struct spdk_nvmf_fc_port *fc_port = NULL;
3750 	char *dump_buf = NULL;
3751 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3752 
3753 	/*
3754 	 * Free the callback context struct.
3755 	 */
3756 	free(ctx);
3757 
3758 	if (err != 0) {
3759 		SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle);
3760 		goto out;
3761 	}
3762 
3763 	if (args->dump_queues == false) {
3764 		/*
3765 		 * Queues need not be dumped.
3766 		 */
3767 		goto out;
3768 	}
3769 
3770 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3771 
3772 	/*
3773 	 * Get the fc port.
3774 	 */
3775 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3776 	if (fc_port == NULL) {
3777 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3778 		err = -EINVAL;
3779 		goto out;
3780 	}
3781 
3782 	/*
3783 	 * Allocate memory for the dump buffer.
3784 	 * This memory will be freed by FCT.
3785 	 */
3786 	dump_buf = (char *)calloc(1, dump_buf_size);
3787 	if (dump_buf == NULL) {
3788 		err = -ENOMEM;
3789 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3790 		goto out;
3791 	}
3792 	*args->dump_buf = (uint32_t *)dump_buf;
3793 	dump_info.buffer = dump_buf;
3794 	dump_info.offset = 0;
3795 
3796 	/*
3797 	 * Add the dump reason to the top of the buffer.
3798 	 */
3799 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3800 
3801 	/*
3802 	 * Dump the hwqp.
3803 	 */
3804 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3805 				fc_port->num_io_queues, &dump_info);
3806 
3807 out:
3808 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3809 		      args->port_handle, args->dump_queues, err);
3810 
3811 	if (cb_func != NULL) {
3812 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3813 	}
3814 }
3815 
3816 /*
3817  * HW port reset: quiesce the HW port and, if requested, dump its
3818  * queues.
3819  */
3820 static void
3821 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3822 {
3823 	ASSERT_SPDK_FC_MAIN_THREAD();
3824 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3825 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3826 			api_data->api_args;
3827 	struct spdk_nvmf_fc_port *fc_port = NULL;
3828 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3829 	int err = 0;
3830 
3831 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3832 
3833 	/*
3834 	 * Make sure the physical port exists.
3835 	 */
3836 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3837 	if (fc_port == NULL) {
3838 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3839 		err = -EINVAL;
3840 		goto out;
3841 	}
3842 
3843 	/*
3844 	 * Save the reset event args and the callback in a context struct.
3845 	 */
3846 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3847 
3848 	if (ctx == NULL) {
3849 		err = -ENOMEM;
3850 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3851 		goto fail;
3852 	}
3853 
3854 	ctx->reset_args = args;
3855 	ctx->reset_cb_func = api_data->cb_func;
3856 
3857 	/*
3858 	 * Quiesce the hw port.
3859 	 */
3860 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3861 	if (err != 0) {
3862 		goto fail;
3863 	}
3864 
3865 	/*
3866 	 * Once the port is successfully quiesced, the reset processing will
3867 	 * continue in the callback function: nvmf_fc_adm_hw_port_quiesce_reset_cb
3868 	 */
3869 	return;
3870 fail:
3871 	free(ctx);
3872 
3873 out:
3874 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3875 		      err);
3876 
3877 	if (api_data->cb_func != NULL) {
3878 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3879 	}
3880 
3881 	free(arg);
3882 }
3883 
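/*
 * Hand fn off to the SPDK main (app) thread, where all of the adm event
 * handlers above run (this is what ASSERT_SPDK_FC_MAIN_THREAD() checks);
 * funneling them onto one thread keeps the port/nport/rport bookkeeping
 * single-threaded. Note that if the main thread is not available, the
 * message (and with it the api_data) is silently dropped.
 */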
3884 static inline void
3885 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3886 {
3887 	if (nvmf_fc_get_main_thread()) {
3888 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3889 	}
3890 }
3891 
3892 /*
3893  * Queue up an event in the SPDK main thread's event queue.
3894  * Used by the FC driver to notify the SPDK main thread of FC-related events.
3895  */
3896 int
3897 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3898 			   spdk_nvmf_fc_callback cb_func)
3899 {
3900 	int err = 0;
3901 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3902 	spdk_msg_fn event_fn = NULL;
3903 
3904 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3905 
3906 	if (event_type >= SPDK_FC_EVENT_MAX) {
3907 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3908 		err = -EINVAL;
3909 		goto done;
3910 	}
3911 
3912 	if (args == NULL) {
3913 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3914 		err = -EINVAL;
3915 		goto done;
3916 	}
3917 
3918 	api_data = calloc(1, sizeof(*api_data));
3919 
3920 	if (api_data == NULL) {
3921 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3922 		err = -ENOMEM;
3923 		goto done;
3924 	}
3925 
3926 	api_data->api_args = args;
3927 	api_data->cb_func = cb_func;
3928 
3929 	switch (event_type) {
3930 	case SPDK_FC_HW_PORT_INIT:
3931 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3932 		break;
3933 
3934 	case SPDK_FC_HW_PORT_FREE:
3935 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3936 		break;
3937 
3938 	case SPDK_FC_HW_PORT_ONLINE:
3939 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3940 		break;
3941 
3942 	case SPDK_FC_HW_PORT_OFFLINE:
3943 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3944 		break;
3945 
3946 	case SPDK_FC_NPORT_CREATE:
3947 		event_fn = nvmf_fc_adm_evnt_nport_create;
3948 		break;
3949 
3950 	case SPDK_FC_NPORT_DELETE:
3951 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3952 		break;
3953 
3954 	case SPDK_FC_IT_ADD:
3955 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3956 		break;
3957 
3958 	case SPDK_FC_IT_DELETE:
3959 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3960 		break;
3961 
3962 	case SPDK_FC_ABTS_RECV:
3963 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3964 		break;
3965 
3966 	case SPDK_FC_HW_PORT_RESET:
3967 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3968 		break;
3969 
3970 	case SPDK_FC_UNRECOVERABLE_ERR:
3971 	default:
3972 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3973 		err = -EINVAL;
3974 		break;
3975 	}
3976 
3977 done:
3978 
3979 	if (err == 0) {
3980 		assert(event_fn != NULL);
3981 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3982 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3983 	} else {
3984 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3985 		if (api_data) {
3986 			free(api_data);
3987 		}
3988 	}
3989 
3990 	return err;
3991 }
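
/*
 * Usage sketch (hypothetical driver code; lld_ctx, lld_it_add_done and the
 * s_id/rpi/WWN values are placeholders): enqueue an IT add from the
 * low-level driver. The args struct must stay valid until cb_func runs,
 * since the event handler dereferences it on the main thread; on enqueue
 * failure the caller still owns it.
 *
 *   struct spdk_nvmf_fc_hw_i_t_add_args *args = calloc(1, sizeof(*args));
 *
 *   if (args == NULL) {
 *           return -ENOMEM;
 *   }
 *   args->port_handle = port_handle;
 *   args->nport_handle = nport_handle;
 *   args->s_id = s_id;
 *   args->rpi = rpi;
 *   args->fc_nodename = rnode_name;
 *   args->fc_portname = rport_name;
 *   args->cb_ctx = lld_ctx;
 *
 *   if (nvmf_fc_main_enqueue_event(SPDK_FC_IT_ADD, args, lld_it_add_done) != 0) {
 *           free(args);
 *   }
 */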
3992 
3993 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
3994 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
3995 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3996