xref: /spdk/lib/nvmf/fc.c (revision da2fd6651a9cd4732b0910d30291821e77f4d643)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "nvmf_fc.h"
50 #include "fc_lld.h"
51 
#ifndef DEV_VERIFY
/* Development-time invariant check; maps to assert() unless overridden by the LLD. */
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
/* Assert that the caller is executing on the FC main (admin) thread. */
#define ASSERT_SPDK_FC_MAIN_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif

/*
 * PRLI service parameters
 *
 * Bit flags carried in the FC-NVMe PRLI (process login) service parameter
 * page; see nvmf_fc_get_prli_service_params() below for the bits this
 * target advertises.
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};

/*
 * Printable names for FC request states. NOTE(review): entries appear to
 * mirror enum spdk_nvmf_fc_request_state one-for-one; keep the two in the
 * same order when adding states.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};

/* Trace object id for FC I/O requests (registered in nvmf_fc_trace below). */
#define OBJECT_NVMF_FC_IO				0xA0

/* Tracepoint ids: one per request state, all in trace group 0x8. */
#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
#define TRACE_FC_REQ_FUSED_WAITING		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x10)

/* Entry counts for the per-hwqp connection and RPI hash tables. */
#define HWQP_CONN_TABLE_SIZE			8192
#define HWQP_RPI_TABLE_SIZE			4096
113 
114 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
115 {
116 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
117 	spdk_trace_register_description("FC_REQ_NEW",
118 					TRACE_FC_REQ_INIT,
119 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1,
120 					SPDK_TRACE_ARG_TYPE_INT, "");
121 	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
122 					TRACE_FC_REQ_READ_BDEV,
123 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
124 					SPDK_TRACE_ARG_TYPE_INT, "");
125 	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
126 					TRACE_FC_REQ_READ_XFER,
127 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
128 					SPDK_TRACE_ARG_TYPE_INT, "");
129 	spdk_trace_register_description("FC_REQ_READ_RSP",
130 					TRACE_FC_REQ_READ_RSP,
131 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
132 					SPDK_TRACE_ARG_TYPE_INT, "");
133 	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
134 					TRACE_FC_REQ_WRITE_BUFFS,
135 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
136 					SPDK_TRACE_ARG_TYPE_INT, "");
137 	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
138 					TRACE_FC_REQ_WRITE_XFER,
139 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
140 					SPDK_TRACE_ARG_TYPE_INT, "");
141 	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
142 					TRACE_FC_REQ_WRITE_BDEV,
143 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
144 					SPDK_TRACE_ARG_TYPE_INT, "");
145 	spdk_trace_register_description("FC_REQ_WRITE_RSP",
146 					TRACE_FC_REQ_WRITE_RSP,
147 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
148 					SPDK_TRACE_ARG_TYPE_INT, "");
149 	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
150 					TRACE_FC_REQ_NONE_BDEV,
151 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
152 					SPDK_TRACE_ARG_TYPE_INT, "");
153 	spdk_trace_register_description("FC_REQ_NONE_RSP",
154 					TRACE_FC_REQ_NONE_RSP,
155 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
156 					SPDK_TRACE_ARG_TYPE_INT, "");
157 	spdk_trace_register_description("FC_REQ_SUCCESS",
158 					TRACE_FC_REQ_SUCCESS,
159 					OWNER_NONE, OBJECT_NONE, 0,
160 					SPDK_TRACE_ARG_TYPE_INT, "");
161 	spdk_trace_register_description("FC_REQ_FAILED",
162 					TRACE_FC_REQ_FAILED,
163 					OWNER_NONE, OBJECT_NONE, 0,
164 					SPDK_TRACE_ARG_TYPE_INT, "");
165 	spdk_trace_register_description("FC_REQ_ABORTED",
166 					TRACE_FC_REQ_ABORTED,
167 					OWNER_NONE, OBJECT_NONE, 0,
168 					SPDK_TRACE_ARG_TYPE_INT, "");
169 	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
170 					TRACE_FC_REQ_BDEV_ABORTED,
171 					OWNER_NONE, OBJECT_NONE, 0,
172 					SPDK_TRACE_ARG_TYPE_INT, "");
173 	spdk_trace_register_description("FC_REQ_PENDING",
174 					TRACE_FC_REQ_PENDING,
175 					OWNER_NONE, OBJECT_NONE, 0,
176 					SPDK_TRACE_ARG_TYPE_INT, "");
177 	spdk_trace_register_description("FC_REQ_FUSED_WAITING",
178 					TRACE_FC_REQ_FUSED_WAITING,
179 					OWNER_NONE, OBJECT_NONE, 0,
180 					SPDK_TRACE_ARG_TYPE_INT, "");
181 }
182 
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;			/* API-specific argument block */
	spdk_nvmf_fc_callback cb_func;	/* caller's completion callback */
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};


/* Completion callback for an I_T association delete; err is 0 on success. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;		/* outstanding hwqp quiesce operations */
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

/* FC-specific transport wrapper around the generic nvmf transport. */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	pthread_mutex_t lock;	/* taken around g_nvmf_fgroups accesses below */
};

/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* NOTE(review): presumably invoked when transport destroy completes — confirm in destroy path. */
static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

/* Global list of FC ports known to the target (see nvmf_fc_port_add/remove/lookup). */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread returned by nvmf_fc_get_main_thread(); FC admin work runs here. */
static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

/* Count and list of FC poll groups; protected by g_nvmf_ftransport->lock. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
265 
/* Return the thread on which FC main/admin processing runs. */
struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}
271 
272 static inline void
273 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
274 			       enum spdk_nvmf_fc_request_state state)
275 {
276 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
277 
278 	switch (state) {
279 	case SPDK_NVMF_FC_REQ_INIT:
280 		/* Start IO tracing */
281 		tpoint_id = TRACE_FC_REQ_INIT;
282 		break;
283 	case SPDK_NVMF_FC_REQ_READ_BDEV:
284 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
285 		break;
286 	case SPDK_NVMF_FC_REQ_READ_XFER:
287 		tpoint_id = TRACE_FC_REQ_READ_XFER;
288 		break;
289 	case SPDK_NVMF_FC_REQ_READ_RSP:
290 		tpoint_id = TRACE_FC_REQ_READ_RSP;
291 		break;
292 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
293 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
294 		break;
295 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
296 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
297 		break;
298 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
299 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
300 		break;
301 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
302 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
303 		break;
304 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
305 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
306 		break;
307 	case SPDK_NVMF_FC_REQ_NONE_RSP:
308 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
309 		break;
310 	case SPDK_NVMF_FC_REQ_SUCCESS:
311 		tpoint_id = TRACE_FC_REQ_SUCCESS;
312 		break;
313 	case SPDK_NVMF_FC_REQ_FAILED:
314 		tpoint_id = TRACE_FC_REQ_FAILED;
315 		break;
316 	case SPDK_NVMF_FC_REQ_ABORTED:
317 		tpoint_id = TRACE_FC_REQ_ABORTED;
318 		break;
319 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
320 		tpoint_id = TRACE_FC_REQ_ABORTED;
321 		break;
322 	case SPDK_NVMF_FC_REQ_PENDING:
323 		tpoint_id = TRACE_FC_REQ_PENDING;
324 		break;
325 	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
326 		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
327 		break;
328 	default:
329 		assert(0);
330 		break;
331 	}
332 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
333 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
334 				  (uint64_t)(&fc_req->req));
335 	}
336 }
337 
338 static struct rte_hash *
339 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
340 {
341 	struct rte_hash_parameters hash_params = { 0 };
342 
343 	hash_params.entries = num_entries;
344 	hash_params.key_len = key_len;
345 	hash_params.name = name;
346 
347 	return rte_hash_create(&hash_params);
348 }
349 
/* Release the per-connection request pool; safe to call on an already-freed pool. */
void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;	/* guard against double free */
}
356 
357 int
358 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
359 {
360 	uint32_t i, qd;
361 	struct spdk_nvmf_fc_pooled_request *req;
362 
363 	/*
364 	 * Create number of fc-requests to be more than the actual SQ size.
365 	 * This is to handle race conditions where the target driver may send
366 	 * back a RSP and before the target driver gets to process the CQE
367 	 * for the RSP, the initiator may have sent a new command.
368 	 * Depending on the load on the HWQP, there is a slim possibility
369 	 * that the target reaps the RQE corresponding to the new
370 	 * command before processing the CQE corresponding to the RSP.
371 	 */
372 	qd = fc_conn->max_queue_depth * 2;
373 
374 	STAILQ_INIT(&fc_conn->pool_queue);
375 	fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),
376 				      sizeof(struct spdk_nvmf_fc_request));
377 	if (!fc_conn->pool_memory) {
378 		SPDK_ERRLOG("create fc req ring objects failed\n");
379 		goto error;
380 	}
381 	fc_conn->pool_size = qd;
382 	fc_conn->pool_free_elems = qd;
383 
384 	/* Initialise value in ring objects and link the objects */
385 	for (i = 0; i < qd; i++) {
386 		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
387 				i * sizeof(struct spdk_nvmf_fc_request));
388 
389 		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
390 	}
391 	return 0;
392 error:
393 	nvmf_fc_free_conn_reqpool(fc_conn);
394 	return -1;
395 }
396 
397 static inline struct spdk_nvmf_fc_request *
398 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
399 {
400 	struct spdk_nvmf_fc_request *fc_req;
401 	struct spdk_nvmf_fc_pooled_request *pooled_req;
402 	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;
403 
404 	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
405 	if (!pooled_req) {
406 		SPDK_ERRLOG("Alloc request buffer failed\n");
407 		return NULL;
408 	}
409 	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
410 	fc_conn->pool_free_elems -= 1;
411 
412 	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
413 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
414 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
415 
416 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
417 	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
418 	TAILQ_INIT(&fc_req->abort_cbs);
419 	return fc_req;
420 }
421 
422 static inline void
423 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
424 {
425 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
426 		/* Log an error for debug purpose. */
427 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
428 	}
429 
430 	/* set the magic to mark req as no longer valid. */
431 	fc_req->magic = 0xDEADBEEF;
432 
433 	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
434 	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
435 
436 	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
437 	fc_conn->pool_free_elems += 1;
438 }
439 
/* Drop the request from its poll group's pending-buffer queue. */
static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}
446 
447 int
448 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
449 {
450 	char name[64];
451 
452 	hwqp->fc_port = fc_port;
453 
454 	/* clear counters */
455 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
456 
457 	TAILQ_INIT(&hwqp->in_use_reqs);
458 	TAILQ_INIT(&hwqp->sync_cbs);
459 	TAILQ_INIT(&hwqp->ls_pending_queue);
460 
461 	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
462 	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
463 				     sizeof(uint64_t));
464 	if (!hwqp->connection_list_hash) {
465 		SPDK_ERRLOG("Failed to create connection hash table.\n");
466 		return -ENOMEM;
467 	}
468 
469 	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
470 	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
471 	if (!hwqp->rport_list_hash) {
472 		SPDK_ERRLOG("Failed to create rpi hash table.\n");
473 		rte_hash_free(hwqp->connection_list_hash);
474 		return -ENOMEM;
475 	}
476 
477 	/* Init low level driver queues */
478 	nvmf_fc_init_q(hwqp);
479 	return 0;
480 }
481 
482 static struct spdk_nvmf_fc_poll_group *
483 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
484 {
485 	uint32_t max_count = UINT32_MAX;
486 	struct spdk_nvmf_fc_poll_group *fgroup;
487 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
488 
489 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
490 	/* find poll group with least number of hwqp's assigned to it */
491 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
492 		if (fgroup->hwqp_count < max_count) {
493 			ret_fgroup = fgroup;
494 			max_count = fgroup->hwqp_count;
495 		}
496 	}
497 
498 	if (ret_fgroup) {
499 		ret_fgroup->hwqp_count++;
500 		hwqp->thread = ret_fgroup->group.group->thread;
501 		hwqp->fgroup = ret_fgroup;
502 	}
503 
504 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
505 
506 	return ret_fgroup;
507 }
508 
509 bool
510 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
511 {
512 	struct spdk_nvmf_fc_poll_group *tmp;
513 	bool rc = false;
514 
515 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
516 	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
517 		if (tmp == fgroup) {
518 			rc = true;
519 			break;
520 		}
521 	}
522 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
523 	return rc;
524 }
525 
526 void
527 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
528 {
529 	assert(hwqp);
530 	if (hwqp == NULL) {
531 		SPDK_ERRLOG("Error: hwqp is NULL\n");
532 		return;
533 	}
534 
535 	assert(g_nvmf_fgroup_count);
536 
537 	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
538 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
539 		return;
540 	}
541 
542 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
543 }
544 
545 static void
546 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
547 {
548 	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;
549 
550 	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
551 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
552 			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
553 	} else {
554 		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
555 	}
556 
557 	if (args->cb_fn) {
558 		args->cb_fn(args->cb_ctx, 0);
559 	}
560 
561 	free(args);
562 }
563 
/*
 * Detach 'hwqp' from its poll group and ask that group's poller to stop
 * servicing it. On the asynchronous path the caller's cb_fn fires from
 * nvmf_fc_poll_group_remove_hwqp_cb(); on every early-out path it fires
 * here with 'rc'.
 */
void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		/* Decrement the group's hwqp count only if it is still on the list. */
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		/* tmp is NULL (list exhausted) when the group was not found. */
		if (tmp != hwqp->fgroup) {
			/* Pollgroup was already removed. Dont bother. */
			goto done;
		}

		/* args are freed by the completion callback on success. */
		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		/* Async path: completion callback reports to the caller. */
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}
623 
/*
 * Note: This needs to be used only on main poller.
 */
/* Monotonically increasing id used to correlate ABTS/queue-sync operations.
 * Not thread-safe (static counter), hence the main-poller-only restriction. */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}
634 
635 static void
636 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
637 {
638 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
639 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;
640 
641 	ctx->hwqps_responded++;
642 
643 	if (ctx->hwqps_responded < ctx->num_hwqps) {
644 		/* Wait for all pollers to complete. */
645 		return;
646 	}
647 
648 	/* Free the queue sync poller args. */
649 	free(ctx->sync_poller_args);
650 
651 	/* Mark as queue synced */
652 	ctx->queue_synced = true;
653 
654 	/* Reset the ctx values */
655 	ctx->hwqps_responded = 0;
656 	ctx->handled = false;
657 
658 	SPDK_DEBUGLOG(nvmf_fc,
659 		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
660 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
661 
662 	/* Resend ABTS to pollers */
663 	args = ctx->abts_poller_args;
664 	for (int i = 0; i < ctx->num_hwqps; i++) {
665 		poller_arg = args + i;
666 		nvmf_fc_poller_api_func(poller_arg->hwqp,
667 					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
668 					poller_arg);
669 	}
670 }
671 
/*
 * Retry path for an ABTS whose OX_ID was not found on any hwqp: issue a
 * queue sync to every involved hwqp so in-flight completions drain, then
 * nvmf_fc_queue_synced_cb() re-delivers the ABTS.
 *
 * Returns 0 if the sync was kicked off, -EPERM if the LLD has no queue
 * sync support, -EINVAL/-ENOMEM on bad ctx or allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	/* One queue-sync arg per hwqp involved in the original ABTS;
	 * freed in nvmf_fc_queue_synced_cb() once all pollers respond. */
	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Target the same hwqps as the original ABTS poller args. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
726 
/*
 * Per-hwqp ABTS completion. Once the last poller responds: send a BLS
 * accept if any poller found the OX_ID; otherwise attempt a single
 * queue-sync retry (nvmf_fc_handle_abts_notfound) before rejecting.
 * Frees the ctx and its poller args on every terminal path.
 */
static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport  = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		/* At least one poller located the exchange. */
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Re-lookup the nport; it may have been deleted while pollers ran. */
	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				/* Sync kicked off; ctx stays alive for the retry. */
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}
779 
/*
 * Entry point for a received ABTS frame. Collects the set of hwqps that
 * carry at least one connection for the given RPI and fans the ABTS out
 * to each; nvmf_fc_abts_handled_cb() aggregates the results and sends the
 * BLS accept/reject. If no connection matches, or any allocation fails,
 * a BLS reject is sent immediately.
 */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* De-duplicate: each hwqp is recorded once regardless of how many
	 * of the RPI's connections it carries. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	/* args and ctx are freed by nvmf_fc_abts_handled_cb() on the last response. */
	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	/* The temp hwqp array is only needed for fan-out. */
	free(hwqps);

	return;
bls_rej:
	/* ctx is NULL on every path reaching here, so only args/hwqps need freeing. */
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
880 
881 /*** Accessor functions for the FC structures - BEGIN */
882 /*
883  * Returns true if the port is in offline state.
884  */
885 bool
886 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
887 {
888 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
889 		return true;
890 	}
891 
892 	return false;
893 }
894 
895 /*
896  * Returns true if the port is in online state.
897  */
898 bool
899 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
900 {
901 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
902 		return true;
903 	}
904 
905 	return false;
906 }
907 
908 int
909 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
910 {
911 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
912 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
913 		return 0;
914 	}
915 
916 	return -EPERM;
917 }
918 
919 int
920 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
921 {
922 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
923 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
924 		return 0;
925 	}
926 
927 	return -EPERM;
928 }
929 
930 int
931 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
932 {
933 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
934 		hwqp->state = SPDK_FC_HWQP_ONLINE;
935 		/* reset some queue counters */
936 		hwqp->num_conns = 0;
937 		return nvmf_fc_set_q_online_state(hwqp, true);
938 	}
939 
940 	return -EPERM;
941 }
942 
943 int
944 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
945 {
946 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
947 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
948 		return nvmf_fc_set_q_online_state(hwqp, false);
949 	}
950 
951 	return -EPERM;
952 }
953 
/* Register a new FC port on the global list and with the low level driver. */
void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}
964 
/* Unregister an FC port from the global list and from the low level driver. */
static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}
975 
976 struct spdk_nvmf_fc_port *
977 nvmf_fc_port_lookup(uint8_t port_hdl)
978 {
979 	struct spdk_nvmf_fc_port *fc_port = NULL;
980 
981 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
982 		if (fc_port->port_hdl == port_hdl) {
983 			return fc_port;
984 		}
985 	}
986 	return NULL;
987 }
988 
989 uint32_t
990 nvmf_fc_get_prli_service_params(void)
991 {
992 	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
993 }
994 
995 int
996 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
997 		       struct spdk_nvmf_fc_nport *nport)
998 {
999 	if (fc_port) {
1000 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
1001 		fc_port->num_nports++;
1002 		return 0;
1003 	}
1004 
1005 	return -EINVAL;
1006 }
1007 
1008 int
1009 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
1010 			  struct spdk_nvmf_fc_nport *nport)
1011 {
1012 	if (fc_port && nport) {
1013 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
1014 		fc_port->num_nports--;
1015 		return 0;
1016 	}
1017 
1018 	return -EINVAL;
1019 }
1020 
1021 static struct spdk_nvmf_fc_nport *
1022 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
1023 {
1024 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1025 
1026 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
1027 		if (fc_nport->nport_hdl == nport_hdl) {
1028 			return fc_nport;
1029 		}
1030 	}
1031 
1032 	return NULL;
1033 }
1034 
/* Resolve (port handle, nport handle) to an nport object.
 * Returns NULL if either the port or the nport is unknown. */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	return fc_port ? nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl) : NULL;
}
1047 
/*
 * Resolve the nport (matched by destination FC address d_id) and the remote
 * port (matched by source FC address s_id) that a received frame belongs to.
 *
 * On success both *nport and *rport are set and 0 is returned.  Returns
 * -EINVAL on a NULL argument and -ENOENT when no match exists.  The
 * out-parameters are left untouched on failure.
 */
static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	/* The asserts catch programming errors in debug builds; the NULL
	 * checks keep release builds from dereferencing a bad pointer. */
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	/* d_id selects at most one nport; once it matches, only that nport's
	 * remote-port list is searched (hence the break below). */
	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}
1087 
1088 /* Returns true if the Nport is empty of all rem_ports */
1089 bool
1090 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1091 {
1092 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1093 		assert(nport->rport_count == 0);
1094 		return true;
1095 	} else {
1096 		return false;
1097 	}
1098 }
1099 
1100 int
1101 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1102 			enum spdk_nvmf_fc_object_state state)
1103 {
1104 	if (nport) {
1105 		nport->nport_state = state;
1106 		return 0;
1107 	} else {
1108 		return -EINVAL;
1109 	}
1110 }
1111 
/*
 * Add a remote port to the nport's list and bump the count.
 *
 * NOTE(review): the return type is bool but the body returns 0 / -EINVAL
 * (int error-code style).  -EINVAL converts to true, so callers observe
 * "false" on success and "true" on failure -- verify all call sites before
 * changing either the type or the values.
 */
bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1124 
/*
 * Remove a remote port from the nport's list and decrement the count.
 *
 * NOTE(review): same bool-vs-int mismatch as nvmf_fc_nport_add_rem_port --
 * returns 0 (false) on success and -EINVAL (true) on failure.  Verify call
 * sites before changing.
 */
bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1137 
1138 int
1139 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1140 			enum spdk_nvmf_fc_object_state state)
1141 {
1142 	if (rport) {
1143 		rport->rport_state = state;
1144 		return 0;
1145 	} else {
1146 		return -EINVAL;
1147 	}
1148 }
1149 int
1150 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1151 			enum spdk_nvmf_fc_object_state state)
1152 {
1153 	if (assoc) {
1154 		assoc->assoc_state = state;
1155 		return 0;
1156 	} else {
1157 		return -EINVAL;
1158 	}
1159 }
1160 
1161 static struct spdk_nvmf_fc_association *
1162 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1163 {
1164 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1165 	struct spdk_nvmf_fc_conn *fc_conn;
1166 
1167 	if (!qpair) {
1168 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1169 		return NULL;
1170 	}
1171 
1172 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1173 
1174 	return fc_conn->fc_assoc;
1175 }
1176 
/*
 * Check whether the given controller's association is bound to the nport
 * identified by (port_hdl, nport_hdl).  Returns false when the controller
 * is NULL, the nport is unknown, or the association targets another nport.
 */
bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	/* Compare the association's target port against the looked-up nport. */
	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}
1203 
1204 static void
1205 nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
1206 			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
1207 {
1208 	assert(ls_rqst);
1209 
1210 	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1211 
1212 	/* Return buffer to chip */
1213 	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
1214 }
1215 
1216 static int
1217 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
1218 			  struct spdk_nvmf_fc_nport *nport,
1219 			  struct spdk_nvmf_fc_remote_port_info *rport)
1220 {
1221 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1222 	int num_deleted = 0;
1223 
1224 	assert(hwqp);
1225 	assert(nport);
1226 	assert(rport);
1227 
1228 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1229 		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
1230 			num_deleted++;
1231 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1232 		}
1233 	}
1234 	return num_deleted;
1235 }
1236 
/*
 * Abort handler, run on the hwqp's thread, for a request sitting in the
 * bdev layer.  Only AER requests are actually cancelled here; see the
 * comments below for why the other admin/fabric commands are left to run
 * to completion.
 */
static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	/* Scan all AER slots for this request and free the matching AER. */
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}
1268 
/*
 * Final stage of a request abort: free the request, then notify every
 * caller that registered an abort callback.  The callback list is detached
 * from fc_req first so it can still be walked after the request memory is
 * recycled by _nvmf_fc_request_free().
 */
void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}
1297 
/*
 * Abort an in-flight FC request.  If cb is given it is queued on the
 * request's abort-callback list and invoked from
 * nvmf_fc_request_abort_complete().  send_abts controls whether an ABTS is
 * sent on the wire when the request owns an exchange.  The action taken
 * depends on the request's current state (see the switch below); requests
 * already marked aborted are left alone.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	/* Abort was handed off (bdev message or HBA abort); completion will
	 * arrive asynchronously. */
	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1384 
1385 static int
1386 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1387 {
1388 	uint32_t length = fc_req->req.length;
1389 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1390 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1391 	struct spdk_nvmf_transport *transport = group->transport;
1392 
1393 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1394 		return -ENOMEM;
1395 	}
1396 
1397 	return 0;
1398 }
1399 
/*
 * Start executing a parsed FC request: acquire an exchange (unless the
 * send-frame path is used) and data buffers, then either begin the
 * host-to-controller data transfer (writes) or hand the request to the
 * generic NVMf layer (reads / no-data commands).
 *
 * Returns 0 once the request is launched (even if a write transfer is
 * subsequently dropped -- see comment below) and -EAGAIN when an exchange
 * or buffers are temporarily unavailable, so the caller can queue it.
 */
static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we dont use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			/* Give the exchange back before retrying later. */
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped return success to caller */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
1447 
/*
 * Extract the VMID (from an optional device header whose position depends
 * on which other optional headers precede it) and the CS_CTL priority from
 * the FC frame header, storing them on the request.
 */
static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		/* ESP and network headers, when present, come before the
		 * device header carrying the VMID. */
		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}
1478 
/*
 * Validate and launch an NVMe command frame received on a hwqp.
 *
 * Performs, in order: CMD IU format checks, transfer-direction check,
 * connection lookup by conn_id, s_id/d_id verification against that
 * connection, association/connection/qpair state checks, and an MDTS
 * length check.  On success a request object is allocated, populated from
 * the frame and CMD IU, and either executed immediately or queued on the
 * poll group's pending-buffer queue.
 *
 * Returns 0 when the request was accepted (executed or queued); a negative
 * errno identifies which validation failed.
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	/* FC addresses are 24-bit big-endian fields. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Look up the connection this command belongs to. */
	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	/* Populate the request from the CMD IU and frame header. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	/* Preserve ordering: if anything is already pending, queue behind it;
	 * otherwise try to execute and queue only on transient failure. */
	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}
1597 
1598 /*
1599  * These functions are called from the FC LLD
1600  */
1601 
1602 void
1603 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1604 {
1605 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1606 	struct spdk_nvmf_transport_poll_group *group;
1607 
1608 	if (!fc_req) {
1609 		return;
1610 	}
1611 
1612 	if (fc_req->xchg) {
1613 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1614 		fc_req->xchg = NULL;
1615 	}
1616 
1617 	/* Release IO buffers */
1618 	if (fc_req->req.data_from_pool) {
1619 		group = &hwqp->fgroup->group;
1620 		spdk_nvmf_request_free_buffers(&fc_req->req, group,
1621 					       group->transport);
1622 	}
1623 	fc_req->req.data = NULL;
1624 	fc_req->req.iovcnt  = 0;
1625 
1626 	/* Free Fc request */
1627 	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
1628 }
1629 
/*
 * Transition an FC request to a new state, logging the transition and
 * recording a trace point.  The magic assert guards against operating on a
 * request that has already been freed.
 */
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1643 
1644 char *
1645 nvmf_fc_request_get_state_str(int state)
1646 {
1647 	static char *unk_str = "unknown";
1648 
1649 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1650 		fc_req_state_strs[state] : unk_str);
1651 }
1652 
/*
 * Dispatch a raw frame received on a hwqp.  LS (link service) frames are
 * validated against the nport/rport tables and handed to the LS module (or
 * queued when no exchange is available); NVMe command frames go through
 * nvmf_fc_hwqp_handle_request(); anything else is counted and dropped.
 *
 * Returns 0 on success, negative errno when the frame is rejected.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* FC addresses are 24-bit big-endian fields. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		/* Keep LS ordering: only grab an exchange when nothing is
		 * already waiting, otherwise queue behind the pending ones. */
		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			/* Command data was copied out; the RQ buffer can go
			 * back to the hardware immediately. */
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1761 
/*
 * Retry execution of requests queued on this hwqp's poll group waiting for
 * buffers/exchanges, bounded by a per-call budget so one hwqp cannot
 * monopolize the poller.
 */
void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* LS queue is tied to acceptor_poll group and LS pending requests
		 * are staged and processed using hwqp->ls_pending_queue.
		 */
		return;
	}

	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
		if (!nvmf_fc_request_execute(fc_req)) {
			/* Successfully posted, Delete from pending. */
			nvmf_fc_request_remove_from_pending(fc_req);
		}

		if (budget) {
			budget--;
		} else {
			return;
		}
	}
}
1790 
/*
 * Retry LS requests queued on this hwqp waiting for an exchange.  Each
 * entry's nport/rport is re-validated first (they may have gone away while
 * the request was pending); stale entries are dropped.  Processing stops
 * as soon as no exchange can be obtained.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1836 
/*
 * Transmit the response for a completed FC request.  Sends an extended
 * response (ERSP) IU when nvmf_fc_send_ersp_required() says one is needed;
 * otherwise sends the short (zero-payload) response.  Returns the transmit
 * result from nvmf_fc_xmt_rsp().
 */
int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transfered_len)) {
		/* Fill ERSP Len */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1877 
/*
 * Decide whether a completed request needs an extended response (ERSP)
 * rather than the short response; see the numbered criteria below.
 * (Criterion 4, SQ ~90% full, is not evaluated in this function.)
 */
bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	/* 0xFFFE masks off the phase bit so only a non-success status code
	 * triggers an ERSP. */
	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1906 
/*
 * Transport completion callback for the generic NVMf layer.  Aborted
 * requests are handed to the poller for deferred cleanup; successful reads
 * start the controller-to-host data transfer; everything else sends the
 * response directly.  Always returns 0 (errors are logged and the request
 * is freed here).
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1942 
1943 struct spdk_nvmf_tgt *
1944 nvmf_fc_get_tgt(void)
1945 {
1946 	if (g_nvmf_ftransport) {
1947 		return g_nvmf_ftransport->transport.tgt;
1948 	}
1949 	return NULL;
1950 }
1951 
1952 /*
1953  * FC Transport Public API begins here
1954  */
1955 
/* Default transport option values advertised by the FC transport. */
#define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
#define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
#define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
#define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
#define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
#define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
#define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
#define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1965 
1966 static void
1967 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1968 {
1969 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1970 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1971 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1972 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1973 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1974 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1975 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1976 }
1977 
/*
 * Create the (singleton) FC transport: validates the options, allocates
 * the transport object, and initializes the low-level FC driver.  Returns
 * NULL on duplicate creation, insufficient cores, an unsupported IO unit
 * size, or allocation/mutex failure.
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only one FC transport instance is supported. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* The IO unit size must allow max_io_size within the SGE limit. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_main_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
2031 
2032 static void
2033 nvmf_fc_destroy_done_cb(void *cb_arg)
2034 {
2035 	free(g_nvmf_ftransport);
2036 	if (g_transport_destroy_done_cb) {
2037 		g_transport_destroy_done_cb(cb_arg);
2038 		g_transport_destroy_done_cb = NULL;
2039 	}
2040 }
2041 
/*
 * Tear down the FC transport. Remaining poll groups are freed inline; the
 * transport object itself is freed later by nvmf_fc_destroy_done_cb() once
 * nvmf_fc_lld_fini() completes.
 *
 * NOTE(review): when transport == NULL, cb_fn is never invoked — confirm
 * callers never pass NULL while expecting a completion callback.
 */
static int
nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	if (transport) {
		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;

		/* clean up any FC poll groups still around */
		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
			free(fgroup);
		}
		g_nvmf_fgroup_count = 0;
		/* Stash the callback; nvmf_fc_destroy_done_cb() fires it after
		 * the low level driver finishes. */
		g_transport_destroy_done_cb = cb_fn;

		/* low level FC driver clean up */
		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
	}

	return 0;
}
2063 
/* Intentional no-op: FC listeners are established via the HW port admin
 * events in this file, not through the generic listen interface. */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
	       struct spdk_nvmf_listen_opts *listen_opts)
{
	return 0;
}
2070 
/* Intentional no-op; see nvmf_fc_listen() — FC ports are managed through
 * the HW port admin events instead. */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
2076 
2077 static uint32_t
2078 nvmf_fc_accept(struct spdk_nvmf_transport *transport)
2079 {
2080 	struct spdk_nvmf_fc_port *fc_port = NULL;
2081 	uint32_t count = 0;
2082 	static bool start_lld = false;
2083 
2084 	if (spdk_unlikely(!start_lld)) {
2085 		start_lld  = true;
2086 		nvmf_fc_lld_start();
2087 	}
2088 
2089 	/* poll the LS queue on each port */
2090 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2091 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2092 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2093 		}
2094 	}
2095 
2096 	return count;
2097 }
2098 
2099 static void
2100 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2101 		 struct spdk_nvme_transport_id *trid,
2102 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2103 {
2104 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2105 	entry->adrfam = trid->adrfam;
2106 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2107 
2108 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2109 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2110 }
2111 
2112 static struct spdk_nvmf_transport_poll_group *
2113 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
2114 {
2115 	struct spdk_nvmf_fc_poll_group *fgroup;
2116 	struct spdk_nvmf_fc_transport *ftransport =
2117 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2118 
2119 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2120 	if (!fgroup) {
2121 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2122 		return NULL;
2123 	}
2124 
2125 	TAILQ_INIT(&fgroup->hwqp_list);
2126 
2127 	pthread_mutex_lock(&ftransport->lock);
2128 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2129 	g_nvmf_fgroup_count++;
2130 	pthread_mutex_unlock(&ftransport->lock);
2131 
2132 	return &fgroup->group;
2133 }
2134 
2135 static void
2136 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2137 {
2138 	struct spdk_nvmf_fc_poll_group *fgroup;
2139 	struct spdk_nvmf_fc_transport *ftransport =
2140 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2141 
2142 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2143 	pthread_mutex_lock(&ftransport->lock);
2144 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2145 	g_nvmf_fgroup_count--;
2146 	pthread_mutex_unlock(&ftransport->lock);
2147 
2148 	free(fgroup);
2149 }
2150 
2151 static int
2152 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2153 		       struct spdk_nvmf_qpair *qpair)
2154 {
2155 	struct spdk_nvmf_fc_poll_group *fgroup;
2156 	struct spdk_nvmf_fc_conn *fc_conn;
2157 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2158 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2159 	bool hwqp_found = false;
2160 
2161 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2162 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2163 
2164 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2165 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2166 			hwqp_found = true;
2167 			break;
2168 		}
2169 	}
2170 
2171 	if (!hwqp_found) {
2172 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2173 		goto err;
2174 	}
2175 
2176 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2177 					 &fc_conn->conn_id,
2178 					 fc_conn->max_queue_depth)) {
2179 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2180 		goto err;
2181 	}
2182 
2183 	fc_conn->hwqp = hwqp;
2184 
2185 	/* If this is for ADMIN connection, then update assoc ID. */
2186 	if (fc_conn->qpair.qid == 0) {
2187 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2188 	}
2189 
2190 	api_data = &fc_conn->create_opd->u.add_conn;
2191 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2192 	return 0;
2193 err:
2194 	return -1;
2195 }
2196 
2197 static int
2198 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2199 {
2200 	uint32_t count = 0;
2201 	struct spdk_nvmf_fc_poll_group *fgroup;
2202 	struct spdk_nvmf_fc_hwqp *hwqp;
2203 
2204 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2205 
2206 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2207 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2208 			count += nvmf_fc_process_queue(hwqp);
2209 		}
2210 	}
2211 
2212 	return (int) count;
2213 }
2214 
2215 static int
2216 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2217 {
2218 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2219 
2220 	if (!fc_req->is_aborted) {
2221 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2222 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2223 	} else {
2224 		nvmf_fc_request_abort_complete(fc_req);
2225 	}
2226 
2227 	return 0;
2228 }
2229 
2230 static void
2231 nvmf_fc_connection_delete_done_cb(void *arg)
2232 {
2233 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2234 
2235 	if (fc_ctx->cb_fn) {
2236 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2237 	}
2238 	free(fc_ctx);
2239 }
2240 
/*
 * Runs on the FC main thread (sent from nvmf_fc_close_qpair()).
 *
 * Three cases:
 *  - connection never got a conn_id: undo the pending LS add-connection, if any;
 *  - connection fully created: start an async delete and return early, the
 *    delete-done callback finishes up;
 *  - otherwise (or if the delete fails): complete immediately below.
 */
static void
_nvmf_fc_close_qpair(void *arg)
{
	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
	struct spdk_nvmf_fc_conn *fc_conn;
	int rc;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

		/* Connection creation was still in progress; fail it back
		 * through the LS layer. */
		if (fc_conn->create_opd) {
			api_data = &fc_conn->create_opd->u.add_conn;

			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
						    api_data->args.fc_conn, api_data->aq_conn);
		}
	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		rc = nvmf_fc_delete_connection(fc_conn, false, true,
					       nvmf_fc_connection_delete_done_cb, fc_ctx);
		if (!rc) {
			/* Wait for transport to complete its work. */
			return;
		}

		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
	}

	/* Synchronous completion path (creation-failure or delete-failure). */
	nvmf_fc_connection_delete_done_cb(fc_ctx);
}
2272 
2273 static void
2274 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2275 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2276 {
2277 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2278 
2279 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2280 	if (!fc_ctx) {
2281 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.");
2282 		if (cb_fn) {
2283 			cb_fn(cb_arg);
2284 		}
2285 		return;
2286 	}
2287 	fc_ctx->qpair = qpair;
2288 	fc_ctx->cb_fn = cb_fn;
2289 	fc_ctx->cb_ctx = cb_arg;
2290 	fc_ctx->qpair_thread = spdk_get_thread();
2291 
2292 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2293 }
2294 
2295 static int
2296 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2297 			    struct spdk_nvme_transport_id *trid)
2298 {
2299 	struct spdk_nvmf_fc_conn *fc_conn;
2300 
2301 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2302 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2303 	return 0;
2304 }
2305 
2306 static int
2307 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2308 			     struct spdk_nvme_transport_id *trid)
2309 {
2310 	struct spdk_nvmf_fc_conn *fc_conn;
2311 
2312 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2313 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2314 	return 0;
2315 }
2316 
2317 static int
2318 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2319 			      struct spdk_nvme_transport_id *trid)
2320 {
2321 	struct spdk_nvmf_fc_conn *fc_conn;
2322 
2323 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2324 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2325 	return 0;
2326 }
2327 
/* Abort handler for the core layer: no transport-specific abort work is
 * done here, the abort request is simply completed back to the core. */
static void
nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
			    struct spdk_nvmf_request *req)
{
	spdk_nvmf_request_complete(req);
}
2334 
/* Function table registered with the generic NVMf transport layer for FC. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* listen/stop_listen are no-ops for FC; accept drives the LS queues. */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	/* nvmf_fc_request_complete is defined earlier in this file. */
	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
	.qpair_abort_request = nvmf_fc_qpair_abort_request,
};
2361 
2362 /* Initializes the data for the creation of a FC-Port object in the SPDK
2363  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2364  * the API to the library. The contents added to this well defined structure
2365  * is private to each vendors implementation.
2366  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	int rc = 0;
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	fc_port->port_hdl       = args->port_handle;
	fc_port->lld_fc_port	= args->lld_fc_port;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
	fc_port->ls_queue.is_ls_queue = true;

	/*
	 * Initialize the LS queue.
	 */
	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
	if (rc) {
		/* Assumed: nvmf_fc_init_hwqp cleans up after itself on
		 * failure — TODO confirm, no hashes are freed here. */
		return rc;
	}

	/*
	 * Initialize the IO queues.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->is_ls_queue = false;
		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
		if (rc) {
			/* Unwind: free the hashes of every previously
			 * initialized IO queue (indices [0, i-1]) and of the
			 * already-initialized LS queue. */
			for (; i > 0; --i) {
				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
			}
			rte_hash_free(fc_port->ls_queue.connection_list_hash);
			rte_hash_free(fc_port->ls_queue.rport_list_hash);
			return rc;
		}
	}

	/*
	 * Initialize the LS processing for port
	 */
	nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	return 0;
}
2437 
2438 /*
2439  * FC port must have all its nports deleted before transitioning to offline state.
2440  */
static void
nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
{
	struct spdk_nvmf_fc_nport *nport = NULL;
	/* All nports must have been deleted at this point for this fc port */
	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	/* Mark the nport states to be zombie, if they exist */
	/* Defensive path for builds where DEV_VERIFY does not abort: any
	 * nport that unexpectedly survived is marked zombie rather than
	 * left in a live state. */
	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		}
	}
}
2455 
/*
 * Completion callback for an IT (initiator-target) delete. Forwards the
 * result to the user's callback (if any) and frees the callback data.
 * Runs on the FC main thread.
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	/* Snapshot the identifying fields for logging; cb_data is freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	free(cb_data);

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2498 
/*
 * Per-association completion callback for an IT delete (scheduled by
 * nvmf_fc_adm_i_t_delete_assoc()). When the rport's association count
 * drops to zero, the rport is unlinked, the caller's callback fires, and
 * the shared cb_data is freed. Runs on the FC main thread.
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot identifying fields for logging; rport/cb_data may be freed
	 * before the log is emitted. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* Manually drop the counts that a successful delete would have
		 * decremented, so the zero check below can still be reached. */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		free(rport);
		/* args == cb_data; shared across all scheduled deletes,
		 * freed only on this final invocation. */
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2560 
2561 /**
2562  * Process a IT delete.
2563  */
/*
 * Schedule deletion of every association on @nport that belongs to the
 * remote port @rport (matched by s_id). @cb_func/@cb_ctx fire once the last
 * scheduled delete completes (via nvmf_fc_adm_i_t_delete_assoc_cb), or
 * immediately if nothing could be scheduled. The allocated cb_data is owned
 * and freed by that callback.
 */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = nvmf_fc_delete_association(nport,
							       assoc->assoc_id,
							       false /* send abts */, false,
							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2646 
/*
 * Per-queue quiesce completion callback. Decrements the shared quiesce
 * count; when the last queue (LS + all IO queues) reports in, marks the
 * port quiesced, invokes the port-level callback and frees the shared
 * context. Runs on the FC main thread.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* Per-queue args were allocated by nvmf_fc_adm_hw_queue_quiesce(). */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the called of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2699 
2700 static int
2701 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2702 			     spdk_nvmf_fc_poller_api_cb cb_func)
2703 {
2704 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2705 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2706 	int err = 0;
2707 
2708 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2709 
2710 	if (args == NULL) {
2711 		err = -ENOMEM;
2712 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2713 		goto done;
2714 	}
2715 	args->hwqp = fc_hwqp;
2716 	args->ctx = ctx;
2717 	args->cb_info.cb_func = cb_func;
2718 	args->cb_info.cb_data = args;
2719 	args->cb_info.cb_thread = spdk_get_thread();
2720 
2721 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2722 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2723 	if (rc) {
2724 		free(args);
2725 		err = -EINVAL;
2726 	}
2727 
2728 done:
2729 	return err;
2730 }
2731 
2732 /*
2733  * Hw port Quiesce
2734  */
/*
 * Quiesce all queues (LS + IO) of a HW port. @cb_func fires once every
 * queue has reported in (see nvmf_fc_adm_queue_quiesce_cb), or directly
 * if the port is already offline/quiesced. Returns 0 on success, negative
 * errno if nothing could be scheduled.
 */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if an IO queue quiesce fails after earlier queues were
	 * successfully scheduled, err != 0 frees the ctx here while those
	 * pending callbacks still reference it — confirm whether this path can
	 * be reached with callbacks outstanding. */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2807 
2808 /*
2809  * Initialize and add a HW port entry to the global
2810  * HW port list.
2811  */
2812 static void
2813 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2814 {
2815 	ASSERT_SPDK_FC_MAIN_THREAD();
2816 	struct spdk_nvmf_fc_port *fc_port = NULL;
2817 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2818 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2819 			api_data->api_args;
2820 	int err = 0;
2821 
2822 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2823 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2824 		err = EINVAL;
2825 		goto abort_port_init;
2826 	}
2827 
2828 	/*
2829 	 * 1. Check for duplicate initialization.
2830 	 */
2831 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2832 	if (fc_port != NULL) {
2833 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2834 		goto abort_port_init;
2835 	}
2836 
2837 	/*
2838 	 * 2. Get the memory to instantiate a fc port.
2839 	 */
2840 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2841 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2842 	if (fc_port == NULL) {
2843 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2844 		err = -ENOMEM;
2845 		goto abort_port_init;
2846 	}
2847 
2848 	/* assign the io_queues array */
2849 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2850 				     struct spdk_nvmf_fc_port));
2851 
2852 	/*
2853 	 * 3. Initialize the contents for the FC-port
2854 	 */
2855 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2856 
2857 	if (err != 0) {
2858 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2859 		DEV_VERIFY(!"Data initialization failed for fc_port");
2860 		goto abort_port_init;
2861 	}
2862 
2863 	/*
2864 	 * 4. Add this port to the global fc port list in the library.
2865 	 */
2866 	nvmf_fc_port_add(fc_port);
2867 
2868 abort_port_init:
2869 	if (err && fc_port) {
2870 		free(fc_port);
2871 	}
2872 	if (api_data->cb_func != NULL) {
2873 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2874 	}
2875 
2876 	free(arg);
2877 
2878 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2879 		      args->port_handle, err);
2880 }
2881 
/*
 * Drain any pending queue-sync callbacks on @hwqp during port teardown.
 * Each sync arg may share an abts_ctx with syncs on other hwqps; the ctx
 * (and its poller-args arrays) is released only when the last hwqp in the
 * group has responded.
 */
static void
nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				/* ctx itself is an spdk allocation, hence
				 * spdk_free() rather than free(). */
				spdk_free(ctx);
			}
		}
	}
}
2900 
/*
 * Admin event: free a HW port. Fails if the port is unknown or still has
 * nports attached. On success, drains pending sync callbacks and frees the
 * per-queue hash tables for the LS queue and every IO queue before
 * releasing the port. Runs on the FC main thread.
 */
static void
nvmf_fc_adm_evnt_hw_port_free(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	int err = 0, i;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
			api_data->api_args;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/* All nports must have been removed before the port can be freed. */
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle);
		err = -EIO;
		goto out;
	}

	/* Clean up and free fc_port */
	hwqp = &fc_port->ls_queue;
	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
	rte_hash_free(hwqp->connection_list_hash);
	rte_hash_free(hwqp->rport_list_hash);

	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
		hwqp = &fc_port->io_queues[i];

		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
		rte_hash_free(hwqp->connection_list_hash);
		rte_hash_free(hwqp->rport_list_hash);
	}

	nvmf_fc_port_remove(fc_port);
	free(fc_port);
out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
		      args->port_handle, err);
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
	}

	free(arg);
}
2950 
2951 /*
2952  * Online a HW port.
2953  */
/*
 * Admin event: bring a HW port online. Marks the port online, onlines the
 * LS queue and every IO queue, and attaches each IO hwqp to a poll group.
 * The completion callback (if any) receives the result. Runs on the FC
 * main thread.
 */
static void
nvmf_fc_adm_evnt_hw_port_online(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to online */
		err = nvmf_fc_port_set_online(fc_port);
		if (err != 0) {
			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
			DEV_VERIFY(!"Hw port online failed");
			goto out;
		}

		/* The LS queue runs on the main thread, not in a poll group. */
		hwqp = &fc_port->ls_queue;
		hwqp->context = NULL;
		(void)nvmf_fc_hwqp_set_online(hwqp);

		/* Cycle through all the io queues and setup a hwqp poller for each. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			hwqp->context = NULL;
			(void)nvmf_fc_hwqp_set_online(hwqp);
			nvmf_fc_poll_group_add_hwqp(hwqp);
		}
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}

out:
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
		      err);
}
3002 
/*
 * Completion callback, invoked once per IO-queue hwqp removed from its
 * poll group during HW port offline. Only the final invocation (when the
 * shared pending count reaches zero) deletes the port's nports and runs
 * the user's offline callback; it also frees the shared context.
 */
static void
nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
{
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;

	/* Not the last hwqp removal yet - keep waiting.
	 * NOTE(review): assumes pending_remove_hwqp was initialized non-zero
	 * (num_io_queues > 0); confirm behavior for a 0-IO-queue port. */
	if (--remove_hwqp_args->pending_remove_hwqp) {
		return;
	}

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		err = -EINVAL;
		SPDK_ERRLOG("fc_port not found.\n");
		goto out;
	}

	/*
	 * Delete all the nports. Ideally, the nports should have been purged
	 * before the offline event, in which case, only a validation is required.
	 */
	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
out:
	/* Run the caller's offline completion, then release the shared context. */
	if (remove_hwqp_args->cb_fn) {
		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
	}

	free(remove_hwqp_args);
}
3034 
3035 /*
3036  * Offline a HW port.
3037  */
static void
nvmf_fc_adm_evnt_hw_port_offline(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to offline, if it is not already. */
		err = nvmf_fc_port_set_offline(fc_port);
		if (err != 0) {
			/* Already-offline is reported to the caller as success. */
			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
			err = 0;
			goto out;
		}

		/* Shared context counted down by nvmf_fc_adm_hw_port_offline_cb;
		 * the last hwqp removal frees it and invokes the user callback. */
		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
		if (!remove_hwqp_args) {
			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
			err = -ENOMEM;
			goto out;
		}
		remove_hwqp_args->cb_fn = api_data->cb_func;
		remove_hwqp_args->cb_args = api_data->api_args;
		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;

		hwqp = &fc_port->ls_queue;
		(void)nvmf_fc_hwqp_set_offline(hwqp);

		/* Remove poller for all the io queues. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			(void)nvmf_fc_hwqp_set_offline(hwqp);
			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
						       remove_hwqp_args);
		}

		/* api_data is no longer needed; the user callback will be run
		 * by nvmf_fc_adm_hw_port_offline_cb, not here. */
		free(arg);

		/* Wait untill all the hwqps are removed from poll groups. */
		return;
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}
out:
	/* Synchronous completion (lookup failure, already offline, or OOM). */
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
		      err);
}
3100 
/*
 * Per-subsystem context used while adding/removing an nport listener.
 * Allocated in nvmf_fc_adm_add_rem_nport_listener() and freed after the
 * subsystem has been resumed (or on an earlier failure).
 */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being paused/resumed */
	bool add_listener;			/* true = add listener, false = remove */
	struct spdk_nvme_transport_id trid;	/* FC transport ID built from nport WWNs */
};
3106 
/* Subsystem resume finished; release the listener add/remove context. */
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct nvmf_fc_add_rem_listener_ctx *listener_ctx = cb_arg;

	free(listener_ctx);
}
3114 
3115 static void
3116 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3117 {
3118 	ASSERT_SPDK_FC_MAIN_THREAD();
3119 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3120 
3121 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3122 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3123 		free(ctx);
3124 	}
3125 }
3126 
3127 static void
3128 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3129 {
3130 	ASSERT_SPDK_FC_MAIN_THREAD();
3131 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3132 
3133 	if (ctx->add_listener) {
3134 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3135 	} else {
3136 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3137 		nvmf_fc_adm_listen_done(ctx, 0);
3138 	}
3139 }
3140 
3141 static int
3142 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3143 {
3144 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3145 	struct spdk_nvmf_subsystem *subsystem;
3146 	struct spdk_nvmf_listen_opts opts;
3147 
3148 	if (!tgt) {
3149 		SPDK_ERRLOG("No nvmf target defined\n");
3150 		return -EINVAL;
3151 	}
3152 
3153 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3154 
3155 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3156 	while (subsystem) {
3157 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3158 
3159 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3160 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3161 			if (ctx) {
3162 				ctx->add_listener = add;
3163 				ctx->subsystem = subsystem;
3164 				nvmf_fc_create_trid(&ctx->trid,
3165 						    nport->fc_nodename.u.wwn,
3166 						    nport->fc_portname.u.wwn);
3167 
3168 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3169 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3170 						    ctx->trid.traddr);
3171 					free(ctx);
3172 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3173 								     0,
3174 								     nvmf_fc_adm_subsystem_paused_cb,
3175 								     ctx)) {
3176 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3177 						    subsystem->subnqn);
3178 					free(ctx);
3179 				}
3180 			}
3181 		}
3182 
3183 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3184 	}
3185 
3186 	return 0;
3187 }
3188 
3189 /*
3190  * Create a Nport.
3191  */
3192 static void
3193 nvmf_fc_adm_evnt_nport_create(void *arg)
3194 {
3195 	ASSERT_SPDK_FC_MAIN_THREAD();
3196 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3197 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3198 			api_data->api_args;
3199 	struct spdk_nvmf_fc_nport *nport = NULL;
3200 	struct spdk_nvmf_fc_port *fc_port = NULL;
3201 	int err = 0;
3202 
3203 	/*
3204 	 * Get the physical port.
3205 	 */
3206 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3207 	if (fc_port == NULL) {
3208 		err = -EINVAL;
3209 		goto out;
3210 	}
3211 
3212 	/*
3213 	 * Check for duplicate initialization.
3214 	 */
3215 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3216 	if (nport != NULL) {
3217 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3218 			    args->port_handle);
3219 		err = -EINVAL;
3220 		goto out;
3221 	}
3222 
3223 	/*
3224 	 * Get the memory to instantiate a fc nport.
3225 	 */
3226 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3227 	if (nport == NULL) {
3228 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3229 			    args->nport_handle);
3230 		err = -ENOMEM;
3231 		goto out;
3232 	}
3233 
3234 	/*
3235 	 * Initialize the contents for the nport
3236 	 */
3237 	nport->nport_hdl    = args->nport_handle;
3238 	nport->port_hdl     = args->port_handle;
3239 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3240 	nport->fc_nodename  = args->fc_nodename;
3241 	nport->fc_portname  = args->fc_portname;
3242 	nport->d_id         = args->d_id;
3243 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3244 
3245 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3246 	TAILQ_INIT(&nport->rem_port_list);
3247 	nport->rport_count = 0;
3248 	TAILQ_INIT(&nport->fc_associations);
3249 	nport->assoc_count = 0;
3250 
3251 	/*
3252 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3253 	 */
3254 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3255 
3256 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3257 out:
3258 	if (err && nport) {
3259 		free(nport);
3260 	}
3261 
3262 	if (api_data->cb_func != NULL) {
3263 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3264 	}
3265 
3266 	free(arg);
3267 }
3268 
/*
 * Completion callback for each IT-delete issued during nport deletion.
 * When the last remote port is gone, the nport is unlinked from its HW
 * port, freed, and the original caller's delete callback is invoked.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should have been torn down by now. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		/* Invoke the user's nport-delete callback, then drop cb_data
		 * (shared by all IT-delete completions for this nport). */
		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
3333 
3334 /*
3335  * Delete Nport.
3336  */
static void
nvmf_fc_adm_evnt_nport_delete(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	int err = 0;
	uint32_t rport_cnt = 0;
	int rc = 0;

	/*
	 * Make sure that the nport exists.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for callback data.
	 * cb_data is shared by every IT-delete completion for this nport and
	 * is freed by nvmf_fc_adm_delete_nport_cb when the last one finishes.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
		err = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Begin nport tear down
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this nport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		err = -ENODEV;
		goto out;
	} else {
		/* nport partially created/deleted */
		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(0 != "Nport in zombie state");
		err = -ENODEV;
		goto out;
	}

	/*
	 * Remove this nport from listening addresses across subsystems
	 */
	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);

	if (0 != rc) {
		/* Cannot unwind cleanly; park the nport as a zombie. */
		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
			    nport->nport_hdl);
		goto out;
	}

	/*
	 * Delete all the remote ports (if any) for the nport
	 */
	/* TODO - Need to do this with a "first" and a "next" accessor function
	 * for completeness. Look at app-subsystem as examples.
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* No rports to delete. Complete the nport deletion. */
		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
		goto out;
	}

	/* Fan out one asynchronous IT-delete event per remote port; each
	 * completion calls nvmf_fc_adm_delete_nport_cb with cb_data. */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));

		if (it_del_args == NULL) {
			/* NOTE(review): if this fails after earlier iterations already
			 * enqueued IT_DELETE events, the error path below frees cb_data
			 * while those events still reference it - verify. */
			err = -ENOMEM;
			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
				    rport_iter->rpi, rport_iter->s_id);
			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
			goto out;
		}

		rport_cnt++;
		it_del_args->port_handle = nport->port_hdl;
		it_del_args->nport_handle = nport->nport_hdl;
		it_del_args->cb_ctx = (void *)cb_data;
		it_del_args->rpi = rport_iter->rpi;
		it_del_args->s_id = rport_iter->s_id;

		nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
					   nvmf_fc_adm_delete_nport_cb);
	}

out:
	/* On failure, execute the callback function now */
	if ((err != 0) || (rc != 0)) {
		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
			    "rport_cnt:%d rc:%d.\n",
			    args->nport_handle, err, args->port_handle,
			    rport_cnt, rc);
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
		}

	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "NPort %d delete done succesfully, fc port:%d. "
			      "rport_cnt:%d\n",
			      args->nport_handle, args->port_handle, rport_cnt);
	}

	free(arg);
}
3468 
3469 /*
3470  * Process an PRLI/IT add.
3471  */
3472 static void
3473 nvmf_fc_adm_evnt_i_t_add(void *arg)
3474 {
3475 	ASSERT_SPDK_FC_MAIN_THREAD();
3476 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3477 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3478 			api_data->api_args;
3479 	struct spdk_nvmf_fc_nport *nport = NULL;
3480 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3481 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3482 	int err = 0;
3483 
3484 	/*
3485 	 * Make sure the nport port exists.
3486 	 */
3487 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3488 	if (nport == NULL) {
3489 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3490 		err = -EINVAL;
3491 		goto out;
3492 	}
3493 
3494 	/*
3495 	 * Check for duplicate i_t_add.
3496 	 */
3497 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3498 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3499 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3500 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3501 			err = -EEXIST;
3502 			goto out;
3503 		}
3504 	}
3505 
3506 	/*
3507 	 * Get the memory to instantiate the remote port
3508 	 */
3509 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3510 	if (rport == NULL) {
3511 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3512 		err = -ENOMEM;
3513 		goto out;
3514 	}
3515 
3516 	/*
3517 	 * Initialize the contents for the rport
3518 	 */
3519 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3520 	rport->s_id = args->s_id;
3521 	rport->rpi = args->rpi;
3522 	rport->fc_nodename = args->fc_nodename;
3523 	rport->fc_portname = args->fc_portname;
3524 
3525 	/*
3526 	 * Add remote port to nport
3527 	 */
3528 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3529 		DEV_VERIFY(!"Error while adding rport to list");
3530 	};
3531 
3532 	/*
3533 	 * TODO: Do we validate the initiators service parameters?
3534 	 */
3535 
3536 	/*
3537 	 * Get the targets service parameters from the library
3538 	 * to return back to the driver.
3539 	 */
3540 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3541 
3542 out:
3543 	if (api_data->cb_func != NULL) {
3544 		/*
3545 		 * Passing pointer to the args struct as the first argument.
3546 		 * The cb_func should handle this appropriately.
3547 		 */
3548 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3549 	}
3550 
3551 	free(arg);
3552 
3553 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3554 		      "IT add on nport %d done, rc = %d.\n",
3555 		      args->nport_handle, err);
3556 }
3557 
3558 /**
3559  * Process a IT delete.
3560  */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;
	char log_str[256];

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port): match on S_ID, RPI, and
	 * CREATED state (rports already being deleted are skipped).
	 */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have the rport slated for deletion. At this point clean up
	 * any LS requests that are sitting in the pending list. Do this
	 * first, then, set the states of the rport so that new LS requests
	 * are not accepted. Then start the cleanup.
	 */
	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 * cb_data is freed by nvmf_fc_adm_i_t_delete_assoc()'s completion path.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}

	free(arg);
}
3695 
3696 /*
3697  * Process ABTS received
3698  */
3699 static void
3700 nvmf_fc_adm_evnt_abts_recv(void *arg)
3701 {
3702 	ASSERT_SPDK_FC_MAIN_THREAD();
3703 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3704 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3705 	struct spdk_nvmf_fc_nport *nport = NULL;
3706 	int err = 0;
3707 
3708 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3709 		      args->oxid, args->rxid);
3710 
3711 	/*
3712 	 * 1. Make sure the nport port exists.
3713 	 */
3714 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3715 	if (nport == NULL) {
3716 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3717 		err = -EINVAL;
3718 		goto out;
3719 	}
3720 
3721 	/*
3722 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3723 	 */
3724 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3725 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3726 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3727 			      args->rpi, args->oxid, args->rxid);
3728 		err = 0;
3729 		goto out;
3730 
3731 	}
3732 
3733 	/*
3734 	 * 3. Pass the received ABTS-LS to the library for handling.
3735 	 */
3736 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3737 
3738 out:
3739 	if (api_data->cb_func != NULL) {
3740 		/*
3741 		 * Passing pointer to the args struct as the first argument.
3742 		 * The cb_func should handle this appropriately.
3743 		 */
3744 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3745 	} else {
3746 		/* No callback set, free the args */
3747 		free(args);
3748 	}
3749 
3750 	free(arg);
3751 }
3752 
3753 /*
3754  * Callback function for hw port quiesce.
3755  */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	/* Hand the buffer back to the caller through args; ownership
	 * transfers to the FC driver (FCT), which frees it. */
	*args->dump_buf  = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqp.
	 */
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, &dump_info);

out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	/* Complete the original HW_PORT_RESET event. */
	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}
3831 
3832 /*
3833  * HW port reset
3834 
3835  */
3836 static void
3837 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3838 {
3839 	ASSERT_SPDK_FC_MAIN_THREAD();
3840 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3841 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3842 			api_data->api_args;
3843 	struct spdk_nvmf_fc_port *fc_port = NULL;
3844 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3845 	int err = 0;
3846 
3847 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3848 
3849 	/*
3850 	 * Make sure the physical port exists.
3851 	 */
3852 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3853 	if (fc_port == NULL) {
3854 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3855 		err = -EINVAL;
3856 		goto out;
3857 	}
3858 
3859 	/*
3860 	 * Save the reset event args and the callback in a context struct.
3861 	 */
3862 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3863 
3864 	if (ctx == NULL) {
3865 		err = -ENOMEM;
3866 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3867 		goto fail;
3868 	}
3869 
3870 	ctx->reset_args = args;
3871 	ctx->reset_cb_func = api_data->cb_func;
3872 
3873 	/*
3874 	 * Quiesce the hw port.
3875 	 */
3876 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3877 	if (err != 0) {
3878 		goto fail;
3879 	}
3880 
3881 	/*
3882 	 * Once the ports are successfully quiesced the reset processing
3883 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3884 	 */
3885 	return;
3886 fail:
3887 	free(ctx);
3888 
3889 out:
3890 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3891 		      err);
3892 
3893 	if (api_data->cb_func != NULL) {
3894 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3895 	}
3896 
3897 	free(arg);
3898 }
3899 
3900 static inline void
3901 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3902 {
3903 	if (nvmf_fc_get_main_thread()) {
3904 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3905 	}
3906 }
3907 
3908 /*
3909  * Queue up an event in the SPDK main threads event queue.
3910  * Used by the FC driver to notify the SPDK main thread of FC related events.
3911  */
3912 int
3913 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3914 			   spdk_nvmf_fc_callback cb_func)
3915 {
3916 	int err = 0;
3917 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3918 	spdk_msg_fn event_fn = NULL;
3919 
3920 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3921 
3922 	if (event_type >= SPDK_FC_EVENT_MAX) {
3923 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3924 		err = -EINVAL;
3925 		goto done;
3926 	}
3927 
3928 	if (args == NULL) {
3929 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3930 		err = -EINVAL;
3931 		goto done;
3932 	}
3933 
3934 	api_data = calloc(1, sizeof(*api_data));
3935 
3936 	if (api_data == NULL) {
3937 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3938 		err = -ENOMEM;
3939 		goto done;
3940 	}
3941 
3942 	api_data->api_args = args;
3943 	api_data->cb_func = cb_func;
3944 
3945 	switch (event_type) {
3946 	case SPDK_FC_HW_PORT_INIT:
3947 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3948 		break;
3949 
3950 	case SPDK_FC_HW_PORT_FREE:
3951 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3952 		break;
3953 
3954 	case SPDK_FC_HW_PORT_ONLINE:
3955 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3956 		break;
3957 
3958 	case SPDK_FC_HW_PORT_OFFLINE:
3959 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3960 		break;
3961 
3962 	case SPDK_FC_NPORT_CREATE:
3963 		event_fn = nvmf_fc_adm_evnt_nport_create;
3964 		break;
3965 
3966 	case SPDK_FC_NPORT_DELETE:
3967 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3968 		break;
3969 
3970 	case SPDK_FC_IT_ADD:
3971 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3972 		break;
3973 
3974 	case SPDK_FC_IT_DELETE:
3975 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3976 		break;
3977 
3978 	case SPDK_FC_ABTS_RECV:
3979 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3980 		break;
3981 
3982 	case SPDK_FC_HW_PORT_RESET:
3983 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3984 		break;
3985 
3986 	case SPDK_FC_UNRECOVERABLE_ERR:
3987 	default:
3988 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3989 		err = -EINVAL;
3990 		break;
3991 	}
3992 
3993 done:
3994 
3995 	if (err == 0) {
3996 		assert(event_fn != NULL);
3997 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3998 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3999 	} else {
4000 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
4001 		if (api_data) {
4002 			free(api_data);
4003 		}
4004 	}
4005 
4006 	return err;
4007 }
4008 
4009 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
4010 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
4011 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
4012