xref: /spdk/lib/nvmf/fc.c (revision b30d57cdad6d2bc75cc1e4e2ebbcebcb0d98dcfa)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "nvmf_fc.h"
50 #include "fc_lld.h"
51 
52 #ifndef DEV_VERIFY
53 #define DEV_VERIFY assert
54 #endif
55 
56 #ifndef ASSERT_SPDK_FC_MAIN_THREAD
57 #define ASSERT_SPDK_FC_MAIN_THREAD() \
58         DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
59 #endif
60 
61 /*
62  * PRLI service parameters
63  */
64 enum spdk_nvmf_fc_service_parameters {
65 	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
66 	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
67 	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
68 	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
69 	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
70 };
71 
72 static char *fc_req_state_strs[] = {
73 	"SPDK_NVMF_FC_REQ_INIT",
74 	"SPDK_NVMF_FC_REQ_READ_BDEV",
75 	"SPDK_NVMF_FC_REQ_READ_XFER",
76 	"SPDK_NVMF_FC_REQ_READ_RSP",
77 	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
78 	"SPDK_NVMF_FC_REQ_WRITE_XFER",
79 	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
80 	"SPDK_NVMF_FC_REQ_WRITE_RSP",
81 	"SPDK_NVMF_FC_REQ_NONE_BDEV",
82 	"SPDK_NVMF_FC_REQ_NONE_RSP",
83 	"SPDK_NVMF_FC_REQ_SUCCESS",
84 	"SPDK_NVMF_FC_REQ_FAILED",
85 	"SPDK_NVMF_FC_REQ_ABORTED",
86 	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
87 	"SPDK_NVMF_FC_REQ_PENDING",
88 	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
89 };
90 
91 #define OBJECT_NVMF_FC_IO				0xA0
92 
93 #define TRACE_GROUP_NVMF_FC				0x8
94 #define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
95 #define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
96 #define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
97 #define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
98 #define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
99 #define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
100 #define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
101 #define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
102 #define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
103 #define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
104 #define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
105 #define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
106 #define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
107 #define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
108 #define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
109 #define TRACE_FC_REQ_FUSED_WAITING		SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x10)
110 
111 #define HWQP_CONN_TABLE_SIZE			8192
112 #define HWQP_RPI_TABLE_SIZE			4096
113 
114 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
115 {
116 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
117 	spdk_trace_register_description("FC_REQ_NEW",
118 					TRACE_FC_REQ_INIT,
119 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
120 	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
121 					TRACE_FC_REQ_READ_BDEV,
122 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
123 	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
124 					TRACE_FC_REQ_READ_XFER,
125 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
126 	spdk_trace_register_description("FC_REQ_READ_RSP",
127 					TRACE_FC_REQ_READ_RSP,
128 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
129 	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
130 					TRACE_FC_REQ_WRITE_BUFFS,
131 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
132 	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
133 					TRACE_FC_REQ_WRITE_XFER,
134 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
135 	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
136 					TRACE_FC_REQ_WRITE_BDEV,
137 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
138 	spdk_trace_register_description("FC_REQ_WRITE_RSP",
139 					TRACE_FC_REQ_WRITE_RSP,
140 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
141 	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
142 					TRACE_FC_REQ_NONE_BDEV,
143 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
144 	spdk_trace_register_description("FC_REQ_NONE_RSP",
145 					TRACE_FC_REQ_NONE_RSP,
146 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
147 	spdk_trace_register_description("FC_REQ_SUCCESS",
148 					TRACE_FC_REQ_SUCCESS,
149 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
150 	spdk_trace_register_description("FC_REQ_FAILED",
151 					TRACE_FC_REQ_FAILED,
152 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
153 	spdk_trace_register_description("FC_REQ_ABORTED",
154 					TRACE_FC_REQ_ABORTED,
155 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
156 	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
157 					TRACE_FC_REQ_BDEV_ABORTED,
158 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
159 	spdk_trace_register_description("FC_REQ_PENDING",
160 					TRACE_FC_REQ_PENDING,
161 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
162 	spdk_trace_register_description("FC_REQ_FUSED_WAITING",
163 					TRACE_FC_REQ_FUSED_WAITING,
164 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
165 }
166 
167 /**
168  * The structure used by all fc adm functions
169  */
170 struct spdk_nvmf_fc_adm_api_data {
171 	void *api_args;
172 	spdk_nvmf_fc_callback cb_func;
173 };
174 
175 /**
176  * The callback structure for nport-delete
177  */
178 struct spdk_nvmf_fc_adm_nport_del_cb_data {
179 	struct spdk_nvmf_fc_nport *nport;
180 	uint8_t port_handle;
181 	spdk_nvmf_fc_callback fc_cb_func;
182 	void *fc_cb_ctx;
183 };
184 
185 /**
186  * The callback structure for it-delete
187  */
188 struct spdk_nvmf_fc_adm_i_t_del_cb_data {
189 	struct spdk_nvmf_fc_nport *nport;
190 	struct spdk_nvmf_fc_remote_port_info *rport;
191 	uint8_t port_handle;
192 	spdk_nvmf_fc_callback fc_cb_func;
193 	void *fc_cb_ctx;
194 };
195 
196 
197 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);
198 
199 /**
200  * The callback structure for the it-delete-assoc callback
201  */
202 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
203 	struct spdk_nvmf_fc_nport *nport;
204 	struct spdk_nvmf_fc_remote_port_info *rport;
205 	uint8_t port_handle;
206 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
207 	void *cb_ctx;
208 };
209 
210 /*
211  * Call back function pointer for HW port quiesce.
212  */
213 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);
214 
215 /**
216  * Context structure for quiescing a hardware port
217  */
218 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
219 	int quiesce_count;
220 	void *ctx;
221 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
222 };
223 
224 /**
225  * Context structure used to reset a hardware port
226  */
227 struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
228 	void *reset_args;
229 	spdk_nvmf_fc_callback reset_cb_func;
230 };
231 
232 struct spdk_nvmf_fc_transport {
233 	struct spdk_nvmf_transport transport;
234 	pthread_mutex_t lock;
235 };
236 
237 static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;
238 
239 static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;
240 
241 static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
242 	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);
243 
244 static struct spdk_thread *g_nvmf_fc_main_thread = NULL;
245 
246 static uint32_t g_nvmf_fgroup_count = 0;
247 static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
248 	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
249 
250 struct spdk_thread *
251 nvmf_fc_get_main_thread(void)
252 {
253 	return g_nvmf_fc_main_thread;
254 }
255 
256 static inline void
257 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
258 			       enum spdk_nvmf_fc_request_state state)
259 {
260 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
261 
262 	switch (state) {
263 	case SPDK_NVMF_FC_REQ_INIT:
264 		/* Start IO tracing */
265 		tpoint_id = TRACE_FC_REQ_INIT;
266 		break;
267 	case SPDK_NVMF_FC_REQ_READ_BDEV:
268 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
269 		break;
270 	case SPDK_NVMF_FC_REQ_READ_XFER:
271 		tpoint_id = TRACE_FC_REQ_READ_XFER;
272 		break;
273 	case SPDK_NVMF_FC_REQ_READ_RSP:
274 		tpoint_id = TRACE_FC_REQ_READ_RSP;
275 		break;
276 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
277 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
278 		break;
279 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
280 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
281 		break;
282 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
283 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
284 		break;
285 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
286 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
287 		break;
288 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
289 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
290 		break;
291 	case SPDK_NVMF_FC_REQ_NONE_RSP:
292 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
293 		break;
294 	case SPDK_NVMF_FC_REQ_SUCCESS:
295 		tpoint_id = TRACE_FC_REQ_SUCCESS;
296 		break;
297 	case SPDK_NVMF_FC_REQ_FAILED:
298 		tpoint_id = TRACE_FC_REQ_FAILED;
299 		break;
300 	case SPDK_NVMF_FC_REQ_ABORTED:
301 		tpoint_id = TRACE_FC_REQ_ABORTED;
302 		break;
303 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
304 		tpoint_id = TRACE_FC_REQ_ABORTED;
305 		break;
306 	case SPDK_NVMF_FC_REQ_PENDING:
307 		tpoint_id = TRACE_FC_REQ_PENDING;
308 		break;
309 	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
310 		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
311 		break;
312 	default:
313 		assert(0);
314 		break;
315 	}
316 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
317 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
318 				  (uint64_t)(&fc_req->req), 0);
319 	}
320 }
321 
322 static struct rte_hash *
323 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
324 {
325 	struct rte_hash_parameters hash_params = { 0 };
326 
327 	hash_params.entries = num_entries;
328 	hash_params.key_len = key_len;
329 	hash_params.name = name;
330 
331 	return rte_hash_create(&hash_params);
332 }
333 
334 void
335 nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
336 {
337 	free(fc_conn->pool_memory);
338 	fc_conn->pool_memory = NULL;
339 }
340 
341 int
342 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
343 {
344 	uint32_t i, qd;
345 	struct spdk_nvmf_fc_pooled_request *req;
346 
347 	/*
348 	 * Create number of fc-requests to be more than the actual SQ size.
349 	 * This is to handle race conditions where the target driver may send
350 	 * back a RSP and before the target driver gets to process the CQE
351 	 * for the RSP, the initiator may have sent a new command.
352 	 * Depending on the load on the HWQP, there is a slim possibility
353 	 * that the target reaps the RQE corresponding to the new
354 	 * command before processing the CQE corresponding to the RSP.
355 	 */
356 	qd = fc_conn->max_queue_depth * 2;
357 
358 	STAILQ_INIT(&fc_conn->pool_queue);
359 	fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),
360 				      sizeof(struct spdk_nvmf_fc_request));
361 	if (!fc_conn->pool_memory) {
362 		SPDK_ERRLOG("create fc req ring objects failed\n");
363 		goto error;
364 	}
365 	fc_conn->pool_size = qd;
366 	fc_conn->pool_free_elems = qd;
367 
368 	/* Initialise value in ring objects and link the objects */
369 	for (i = 0; i < qd; i++) {
370 		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
371 				i * sizeof(struct spdk_nvmf_fc_request));
372 
373 		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
374 	}
375 	return 0;
376 error:
377 	nvmf_fc_free_conn_reqpool(fc_conn);
378 	return -1;
379 }
380 
381 static inline struct spdk_nvmf_fc_request *
382 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
383 {
384 	struct spdk_nvmf_fc_request *fc_req;
385 	struct spdk_nvmf_fc_pooled_request *pooled_req;
386 	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;
387 
388 	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
389 	if (!pooled_req) {
390 		SPDK_ERRLOG("Alloc request buffer failed\n");
391 		return NULL;
392 	}
393 	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
394 	fc_conn->pool_free_elems -= 1;
395 
396 	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
397 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
398 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
399 
400 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
401 	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
402 	TAILQ_INIT(&fc_req->abort_cbs);
403 	return fc_req;
404 }
405 
406 static inline void
407 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
408 {
409 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
410 		/* Log an error for debug purpose. */
411 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
412 	}
413 
414 	/* set the magic to mark req as no longer valid. */
415 	fc_req->magic = 0xDEADBEEF;
416 
417 	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
418 	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
419 
420 	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
421 	fc_conn->pool_free_elems += 1;
422 }
423 
424 static inline void
425 nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
426 {
427 	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
428 		      spdk_nvmf_request, buf_link);
429 }
430 
431 int
432 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
433 {
434 	char name[64];
435 
436 	hwqp->fc_port = fc_port;
437 
438 	/* clear counters */
439 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
440 
441 	TAILQ_INIT(&hwqp->in_use_reqs);
442 	TAILQ_INIT(&hwqp->sync_cbs);
443 	TAILQ_INIT(&hwqp->ls_pending_queue);
444 
445 	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
446 	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
447 				     sizeof(uint64_t));
448 	if (!hwqp->connection_list_hash) {
449 		SPDK_ERRLOG("Failed to create connection hash table.\n");
450 		return -ENOMEM;
451 	}
452 
453 	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
454 	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
455 	if (!hwqp->rport_list_hash) {
456 		SPDK_ERRLOG("Failed to create rpi hash table.\n");
457 		rte_hash_free(hwqp->connection_list_hash);
458 		return -ENOMEM;
459 	}
460 
461 	/* Init low level driver queues */
462 	nvmf_fc_init_q(hwqp);
463 	return 0;
464 }
465 
466 static struct spdk_nvmf_fc_poll_group *
467 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
468 {
469 	uint32_t max_count = UINT32_MAX;
470 	struct spdk_nvmf_fc_poll_group *fgroup;
471 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
472 
473 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
474 	/* find poll group with least number of hwqp's assigned to it */
475 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
476 		if (fgroup->hwqp_count < max_count) {
477 			ret_fgroup = fgroup;
478 			max_count = fgroup->hwqp_count;
479 		}
480 	}
481 
482 	if (ret_fgroup) {
483 		ret_fgroup->hwqp_count++;
484 		hwqp->thread = ret_fgroup->group.group->thread;
485 		hwqp->fgroup = ret_fgroup;
486 	}
487 
488 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
489 
490 	return ret_fgroup;
491 }
492 
493 bool
494 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
495 {
496 	struct spdk_nvmf_fc_poll_group *tmp;
497 	bool rc = false;
498 
499 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
500 	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
501 		if (tmp == fgroup) {
502 			rc = true;
503 			break;
504 		}
505 	}
506 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
507 	return rc;
508 }
509 
510 void
511 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
512 {
513 	assert(hwqp);
514 	if (hwqp == NULL) {
515 		SPDK_ERRLOG("Error: hwqp is NULL\n");
516 		return;
517 	}
518 
519 	assert(g_nvmf_fgroup_count);
520 
521 	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
522 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
523 		return;
524 	}
525 
526 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
527 }
528 
529 static void
530 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
531 {
532 	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;
533 
534 	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
535 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
536 			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
537 	} else {
538 		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
539 	}
540 
541 	if (args->cb_fn) {
542 		args->cb_fn(args->cb_ctx, 0);
543 	}
544 
545 	free(args);
546 }
547 
548 void
549 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
550 			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
551 {
552 	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
553 	struct spdk_nvmf_fc_poll_group *tmp;
554 	int rc = 0;
555 
556 	assert(hwqp);
557 
558 	SPDK_DEBUGLOG(nvmf_fc,
559 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
560 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
561 
562 	if (!hwqp->fgroup) {
563 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
564 	} else {
565 		pthread_mutex_lock(&g_nvmf_ftransport->lock);
566 		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
567 			if (tmp == hwqp->fgroup) {
568 				hwqp->fgroup->hwqp_count--;
569 				break;
570 			}
571 		}
572 		pthread_mutex_unlock(&g_nvmf_ftransport->lock);
573 
574 		if (tmp != hwqp->fgroup) {
575 			/* Pollgroup was already removed. Dont bother. */
576 			goto done;
577 		}
578 
579 		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
580 		if (args == NULL) {
581 			rc = -ENOMEM;
582 			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
583 			goto done;
584 		}
585 
586 		args->hwqp   = hwqp;
587 		args->cb_fn  = cb_fn;
588 		args->cb_ctx = cb_ctx;
589 		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
590 		args->cb_info.cb_data = args;
591 		args->cb_info.cb_thread = spdk_get_thread();
592 
593 		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
594 		if (rc) {
595 			rc = -EINVAL;
596 			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
597 			free(args);
598 			goto done;
599 		}
600 		return;
601 	}
602 done:
603 	if (cb_fn) {
604 		cb_fn(cb_ctx, rc);
605 	}
606 }
607 
608 /*
609  * Note: This needs to be used only on main poller.
610  */
611 static uint64_t
612 nvmf_fc_get_abts_unique_id(void)
613 {
614 	static uint32_t u_id = 0;
615 
616 	return (uint64_t)(++u_id);
617 }
618 
619 static void
620 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
621 {
622 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
623 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;
624 
625 	ctx->hwqps_responded++;
626 
627 	if (ctx->hwqps_responded < ctx->num_hwqps) {
628 		/* Wait for all pollers to complete. */
629 		return;
630 	}
631 
632 	/* Free the queue sync poller args. */
633 	free(ctx->sync_poller_args);
634 
635 	/* Mark as queue synced */
636 	ctx->queue_synced = true;
637 
638 	/* Reset the ctx values */
639 	ctx->hwqps_responded = 0;
640 	ctx->handled = false;
641 
642 	SPDK_DEBUGLOG(nvmf_fc,
643 		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
644 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
645 
646 	/* Resend ABTS to pollers */
647 	args = ctx->abts_poller_args;
648 	for (int i = 0; i < ctx->num_hwqps; i++) {
649 		poller_arg = args + i;
650 		nvmf_fc_poller_api_func(poller_arg->hwqp,
651 					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
652 					poller_arg);
653 	}
654 }
655 
656 static int
657 nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
658 {
659 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
660 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;
661 
662 	/* check if FC driver supports queue sync */
663 	if (!nvmf_fc_q_sync_available()) {
664 		return -EPERM;
665 	}
666 
667 	assert(ctx);
668 	if (!ctx) {
669 		SPDK_ERRLOG("NULL ctx pointer");
670 		return -EINVAL;
671 	}
672 
673 	/* Reset the ctx values */
674 	ctx->hwqps_responded = 0;
675 
676 	args = calloc(ctx->num_hwqps,
677 		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
678 	if (!args) {
679 		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
680 			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
681 		return -ENOMEM;
682 	}
683 	ctx->sync_poller_args = args;
684 
685 	abts_args = ctx->abts_poller_args;
686 	for (int i = 0; i < ctx->num_hwqps; i++) {
687 		abts_poller_arg = abts_args + i;
688 		poller_arg = args + i;
689 		poller_arg->u_id = ctx->u_id;
690 		poller_arg->hwqp = abts_poller_arg->hwqp;
691 		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
692 		poller_arg->cb_info.cb_data = ctx;
693 		poller_arg->cb_info.cb_thread = spdk_get_thread();
694 
695 		/* Send a Queue sync message to interested pollers */
696 		nvmf_fc_poller_api_func(poller_arg->hwqp,
697 					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
698 					poller_arg);
699 	}
700 
701 	SPDK_DEBUGLOG(nvmf_fc,
702 		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
703 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
704 
705 	/* Post Marker to queue to track aborted request */
706 	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);
707 
708 	return 0;
709 }
710 
711 static void
712 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
713 {
714 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
715 	struct spdk_nvmf_fc_nport *nport  = NULL;
716 
717 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
718 		ctx->handled = true;
719 	}
720 
721 	ctx->hwqps_responded++;
722 
723 	if (ctx->hwqps_responded < ctx->num_hwqps) {
724 		/* Wait for all pollers to complete. */
725 		return;
726 	}
727 
728 	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
729 
730 	if (ctx->nport != nport) {
731 		/* Nport can be deleted while this abort is being
732 		 * processed by the pollers.
733 		 */
734 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
735 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
736 	} else {
737 		if (!ctx->handled) {
738 			/* Try syncing the queues and try one more time */
739 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
740 				SPDK_DEBUGLOG(nvmf_fc,
741 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
742 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
743 				return;
744 			} else {
745 				/* Send Reject */
746 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
747 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
748 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
749 			}
750 		} else {
751 			/* Send Accept */
752 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
753 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
754 					    0, NULL, NULL);
755 		}
756 	}
757 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
758 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
759 
760 	free(ctx->abts_poller_args);
761 	free(ctx);
762 }
763 
764 void
765 nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
766 			  uint16_t oxid, uint16_t rxid)
767 {
768 	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
769 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
770 	struct spdk_nvmf_fc_association *assoc = NULL;
771 	struct spdk_nvmf_fc_conn *conn = NULL;
772 	uint32_t hwqp_cnt = 0;
773 	bool skip_hwqp_cnt;
774 	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
775 	uint32_t i;
776 
777 	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
778 		       nport->nport_hdl, rpi, oxid, rxid);
779 
780 	/* Allocate memory to track hwqp's with at least 1 active connection. */
781 	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
782 	if (hwqps == NULL) {
783 		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
784 		goto bls_rej;
785 	}
786 
787 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
788 		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
789 			if ((conn->rpi != rpi) || !conn->hwqp) {
790 				continue;
791 			}
792 
793 			skip_hwqp_cnt = false;
794 			for (i = 0; i < hwqp_cnt; i++) {
795 				if (hwqps[i] == conn->hwqp) {
796 					/* Skip. This is already present */
797 					skip_hwqp_cnt = true;
798 					break;
799 				}
800 			}
801 			if (!skip_hwqp_cnt) {
802 				assert(hwqp_cnt < nport->fc_port->num_io_queues);
803 				hwqps[hwqp_cnt] = conn->hwqp;
804 				hwqp_cnt++;
805 			}
806 		}
807 	}
808 
809 	if (!hwqp_cnt) {
810 		goto bls_rej;
811 	}
812 
813 	args = calloc(hwqp_cnt,
814 		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
815 	if (!args) {
816 		goto bls_rej;
817 	}
818 
819 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
820 	if (!ctx) {
821 		goto bls_rej;
822 	}
823 	ctx->rpi = rpi;
824 	ctx->oxid = oxid;
825 	ctx->rxid = rxid;
826 	ctx->nport = nport;
827 	ctx->nport_hdl = nport->nport_hdl;
828 	ctx->port_hdl = nport->fc_port->port_hdl;
829 	ctx->num_hwqps = hwqp_cnt;
830 	ctx->ls_hwqp = &nport->fc_port->ls_queue;
831 	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
832 	ctx->abts_poller_args = args;
833 
834 	/* Get a unique context for this ABTS */
835 	ctx->u_id = nvmf_fc_get_abts_unique_id();
836 
837 	for (i = 0; i < hwqp_cnt; i++) {
838 		poller_arg = args + i;
839 		poller_arg->hwqp = hwqps[i];
840 		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
841 		poller_arg->cb_info.cb_data = ctx;
842 		poller_arg->cb_info.cb_thread = spdk_get_thread();
843 		poller_arg->ctx = ctx;
844 
845 		nvmf_fc_poller_api_func(poller_arg->hwqp,
846 					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
847 					poller_arg);
848 	}
849 
850 	free(hwqps);
851 
852 	return;
853 bls_rej:
854 	free(args);
855 	free(hwqps);
856 
857 	/* Send Reject */
858 	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
859 			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
860 	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
861 		       nport->nport_hdl, rpi, oxid, rxid);
862 	return;
863 }
864 
865 /*** Accessor functions for the FC structures - BEGIN */
866 /*
867  * Returns true if the port is in offline state.
868  */
869 bool
870 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
871 {
872 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
873 		return true;
874 	}
875 
876 	return false;
877 }
878 
879 /*
880  * Returns true if the port is in online state.
881  */
882 bool
883 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
884 {
885 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
886 		return true;
887 	}
888 
889 	return false;
890 }
891 
892 int
893 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
894 {
895 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
896 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
897 		return 0;
898 	}
899 
900 	return -EPERM;
901 }
902 
903 int
904 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
905 {
906 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
907 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
908 		return 0;
909 	}
910 
911 	return -EPERM;
912 }
913 
914 int
915 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
916 {
917 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
918 		hwqp->state = SPDK_FC_HWQP_ONLINE;
919 		/* reset some queue counters */
920 		hwqp->num_conns = 0;
921 		return nvmf_fc_set_q_online_state(hwqp, true);
922 	}
923 
924 	return -EPERM;
925 }
926 
927 int
928 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
929 {
930 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
931 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
932 		return nvmf_fc_set_q_online_state(hwqp, false);
933 	}
934 
935 	return -EPERM;
936 }
937 
938 void
939 nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
940 {
941 	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
942 
943 	/*
944 	 * Let LLD add the port to its list.
945 	 */
946 	nvmf_fc_lld_port_add(fc_port);
947 }
948 
949 static void
950 nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
951 {
952 	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);
953 
954 	/*
955 	 * Let LLD remove the port from its list.
956 	 */
957 	nvmf_fc_lld_port_remove(fc_port);
958 }
959 
960 struct spdk_nvmf_fc_port *
961 nvmf_fc_port_lookup(uint8_t port_hdl)
962 {
963 	struct spdk_nvmf_fc_port *fc_port = NULL;
964 
965 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
966 		if (fc_port->port_hdl == port_hdl) {
967 			return fc_port;
968 		}
969 	}
970 	return NULL;
971 }
972 
973 uint32_t
974 nvmf_fc_get_prli_service_params(void)
975 {
976 	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
977 }
978 
979 int
980 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
981 		       struct spdk_nvmf_fc_nport *nport)
982 {
983 	if (fc_port) {
984 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
985 		fc_port->num_nports++;
986 		return 0;
987 	}
988 
989 	return -EINVAL;
990 }
991 
992 int
993 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
994 			  struct spdk_nvmf_fc_nport *nport)
995 {
996 	if (fc_port && nport) {
997 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
998 		fc_port->num_nports--;
999 		return 0;
1000 	}
1001 
1002 	return -EINVAL;
1003 }
1004 
1005 static struct spdk_nvmf_fc_nport *
1006 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
1007 {
1008 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1009 
1010 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
1011 		if (fc_nport->nport_hdl == nport_hdl) {
1012 			return fc_nport;
1013 		}
1014 	}
1015 
1016 	return NULL;
1017 }
1018 
1019 struct spdk_nvmf_fc_nport *
1020 nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
1021 {
1022 	struct spdk_nvmf_fc_port *fc_port = NULL;
1023 
1024 	fc_port = nvmf_fc_port_lookup(port_hdl);
1025 	if (fc_port) {
1026 		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
1027 	}
1028 
1029 	return NULL;
1030 }
1031 
1032 static inline int
1033 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
1034 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
1035 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
1036 {
1037 	struct spdk_nvmf_fc_nport *n_port;
1038 	struct spdk_nvmf_fc_remote_port_info *r_port;
1039 
1040 	assert(hwqp);
1041 	if (hwqp == NULL) {
1042 		SPDK_ERRLOG("Error: hwqp is NULL\n");
1043 		return -EINVAL;
1044 	}
1045 	assert(nport);
1046 	if (nport == NULL) {
1047 		SPDK_ERRLOG("Error: nport is NULL\n");
1048 		return -EINVAL;
1049 	}
1050 	assert(rport);
1051 	if (rport == NULL) {
1052 		SPDK_ERRLOG("Error: rport is NULL\n");
1053 		return -EINVAL;
1054 	}
1055 
1056 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
1057 		if (n_port->d_id == d_id) {
1058 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
1059 				if (r_port->s_id == s_id) {
1060 					*nport = n_port;
1061 					*rport = r_port;
1062 					return 0;
1063 				}
1064 			}
1065 			break;
1066 		}
1067 	}
1068 
1069 	return -ENOENT;
1070 }
1071 
1072 /* Returns true if the Nport is empty of all rem_ports */
1073 bool
1074 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1075 {
1076 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1077 		assert(nport->rport_count == 0);
1078 		return true;
1079 	} else {
1080 		return false;
1081 	}
1082 }
1083 
1084 int
1085 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1086 			enum spdk_nvmf_fc_object_state state)
1087 {
1088 	if (nport) {
1089 		nport->nport_state = state;
1090 		return 0;
1091 	} else {
1092 		return -EINVAL;
1093 	}
1094 }
1095 
1096 bool
1097 nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
1098 			   struct spdk_nvmf_fc_remote_port_info *rem_port)
1099 {
1100 	if (nport && rem_port) {
1101 		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
1102 		nport->rport_count++;
1103 		return 0;
1104 	} else {
1105 		return -EINVAL;
1106 	}
1107 }
1108 
1109 bool
1110 nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
1111 			      struct spdk_nvmf_fc_remote_port_info *rem_port)
1112 {
1113 	if (nport && rem_port) {
1114 		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
1115 		nport->rport_count--;
1116 		return 0;
1117 	} else {
1118 		return -EINVAL;
1119 	}
1120 }
1121 
1122 int
1123 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1124 			enum spdk_nvmf_fc_object_state state)
1125 {
1126 	if (rport) {
1127 		rport->rport_state = state;
1128 		return 0;
1129 	} else {
1130 		return -EINVAL;
1131 	}
1132 }
1133 int
1134 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1135 			enum spdk_nvmf_fc_object_state state)
1136 {
1137 	if (assoc) {
1138 		assoc->assoc_state = state;
1139 		return 0;
1140 	} else {
1141 		return -EINVAL;
1142 	}
1143 }
1144 
1145 static struct spdk_nvmf_fc_association *
1146 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1147 {
1148 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1149 	struct spdk_nvmf_fc_conn *fc_conn;
1150 
1151 	if (!qpair) {
1152 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1153 		return NULL;
1154 	}
1155 
1156 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1157 
1158 	return fc_conn->fc_assoc;
1159 }
1160 
1161 bool
1162 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1163 		       struct spdk_nvmf_ctrlr *ctrlr)
1164 {
1165 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1166 	struct spdk_nvmf_fc_association *assoc = NULL;
1167 
1168 	if (!ctrlr) {
1169 		return false;
1170 	}
1171 
1172 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1173 	if (!fc_nport) {
1174 		return false;
1175 	}
1176 
1177 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1178 	if (assoc && assoc->tgtport == fc_nport) {
1179 		SPDK_DEBUGLOG(nvmf_fc,
1180 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1181 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1182 			      nport_hdl);
1183 		return true;
1184 	}
1185 	return false;
1186 }
1187 
1188 static void
1189 nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
1190 			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
1191 {
1192 	assert(ls_rqst);
1193 
1194 	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1195 
1196 	/* Return buffer to chip */
1197 	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
1198 }
1199 
1200 static int
1201 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
1202 			  struct spdk_nvmf_fc_nport *nport,
1203 			  struct spdk_nvmf_fc_remote_port_info *rport)
1204 {
1205 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1206 	int num_deleted = 0;
1207 
1208 	assert(hwqp);
1209 	assert(nport);
1210 	assert(rport);
1211 
1212 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1213 		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
1214 			num_deleted++;
1215 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1216 		}
1217 	}
1218 	return num_deleted;
1219 }
1220 
1221 static void
1222 nvmf_fc_req_bdev_abort(void *arg1)
1223 {
1224 	struct spdk_nvmf_fc_request *fc_req = arg1;
1225 	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1226 	int i;
1227 
1228 	/* Initial release - we don't have to abort Admin Queue or
1229 	 * Fabric commands. The AQ commands supported at this time are
1230 	 * Get-Log-Page,
1231 	 * Identify
1232 	 * Set Features
1233 	 * Get Features
1234 	 * AER -> Special case and handled differently.
1235 	 * Every one of the above Admin commands (except AER) run
1236 	 * to completion and so an Abort of such commands doesn't
1237 	 * make sense.
1238 	 */
1239 	/* The Fabric commands supported are
1240 	 * Property Set
1241 	 * Property Get
1242 	 * Connect -> Special case (async. handling). Not sure how to
1243 	 * handle at this point. Let it run to completion.
1244 	 */
1245 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1246 		if (ctrlr->aer_req[i] == &fc_req->req) {
1247 			SPDK_NOTICELOG("Abort AER request\n");
1248 			nvmf_qpair_free_aer(fc_req->req.qpair);
1249 		}
1250 	}
1251 }
1252 
1253 void
1254 nvmf_fc_request_abort_complete(void *arg1)
1255 {
1256 	struct spdk_nvmf_fc_request *fc_req =
1257 		(struct spdk_nvmf_fc_request *)arg1;
1258 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1259 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1260 	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
1261 
1262 	/* Make a copy of the cb list from fc_req */
1263 	TAILQ_INIT(&abort_cbs);
1264 	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);
1265 
1266 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1267 		       fc_req_state_strs[fc_req->state]);
1268 
1269 	_nvmf_fc_request_free(fc_req);
1270 
1271 	/* Request abort completed. Notify all the callbacks */
1272 	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
1273 		/* Notify */
1274 		ctx->cb(hwqp, 0, ctx->cb_args);
1275 		/* Remove */
1276 		TAILQ_REMOVE(&abort_cbs, ctx, link);
1277 		/* free */
1278 		free(ctx);
1279 	}
1280 }
1281 
1282 void
1283 nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
1284 		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
1285 {
1286 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
1287 	bool kill_req = false;
1288 
1289 	/* Add the cb to list */
1290 	if (cb) {
1291 		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
1292 		if (!ctx) {
1293 			SPDK_ERRLOG("ctx alloc failed.\n");
1294 			return;
1295 		}
1296 		ctx->cb = cb;
1297 		ctx->cb_args = cb_args;
1298 
1299 		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
1300 	}
1301 
1302 	if (!fc_req->is_aborted) {
1303 		/* Increment aborted command counter */
1304 		fc_req->hwqp->counters.num_aborted++;
1305 	}
1306 
1307 	/* If port is dead, skip abort wqe */
1308 	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
1309 	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
1310 		fc_req->is_aborted = true;
1311 		goto complete;
1312 	}
1313 
1314 	/* Check if the request is already marked for deletion */
1315 	if (fc_req->is_aborted) {
1316 		return;
1317 	}
1318 
1319 	/* Mark request as aborted */
1320 	fc_req->is_aborted = true;
1321 
1322 	/* If xchg is allocated, then save if we need to send abts or not. */
1323 	if (fc_req->xchg) {
1324 		fc_req->xchg->send_abts = send_abts;
1325 		fc_req->xchg->aborted	= true;
1326 	}
1327 
1328 	switch (fc_req->state) {
1329 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
1330 		/* Aborted by backend */
1331 		goto complete;
1332 
1333 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1334 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1335 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1336 		/* Notify bdev */
1337 		spdk_thread_send_msg(fc_req->hwqp->thread,
1338 				     nvmf_fc_req_bdev_abort, (void *)fc_req);
1339 		break;
1340 
1341 	case SPDK_NVMF_FC_REQ_READ_XFER:
1342 	case SPDK_NVMF_FC_REQ_READ_RSP:
1343 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
1344 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
1345 	case SPDK_NVMF_FC_REQ_NONE_RSP:
1346 		/* Notify HBA to abort this exchange  */
1347 		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
1348 		break;
1349 
1350 	case SPDK_NVMF_FC_REQ_PENDING:
1351 		/* Remove from pending */
1352 		nvmf_fc_request_remove_from_pending(fc_req);
1353 		goto complete;
1354 	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
1355 		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
1356 		goto complete;
1357 	default:
1358 		SPDK_ERRLOG("Request in invalid state.\n");
1359 		goto complete;
1360 	}
1361 
1362 	return;
1363 complete:
1364 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
1365 	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1366 				(void *)fc_req);
1367 }
1368 
1369 static int
1370 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1371 {
1372 	uint32_t length = fc_req->req.length;
1373 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1374 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1375 	struct spdk_nvmf_transport *transport = group->transport;
1376 
1377 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1378 		return -ENOMEM;
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 static int
1385 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1386 {
1387 	/* Allocate an XCHG if we dont use send frame for this command. */
1388 	if (!nvmf_fc_use_send_frame(fc_req)) {
1389 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1390 		if (!fc_req->xchg) {
1391 			fc_req->hwqp->counters.no_xchg++;
1392 			return -EAGAIN;
1393 		}
1394 	}
1395 
1396 	if (fc_req->req.length) {
1397 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1398 			fc_req->hwqp->counters.buf_alloc_err++;
1399 			if (fc_req->xchg) {
1400 				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1401 				fc_req->xchg = NULL;
1402 			}
1403 			return -EAGAIN;
1404 		}
1405 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1406 	}
1407 
1408 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1409 		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");
1410 
1411 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1412 
1413 		if (nvmf_fc_recv_data(fc_req)) {
1414 			/* Dropped return success to caller */
1415 			fc_req->hwqp->counters.unexpected_err++;
1416 			_nvmf_fc_request_free(fc_req);
1417 		}
1418 	} else {
1419 		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");
1420 
1421 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1422 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1423 		} else {
1424 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1425 		}
1426 		spdk_nvmf_request_exec(&fc_req->req);
1427 	}
1428 
1429 	return 0;
1430 }
1431 
1432 static void
1433 nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
1434 			  struct spdk_nvmf_fc_frame_hdr *fchdr)
1435 {
1436 	uint8_t df_ctl = fchdr->df_ctl;
1437 	uint32_t f_ctl = fchdr->f_ctl;
1438 
1439 	/* VMID */
1440 	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
1441 		struct spdk_nvmf_fc_vm_header *vhdr;
1442 		uint32_t vmhdr_offset = 0;
1443 
1444 		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
1445 			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
1446 		}
1447 
1448 		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
1449 			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
1450 		}
1451 
1452 		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
1453 				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
1454 		fc_req->app_id = from_be32(&vhdr->src_vmid);
1455 	}
1456 
1457 	/* Priority */
1458 	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
1459 		fc_req->csctl = fchdr->cs_ctl;
1460 	}
1461 }
1462 
1463 static int
1464 nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
1465 			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
1466 {
1467 	uint16_t cmnd_len;
1468 	uint64_t rqst_conn_id;
1469 	struct spdk_nvmf_fc_request *fc_req = NULL;
1470 	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
1471 	struct spdk_nvmf_fc_conn *fc_conn = NULL;
1472 	enum spdk_nvme_data_transfer xfer;
1473 	uint32_t s_id, d_id;
1474 
1475 	s_id = (uint32_t)frame->s_id;
1476 	d_id = (uint32_t)frame->d_id;
1477 	s_id = from_be32(&s_id) >> 8;
1478 	d_id = from_be32(&d_id) >> 8;
1479 
1480 	cmd_iu = buffer->virt;
1481 	cmnd_len = cmd_iu->cmnd_iu_len;
1482 	cmnd_len = from_be16(&cmnd_len);
1483 
1484 	/* check for a valid cmnd_iu format */
1485 	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
1486 	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
1487 	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
1488 		SPDK_ERRLOG("IU CMD error\n");
1489 		hwqp->counters.nvme_cmd_iu_err++;
1490 		return -ENXIO;
1491 	}
1492 
1493 	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
1494 	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
1495 		SPDK_ERRLOG("IU CMD xfer error\n");
1496 		hwqp->counters.nvme_cmd_xfer_err++;
1497 		return -EPERM;
1498 	}
1499 
1500 	rqst_conn_id = from_be64(&cmd_iu->conn_id);
1501 
1502 	if (rte_hash_lookup_data(hwqp->connection_list_hash,
1503 				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
1504 		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
1505 		hwqp->counters.invalid_conn_err++;
1506 		return -ENODEV;
1507 	}
1508 
1509 	/* Validate s_id and d_id */
1510 	if (s_id != fc_conn->s_id) {
1511 		hwqp->counters.rport_invalid++;
1512 		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
1513 		return -ENODEV;
1514 	}
1515 
1516 	if (d_id != fc_conn->d_id) {
1517 		hwqp->counters.nport_invalid++;
1518 		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
1519 		return -ENODEV;
1520 	}
1521 
1522 	/* If association/connection is being deleted - return */
1523 	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1524 		SPDK_ERRLOG("Association %ld state = %d not valid\n",
1525 			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
1526 		return -EACCES;
1527 	}
1528 
1529 	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1530 		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
1531 			    rqst_conn_id, fc_conn->conn_state);
1532 		return -EACCES;
1533 	}
1534 
1535 	if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
1536 		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
1537 			    rqst_conn_id, fc_conn->qpair.state);
1538 		return -EACCES;
1539 	}
1540 
1541 	/* Make sure xfer len is according to mdts */
1542 	if (from_be32(&cmd_iu->data_len) >
1543 	    hwqp->fgroup->group.transport->opts.max_io_size) {
1544 		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
1545 		return -EINVAL;
1546 	}
1547 
1548 	/* allocate a request buffer */
1549 	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
1550 	if (fc_req == NULL) {
1551 		return -ENOMEM;
1552 	}
1553 
1554 	fc_req->req.length = from_be32(&cmd_iu->data_len);
1555 	fc_req->req.qpair = &fc_conn->qpair;
1556 	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
1557 	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
1558 	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
1559 	fc_req->oxid = frame->ox_id;
1560 	fc_req->oxid = from_be16(&fc_req->oxid);
1561 	fc_req->rpi = fc_conn->rpi;
1562 	fc_req->poller_lcore = hwqp->lcore_id;
1563 	fc_req->poller_thread = hwqp->thread;
1564 	fc_req->hwqp = hwqp;
1565 	fc_req->fc_conn = fc_conn;
1566 	fc_req->req.xfer = xfer;
1567 	fc_req->s_id = s_id;
1568 	fc_req->d_id = d_id;
1569 	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
1570 	nvmf_fc_set_vmid_priority(fc_req, frame);
1571 
1572 	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
1573 
1574 	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
1575 		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
1576 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1577 	}
1578 
1579 	return 0;
1580 }
1581 
1582 /*
1583  * These functions are called from the FC LLD
1584  */
1585 
1586 void
1587 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1588 {
1589 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1590 	struct spdk_nvmf_transport_poll_group *group;
1591 
1592 	if (!fc_req) {
1593 		return;
1594 	}
1595 
1596 	if (fc_req->xchg) {
1597 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1598 		fc_req->xchg = NULL;
1599 	}
1600 
1601 	/* Release IO buffers */
1602 	if (fc_req->req.data_from_pool) {
1603 		group = &hwqp->fgroup->group;
1604 		spdk_nvmf_request_free_buffers(&fc_req->req, group,
1605 					       group->transport);
1606 	}
1607 	fc_req->req.data = NULL;
1608 	fc_req->req.iovcnt  = 0;
1609 
1610 	/* Free Fc request */
1611 	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
1612 }
1613 
1614 void
1615 nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
1616 			  enum spdk_nvmf_fc_request_state state)
1617 {
1618 	assert(fc_req->magic != 0xDEADBEEF);
1619 
1620 	SPDK_DEBUGLOG(nvmf_fc,
1621 		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
1622 		      nvmf_fc_request_get_state_str(fc_req->state),
1623 		      nvmf_fc_request_get_state_str(state));
1624 	nvmf_fc_record_req_trace_point(fc_req, state);
1625 	fc_req->state = state;
1626 }
1627 
1628 char *
1629 nvmf_fc_request_get_state_str(int state)
1630 {
1631 	static char *unk_str = "unknown";
1632 
1633 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1634 		fc_req_state_strs[state] : unk_str);
1635 }
1636 
1637 int
1638 nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
1639 			   uint32_t buff_idx,
1640 			   struct spdk_nvmf_fc_frame_hdr *frame,
1641 			   struct spdk_nvmf_fc_buffer_desc *buffer,
1642 			   uint32_t plen)
1643 {
1644 	int rc = 0;
1645 	uint32_t s_id, d_id;
1646 	struct spdk_nvmf_fc_nport *nport = NULL;
1647 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
1648 
1649 	s_id = (uint32_t)frame->s_id;
1650 	d_id = (uint32_t)frame->d_id;
1651 	s_id = from_be32(&s_id) >> 8;
1652 	d_id = from_be32(&d_id) >> 8;
1653 
1654 	SPDK_DEBUGLOG(nvmf_fc,
1655 		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
1656 		      s_id, d_id,
1657 		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
1658 		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));
1659 
1660 	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
1661 	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
1662 		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
1663 		struct spdk_nvmf_fc_ls_rqst *ls_rqst;
1664 
1665 		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");
1666 
1667 		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
1668 		if (rc) {
1669 			if (nport == NULL) {
1670 				SPDK_ERRLOG("Nport not found. Dropping\n");
1671 				/* increment invalid nport counter */
1672 				hwqp->counters.nport_invalid++;
1673 			} else if (rport == NULL) {
1674 				SPDK_ERRLOG("Rport not found. Dropping\n");
1675 				/* increment invalid rport counter */
1676 				hwqp->counters.rport_invalid++;
1677 			}
1678 			return rc;
1679 		}
1680 
1681 		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
1682 		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1683 			SPDK_ERRLOG("%s state not created. Dropping\n",
1684 				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
1685 				    "Nport" : "Rport");
1686 			return -EACCES;
1687 		}
1688 
1689 		/* Use the RQ buffer for holding LS request. */
1690 		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;
1691 
1692 		/* Fill in the LS request structure */
1693 		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
1694 		ls_rqst->rqstbuf.phys = buffer->phys +
1695 					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
1696 		ls_rqst->rqstbuf.buf_index = buff_idx;
1697 		ls_rqst->rqst_len = plen;
1698 
1699 		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
1700 		ls_rqst->rspbuf.phys = buffer->phys +
1701 				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
1702 		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;
1703 
1704 		ls_rqst->private_data = (void *)hwqp;
1705 		ls_rqst->rpi = rport->rpi;
1706 		ls_rqst->oxid = (uint16_t)frame->ox_id;
1707 		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
1708 		ls_rqst->s_id = s_id;
1709 		ls_rqst->d_id = d_id;
1710 		ls_rqst->nport = nport;
1711 		ls_rqst->rport = rport;
1712 		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;
1713 
1714 		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
1715 			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
1716 		} else {
1717 			ls_rqst->xchg = NULL;
1718 		}
1719 
1720 		if (ls_rqst->xchg) {
1721 			/* Handover the request to LS module */
1722 			nvmf_fc_handle_ls_rqst(ls_rqst);
1723 		} else {
1724 			/* No XCHG available. Add to pending list. */
1725 			hwqp->counters.no_xchg++;
1726 			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1727 		}
1728 	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
1729 		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {
1730 
1731 		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
1732 		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
1733 		if (!rc) {
1734 			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
1735 		}
1736 	} else {
1737 
1738 		SPDK_ERRLOG("Unknown frame received. Dropping\n");
1739 		hwqp->counters.unknown_frame++;
1740 		rc = -EINVAL;
1741 	}
1742 
1743 	return rc;
1744 }
1745 
1746 void
1747 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1748 {
1749 	struct spdk_nvmf_request *req = NULL, *tmp;
1750 	struct spdk_nvmf_fc_request *fc_req;
1751 	int budget = 64;
1752 
1753 	if (!hwqp->fgroup) {
1754 		/* LS queue is tied to acceptor_poll group and LS pending requests
1755 		 * are stagged and processed using hwqp->ls_pending_queue.
1756 		 */
1757 		return;
1758 	}
1759 
1760 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1761 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1762 		if (!nvmf_fc_request_execute(fc_req)) {
1763 			/* Succesfuly posted, Delete from pending. */
1764 			nvmf_fc_request_remove_from_pending(fc_req);
1765 		}
1766 
1767 		if (budget) {
1768 			budget--;
1769 		} else {
1770 			return;
1771 		}
1772 	}
1773 }
1774 
1775 void
1776 nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
1777 {
1778 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1779 	struct spdk_nvmf_fc_nport *nport = NULL;
1780 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
1781 
1782 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1783 		/* lookup nport and rport again - make sure they are still valid */
1784 		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
1785 		if (rc) {
1786 			if (nport == NULL) {
1787 				SPDK_ERRLOG("Nport not found. Dropping\n");
1788 				/* increment invalid nport counter */
1789 				hwqp->counters.nport_invalid++;
1790 			} else if (rport == NULL) {
1791 				SPDK_ERRLOG("Rport not found. Dropping\n");
1792 				/* increment invalid rport counter */
1793 				hwqp->counters.rport_invalid++;
1794 			}
1795 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1796 			continue;
1797 		}
1798 		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
1799 		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1800 			SPDK_ERRLOG("%s state not created. Dropping\n",
1801 				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
1802 				    "Nport" : "Rport");
1803 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1804 			continue;
1805 		}
1806 
1807 		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
1808 		if (ls_rqst->xchg) {
1809 			/* Got an XCHG */
1810 			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1811 			/* Handover the request to LS module */
1812 			nvmf_fc_handle_ls_rqst(ls_rqst);
1813 		} else {
1814 			/* No more XCHGs. Stop processing. */
1815 			hwqp->counters.no_xchg++;
1816 			return;
1817 		}
1818 	}
1819 }
1820 
1821 int
1822 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1823 {
1824 	int rc = 0;
1825 	struct spdk_nvmf_request *req = &fc_req->req;
1826 	struct spdk_nvmf_qpair *qpair = req->qpair;
1827 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1828 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1829 	uint16_t ersp_len = 0;
1830 
1831 	/* set sq head value in resp */
1832 	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);
1833 
1834 	/* Increment connection responses */
1835 	fc_conn->rsp_count++;
1836 
1837 	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1838 				       fc_req->transfered_len)) {
1839 		/* Fill ERSP Len */
1840 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1841 				    sizeof(uint32_t)));
1842 		fc_req->ersp.ersp_len = ersp_len;
1843 
1844 		/* Fill RSN */
1845 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1846 		fc_conn->rsn++;
1847 
1848 		/* Fill transfer length */
1849 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);
1850 
1851 		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
1852 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1853 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1854 	} else {
1855 		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
1856 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1857 	}
1858 
1859 	return rc;
1860 }
1861 
1862 bool
1863 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1864 			   uint32_t rsp_cnt, uint32_t xfer_len)
1865 {
1866 	struct spdk_nvmf_request *req = &fc_req->req;
1867 	struct spdk_nvmf_qpair *qpair = req->qpair;
1868 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1869 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1870 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1871 	uint16_t status = *((uint16_t *)&rsp->status);
1872 
1873 	/*
1874 	 * Check if we need to send ERSP
1875 	 * 1) For every N responses where N == ersp_ratio
1876 	 * 2) Fabric commands.
1877 	 * 3) Completion status failed or Completion dw0 or dw1 valid.
1878 	 * 4) SQ == 90% full.
1879 	 * 5) Transfer length not equal to CMD IU length
1880 	 */
1881 
1882 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1883 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1884 	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
1885 	    (req->length != xfer_len)) {
1886 		return true;
1887 	}
1888 	return false;
1889 }
1890 
1891 static int
1892 nvmf_fc_request_complete(struct spdk_nvmf_request *req)
1893 {
1894 	int rc = 0;
1895 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1896 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1897 
1898 	if (fc_req->is_aborted) {
1899 		/* Defer this to make sure we dont call io cleanup in same context. */
1900 		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1901 					(void *)fc_req);
1902 	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1903 		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1904 
1905 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1906 
1907 		rc = nvmf_fc_send_data(fc_req);
1908 	} else {
1909 		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1910 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1911 		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1912 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1913 		} else {
1914 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1915 		}
1916 
1917 		rc = nvmf_fc_handle_rsp(fc_req);
1918 	}
1919 
1920 	if (rc) {
1921 		SPDK_ERRLOG("Error in request complete.\n");
1922 		_nvmf_fc_request_free(fc_req);
1923 	}
1924 	return 0;
1925 }
1926 
1927 struct spdk_nvmf_tgt *
1928 nvmf_fc_get_tgt(void)
1929 {
1930 	if (g_nvmf_ftransport) {
1931 		return g_nvmf_ftransport->transport.tgt;
1932 	}
1933 	return NULL;
1934 }
1935 
1936 /*
1937  * FC Transport Public API begins here
1938  */
1939 
1940 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1941 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1942 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1943 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1944 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1945 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1946 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1947 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1948 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1949 
1950 static void
1951 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1952 {
1953 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1954 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1955 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1956 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1957 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1958 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1959 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1960 }
1961 
1962 static struct spdk_nvmf_transport *
1963 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
1964 {
1965 	uint32_t sge_count;
1966 
1967 	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
1968 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1969 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1970 		     "  max_aq_depth=%d\n",
1971 		     opts->max_queue_depth,
1972 		     opts->max_io_size,
1973 		     opts->max_qpairs_per_ctrlr - 1,
1974 		     opts->io_unit_size,
1975 		     opts->max_aq_depth);
1976 
1977 	if (g_nvmf_ftransport) {
1978 		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
1979 		return NULL;
1980 	}
1981 
1982 	if (spdk_env_get_last_core() < 1) {
1983 		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
1984 			    spdk_env_get_last_core() + 1);
1985 		return NULL;
1986 	}
1987 
1988 	sge_count = opts->max_io_size / opts->io_unit_size;
1989 	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
1990 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
1991 		return NULL;
1992 	}
1993 
1994 	g_nvmf_fc_main_thread = spdk_get_thread();
1995 	g_nvmf_fgroup_count = 0;
1996 	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));
1997 
1998 	if (!g_nvmf_ftransport) {
1999 		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
2000 		return NULL;
2001 	}
2002 
2003 	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
2004 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
2005 		free(g_nvmf_ftransport);
2006 		g_nvmf_ftransport = NULL;
2007 		return NULL;
2008 	}
2009 
2010 	/* initialize the low level FC driver */
2011 	nvmf_fc_lld_init();
2012 
2013 	return &g_nvmf_ftransport->transport;
2014 }
2015 
2016 static void
2017 nvmf_fc_destroy_done_cb(void *cb_arg)
2018 {
2019 	free(g_nvmf_ftransport);
2020 	if (g_transport_destroy_done_cb) {
2021 		g_transport_destroy_done_cb(cb_arg);
2022 		g_transport_destroy_done_cb = NULL;
2023 	}
2024 }
2025 
2026 static int
2027 nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
2028 		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
2029 {
2030 	if (transport) {
2031 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
2032 
2033 		/* clean up any FC poll groups still around */
2034 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
2035 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2036 			free(fgroup);
2037 		}
2038 		g_nvmf_fgroup_count = 0;
2039 		g_transport_destroy_done_cb = cb_fn;
2040 
2041 		/* low level FC driver clean up */
2042 		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
2043 	}
2044 
2045 	return 0;
2046 }
2047 
2048 static int
2049 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
2050 	       struct spdk_nvmf_listen_opts *listen_opts)
2051 {
2052 	return 0;
2053 }
2054 
2055 static void
2056 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
2057 		    const struct spdk_nvme_transport_id *_trid)
2058 {
2059 }
2060 
2061 static uint32_t
2062 nvmf_fc_accept(struct spdk_nvmf_transport *transport)
2063 {
2064 	struct spdk_nvmf_fc_port *fc_port = NULL;
2065 	uint32_t count = 0;
2066 	static bool start_lld = false;
2067 
2068 	if (spdk_unlikely(!start_lld)) {
2069 		start_lld  = true;
2070 		nvmf_fc_lld_start();
2071 	}
2072 
2073 	/* poll the LS queue on each port */
2074 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2075 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2076 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2077 		}
2078 	}
2079 
2080 	return count;
2081 }
2082 
2083 static void
2084 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2085 		 struct spdk_nvme_transport_id *trid,
2086 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2087 {
2088 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2089 	entry->adrfam = trid->adrfam;
2090 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2091 
2092 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2093 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2094 }
2095 
2096 static struct spdk_nvmf_transport_poll_group *
2097 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
2098 {
2099 	struct spdk_nvmf_fc_poll_group *fgroup;
2100 	struct spdk_nvmf_fc_transport *ftransport =
2101 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2102 
2103 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2104 	if (!fgroup) {
2105 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2106 		return NULL;
2107 	}
2108 
2109 	TAILQ_INIT(&fgroup->hwqp_list);
2110 
2111 	pthread_mutex_lock(&ftransport->lock);
2112 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2113 	g_nvmf_fgroup_count++;
2114 	pthread_mutex_unlock(&ftransport->lock);
2115 
2116 	return &fgroup->group;
2117 }
2118 
2119 static void
2120 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2121 {
2122 	struct spdk_nvmf_fc_poll_group *fgroup;
2123 	struct spdk_nvmf_fc_transport *ftransport =
2124 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2125 
2126 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2127 	pthread_mutex_lock(&ftransport->lock);
2128 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2129 	g_nvmf_fgroup_count--;
2130 	pthread_mutex_unlock(&ftransport->lock);
2131 
2132 	free(fgroup);
2133 }
2134 
2135 static int
2136 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2137 		       struct spdk_nvmf_qpair *qpair)
2138 {
2139 	struct spdk_nvmf_fc_poll_group *fgroup;
2140 	struct spdk_nvmf_fc_conn *fc_conn;
2141 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2142 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2143 	bool hwqp_found = false;
2144 
2145 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2146 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2147 
2148 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2149 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2150 			hwqp_found = true;
2151 			break;
2152 		}
2153 	}
2154 
2155 	if (!hwqp_found) {
2156 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2157 		goto err;
2158 	}
2159 
2160 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2161 					 &fc_conn->conn_id,
2162 					 fc_conn->max_queue_depth)) {
2163 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2164 		goto err;
2165 	}
2166 
2167 	fc_conn->hwqp = hwqp;
2168 
2169 	/* If this is for ADMIN connection, then update assoc ID. */
2170 	if (fc_conn->qpair.qid == 0) {
2171 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2172 	}
2173 
2174 	api_data = &fc_conn->create_opd->u.add_conn;
2175 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2176 	return 0;
2177 err:
2178 	return -1;
2179 }
2180 
2181 static int
2182 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2183 {
2184 	uint32_t count = 0;
2185 	struct spdk_nvmf_fc_poll_group *fgroup;
2186 	struct spdk_nvmf_fc_hwqp *hwqp;
2187 
2188 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2189 
2190 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2191 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2192 			count += nvmf_fc_process_queue(hwqp);
2193 		}
2194 	}
2195 
2196 	return (int) count;
2197 }
2198 
2199 static int
2200 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2201 {
2202 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2203 
2204 	if (!fc_req->is_aborted) {
2205 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2206 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2207 	} else {
2208 		nvmf_fc_request_abort_complete(fc_req);
2209 	}
2210 
2211 	return 0;
2212 }
2213 
2214 static void
2215 nvmf_fc_connection_delete_done_cb(void *arg)
2216 {
2217 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2218 
2219 	if (fc_ctx->cb_fn) {
2220 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2221 	}
2222 	free(fc_ctx);
2223 }
2224 
2225 static void
2226 _nvmf_fc_close_qpair(void *arg)
2227 {
2228 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2229 	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
2230 	struct spdk_nvmf_fc_conn *fc_conn;
2231 	int rc;
2232 
2233 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2234 	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
2235 		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2236 
2237 		if (fc_conn->create_opd) {
2238 			api_data = &fc_conn->create_opd->u.add_conn;
2239 
2240 			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
2241 						    api_data->args.fc_conn, api_data->aq_conn);
2242 		}
2243 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
2244 		rc = nvmf_fc_delete_connection(fc_conn, false, true,
2245 					       nvmf_fc_connection_delete_done_cb, fc_ctx);
2246 		if (!rc) {
2247 			/* Wait for transport to complete its work. */
2248 			return;
2249 		}
2250 
2251 		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
2252 	}
2253 
2254 	nvmf_fc_connection_delete_done_cb(fc_ctx);
2255 }
2256 
2257 static void
2258 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2259 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2260 {
2261 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2262 
2263 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2264 	if (!fc_ctx) {
2265 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.");
2266 		if (cb_fn) {
2267 			cb_fn(cb_arg);
2268 		}
2269 		return;
2270 	}
2271 	fc_ctx->qpair = qpair;
2272 	fc_ctx->cb_fn = cb_fn;
2273 	fc_ctx->cb_ctx = cb_arg;
2274 	fc_ctx->qpair_thread = spdk_get_thread();
2275 
2276 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2277 }
2278 
2279 static int
2280 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2281 			    struct spdk_nvme_transport_id *trid)
2282 {
2283 	struct spdk_nvmf_fc_conn *fc_conn;
2284 
2285 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2286 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2287 	return 0;
2288 }
2289 
2290 static int
2291 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2292 			     struct spdk_nvme_transport_id *trid)
2293 {
2294 	struct spdk_nvmf_fc_conn *fc_conn;
2295 
2296 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2297 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2298 	return 0;
2299 }
2300 
2301 static int
2302 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2303 			      struct spdk_nvme_transport_id *trid)
2304 {
2305 	struct spdk_nvmf_fc_conn *fc_conn;
2306 
2307 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2308 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2309 	return 0;
2310 }
2311 
2312 static void
2313 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
2314 			    struct spdk_nvmf_request *req)
2315 {
2316 	spdk_nvmf_request_complete(req);
2317 }
2318 
2319 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
2320 	.name = "FC",
2321 	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
2322 	.opts_init = nvmf_fc_opts_init,
2323 	.create = nvmf_fc_create,
2324 	.destroy = nvmf_fc_destroy,
2325 
2326 	.listen = nvmf_fc_listen,
2327 	.stop_listen = nvmf_fc_stop_listen,
2328 	.accept = nvmf_fc_accept,
2329 
2330 	.listener_discover = nvmf_fc_discover,
2331 
2332 	.poll_group_create = nvmf_fc_poll_group_create,
2333 	.poll_group_destroy = nvmf_fc_poll_group_destroy,
2334 	.poll_group_add = nvmf_fc_poll_group_add,
2335 	.poll_group_poll = nvmf_fc_poll_group_poll,
2336 
2337 	.req_complete = nvmf_fc_request_complete,
2338 	.req_free = nvmf_fc_request_free,
2339 	.qpair_fini = nvmf_fc_close_qpair,
2340 	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
2341 	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
2342 	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
2343 	.qpair_abort_request = nvmf_fc_qpair_abort_request,
2344 };
2345 
2346 /* Initializes the data for the creation of a FC-Port object in the SPDK
2347  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2348  * the API to the library. The contents added to this well defined structure
2349  * is private to each vendors implementation.
2350  */
2351 static int
2352 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2353 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2354 {
2355 	int rc = 0;
2356 	/* Used a high number for the LS HWQP so that it does not clash with the
2357 	 * IO HWQP's and immediately shows a LS queue during tracing.
2358 	 */
2359 	uint32_t i;
2360 
2361 	fc_port->port_hdl       = args->port_handle;
2362 	fc_port->lld_fc_port	= args->lld_fc_port;
2363 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2364 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2365 	fc_port->num_io_queues  = args->io_queue_cnt;
2366 
2367 	/*
2368 	 * Set port context from init args. Used for FCP port stats.
2369 	 */
2370 	fc_port->port_ctx = args->port_ctx;
2371 
2372 	/*
2373 	 * Initialize the LS queue wherever needed.
2374 	 */
2375 	fc_port->ls_queue.queues = args->ls_queue;
2376 	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
2377 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2378 	fc_port->ls_queue.is_ls_queue = true;
2379 
2380 	/*
2381 	 * Initialize the LS queue.
2382 	 */
2383 	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2384 	if (rc) {
2385 		return rc;
2386 	}
2387 
2388 	/*
2389 	 * Initialize the IO queues.
2390 	 */
2391 	for (i = 0; i < args->io_queue_cnt; i++) {
2392 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2393 		hwqp->hwqp_id = i;
2394 		hwqp->queues = args->io_queues[i];
2395 		hwqp->is_ls_queue = false;
2396 		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
2397 		if (rc) {
2398 			for (; i > 0; --i) {
2399 				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
2400 				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
2401 			}
2402 			rte_hash_free(fc_port->ls_queue.connection_list_hash);
2403 			rte_hash_free(fc_port->ls_queue.rport_list_hash);
2404 			return rc;
2405 		}
2406 	}
2407 
2408 	/*
2409 	 * Initialize the LS processing for port
2410 	 */
2411 	nvmf_fc_ls_init(fc_port);
2412 
2413 	/*
2414 	 * Initialize the list of nport on this HW port.
2415 	 */
2416 	TAILQ_INIT(&fc_port->nport_list);
2417 	fc_port->num_nports = 0;
2418 
2419 	return 0;
2420 }
2421 
2422 /*
2423  * FC port must have all its nports deleted before transitioning to offline state.
2424  */
2425 static void
2426 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2427 {
2428 	struct spdk_nvmf_fc_nport *nport = NULL;
2429 	/* All nports must have been deleted at this point for this fc port */
2430 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2431 	DEV_VERIFY(fc_port->num_nports == 0);
2432 	/* Mark the nport states to be zombie, if they exist */
2433 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2434 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2435 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2436 		}
2437 	}
2438 }
2439 
2440 static void
2441 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2442 {
2443 	ASSERT_SPDK_FC_MAIN_THREAD();
2444 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2445 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2446 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2447 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2448 	int spdk_err = 0;
2449 	uint8_t port_handle = cb_data->port_handle;
2450 	uint32_t s_id = rport->s_id;
2451 	uint32_t rpi = rport->rpi;
2452 	uint32_t assoc_count = rport->assoc_count;
2453 	uint32_t nport_hdl = nport->nport_hdl;
2454 	uint32_t d_id = nport->d_id;
2455 	char log_str[256];
2456 
2457 	/*
2458 	 * Assert on any delete failure.
2459 	 */
2460 	if (0 != err) {
2461 		DEV_VERIFY(!"Error in IT Delete callback.");
2462 		goto out;
2463 	}
2464 
2465 	if (cb_func != NULL) {
2466 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2467 	}
2468 
2469 out:
2470 	free(cb_data);
2471 
2472 	snprintf(log_str, sizeof(log_str),
2473 		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2474 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2475 
2476 	if (err != 0) {
2477 		SPDK_ERRLOG("%s", log_str);
2478 	} else {
2479 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2480 	}
2481 }
2482 
2483 static void
2484 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
2485 {
2486 	ASSERT_SPDK_FC_MAIN_THREAD();
2487 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
2488 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2489 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2490 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
2491 	uint32_t s_id = rport->s_id;
2492 	uint32_t rpi = rport->rpi;
2493 	uint32_t assoc_count = rport->assoc_count;
2494 	uint32_t nport_hdl = nport->nport_hdl;
2495 	uint32_t d_id = nport->d_id;
2496 	char log_str[256];
2497 
2498 	/*
2499 	 * Assert on any association delete failure. We continue to delete other
2500 	 * associations in promoted builds.
2501 	 */
2502 	if (0 != err) {
2503 		DEV_VERIFY(!"Nport's association delete callback returned error");
2504 		if (nport->assoc_count > 0) {
2505 			nport->assoc_count--;
2506 		}
2507 		if (rport->assoc_count > 0) {
2508 			rport->assoc_count--;
2509 		}
2510 	}
2511 
2512 	/*
2513 	 * If this is the last association being deleted for the ITN,
2514 	 * execute the callback(s).
2515 	 */
2516 	if (0 == rport->assoc_count) {
2517 		/* Remove the rport from the remote port list. */
2518 		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
2519 			SPDK_ERRLOG("Error while removing rport from list.\n");
2520 			DEV_VERIFY(!"Error while removing rport from list.");
2521 		}
2522 
2523 		if (cb_func != NULL) {
2524 			/*
2525 			 * Callback function is provided by the caller
2526 			 * of nvmf_fc_adm_i_t_delete_assoc().
2527 			 */
2528 			(void)cb_func(cb_data->cb_ctx, 0);
2529 		}
2530 		free(rport);
2531 		free(args);
2532 	}
2533 
2534 	snprintf(log_str, sizeof(log_str),
2535 		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
2536 		 nport_hdl, s_id, d_id, rpi, assoc_count, err);
2537 
2538 	if (err != 0) {
2539 		SPDK_ERRLOG("%s", log_str);
2540 	} else {
2541 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2542 	}
2543 }
2544 
2545 /**
2546  * Process a IT delete.
2547  */
2548 static void
2549 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
2550 			     struct spdk_nvmf_fc_remote_port_info *rport,
2551 			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
2552 			     void *cb_ctx)
2553 {
2554 	int err = 0;
2555 	struct spdk_nvmf_fc_association *assoc = NULL;
2556 	int assoc_err = 0;
2557 	uint32_t num_assoc = 0;
2558 	uint32_t num_assoc_del_scheduled = 0;
2559 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
2560 	uint8_t port_hdl = nport->port_hdl;
2561 	uint32_t s_id = rport->s_id;
2562 	uint32_t rpi = rport->rpi;
2563 	uint32_t assoc_count = rport->assoc_count;
2564 	char log_str[256];
2565 
2566 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
2567 		      nport->nport_hdl);
2568 
2569 	/*
2570 	 * Allocate memory for callback data.
2571 	 * This memory will be freed by the callback function.
2572 	 */
2573 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
2574 	if (NULL == cb_data) {
2575 		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
2576 		err = -ENOMEM;
2577 		goto out;
2578 	}
2579 	cb_data->nport       = nport;
2580 	cb_data->rport       = rport;
2581 	cb_data->port_handle = port_hdl;
2582 	cb_data->cb_func     = cb_func;
2583 	cb_data->cb_ctx      = cb_ctx;
2584 
2585 	/*
2586 	 * Delete all associations, if any, related with this ITN/remote_port.
2587 	 */
2588 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
2589 		num_assoc++;
2590 		if (assoc->s_id == s_id) {
2591 			assoc_err = nvmf_fc_delete_association(nport,
2592 							       assoc->assoc_id,
2593 							       false /* send abts */, false,
2594 							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
2595 			if (0 != assoc_err) {
2596 				/*
2597 				 * Mark this association as zombie.
2598 				 */
2599 				err = -EINVAL;
2600 				DEV_VERIFY(!"Error while deleting association");
2601 				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2602 			} else {
2603 				num_assoc_del_scheduled++;
2604 			}
2605 		}
2606 	}
2607 
2608 out:
2609 	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
2610 		/*
2611 		 * Since there are no association_delete calls
2612 		 * successfully scheduled, the association_delete
2613 		 * callback function will never be called.
2614 		 * In this case, call the callback function now.
2615 		 */
2616 		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
2617 	}
2618 
2619 	snprintf(log_str, sizeof(log_str),
2620 		 "IT delete associations on nport:%d end. "
2621 		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
2622 		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);
2623 
2624 	if (err == 0) {
2625 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2626 	} else {
2627 		SPDK_ERRLOG("%s", log_str);
2628 	}
2629 }
2630 
2631 static void
2632 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
2633 {
2634 	ASSERT_SPDK_FC_MAIN_THREAD();
2635 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
2636 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2637 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2638 	struct spdk_nvmf_fc_port *fc_port = NULL;
2639 	int err = 0;
2640 
2641 	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
2642 	hwqp = quiesce_api_data->hwqp;
2643 	fc_port = hwqp->fc_port;
2644 	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
2645 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;
2646 
2647 	/*
2648 	 * Decrement the callback/quiesced queue count.
2649 	 */
2650 	port_quiesce_ctx->quiesce_count--;
2651 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);
2652 
2653 	free(quiesce_api_data);
2654 	/*
2655 	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
2656 	 */
2657 	if (port_quiesce_ctx->quiesce_count > 0) {
2658 		return;
2659 	}
2660 
2661 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2662 		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
2663 	} else {
2664 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
2665 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2666 	}
2667 
2668 	if (cb_func) {
2669 		/*
2670 		 * Callback function for the called of quiesce.
2671 		 */
2672 		cb_func(port_quiesce_ctx->ctx, err);
2673 	}
2674 
2675 	/*
2676 	 * Free the context structure.
2677 	 */
2678 	free(port_quiesce_ctx);
2679 
2680 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
2681 		      err);
2682 }
2683 
2684 static int
2685 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2686 			     spdk_nvmf_fc_poller_api_cb cb_func)
2687 {
2688 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2689 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2690 	int err = 0;
2691 
2692 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2693 
2694 	if (args == NULL) {
2695 		err = -ENOMEM;
2696 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2697 		goto done;
2698 	}
2699 	args->hwqp = fc_hwqp;
2700 	args->ctx = ctx;
2701 	args->cb_info.cb_func = cb_func;
2702 	args->cb_info.cb_data = args;
2703 	args->cb_info.cb_thread = spdk_get_thread();
2704 
2705 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2706 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2707 	if (rc) {
2708 		free(args);
2709 		err = -EINVAL;
2710 	}
2711 
2712 done:
2713 	return err;
2714 }
2715 
2716 /*
2717  * Hw port Quiesce
2718  */
2719 static int
2720 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2721 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2722 {
2723 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2724 	uint32_t i = 0;
2725 	int err = 0;
2726 
2727 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2728 
2729 	/*
2730 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2731 	 * and execute the callback.
2732 	 */
2733 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2734 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2735 	}
2736 
2737 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2738 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
2739 			      fc_port->port_hdl);
2740 		/*
2741 		 * Execute the callback function directly.
2742 		 */
2743 		cb_func(ctx, err);
2744 		goto out;
2745 	}
2746 
2747 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2748 
2749 	if (port_quiesce_ctx == NULL) {
2750 		err = -ENOMEM;
2751 		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
2752 			    fc_port->port_hdl);
2753 		goto out;
2754 	}
2755 
2756 	port_quiesce_ctx->quiesce_count = 0;
2757 	port_quiesce_ctx->ctx = ctx;
2758 	port_quiesce_ctx->cb_func = cb_func;
2759 
2760 	/*
2761 	 * Quiesce the LS queue.
2762 	 */
2763 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2764 					   nvmf_fc_adm_queue_quiesce_cb);
2765 	if (err != 0) {
2766 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2767 		goto out;
2768 	}
2769 	port_quiesce_ctx->quiesce_count++;
2770 
2771 	/*
2772 	 * Quiesce the IO queues.
2773 	 */
2774 	for (i = 0; i < fc_port->num_io_queues; i++) {
2775 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2776 						   port_quiesce_ctx,
2777 						   nvmf_fc_adm_queue_quiesce_cb);
2778 		if (err != 0) {
2779 			DEV_VERIFY(0);
2780 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2781 		}
2782 		port_quiesce_ctx->quiesce_count++;
2783 	}
2784 
2785 out:
2786 	if (port_quiesce_ctx && err != 0) {
2787 		free(port_quiesce_ctx);
2788 	}
2789 	return err;
2790 }
2791 
2792 /*
2793  * Initialize and add a HW port entry to the global
2794  * HW port list.
2795  */
2796 static void
2797 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2798 {
2799 	ASSERT_SPDK_FC_MAIN_THREAD();
2800 	struct spdk_nvmf_fc_port *fc_port = NULL;
2801 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2802 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2803 			api_data->api_args;
2804 	int err = 0;
2805 
2806 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2807 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2808 		err = EINVAL;
2809 		goto abort_port_init;
2810 	}
2811 
2812 	/*
2813 	 * 1. Check for duplicate initialization.
2814 	 */
2815 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2816 	if (fc_port != NULL) {
2817 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2818 		goto abort_port_init;
2819 	}
2820 
2821 	/*
2822 	 * 2. Get the memory to instantiate a fc port.
2823 	 */
2824 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2825 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2826 	if (fc_port == NULL) {
2827 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2828 		err = -ENOMEM;
2829 		goto abort_port_init;
2830 	}
2831 
2832 	/* assign the io_queues array */
2833 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2834 				     struct spdk_nvmf_fc_port));
2835 
2836 	/*
2837 	 * 3. Initialize the contents for the FC-port
2838 	 */
2839 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2840 
2841 	if (err != 0) {
2842 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2843 		DEV_VERIFY(!"Data initialization failed for fc_port");
2844 		goto abort_port_init;
2845 	}
2846 
2847 	/*
2848 	 * 4. Add this port to the global fc port list in the library.
2849 	 */
2850 	nvmf_fc_port_add(fc_port);
2851 
2852 abort_port_init:
2853 	if (err && fc_port) {
2854 		free(fc_port);
2855 	}
2856 	if (api_data->cb_func != NULL) {
2857 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2858 	}
2859 
2860 	free(arg);
2861 
2862 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2863 		      args->port_handle, err);
2864 }
2865 
2866 static void
2867 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2868 {
2869 	struct spdk_nvmf_fc_abts_ctx *ctx;
2870 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2871 
2872 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2873 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2874 		ctx = args->cb_info.cb_data;
2875 		if (ctx) {
2876 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2877 				free(ctx->sync_poller_args);
2878 				free(ctx->abts_poller_args);
2879 				spdk_free(ctx);
2880 			}
2881 		}
2882 	}
2883 }
2884 
2885 static void
2886 nvmf_fc_adm_evnt_hw_port_free(void *arg)
2887 {
2888 	ASSERT_SPDK_FC_MAIN_THREAD();
2889 	int err = 0, i;
2890 	struct spdk_nvmf_fc_port *fc_port = NULL;
2891 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2892 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2893 	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
2894 			api_data->api_args;
2895 
2896 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2897 	if (!fc_port) {
2898 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2899 		err = -EINVAL;
2900 		goto out;
2901 	}
2902 
2903 	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
2904 		SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle);
2905 		err = -EIO;
2906 		goto out;
2907 	}
2908 
2909 	/* Clean up and free fc_port */
2910 	hwqp = &fc_port->ls_queue;
2911 	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
2912 	rte_hash_free(hwqp->connection_list_hash);
2913 	rte_hash_free(hwqp->rport_list_hash);
2914 
2915 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2916 		hwqp = &fc_port->io_queues[i];
2917 
2918 		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
2919 		rte_hash_free(hwqp->connection_list_hash);
2920 		rte_hash_free(hwqp->rport_list_hash);
2921 	}
2922 
2923 	nvmf_fc_port_remove(fc_port);
2924 	free(fc_port);
2925 out:
2926 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
2927 		      args->port_handle, err);
2928 	if (api_data->cb_func != NULL) {
2929 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
2930 	}
2931 
2932 	free(arg);
2933 }
2934 
2935 /*
2936  * Online a HW port.
2937  */
2938 static void
2939 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2940 {
2941 	ASSERT_SPDK_FC_MAIN_THREAD();
2942 	struct spdk_nvmf_fc_port *fc_port = NULL;
2943 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2944 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2945 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2946 			api_data->api_args;
2947 	int i = 0;
2948 	int err = 0;
2949 
2950 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2951 	if (fc_port) {
2952 		/* Set the port state to online */
2953 		err = nvmf_fc_port_set_online(fc_port);
2954 		if (err != 0) {
2955 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2956 			DEV_VERIFY(!"Hw port online failed");
2957 			goto out;
2958 		}
2959 
2960 		hwqp = &fc_port->ls_queue;
2961 		hwqp->context = NULL;
2962 		(void)nvmf_fc_hwqp_set_online(hwqp);
2963 
2964 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2965 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2966 			hwqp = &fc_port->io_queues[i];
2967 			hwqp->context = NULL;
2968 			(void)nvmf_fc_hwqp_set_online(hwqp);
2969 			nvmf_fc_poll_group_add_hwqp(hwqp);
2970 		}
2971 	} else {
2972 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2973 		err = -EINVAL;
2974 	}
2975 
2976 out:
2977 	if (api_data->cb_func != NULL) {
2978 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2979 	}
2980 
2981 	free(arg);
2982 
2983 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
2984 		      err);
2985 }
2986 
2987 static void
2988 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
2989 {
2990 	int err = 0;
2991 	struct spdk_nvmf_fc_port *fc_port = NULL;
2992 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
2993 	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;
2994 
2995 	if (--remove_hwqp_args->pending_remove_hwqp) {
2996 		return;
2997 	}
2998 
2999 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3000 	if (!fc_port) {
3001 		err = -EINVAL;
3002 		SPDK_ERRLOG("fc_port not found.\n");
3003 		goto out;
3004 	}
3005 
3006 	/*
3007 	 * Delete all the nports. Ideally, the nports should have been purged
3008 	 * before the offline event, in which case, only a validation is required.
3009 	 */
3010 	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
3011 out:
3012 	if (remove_hwqp_args->cb_fn) {
3013 		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3014 	}
3015 
3016 	free(remove_hwqp_args);
3017 }
3018 
3019 /*
3020  * Offline a HW port.
3021  */
3022 static void
3023 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3024 {
3025 	ASSERT_SPDK_FC_MAIN_THREAD();
3026 	struct spdk_nvmf_fc_port *fc_port = NULL;
3027 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3028 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3029 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3030 			api_data->api_args;
3031 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3032 	int i = 0;
3033 	int err = 0;
3034 
3035 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3036 	if (fc_port) {
3037 		/* Set the port state to offline, if it is not already. */
3038 		err = nvmf_fc_port_set_offline(fc_port);
3039 		if (err != 0) {
3040 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3041 			err = 0;
3042 			goto out;
3043 		}
3044 
3045 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3046 		if (!remove_hwqp_args) {
3047 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3048 			err = -ENOMEM;
3049 			goto out;
3050 		}
3051 		remove_hwqp_args->cb_fn = api_data->cb_func;
3052 		remove_hwqp_args->cb_args = api_data->api_args;
3053 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
3054 
3055 		hwqp = &fc_port->ls_queue;
3056 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3057 
3058 		/* Remove poller for all the io queues. */
3059 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3060 			hwqp = &fc_port->io_queues[i];
3061 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3062 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3063 						       remove_hwqp_args);
3064 		}
3065 
3066 		free(arg);
3067 
3068 		/* Wait untill all the hwqps are removed from poll groups. */
3069 		return;
3070 	} else {
3071 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3072 		err = -EINVAL;
3073 	}
3074 out:
3075 	if (api_data->cb_func != NULL) {
3076 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3077 	}
3078 
3079 	free(arg);
3080 
3081 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3082 		      err);
3083 }
3084 
3085 struct nvmf_fc_add_rem_listener_ctx {
3086 	struct spdk_nvmf_subsystem *subsystem;
3087 	bool add_listener;
3088 	struct spdk_nvme_transport_id trid;
3089 };
3090 
3091 static void
3092 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3093 {
3094 	ASSERT_SPDK_FC_MAIN_THREAD();
3095 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3096 	free(ctx);
3097 }
3098 
3099 static void
3100 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3101 {
3102 	ASSERT_SPDK_FC_MAIN_THREAD();
3103 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3104 
3105 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3106 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3107 		free(ctx);
3108 	}
3109 }
3110 
3111 static void
3112 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3113 {
3114 	ASSERT_SPDK_FC_MAIN_THREAD();
3115 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3116 
3117 	if (ctx->add_listener) {
3118 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3119 	} else {
3120 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3121 		nvmf_fc_adm_listen_done(ctx, 0);
3122 	}
3123 }
3124 
3125 static int
3126 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3127 {
3128 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3129 	struct spdk_nvmf_subsystem *subsystem;
3130 
3131 	if (!tgt) {
3132 		SPDK_ERRLOG("No nvmf target defined\n");
3133 		return -EINVAL;
3134 	}
3135 
3136 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3137 	while (subsystem) {
3138 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3139 
3140 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3141 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3142 			if (ctx) {
3143 				ctx->add_listener = add;
3144 				ctx->subsystem = subsystem;
3145 				nvmf_fc_create_trid(&ctx->trid,
3146 						    nport->fc_nodename.u.wwn,
3147 						    nport->fc_portname.u.wwn);
3148 
3149 				if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) {
3150 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3151 						    ctx->trid.traddr);
3152 					free(ctx);
3153 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3154 								     nvmf_fc_adm_subsystem_paused_cb,
3155 								     ctx)) {
3156 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3157 						    subsystem->subnqn);
3158 					free(ctx);
3159 				}
3160 			}
3161 		}
3162 
3163 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3164 	}
3165 
3166 	return 0;
3167 }
3168 
3169 /*
3170  * Create a Nport.
3171  */
3172 static void
3173 nvmf_fc_adm_evnt_nport_create(void *arg)
3174 {
3175 	ASSERT_SPDK_FC_MAIN_THREAD();
3176 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3177 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3178 			api_data->api_args;
3179 	struct spdk_nvmf_fc_nport *nport = NULL;
3180 	struct spdk_nvmf_fc_port *fc_port = NULL;
3181 	int err = 0;
3182 
3183 	/*
3184 	 * Get the physical port.
3185 	 */
3186 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3187 	if (fc_port == NULL) {
3188 		err = -EINVAL;
3189 		goto out;
3190 	}
3191 
3192 	/*
3193 	 * Check for duplicate initialization.
3194 	 */
3195 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3196 	if (nport != NULL) {
3197 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3198 			    args->port_handle);
3199 		err = -EINVAL;
3200 		goto out;
3201 	}
3202 
3203 	/*
3204 	 * Get the memory to instantiate a fc nport.
3205 	 */
3206 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3207 	if (nport == NULL) {
3208 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3209 			    args->nport_handle);
3210 		err = -ENOMEM;
3211 		goto out;
3212 	}
3213 
3214 	/*
3215 	 * Initialize the contents for the nport
3216 	 */
3217 	nport->nport_hdl    = args->nport_handle;
3218 	nport->port_hdl     = args->port_handle;
3219 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3220 	nport->fc_nodename  = args->fc_nodename;
3221 	nport->fc_portname  = args->fc_portname;
3222 	nport->d_id         = args->d_id;
3223 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3224 
3225 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3226 	TAILQ_INIT(&nport->rem_port_list);
3227 	nport->rport_count = 0;
3228 	TAILQ_INIT(&nport->fc_associations);
3229 	nport->assoc_count = 0;
3230 
3231 	/*
3232 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3233 	 */
3234 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3235 
3236 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3237 out:
3238 	if (err && nport) {
3239 		free(nport);
3240 	}
3241 
3242 	if (api_data->cb_func != NULL) {
3243 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3244 	}
3245 
3246 	free(arg);
3247 }
3248 
3249 static void
3250 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
3251 			    void *cb_args, int spdk_err)
3252 {
3253 	ASSERT_SPDK_FC_MAIN_THREAD();
3254 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
3255 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
3256 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
3257 	int err = 0;
3258 	uint16_t nport_hdl = 0;
3259 	char log_str[256];
3260 
3261 	/*
3262 	 * Assert on any delete failure.
3263 	 */
3264 	if (nport == NULL) {
3265 		SPDK_ERRLOG("Nport delete callback returned null nport");
3266 		DEV_VERIFY(!"nport is null.");
3267 		goto out;
3268 	}
3269 
3270 	nport_hdl = nport->nport_hdl;
3271 	if (0 != spdk_err) {
3272 		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
3273 			    "%d, Nport: %d\n",
3274 			    nport->port_hdl, nport->nport_hdl);
3275 		DEV_VERIFY(!"nport delete callback error.");
3276 	}
3277 
3278 	/*
3279 	 * Free the nport if this is the last rport being deleted and
3280 	 * execute the callback(s).
3281 	 */
3282 	if (nvmf_fc_nport_has_no_rport(nport)) {
3283 		if (0 != nport->assoc_count) {
3284 			SPDK_ERRLOG("association count != 0\n");
3285 			DEV_VERIFY(!"association count != 0");
3286 		}
3287 
3288 		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
3289 		if (0 != err) {
3290 			SPDK_ERRLOG("Nport delete callback: Failed to remove "
3291 				    "nport from nport list. FC Port:%d Nport:%d\n",
3292 				    nport->port_hdl, nport->nport_hdl);
3293 		}
3294 		/* Free the nport */
3295 		free(nport);
3296 
3297 		if (cb_func != NULL) {
3298 			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
3299 		}
3300 		free(cb_data);
3301 	}
3302 out:
3303 	snprintf(log_str, sizeof(log_str),
3304 		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
3305 		 port_handle, nport_hdl, event_type, spdk_err);
3306 
3307 	if (err != 0) {
3308 		SPDK_ERRLOG("%s", log_str);
3309 	} else {
3310 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3311 	}
3312 }
3313 
3314 /*
3315  * Delete Nport.
3316  */
3317 static void
3318 nvmf_fc_adm_evnt_nport_delete(void *arg)
3319 {
3320 	ASSERT_SPDK_FC_MAIN_THREAD();
3321 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3322 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3323 			api_data->api_args;
3324 	struct spdk_nvmf_fc_nport *nport = NULL;
3325 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3326 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3327 	int err = 0;
3328 	uint32_t rport_cnt = 0;
3329 	int rc = 0;
3330 
3331 	/*
3332 	 * Make sure that the nport exists.
3333 	 */
3334 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3335 	if (nport == NULL) {
3336 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3337 			    args->port_handle);
3338 		err = -EINVAL;
3339 		goto out;
3340 	}
3341 
3342 	/*
3343 	 * Allocate memory for callback data.
3344 	 */
3345 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3346 	if (NULL == cb_data) {
3347 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3348 		err = -ENOMEM;
3349 		goto out;
3350 	}
3351 
3352 	cb_data->nport = nport;
3353 	cb_data->port_handle = args->port_handle;
3354 	cb_data->fc_cb_func = api_data->cb_func;
3355 	cb_data->fc_cb_ctx = args->cb_ctx;
3356 
3357 	/*
3358 	 * Begin nport tear down
3359 	 */
3360 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3361 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3362 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3363 		/*
3364 		 * Deletion of this nport already in progress. Register callback
3365 		 * and return.
3366 		 */
3367 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3368 		err = -ENODEV;
3369 		goto out;
3370 	} else {
3371 		/* nport partially created/deleted */
3372 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3373 		DEV_VERIFY(0 != "Nport in zombie state");
3374 		err = -ENODEV;
3375 		goto out;
3376 	}
3377 
3378 	/*
3379 	 * Remove this nport from listening addresses across subsystems
3380 	 */
3381 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3382 
3383 	if (0 != rc) {
3384 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3385 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3386 			    nport->nport_hdl);
3387 		goto out;
3388 	}
3389 
3390 	/*
3391 	 * Delete all the remote ports (if any) for the nport
3392 	 */
3393 	/* TODO - Need to do this with a "first" and a "next" accessor function
3394 	 * for completeness. Look at app-subsystem as examples.
3395 	 */
3396 	if (nvmf_fc_nport_has_no_rport(nport)) {
3397 		/* No rports to delete. Complete the nport deletion. */
3398 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3399 		goto out;
3400 	}
3401 
3402 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3403 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3404 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3405 
3406 		if (it_del_args == NULL) {
3407 			err = -ENOMEM;
3408 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3409 				    rport_iter->rpi, rport_iter->s_id);
3410 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3411 			goto out;
3412 		}
3413 
3414 		rport_cnt++;
3415 		it_del_args->port_handle = nport->port_hdl;
3416 		it_del_args->nport_handle = nport->nport_hdl;
3417 		it_del_args->cb_ctx = (void *)cb_data;
3418 		it_del_args->rpi = rport_iter->rpi;
3419 		it_del_args->s_id = rport_iter->s_id;
3420 
3421 		nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3422 					   nvmf_fc_adm_delete_nport_cb);
3423 	}
3424 
3425 out:
3426 	/* On failure, execute the callback function now */
3427 	if ((err != 0) || (rc != 0)) {
3428 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3429 			    "rport_cnt:%d rc:%d.\n",
3430 			    args->nport_handle, err, args->port_handle,
3431 			    rport_cnt, rc);
3432 		if (cb_data) {
3433 			free(cb_data);
3434 		}
3435 		if (api_data->cb_func != NULL) {
3436 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3437 		}
3438 
3439 	} else {
3440 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3441 			      "NPort %d delete done succesfully, fc port:%d. "
3442 			      "rport_cnt:%d\n",
3443 			      args->nport_handle, args->port_handle, rport_cnt);
3444 	}
3445 
3446 	free(arg);
3447 }
3448 
3449 /*
3450  * Process an PRLI/IT add.
3451  */
3452 static void
3453 nvmf_fc_adm_evnt_i_t_add(void *arg)
3454 {
3455 	ASSERT_SPDK_FC_MAIN_THREAD();
3456 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3457 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3458 			api_data->api_args;
3459 	struct spdk_nvmf_fc_nport *nport = NULL;
3460 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3461 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3462 	int err = 0;
3463 
3464 	/*
3465 	 * Make sure the nport port exists.
3466 	 */
3467 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3468 	if (nport == NULL) {
3469 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3470 		err = -EINVAL;
3471 		goto out;
3472 	}
3473 
3474 	/*
3475 	 * Check for duplicate i_t_add.
3476 	 */
3477 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3478 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3479 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3480 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3481 			err = -EEXIST;
3482 			goto out;
3483 		}
3484 	}
3485 
3486 	/*
3487 	 * Get the memory to instantiate the remote port
3488 	 */
3489 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3490 	if (rport == NULL) {
3491 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3492 		err = -ENOMEM;
3493 		goto out;
3494 	}
3495 
3496 	/*
3497 	 * Initialize the contents for the rport
3498 	 */
3499 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3500 	rport->s_id = args->s_id;
3501 	rport->rpi = args->rpi;
3502 	rport->fc_nodename = args->fc_nodename;
3503 	rport->fc_portname = args->fc_portname;
3504 
3505 	/*
3506 	 * Add remote port to nport
3507 	 */
3508 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3509 		DEV_VERIFY(!"Error while adding rport to list");
3510 	};
3511 
3512 	/*
3513 	 * TODO: Do we validate the initiators service parameters?
3514 	 */
3515 
3516 	/*
3517 	 * Get the targets service parameters from the library
3518 	 * to return back to the driver.
3519 	 */
3520 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3521 
3522 out:
3523 	if (api_data->cb_func != NULL) {
3524 		/*
3525 		 * Passing pointer to the args struct as the first argument.
3526 		 * The cb_func should handle this appropriately.
3527 		 */
3528 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3529 	}
3530 
3531 	free(arg);
3532 
3533 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3534 		      "IT add on nport %d done, rc = %d.\n",
3535 		      args->nport_handle, err);
3536 }
3537 
3538 /**
3539  * Process a IT delete.
3540  */
3541 static void
3542 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3543 {
3544 	ASSERT_SPDK_FC_MAIN_THREAD();
3545 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3546 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3547 			api_data->api_args;
3548 	int rc = 0;
3549 	struct spdk_nvmf_fc_nport *nport = NULL;
3550 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3551 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3552 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3553 	uint32_t num_rport = 0;
3554 	char log_str[256];
3555 
3556 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3557 
3558 	/*
3559 	 * Make sure the nport port exists. If it does not, error out.
3560 	 */
3561 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3562 	if (nport == NULL) {
3563 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3564 		rc = -EINVAL;
3565 		goto out;
3566 	}
3567 
3568 	/*
3569 	 * Find this ITN / rport (remote port).
3570 	 */
3571 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3572 		num_rport++;
3573 		if ((rport_iter->s_id == args->s_id) &&
3574 		    (rport_iter->rpi == args->rpi) &&
3575 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3576 			rport = rport_iter;
3577 			break;
3578 		}
3579 	}
3580 
3581 	/*
3582 	 * We should find either zero or exactly one rport.
3583 	 *
3584 	 * If we find zero rports, that means that a previous request has
3585 	 * removed the rport by the time we reached here. In this case,
3586 	 * simply return out.
3587 	 */
3588 	if (rport == NULL) {
3589 		rc = -ENODEV;
3590 		goto out;
3591 	}
3592 
3593 	/*
3594 	 * We have the rport slated for deletion. At this point clean up
3595 	 * any LS requests that are sitting in the pending list. Do this
3596 	 * first, then, set the states of the rport so that new LS requests
3597 	 * are not accepted. Then start the cleanup.
3598 	 */
3599 	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);
3600 
3601 	/*
3602 	 * We have found exactly one rport. Allocate memory for callback data.
3603 	 */
3604 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3605 	if (NULL == cb_data) {
3606 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3607 		rc = -ENOMEM;
3608 		goto out;
3609 	}
3610 
3611 	cb_data->nport = nport;
3612 	cb_data->rport = rport;
3613 	cb_data->port_handle = args->port_handle;
3614 	cb_data->fc_cb_func = api_data->cb_func;
3615 	cb_data->fc_cb_ctx = args->cb_ctx;
3616 
3617 	/*
3618 	 * Validate rport object state.
3619 	 */
3620 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3621 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3622 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3623 		/*
3624 		 * Deletion of this rport already in progress. Register callback
3625 		 * and return.
3626 		 */
3627 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3628 		rc = -ENODEV;
3629 		goto out;
3630 	} else {
3631 		/* rport partially created/deleted */
3632 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3633 		DEV_VERIFY(!"Invalid rport_state");
3634 		rc = -ENODEV;
3635 		goto out;
3636 	}
3637 
3638 	/*
3639 	 * We have successfully found a rport to delete. Call
3640 	 * nvmf_fc_i_t_delete_assoc(), which will perform further
3641 	 * IT-delete processing as well as free the cb_data.
3642 	 */
3643 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3644 				     (void *)cb_data);
3645 
3646 out:
3647 	if (rc != 0) {
3648 		/*
3649 		 * We have entered here because either we encountered an
3650 		 * error, or we did not find a rport to delete.
3651 		 * As a result, we will not call the function
3652 		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
3653 		 * processing. Therefore, execute the callback function now.
3654 		 */
3655 		if (cb_data) {
3656 			free(cb_data);
3657 		}
3658 		if (api_data->cb_func != NULL) {
3659 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3660 		}
3661 	}
3662 
3663 	snprintf(log_str, sizeof(log_str),
3664 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3665 		 args->nport_handle, num_rport, rc);
3666 
3667 	if (rc != 0) {
3668 		SPDK_ERRLOG("%s", log_str);
3669 	} else {
3670 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3671 	}
3672 
3673 	free(arg);
3674 }
3675 
3676 /*
3677  * Process ABTS received
3678  */
3679 static void
3680 nvmf_fc_adm_evnt_abts_recv(void *arg)
3681 {
3682 	ASSERT_SPDK_FC_MAIN_THREAD();
3683 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3684 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3685 	struct spdk_nvmf_fc_nport *nport = NULL;
3686 	int err = 0;
3687 
3688 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3689 		      args->oxid, args->rxid);
3690 
3691 	/*
3692 	 * 1. Make sure the nport port exists.
3693 	 */
3694 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3695 	if (nport == NULL) {
3696 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3697 		err = -EINVAL;
3698 		goto out;
3699 	}
3700 
3701 	/*
3702 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3703 	 */
3704 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3705 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3706 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3707 			      args->rpi, args->oxid, args->rxid);
3708 		err = 0;
3709 		goto out;
3710 
3711 	}
3712 
3713 	/*
3714 	 * 3. Pass the received ABTS-LS to the library for handling.
3715 	 */
3716 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3717 
3718 out:
3719 	if (api_data->cb_func != NULL) {
3720 		/*
3721 		 * Passing pointer to the args struct as the first argument.
3722 		 * The cb_func should handle this appropriately.
3723 		 */
3724 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3725 	} else {
3726 		/* No callback set, free the args */
3727 		free(args);
3728 	}
3729 
3730 	free(arg);
3731 }
3732 
3733 /*
3734  * Callback function for hw port quiesce.
3735  */
3736 static void
3737 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3738 {
3739 	ASSERT_SPDK_FC_MAIN_THREAD();
3740 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3741 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3742 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3743 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3744 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3745 	struct spdk_nvmf_fc_port *fc_port = NULL;
3746 	char *dump_buf = NULL;
3747 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3748 
3749 	/*
3750 	 * Free the callback context struct.
3751 	 */
3752 	free(ctx);
3753 
3754 	if (err != 0) {
3755 		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
3756 		goto out;
3757 	}
3758 
3759 	if (args->dump_queues == false) {
3760 		/*
3761 		 * Queues need not be dumped.
3762 		 */
3763 		goto out;
3764 	}
3765 
3766 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3767 
3768 	/*
3769 	 * Get the fc port.
3770 	 */
3771 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3772 	if (fc_port == NULL) {
3773 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3774 		err = -EINVAL;
3775 		goto out;
3776 	}
3777 
3778 	/*
3779 	 * Allocate memory for the dump buffer.
3780 	 * This memory will be freed by FCT.
3781 	 */
3782 	dump_buf = (char *)calloc(1, dump_buf_size);
3783 	if (dump_buf == NULL) {
3784 		err = -ENOMEM;
3785 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3786 		goto out;
3787 	}
3788 	*args->dump_buf  = (uint32_t *)dump_buf;
3789 	dump_info.buffer = dump_buf;
3790 	dump_info.offset = 0;
3791 
3792 	/*
3793 	 * Add the dump reason to the top of the buffer.
3794 	 */
3795 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3796 
3797 	/*
3798 	 * Dump the hwqp.
3799 	 */
3800 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3801 				fc_port->num_io_queues, &dump_info);
3802 
3803 out:
3804 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3805 		      args->port_handle, args->dump_queues, err);
3806 
3807 	if (cb_func != NULL) {
3808 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3809 	}
3810 }
3811 
3812 /*
3813  * HW port reset
3814 
3815  */
3816 static void
3817 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3818 {
3819 	ASSERT_SPDK_FC_MAIN_THREAD();
3820 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3821 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3822 			api_data->api_args;
3823 	struct spdk_nvmf_fc_port *fc_port = NULL;
3824 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3825 	int err = 0;
3826 
3827 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3828 
3829 	/*
3830 	 * Make sure the physical port exists.
3831 	 */
3832 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3833 	if (fc_port == NULL) {
3834 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3835 		err = -EINVAL;
3836 		goto out;
3837 	}
3838 
3839 	/*
3840 	 * Save the reset event args and the callback in a context struct.
3841 	 */
3842 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3843 
3844 	if (ctx == NULL) {
3845 		err = -ENOMEM;
3846 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3847 		goto fail;
3848 	}
3849 
3850 	ctx->reset_args = args;
3851 	ctx->reset_cb_func = api_data->cb_func;
3852 
3853 	/*
3854 	 * Quiesce the hw port.
3855 	 */
3856 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3857 	if (err != 0) {
3858 		goto fail;
3859 	}
3860 
3861 	/*
3862 	 * Once the ports are successfully quiesced the reset processing
3863 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3864 	 */
3865 	return;
3866 fail:
3867 	free(ctx);
3868 
3869 out:
3870 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3871 		      err);
3872 
3873 	if (api_data->cb_func != NULL) {
3874 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3875 	}
3876 
3877 	free(arg);
3878 }
3879 
3880 static inline void
3881 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3882 {
3883 	if (nvmf_fc_get_main_thread()) {
3884 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3885 	}
3886 }
3887 
3888 /*
3889  * Queue up an event in the SPDK main threads event queue.
3890  * Used by the FC driver to notify the SPDK main thread of FC related events.
3891  */
3892 int
3893 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3894 			   spdk_nvmf_fc_callback cb_func)
3895 {
3896 	int err = 0;
3897 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3898 	spdk_msg_fn event_fn = NULL;
3899 
3900 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3901 
3902 	if (event_type >= SPDK_FC_EVENT_MAX) {
3903 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3904 		err = -EINVAL;
3905 		goto done;
3906 	}
3907 
3908 	if (args == NULL) {
3909 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3910 		err = -EINVAL;
3911 		goto done;
3912 	}
3913 
3914 	api_data = calloc(1, sizeof(*api_data));
3915 
3916 	if (api_data == NULL) {
3917 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3918 		err = -ENOMEM;
3919 		goto done;
3920 	}
3921 
3922 	api_data->api_args = args;
3923 	api_data->cb_func = cb_func;
3924 
3925 	switch (event_type) {
3926 	case SPDK_FC_HW_PORT_INIT:
3927 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3928 		break;
3929 
3930 	case SPDK_FC_HW_PORT_FREE:
3931 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3932 		break;
3933 
3934 	case SPDK_FC_HW_PORT_ONLINE:
3935 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3936 		break;
3937 
3938 	case SPDK_FC_HW_PORT_OFFLINE:
3939 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3940 		break;
3941 
3942 	case SPDK_FC_NPORT_CREATE:
3943 		event_fn = nvmf_fc_adm_evnt_nport_create;
3944 		break;
3945 
3946 	case SPDK_FC_NPORT_DELETE:
3947 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3948 		break;
3949 
3950 	case SPDK_FC_IT_ADD:
3951 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3952 		break;
3953 
3954 	case SPDK_FC_IT_DELETE:
3955 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3956 		break;
3957 
3958 	case SPDK_FC_ABTS_RECV:
3959 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3960 		break;
3961 
3962 	case SPDK_FC_HW_PORT_RESET:
3963 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3964 		break;
3965 
3966 	case SPDK_FC_UNRECOVERABLE_ERR:
3967 	default:
3968 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3969 		err = -EINVAL;
3970 		break;
3971 	}
3972 
3973 done:
3974 
3975 	if (err == 0) {
3976 		assert(event_fn != NULL);
3977 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3978 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3979 	} else {
3980 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3981 		if (api_data) {
3982 			free(api_data);
3983 		}
3984 	}
3985 
3986 	return err;
3987 }
3988 
3989 SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
3990 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
3991 SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3992