xref: /spdk/lib/nvmf/fc.c (revision d73077b84a71985da1db1c9847ea7c042189bae2)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "nvmf_fc.h"
50 #include "fc_lld.h"
51 
/* Development-time verification hook; defaults to assert() unless the build
 * provides its own definition.
 */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/* Verify the caller is running on the FC master (admin) thread. */
#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_master_thread());
#endif
60 
/*
 * PRLI service parameters.
 *
 * Bit flags advertised in the PRLI service-parameter exchange; exact wire
 * semantics come from the FC-NVMe specification (not visible in this file).
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
71 
/* Printable names for each FC request state.
 * NOTE: order must match enum spdk_nvmf_fc_request_state (declared in
 * nvmf_fc.h) since entries are indexed by state value.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
89 
/* Trace object identifier for FC I/O requests and the per-state tracepoint
 * IDs used by nvmf_fc_record_req_trace_point() below.
 */
#define OBJECT_NVMF_FC_IO				0xA0

#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
108 
109 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
110 {
111 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
112 	spdk_trace_register_description("FC_REQ_NEW",
113 					TRACE_FC_REQ_INIT,
114 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
115 	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
116 					TRACE_FC_REQ_READ_BDEV,
117 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
118 	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
119 					TRACE_FC_REQ_READ_XFER,
120 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
121 	spdk_trace_register_description("FC_REQ_READ_RSP",
122 					TRACE_FC_REQ_READ_RSP,
123 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
124 	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
125 					TRACE_FC_REQ_WRITE_BUFFS,
126 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
127 	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
128 					TRACE_FC_REQ_WRITE_XFER,
129 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
130 	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
131 					TRACE_FC_REQ_WRITE_BDEV,
132 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
133 	spdk_trace_register_description("FC_REQ_WRITE_RSP",
134 					TRACE_FC_REQ_WRITE_RSP,
135 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
136 	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
137 					TRACE_FC_REQ_NONE_BDEV,
138 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
139 	spdk_trace_register_description("FC_REQ_NONE_RSP",
140 					TRACE_FC_REQ_NONE_RSP,
141 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
142 	spdk_trace_register_description("FC_REQ_SUCCESS",
143 					TRACE_FC_REQ_SUCCESS,
144 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
145 	spdk_trace_register_description("FC_REQ_FAILED",
146 					TRACE_FC_REQ_FAILED,
147 					OWNER_NONE, OBJECT_NONE, 0, 0, "");
148 	spdk_trace_register_description("FC_REQ_ABORTED",
149 					TRACE_FC_REQ_ABORTED,
150 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
151 	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
152 					TRACE_FC_REQ_BDEV_ABORTED,
153 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
154 	spdk_trace_register_description("FC_REQ_PENDING",
155 					TRACE_FC_REQ_PENDING,
156 					OWNER_NONE, OBJECT_NONE, 0, 1, "");
157 }
158 
/**
 * The structure used by all fc adm functions: carries the API-specific
 * argument payload plus the completion callback to invoke.
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;		/* API-specific argument structure */
	spdk_nvmf_fc_callback cb_func;	/* completion callback */
};
166 
/**
 * The callback structure for nport-delete: preserves the nport, its parent
 * port handle, and the caller's callback/context across the async delete.
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport being deleted */
	uint8_t port_handle;			/* parent FC port handle */
	spdk_nvmf_fc_callback fc_cb_func;	/* caller's completion callback */
	void *fc_cb_ctx;			/* opaque context for fc_cb_func */
};
176 
/**
 * The callback structure for it-delete (initiator-target nexus delete).
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote port of the nexus */
	uint8_t port_handle;				/* parent FC port handle */
	spdk_nvmf_fc_callback fc_cb_func;		/* caller's completion callback */
	void *fc_cb_ctx;				/* opaque context for fc_cb_func */
};
187 

/* Completion callback type for it-delete-assoc; err is 0 on success. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote port of the nexus */
	uint8_t port_handle;				/* parent FC port handle */
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;	/* completion callback */
	void *cb_ctx;					/* opaque context for cb_func */
};
201 
/*
 * Call back function pointer for HW port quiesce; err is 0 on success.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;	/* outstanding per-queue quiesce operations */
	void *ctx;		/* opaque context passed to cb_func */
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;	/* invoked when all queues quiesced */
};
215 
/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;			/* reset-specific arguments */
	spdk_nvmf_fc_callback reset_cb_func;	/* invoked when the reset completes */
};
223 
/* FC transport object: embeds the generic nvmf transport plus a lock. */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;	/* generic transport (must be first) */
	pthread_mutex_t lock;			/* protects transport-wide state */
};
228 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* All FC ports known to the transport. */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which FC administrative operations run (see
 * nvmf_fc_get_master_thread()). */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* FC poll groups and their count; hwqps are distributed across these. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
239 
/* Accessor for the FC master (administration) thread. */
struct spdk_thread *
nvmf_fc_get_master_thread(void)
{
	return g_nvmf_fc_master_thread;
}
245 
246 static inline void
247 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
248 			       enum spdk_nvmf_fc_request_state state)
249 {
250 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
251 
252 	switch (state) {
253 	case SPDK_NVMF_FC_REQ_INIT:
254 		/* Start IO tracing */
255 		tpoint_id = TRACE_FC_REQ_INIT;
256 		break;
257 	case SPDK_NVMF_FC_REQ_READ_BDEV:
258 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
259 		break;
260 	case SPDK_NVMF_FC_REQ_READ_XFER:
261 		tpoint_id = TRACE_FC_REQ_READ_XFER;
262 		break;
263 	case SPDK_NVMF_FC_REQ_READ_RSP:
264 		tpoint_id = TRACE_FC_REQ_READ_RSP;
265 		break;
266 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
267 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
268 		break;
269 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
270 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
271 		break;
272 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
273 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
274 		break;
275 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
276 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
277 		break;
278 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
279 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
280 		break;
281 	case SPDK_NVMF_FC_REQ_NONE_RSP:
282 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
283 		break;
284 	case SPDK_NVMF_FC_REQ_SUCCESS:
285 		tpoint_id = TRACE_FC_REQ_SUCCESS;
286 		break;
287 	case SPDK_NVMF_FC_REQ_FAILED:
288 		tpoint_id = TRACE_FC_REQ_FAILED;
289 		break;
290 	case SPDK_NVMF_FC_REQ_ABORTED:
291 		tpoint_id = TRACE_FC_REQ_ABORTED;
292 		break;
293 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
294 		tpoint_id = TRACE_FC_REQ_ABORTED;
295 		break;
296 	case SPDK_NVMF_FC_REQ_PENDING:
297 		tpoint_id = TRACE_FC_REQ_PENDING;
298 		break;
299 	default:
300 		assert(0);
301 		break;
302 	}
303 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
304 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
305 				  (uint64_t)(&fc_req->req), 0);
306 	}
307 }
308 
309 static void
310 nvmf_fc_handle_connection_failure(void *arg)
311 {
312 	struct spdk_nvmf_fc_conn *fc_conn = arg;
313 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
314 
315 	if (!fc_conn->create_opd) {
316 		return;
317 	}
318 	api_data = &fc_conn->create_opd->u.add_conn;
319 
320 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
321 				    api_data->args.fc_conn, api_data->aq_conn);
322 }
323 
324 static void
325 nvmf_fc_handle_assoc_deletion(void *arg)
326 {
327 	struct spdk_nvmf_fc_conn *fc_conn = arg;
328 
329 	nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
330 				   fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
331 }
332 
333 static int
334 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
335 {
336 	uint32_t i;
337 	struct spdk_nvmf_fc_request *fc_req;
338 
339 	TAILQ_INIT(&hwqp->free_reqs);
340 	TAILQ_INIT(&hwqp->in_use_reqs);
341 
342 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
343 	if (hwqp->fc_reqs_buf == NULL) {
344 		SPDK_ERRLOG("create fc request pool failed\n");
345 		return -ENOMEM;
346 	}
347 
348 	for (i = 0; i < hwqp->rq_size; i++) {
349 		fc_req = hwqp->fc_reqs_buf + i;
350 
351 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
352 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
353 	}
354 
355 	return 0;
356 }
357 
358 static inline struct spdk_nvmf_fc_request *
359 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
360 {
361 	struct spdk_nvmf_fc_request *fc_req;
362 
363 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
364 		SPDK_ERRLOG("Alloc request buffer failed\n");
365 		return NULL;
366 	}
367 
368 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
369 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
370 
371 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
372 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
373 	TAILQ_INIT(&fc_req->abort_cbs);
374 	return fc_req;
375 }
376 
377 static inline void
378 nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
379 {
380 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
381 		/* Log an error for debug purpose. */
382 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
383 	}
384 
385 	/* set the magic to mark req as no longer valid. */
386 	fc_req->magic = 0xDEADBEEF;
387 
388 	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
389 	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
390 }
391 
392 static inline bool
393 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
394 {
395 	switch (fc_req->state) {
396 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
397 		return true;
398 	default:
399 		return false;
400 	}
401 }
402 
/* Initialize the receive-queue buffers for a hwqp (thin LLD wrapper). */
void
nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	nvmf_fc_init_rqpair_buffers(hwqp);
}
408 
409 struct spdk_nvmf_fc_conn *
410 nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
411 {
412 	struct spdk_nvmf_fc_conn *fc_conn;
413 
414 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
415 		if (fc_conn->conn_id == conn_id) {
416 			return fc_conn;
417 		}
418 	}
419 
420 	return NULL;
421 }
422 
/*
 * Re-initialize a hwqp's queues after a port reset.
 *
 * Drops every pending queue-sync callback; when the last hwqp of an ABTS
 * context responds this way, the context and its poller-arg arrays are
 * freed here since the normal completion path will never run.
 */
void
nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			/* Last outstanding hwqp for this ctx: release everything. */
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}
444 
/*
 * One-time initialization of a hardware queue pair: binds it to its port,
 * clears error counters, sets up queues/lists, and (for IO queues only)
 * allocates the request pool.
 */
void
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	nvmf_fc_init_poller_queues(hwqp);
	/* The LS queue does not carry IO requests, so it needs no req pool. */
	if (&fc_port->ls_queue != hwqp) {
		/* NOTE(review): return value is ignored; on -ENOMEM the hwqp
		 * ends up with no request pool — confirm callers tolerate this. */
		nvmf_fc_create_req_mempool(hwqp);
	}

	nvmf_fc_init_q(hwqp);
	TAILQ_INIT(&hwqp->connection_list);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);
}
463 
464 static struct spdk_nvmf_fc_poll_group *
465 nvmf_fc_get_idlest_poll_group(void)
466 {
467 	uint32_t max_count = UINT32_MAX;
468 	struct spdk_nvmf_fc_poll_group *fgroup;
469 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
470 
471 	/* find poll group with least number of hwqp's assigned to it */
472 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
473 		if (fgroup->hwqp_count < max_count) {
474 			ret_fgroup = fgroup;
475 			max_count = fgroup->hwqp_count;
476 		}
477 	}
478 
479 	return ret_fgroup;
480 }
481 
482 void
483 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
484 {
485 	struct spdk_nvmf_fc_poll_group *fgroup = NULL;
486 
487 	assert(hwqp);
488 	if (hwqp == NULL) {
489 		SPDK_ERRLOG("Error: hwqp is NULL\n");
490 		return;
491 	}
492 
493 	assert(g_nvmf_fgroup_count);
494 
495 	fgroup = nvmf_fc_get_idlest_poll_group();
496 	if (!fgroup) {
497 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
498 		return;
499 	}
500 
501 	hwqp->thread = fgroup->group.group->thread;
502 	hwqp->fgroup = fgroup;
503 	fgroup->hwqp_count++;
504 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
505 }
506 
507 void
508 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
509 {
510 	assert(hwqp);
511 
512 	SPDK_DEBUGLOG(nvmf_fc,
513 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
514 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
515 
516 	if (!hwqp->fgroup) {
517 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
518 	} else {
519 		hwqp->fgroup->hwqp_count--;
520 		nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
521 	}
522 }
523 
/*
 * Generate a unique ID for an ABTS exchange. Uses an unsynchronized static
 * counter, so this must only be called on the master poller.
 * First call returns 1; the counter wraps with its 32-bit width.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t next_id = 0;

	next_id++;
	return (uint64_t)next_id;
}
534 
/*
 * Poller-API callback invoked as each hwqp completes a queue sync for an
 * ABTS context. When the last hwqp responds, the sync args are freed, the
 * context is reset, and the ABTS is re-sent to all interested pollers for
 * one more lookup attempt.
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}
571 
/*
 * Handle the case where no poller found the exchange named by an ABTS.
 *
 * Issues a queue-sync to every hwqp that might hold the exchange so that
 * in-flight work drains, then posts a sync marker on the LS queue. When
 * all hwqps respond, nvmf_fc_queue_synced_cb() re-sends the ABTS.
 *
 * Returns 0 on success, -EPERM if the LLD lacks queue-sync support,
 * -EINVAL for a NULL ctx, -ENOMEM on allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Build one queue-sync arg per hwqp, mirroring the ABTS poller args. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
626 
627 static void
628 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
629 {
630 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
631 	struct spdk_nvmf_fc_nport *nport  = NULL;
632 
633 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
634 		ctx->handled = true;
635 	}
636 
637 	ctx->hwqps_responded++;
638 
639 	if (ctx->hwqps_responded < ctx->num_hwqps) {
640 		/* Wait for all pollers to complete. */
641 		return;
642 	}
643 
644 	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
645 
646 	if (ctx->nport != nport) {
647 		/* Nport can be deleted while this abort is being
648 		 * processed by the pollers.
649 		 */
650 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
651 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
652 	} else {
653 		if (!ctx->handled) {
654 			/* Try syncing the queues and try one more time */
655 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
656 				SPDK_DEBUGLOG(nvmf_fc,
657 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
658 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
659 				return;
660 			} else {
661 				/* Send Reject */
662 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
663 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
664 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
665 			}
666 		} else {
667 			/* Send Accept */
668 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
669 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
670 					    0, NULL, NULL);
671 		}
672 	}
673 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
674 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
675 
676 	free(ctx->abts_poller_args);
677 	free(ctx);
678 }
679 
/*
 * Entry point for a received ABTS frame.
 *
 * Collects the distinct set of hwqps that carry connections for the
 * aborting initiator (matched by rpi), builds an ABTS context plus one
 * poller arg per hwqp, and fans the ABTS out to those pollers; the
 * responses are gathered in nvmf_fc_abts_handled_cb(). If no connection
 * matches, or any allocation fails, a BLS reject is sent immediately.
 */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate: record each hwqp hosting a connection with this rpi once. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	/* No connection matched the rpi: nothing to abort, reject. */
	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	/* Fan the ABTS out to every hwqp recorded above. */
	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
780 
781 /*** Accessor functions for the FC structures - BEGIN */
782 /*
783  * Returns true if the port is in offline state.
784  */
785 bool
786 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
787 {
788 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
789 		return true;
790 	}
791 
792 	return false;
793 }
794 
795 /*
796  * Returns true if the port is in online state.
797  */
798 bool
799 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
800 {
801 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
802 		return true;
803 	}
804 
805 	return false;
806 }
807 
808 int
809 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
810 {
811 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
812 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
813 		return 0;
814 	}
815 
816 	return -EPERM;
817 }
818 
819 int
820 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
821 {
822 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
823 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
824 		return 0;
825 	}
826 
827 	return -EPERM;
828 }
829 
830 int
831 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
832 {
833 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
834 		hwqp->state = SPDK_FC_HWQP_ONLINE;
835 		/* reset some queue counters */
836 		hwqp->num_conns = 0;
837 		return nvmf_fc_set_q_online_state(hwqp, true);
838 	}
839 
840 	return -EPERM;
841 }
842 
843 int
844 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
845 {
846 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
847 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
848 		return nvmf_fc_set_q_online_state(hwqp, false);
849 	}
850 
851 	return -EPERM;
852 }
853 
/* Register a new FC port on the global port list. */
void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}
859 
860 struct spdk_nvmf_fc_port *
861 nvmf_fc_port_lookup(uint8_t port_hdl)
862 {
863 	struct spdk_nvmf_fc_port *fc_port = NULL;
864 
865 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
866 		if (fc_port->port_hdl == port_hdl) {
867 			return fc_port;
868 		}
869 	}
870 	return NULL;
871 }
872 
873 static void
874 nvmf_fc_port_cleanup(void)
875 {
876 	struct spdk_nvmf_fc_port *fc_port, *tmp;
877 	struct spdk_nvmf_fc_hwqp *hwqp;
878 	uint32_t i;
879 
880 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
881 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
882 		for (i = 0; i < fc_port->num_io_queues; i++) {
883 			hwqp = &fc_port->io_queues[i];
884 			if (hwqp->fc_reqs_buf) {
885 				free(hwqp->fc_reqs_buf);
886 			}
887 		}
888 		free(fc_port);
889 	}
890 }
891 
/* PRLI service parameters advertised by this target (discovery + target). */
uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}
897 
898 int
899 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
900 		       struct spdk_nvmf_fc_nport *nport)
901 {
902 	if (fc_port) {
903 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
904 		fc_port->num_nports++;
905 		return 0;
906 	}
907 
908 	return -EINVAL;
909 }
910 
911 int
912 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
913 			  struct spdk_nvmf_fc_nport *nport)
914 {
915 	if (fc_port && nport) {
916 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
917 		fc_port->num_nports--;
918 		return 0;
919 	}
920 
921 	return -EINVAL;
922 }
923 
924 static struct spdk_nvmf_fc_nport *
925 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
926 {
927 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
928 
929 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
930 		if (fc_nport->nport_hdl == nport_hdl) {
931 			return fc_nport;
932 		}
933 	}
934 
935 	return NULL;
936 }
937 
/*
 * Resolve (port handle, nport handle) to an nport.
 * Returns NULL if either lookup fails.
 */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	if (fc_port == NULL) {
		return NULL;
	}

	return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
}
950 
/*
 * Resolve a (d_id, s_id) address pair from a received frame into the local
 * nport and remote port it belongs to, searching the hwqp's parent port.
 *
 * On success *nport and *rport are set and 0 is returned. Returns -EINVAL
 * for NULL arguments, -ENOENT when no match exists. Note the search stops
 * at the first nport matching d_id (d_id is assumed unique per port).
 */
static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}
990 
991 /* Returns true if the Nport is empty of all rem_ports */
992 bool
993 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
994 {
995 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
996 		assert(nport->rport_count == 0);
997 		return true;
998 	} else {
999 		return false;
1000 	}
1001 }
1002 
1003 int
1004 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1005 			enum spdk_nvmf_fc_object_state state)
1006 {
1007 	if (nport) {
1008 		nport->nport_state = state;
1009 		return 0;
1010 	} else {
1011 		return -EINVAL;
1012 	}
1013 }
1014 
/* Attach a remote port to an nport.
 * NOTE(review): declared to return bool but actually returns 0 / -EINVAL
 * (both -EINVAL and any nonzero convert to true) — callers should treat
 * this as an int-style status; consider changing the return type in the
 * header together with all callers.
 */
bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1027 
/* Detach a remote port from an nport.
 * NOTE(review): same bool-vs-int mismatch as nvmf_fc_nport_add_rem_port —
 * returns 0 / -EINVAL despite the bool return type.
 */
bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1040 
1041 int
1042 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1043 			enum spdk_nvmf_fc_object_state state)
1044 {
1045 	if (rport) {
1046 		rport->rport_state = state;
1047 		return 0;
1048 	} else {
1049 		return -EINVAL;
1050 	}
1051 }
1052 int
1053 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1054 			enum spdk_nvmf_fc_object_state state)
1055 {
1056 	if (assoc) {
1057 		assoc->assoc_state = state;
1058 		return 0;
1059 	} else {
1060 		return -EINVAL;
1061 	}
1062 }
1063 
1064 static struct spdk_nvmf_fc_association *
1065 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1066 {
1067 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1068 	struct spdk_nvmf_fc_conn *fc_conn;
1069 
1070 	if (!qpair) {
1071 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1072 		return NULL;
1073 	}
1074 
1075 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1076 
1077 	return fc_conn->fc_assoc;
1078 }
1079 
1080 bool
1081 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1082 		       struct spdk_nvmf_ctrlr *ctrlr)
1083 {
1084 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1085 	struct spdk_nvmf_fc_association *assoc = NULL;
1086 
1087 	if (!ctrlr) {
1088 		return false;
1089 	}
1090 
1091 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1092 	if (!fc_nport) {
1093 		return false;
1094 	}
1095 
1096 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1097 	if (assoc && assoc->tgtport == fc_nport) {
1098 		SPDK_DEBUGLOG(nvmf_fc,
1099 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1100 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1101 			      nport_hdl);
1102 		return true;
1103 	}
1104 	return false;
1105 }
1106 
1107 static inline bool
1108 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1109 {
1110 	switch (fc_req->state) {
1111 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1112 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1113 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1114 		return true;
1115 	default:
1116 		return false;
1117 	}
1118 }
1119 
1120 static inline bool
1121 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1122 {
1123 	struct spdk_nvmf_request *tmp = NULL;
1124 
1125 	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
1126 		if (tmp == &fc_req->req) {
1127 			return true;
1128 		}
1129 	}
1130 	return false;
1131 }
1132 
/*
 * Poller-thread handler for aborting a request that is in the bdev layer.
 * The only command actively cancelled here is an outstanding AER; the
 * comments below explain why other Admin/Fabric commands are simply
 * allowed to run to completion.
 */
static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	/* NOTE(review): nvmf_qpair_free_aer() may be invoked more than once
	 * if this request occupies multiple aer_req slots - presumably it is
	 * safe/idempotent; confirm against its definition. */
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}
1164 
1165 void
1166 nvmf_fc_request_abort_complete(void *arg1)
1167 {
1168 	struct spdk_nvmf_fc_request *fc_req =
1169 		(struct spdk_nvmf_fc_request *)arg1;
1170 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1171 
1172 	/* Request abort completed. Notify all the callbacks */
1173 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1174 		/* Notify */
1175 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1176 		/* Remove */
1177 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1178 		/* free */
1179 		free(ctx);
1180 	}
1181 
1182 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1183 		       fc_req_state_strs[fc_req->state]);
1184 
1185 	_nvmf_fc_request_free(fc_req);
1186 }
1187 
/*
 * Abort an outstanding FC request.
 *
 * If 'cb' is non-NULL it is queued on the request's abort_cbs list and is
 * invoked with 'cb_args' once the abort completes (see
 * nvmf_fc_request_abort_complete). 'send_abts' records whether an ABTS
 * should be sent for the request's exchange.
 *
 * The request can be in several places (bdev layer, data transfer, buffer
 * allocation, pending queue); each needs a different abort path, chosen in
 * the if/else ladder below.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		/* An earlier abort is in flight; the cb queued above (if any)
		 * fires when that abort completes. */
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(nvmf_fc, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1264 
1265 static int
1266 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1267 {
1268 	uint32_t length = fc_req->req.length;
1269 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1270 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1271 	struct spdk_nvmf_transport *transport = group->transport;
1272 
1273 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1274 		return -ENOMEM;
1275 	}
1276 
1277 	return 0;
1278 }
1279 
1280 static int
1281 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1282 {
1283 	/* Allocate an XCHG if we dont use send frame for this command. */
1284 	if (!nvmf_fc_use_send_frame(&fc_req->req)) {
1285 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1286 		if (!fc_req->xchg) {
1287 			fc_req->hwqp->counters.no_xchg++;
1288 			printf("NO XCHGs!\n");
1289 			goto pending;
1290 		}
1291 	}
1292 
1293 	if (fc_req->req.length) {
1294 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1295 			fc_req->hwqp->counters.buf_alloc_err++;
1296 			goto pending;
1297 		}
1298 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1299 	}
1300 
1301 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1302 		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");
1303 
1304 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1305 
1306 		if (nvmf_fc_recv_data(fc_req)) {
1307 			/* Dropped return success to caller */
1308 			fc_req->hwqp->counters.unexpected_err++;
1309 			_nvmf_fc_request_free(fc_req);
1310 		}
1311 	} else {
1312 		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");
1313 
1314 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1315 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1316 		} else {
1317 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1318 		}
1319 		spdk_nvmf_request_exec(&fc_req->req);
1320 	}
1321 
1322 	return 0;
1323 
1324 pending:
1325 	if (fc_req->xchg) {
1326 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1327 		fc_req->xchg = NULL;
1328 	}
1329 
1330 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1331 
1332 	return -EAGAIN;
1333 }
1334 
/*
 * Parse and validate an NVMe command frame received on an hwqp, build an
 * spdk_nvmf_fc_request from it, and start execution. If execution cannot
 * start due to resource shortage, the request is queued on the poll
 * group's pending_buf_queue for a later retry.
 *
 * Returns 0 on success (including successful queueing), negative errno
 * when the frame is rejected.
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are rejected. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	/* ox_id, s_id and d_id arrive big-endian in the frame header; they are
	 * copied into struct fields first because from_be16/32 take addresses. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		/* Out of XCHGs or buffers; retried from the pending queue later. */
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}
1425 
1426 /*
1427  * These functions are called from the FC LLD
1428  */
1429 
1430 void
1431 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1432 {
1433 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1434 	struct spdk_nvmf_transport_poll_group *group;
1435 
1436 	if (!fc_req) {
1437 		return;
1438 	}
1439 
1440 	if (fc_req->xchg) {
1441 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1442 		fc_req->xchg = NULL;
1443 	}
1444 
1445 	/* Release IO buffers */
1446 	if (fc_req->req.data_from_pool) {
1447 		group = &hwqp->fgroup->group;
1448 		spdk_nvmf_request_free_buffers(&fc_req->req, group,
1449 					       group->transport);
1450 	}
1451 	fc_req->req.data = NULL;
1452 	fc_req->req.iovcnt  = 0;
1453 
1454 	/* Release Q buffer */
1455 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1456 
1457 	/* Free Fc request */
1458 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1459 }
1460 
/*
 * Transition an FC request to a new state, logging the old/new pair and
 * recording a trace point. The trace point is recorded before the state
 * field is updated.
 */
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	/* Guard against touching a request that was already freed/poisoned. */
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1474 
1475 char *
1476 nvmf_fc_request_get_state_str(int state)
1477 {
1478 	static char *unk_str = "unknown";
1479 
1480 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1481 		fc_req_state_strs[state] : unk_str);
1482 }
1483 
/*
 * Entry point for a received FC frame on an hwqp. Validates the
 * nport/rport pair for the frame's (s_id, d_id), then dispatches:
 *   - LS request frames to the LS module (or the ls_pending_queue when no
 *     exchange is available),
 *   - NVMe command frames to nvmf_fc_hwqp_handle_request(),
 *   - anything else is counted and dropped.
 *
 * Returns 0 on success, negative errno when the frame is dropped.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* s_id/d_id are 24-bit big-endian values in the header. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	/* Both ports must be fully created before any traffic is accepted. */
	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1588 
/*
 * Retry requests parked on the poll group's pending-buffer queue (they
 * were short of an XCHG or data buffers when first seen). Processes at
 * most a fixed budget of entries per call to bound poller latency.
 */
void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* LS queue is tied to acceptor_poll group and LS pending requests
		 * are stagged and processed using hwqp->ls_pending_queue.
		 */
		return;
	}

	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
		if (!nvmf_fc_request_execute(fc_req)) {
			/* Successfully posted, delete from pending. */
			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
		}

		if (budget) {
			budget--;
		} else {
			return;
		}
	}
}
1617 
/*
 * Retry LS requests queued on the hwqp's ls_pending_queue (they were
 * waiting for an exchange). Each entry's nport/rport is re-validated
 * before dispatch, since ports may have gone away while the request was
 * pending; invalid entries are dropped and their RQ buffers returned.
 * Stops early once no more exchanges are available.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1667 
/*
 * Transmit the completion for an FC request. Sends an extended response
 * (ERSP) when nvmf_fc_send_ersp_required() says so, otherwise posts a
 * minimal response. Returns the result of the xmt call (0 on success).
 */
int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transfered_len)) {
		/* Fill ERSP Len (in units of 32-bit words, big-endian) */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1708 
/*
 * Decide whether an extended response (ERSP) must be sent instead of a
 * minimal response; see the numbered criteria in the comment below.
 */
bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	/* Raw 16-bit view of the completion status field; the low bit is
	 * excluded by the 0xFFFE mask in the test below. */
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1737 
/*
 * Transport completion callback invoked by the generic NVMf layer when a
 * request finishes. Continues a controller-to-host data transfer for
 * successful reads, posts the response otherwise, or defers cleanup to the
 * poller when the request was aborted. Always returns 0; on a transmit
 * failure the request is freed locally.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
		/* Successful read: send the data before the response. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1773 
1774 struct spdk_nvmf_tgt *
1775 nvmf_fc_get_tgt(void)
1776 {
1777 	if (g_nvmf_ftransport) {
1778 		return g_nvmf_ftransport->transport.tgt;
1779 	}
1780 	return NULL;
1781 }
1782 
1783 /*
1784  * FC Transport Public API begins here
1785  */
1786 
1787 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1788 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1789 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1790 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1791 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1792 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1793 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1794 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1795 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1796 
1797 static void
1798 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1799 {
1800 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1801 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1802 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1803 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1804 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1805 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1806 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1807 }
1808 
/*
 * Create the (singleton) FC transport: validate options, allocate the
 * global transport object, record the master thread, and initialize the
 * low level FC driver. Returns the embedded spdk_nvmf_transport on
 * success, NULL on any failure (duplicate create, too few cores,
 * unsupported io_unit_size, allocation or mutex-init failure).
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only a single FC transport instance is supported. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* io_unit_size must be large enough that max_io_size fits in the
	 * default SGE budget. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
1862 
1863 static int
1864 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1865 {
1866 	if (transport) {
1867 		struct spdk_nvmf_fc_transport *ftransport;
1868 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
1869 
1870 		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1871 
1872 		free(ftransport);
1873 
1874 		/* clean up any FC poll groups still around */
1875 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
1876 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1877 			free(fgroup);
1878 		}
1879 		g_nvmf_fgroup_count = 0;
1880 
1881 		/* low level FC driver clean up */
1882 		nvmf_fc_lld_fini();
1883 
1884 		nvmf_fc_port_cleanup();
1885 	}
1886 
1887 	return 0;
1888 }
1889 
/*
 * Transport listen callback. Intentionally a no-op that reports success;
 * FC listeners are not set up through this path in this implementation.
 */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}
1896 
/* Transport stop-listen callback. Intentionally a no-op (see nvmf_fc_listen). */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
1902 
/*
 * Transport accept poll. On the very first invocation it starts the low
 * level FC driver (one-shot, tracked by a function-static flag), then
 * polls the LS queue of every online FC port. Returns the number of
 * events processed across all ports.
 */
static uint32_t
nvmf_fc_accept(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	uint32_t count = 0;
	/* One-shot latch: nvmf_fc_lld_start() must run exactly once, from the
	 * first accept poll. */
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld  = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			count += nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}

	return count;
}
1924 
/*
 * Fill in a discovery log page entry for an FC listener: transport type,
 * address family, security requirement, and space-padded trsvcid/traddr
 * copied from the transport ID.
 */
static void
nvmf_fc_discover(struct spdk_nvmf_transport *transport,
		 struct spdk_nvme_transport_id *trid,
		 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
	entry->adrfam = trid->adrfam;
	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;

	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
}
1937 
1938 static struct spdk_nvmf_transport_poll_group *
1939 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
1940 {
1941 	struct spdk_nvmf_fc_poll_group *fgroup;
1942 	struct spdk_nvmf_fc_transport *ftransport =
1943 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1944 
1945 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
1946 	if (!fgroup) {
1947 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
1948 		return NULL;
1949 	}
1950 
1951 	TAILQ_INIT(&fgroup->hwqp_list);
1952 
1953 	pthread_mutex_lock(&ftransport->lock);
1954 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
1955 	g_nvmf_fgroup_count++;
1956 	pthread_mutex_unlock(&ftransport->lock);
1957 
1958 	return &fgroup->group;
1959 }
1960 
1961 static void
1962 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1963 {
1964 	struct spdk_nvmf_fc_poll_group *fgroup;
1965 	struct spdk_nvmf_fc_transport *ftransport =
1966 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
1967 
1968 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
1969 	pthread_mutex_lock(&ftransport->lock);
1970 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1971 	g_nvmf_fgroup_count--;
1972 	pthread_mutex_unlock(&ftransport->lock);
1973 
1974 	free(fgroup);
1975 }
1976 
/*
 * Add a new qpair (FC connection) to a poll group: pick the hwqp in this
 * group that belongs to the connection's FC port, obtain a connection id
 * on it, and dispatch an ADD_CONNECTION poller API call.
 * Returns 0 on success, -1 when no matching hwqp or no conn id is
 * available.
 */
static int
nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
		       struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_conn *fc_conn;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
	bool hwqp_found = false;

	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	/* Find a hwqp in this group that lives on the connection's FC port. */
	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
			hwqp_found = true;
			break;
		}
	}

	if (!hwqp_found) {
		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
		goto err;
	}

	/* NOTE(review): nvmf_fc_assign_conn_to_hwqp() is treated here as
	 * returning non-zero/true on success - confirm against its definition. */
	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
					 &fc_conn->conn_id,
					 fc_conn->max_queue_depth)) {
		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
		goto err;
	}

	fc_conn->hwqp = hwqp;

	/* If this is for ADMIN connection, then update assoc ID. */
	if (fc_conn->qpair.qid == 0) {
		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
	}

	api_data = &fc_conn->create_opd->u.add_conn;
	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
	return 0;
err:
	return -1;
}
2022 
2023 static int
2024 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2025 {
2026 	uint32_t count = 0;
2027 	struct spdk_nvmf_fc_poll_group *fgroup;
2028 	struct spdk_nvmf_fc_hwqp *hwqp;
2029 
2030 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2031 
2032 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2033 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2034 			count += nvmf_fc_process_queue(hwqp);
2035 		}
2036 	}
2037 
2038 	return (int) count;
2039 }
2040 
2041 static int
2042 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2043 {
2044 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2045 
2046 	if (!fc_req->is_aborted) {
2047 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2048 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2049 	} else {
2050 		nvmf_fc_request_abort_complete(fc_req);
2051 	}
2052 	return 0;
2053 }
2054 
2055 
/* Transport hook invoked when a qpair is being closed. Failure/teardown
 * handling is deferred to the FC master thread via message passing.
 */
static void
nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		/* QP creation failure in FC transport. Cleanup. */
		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_connection_failure, fc_conn);
	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/* Admin connection (assoc_id == conn_id): closing it tears down the
		 * whole association, unless a deletion is already in progress.
		 */
		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_assoc_deletion, fc_conn);
	}
}
2074 
2075 static int
2076 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2077 			    struct spdk_nvme_transport_id *trid)
2078 {
2079 	struct spdk_nvmf_fc_conn *fc_conn;
2080 
2081 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2082 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2083 	return 0;
2084 }
2085 
2086 static int
2087 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2088 			     struct spdk_nvme_transport_id *trid)
2089 {
2090 	struct spdk_nvmf_fc_conn *fc_conn;
2091 
2092 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2093 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2094 	return 0;
2095 }
2096 
2097 static int
2098 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2099 			      struct spdk_nvme_transport_id *trid)
2100 {
2101 	struct spdk_nvmf_fc_conn *fc_conn;
2102 
2103 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2104 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2105 	return 0;
2106 }
2107 
/* Transport hook to abort an outstanding request: FC completes the abort
 * request immediately (no transport-level abort is attempted here).
 */
static void
nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
			    struct spdk_nvmf_request *req)
{
	spdk_nvmf_request_complete(req);
}
2114 
/* FC transport ops table handed to the generic NVMe-oF layer.
 * SPDK_NVMF_TRTYPE_FC is outside the standard trtype enum range,
 * hence the explicit cast.
 */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
	.qpair_abort_request = nvmf_fc_qpair_abort_request,
};
2141 
2142 /* Initializes the data for the creation of a FC-Port object in the SPDK
2143  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2144  * the API to the library. The contents added to this well defined structure
2145  * is private to each vendors implementation.
2146  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	/* Port identity and initial (offline) state come straight from args. */
	fc_port->port_hdl       = args->port_handle;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 * The LS queue always runs on the FC master thread.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	fc_port->ls_queue.thread = nvmf_fc_get_master_thread();
	/* See note above: pick an ID beyond any possible IO hwqp_id. */
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;

	/*
	 * Initialize the LS queue.
	 */
	nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);

	/*
	 * Initialize the IO queues.
	 * IO hwqp IDs are simply their index in the io_queues array.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->rq_size = args->io_queue_size;
		nvmf_fc_init_hwqp(fc_port, hwqp);
	}

	/*
	 * Initialize the LS processing for port
	 */
	nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	/* Always succeeds; callers still check the return for future-proofing. */
	return 0;
}
2202 
2203 /*
2204  * FC port must have all its nports deleted before transitioning to offline state.
2205  */
2206 static void
2207 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2208 {
2209 	struct spdk_nvmf_fc_nport *nport = NULL;
2210 	/* All nports must have been deleted at this point for this fc port */
2211 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2212 	DEV_VERIFY(fc_port->num_nports == 0);
2213 	/* Mark the nport states to be zombie, if they exist */
2214 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2215 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2216 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2217 		}
2218 	}
2219 }
2220 
/* Completion callback for an I-T (initiator-target) delete. Invokes the
 * caller-supplied callback (unless the delete failed) and frees cb_data.
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	/* Snapshot identifying fields for the log line; cb_data is freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 * On failure the caller's cb_func is intentionally skipped.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	free(cb_data);

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2263 
/* Per-association completion callback used while deleting all associations
 * of an I-T nexus. When the last association on the rport completes, the
 * rport is unlinked from the nport, the caller's callback runs, and both
 * rport and the shared cb_data (args) are freed.
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot identifying fields for the log line; rport/args may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 * On failure the counts are decremented by hand so the last-association
	 * bookkeeping below still converges to zero.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2325 
2326 /**
2327  * Process a IT delete.
2328  */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function
	 * (nvmf_fc_adm_i_t_delete_assoc_cb, once the last association is done).
	 * NOTE(review): if this allocation fails, cb_func is never invoked —
	 * confirm callers tolerate that.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 * Only associations whose source ID matches this rport are targeted.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = nvmf_fc_delete_association(nport,
							       assoc->assoc_id,
							       false /* send abts */, false,
							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2411 
/* Per-hwqp quiesce completion callback. Decrements the shared quiesce count;
 * when the last hwqp (IO queues + LS queue) reports in, marks the port
 * quiesced, invokes the port-level callback, and frees the port context.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;	/* note: never set non-zero in this function */

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the caller of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2464 
2465 static int
2466 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2467 			     spdk_nvmf_fc_poller_api_cb cb_func)
2468 {
2469 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2470 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2471 	int err = 0;
2472 
2473 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2474 
2475 	if (args == NULL) {
2476 		err = -ENOMEM;
2477 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2478 		goto done;
2479 	}
2480 	args->hwqp = fc_hwqp;
2481 	args->ctx = ctx;
2482 	args->cb_info.cb_func = cb_func;
2483 	args->cb_info.cb_data = args;
2484 	args->cb_info.cb_thread = spdk_get_thread();
2485 
2486 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2487 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2488 	if (rc) {
2489 		free(args);
2490 		err = -EINVAL;
2491 	}
2492 
2493 done:
2494 	return err;
2495 }
2496 
2497 /*
2498  * Hw port Quiesce
2499  */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 * NOTE(review): quiesce_count is incremented even when a queue's
	 * quiesce dispatch fails (no callback will ever arrive for it), and a
	 * later successful iteration overwrites a failed iteration's err.
	 * Confirm intended behavior.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if err is non-zero after some queue quiesces were
	 * already scheduled (e.g. an IO queue fails after the LS queue was
	 * dispatched), this frees port_quiesce_ctx while pending callbacks
	 * still hold a pointer to it — potential use-after-free; verify.
	 */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2572 
2573 /*
2574  * Initialize and add a HW port entry to the global
2575  * HW port list.
2576  */
2577 static void
2578 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2579 {
2580 	ASSERT_SPDK_FC_MASTER_THREAD();
2581 	struct spdk_nvmf_fc_port *fc_port = NULL;
2582 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2583 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2584 			api_data->api_args;
2585 	int err = 0;
2586 
2587 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2588 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2589 		err = EINVAL;
2590 		goto abort_port_init;
2591 	}
2592 
2593 	/*
2594 	 * 1. Check for duplicate initialization.
2595 	 */
2596 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2597 	if (fc_port != NULL) {
2598 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2599 		goto abort_port_init;
2600 	}
2601 
2602 	/*
2603 	 * 2. Get the memory to instantiate a fc port.
2604 	 */
2605 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2606 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2607 	if (fc_port == NULL) {
2608 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2609 		err = -ENOMEM;
2610 		goto abort_port_init;
2611 	}
2612 
2613 	/* assign the io_queues array */
2614 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2615 				     struct spdk_nvmf_fc_port));
2616 
2617 	/*
2618 	 * 3. Initialize the contents for the FC-port
2619 	 */
2620 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2621 
2622 	if (err != 0) {
2623 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2624 		DEV_VERIFY(!"Data initialization failed for fc_port");
2625 		goto abort_port_init;
2626 	}
2627 
2628 	/*
2629 	 * 4. Add this port to the global fc port list in the library.
2630 	 */
2631 	nvmf_fc_port_add(fc_port);
2632 
2633 abort_port_init:
2634 	if (err && fc_port) {
2635 		free(fc_port);
2636 	}
2637 	if (api_data->cb_func != NULL) {
2638 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2639 	}
2640 
2641 	free(arg);
2642 
2643 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2644 		      args->port_handle, err);
2645 }
2646 
2647 /*
2648  * Online a HW port.
2649  */
static void
nvmf_fc_adm_evnt_hw_port_online(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to online */
		err = nvmf_fc_port_set_online(fc_port);
		if (err != 0) {
			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
			DEV_VERIFY(!"Hw port online failed");
			goto out;
		}

		/* LS queue is brought online but not added to a poll group
		 * (contrast with the IO queue loop below).
		 */
		hwqp = &fc_port->ls_queue;
		hwqp->context = NULL;
		(void)nvmf_fc_hwqp_set_online(hwqp);

		/* Cycle through all the io queues and setup a hwqp poller for each. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			hwqp->context = NULL;
			(void)nvmf_fc_hwqp_set_online(hwqp);
			nvmf_fc_poll_group_add_hwqp(hwqp);
		}
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}

out:
	/* Always notify the caller and free the event argument. */
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
		      err);
}
2698 
2699 /*
2700  * Offline a HW port.
2701  */
static void
nvmf_fc_adm_evnt_hw_port_offline(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
			api_data->api_args;
	int i = 0;
	int err = 0;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port) {
		/* Set the port state to offline, if it is not already. */
		err = nvmf_fc_port_set_offline(fc_port);
		if (err != 0) {
			/* Already offline is treated as success for the caller. */
			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
			err = 0;
			goto out;
		}

		hwqp = &fc_port->ls_queue;
		(void)nvmf_fc_hwqp_set_offline(hwqp);

		/* Remove poller for all the io queues. */
		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
			hwqp = &fc_port->io_queues[i];
			(void)nvmf_fc_hwqp_set_offline(hwqp);
			nvmf_fc_poll_group_remove_hwqp(hwqp);
		}

		/*
		 * Delete all the nports. Ideally, the nports should have been purged
		 * before the offline event, in which case, only a validation is required.
		 */
		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
	} else {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
	}
out:
	/* Always notify the caller and free the event argument. */
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
	}

	free(arg);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
		      err);
}
2753 
/* Per-subsystem context for adding/removing an nport listen address.
 * Allocated in nvmf_fc_adm_add_rem_nport_listener(); freed by
 * nvmf_fc_adm_subsystem_resume_cb() or on the error paths.
 */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	bool add_listener;	/* true = add the listener, false = remove it */
	struct spdk_nvme_transport_id trid;
};
2759 
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	/* Resume finished; the listener add/remove context is no longer needed. */
	free(cb_arg);
}
2767 
2768 static void
2769 nvmf_fc_adm_listen_done(void *cb_arg, int status)
2770 {
2771 	ASSERT_SPDK_FC_MASTER_THREAD();
2772 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
2773 
2774 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
2775 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
2776 		free(ctx);
2777 	}
2778 }
2779 
2780 static void
2781 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
2782 {
2783 	ASSERT_SPDK_FC_MASTER_THREAD();
2784 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
2785 
2786 	if (ctx->add_listener) {
2787 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
2788 	} else {
2789 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
2790 		nvmf_fc_adm_listen_done(ctx, 0);
2791 	}
2792 }
2793 
/* Add (add == true) or remove the nport's address as a listen address on
 * every nvmf subsystem that allows any listener. Each affected subsystem
 * is paused, updated in the paused callback, then resumed; the per-subsystem
 * ctx is freed on the resume/error paths.
 *
 * Returns 0, or -EINVAL if no nvmf target exists. Per-subsystem failures
 * are logged but do not fail the call.
 */
static int
nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
{
	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
	struct spdk_nvmf_subsystem *subsystem;

	if (!tgt) {
		SPDK_ERRLOG("No nvmf target defined\n");
		return -EINVAL;
	}

	subsystem = spdk_nvmf_subsystem_get_first(tgt);
	while (subsystem) {
		struct nvmf_fc_add_rem_listener_ctx *ctx;

		/* ("subsytem" spelling is the external API's, not a typo here.) */
		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
			if (ctx) {
				ctx->add_listener = add;
				ctx->subsystem = subsystem;
				nvmf_fc_create_trid(&ctx->trid,
						    nport->fc_nodename.u.wwn,
						    nport->fc_portname.u.wwn);

				/* NOTE(review): tgt_listen is invoked even on the
				 * remove path — confirm that is intended.
				 */
				if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) {
					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
						    ctx->trid.traddr);
					free(ctx);
				} else if (spdk_nvmf_subsystem_pause(subsystem,
								     nvmf_fc_adm_subsystem_paused_cb,
								     ctx)) {
					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
						    subsystem->subnqn);
					free(ctx);
				}
			}
		}

		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
	}

	return 0;
}
2837 
2838 /*
2839  * Create a Nport.
2840  */
2841 static void
2842 nvmf_fc_adm_evnt_nport_create(void *arg)
2843 {
2844 	ASSERT_SPDK_FC_MASTER_THREAD();
2845 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2846 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
2847 			api_data->api_args;
2848 	struct spdk_nvmf_fc_nport *nport = NULL;
2849 	struct spdk_nvmf_fc_port *fc_port = NULL;
2850 	int err = 0;
2851 
2852 	/*
2853 	 * Get the physical port.
2854 	 */
2855 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2856 	if (fc_port == NULL) {
2857 		err = -EINVAL;
2858 		goto out;
2859 	}
2860 
2861 	/*
2862 	 * Check for duplicate initialization.
2863 	 */
2864 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
2865 	if (nport != NULL) {
2866 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
2867 			    args->port_handle);
2868 		err = -EINVAL;
2869 		goto out;
2870 	}
2871 
2872 	/*
2873 	 * Get the memory to instantiate a fc nport.
2874 	 */
2875 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
2876 	if (nport == NULL) {
2877 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
2878 			    args->nport_handle);
2879 		err = -ENOMEM;
2880 		goto out;
2881 	}
2882 
2883 	/*
2884 	 * Initialize the contents for the nport
2885 	 */
2886 	nport->nport_hdl    = args->nport_handle;
2887 	nport->port_hdl     = args->port_handle;
2888 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
2889 	nport->fc_nodename  = args->fc_nodename;
2890 	nport->fc_portname  = args->fc_portname;
2891 	nport->d_id         = args->d_id;
2892 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
2893 
2894 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
2895 	TAILQ_INIT(&nport->rem_port_list);
2896 	nport->rport_count = 0;
2897 	TAILQ_INIT(&nport->fc_associations);
2898 	nport->assoc_count = 0;
2899 
2900 	/*
2901 	 * Populate the nport address (as listening address) to the nvmf subsystems.
2902 	 */
2903 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
2904 
2905 	(void)nvmf_fc_port_add_nport(fc_port, nport);
2906 out:
2907 	if (err && nport) {
2908 		free(nport);
2909 	}
2910 
2911 	if (api_data->cb_func != NULL) {
2912 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
2913 	}
2914 
2915 	free(arg);
2916 }
2917 
/* Completion callback for nport delete. Once the last rport on the nport is
 * gone, unlinks and frees the nport, then fires the caller's callback and
 * frees cb_data. Until then, each invocation only logs progress.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should be gone by the time the rports are. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2982 
2983 /*
2984  * Delete Nport.
2985  */
2986 static void
2987 nvmf_fc_adm_evnt_nport_delete(void *arg)
2988 {
2989 	ASSERT_SPDK_FC_MASTER_THREAD();
2990 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2991 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
2992 			api_data->api_args;
2993 	struct spdk_nvmf_fc_nport *nport = NULL;
2994 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
2995 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
2996 	int err = 0;
2997 	uint32_t rport_cnt = 0;
2998 	int rc = 0;
2999 
3000 	/*
3001 	 * Make sure that the nport exists.
3002 	 */
3003 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3004 	if (nport == NULL) {
3005 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3006 			    args->port_handle);
3007 		err = -EINVAL;
3008 		goto out;
3009 	}
3010 
3011 	/*
3012 	 * Allocate memory for callback data.
3013 	 */
3014 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3015 	if (NULL == cb_data) {
3016 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3017 		err = -ENOMEM;
3018 		goto out;
3019 	}
3020 
3021 	cb_data->nport = nport;
3022 	cb_data->port_handle = args->port_handle;
3023 	cb_data->fc_cb_func = api_data->cb_func;
3024 	cb_data->fc_cb_ctx = args->cb_ctx;
3025 
3026 	/*
3027 	 * Begin nport tear down
3028 	 */
3029 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3030 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3031 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3032 		/*
3033 		 * Deletion of this nport already in progress. Register callback
3034 		 * and return.
3035 		 */
3036 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3037 		err = -ENODEV;
3038 		goto out;
3039 	} else {
3040 		/* nport partially created/deleted */
3041 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3042 		DEV_VERIFY(0 != "Nport in zombie state");
3043 		err = -ENODEV;
3044 		goto out;
3045 	}
3046 
3047 	/*
3048 	 * Remove this nport from listening addresses across subsystems
3049 	 */
3050 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3051 
3052 	if (0 != rc) {
3053 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3054 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3055 			    nport->nport_hdl);
3056 		goto out;
3057 	}
3058 
3059 	/*
3060 	 * Delete all the remote ports (if any) for the nport
3061 	 */
3062 	/* TODO - Need to do this with a "first" and a "next" accessor function
3063 	 * for completeness. Look at app-subsystem as examples.
3064 	 */
3065 	if (nvmf_fc_nport_has_no_rport(nport)) {
3066 		/* No rports to delete. Complete the nport deletion. */
3067 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3068 		goto out;
3069 	}
3070 
3071 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3072 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3073 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3074 
3075 		if (it_del_args == NULL) {
3076 			err = -ENOMEM;
3077 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3078 				    rport_iter->rpi, rport_iter->s_id);
3079 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3080 			goto out;
3081 		}
3082 
3083 		rport_cnt++;
3084 		it_del_args->port_handle = nport->port_hdl;
3085 		it_del_args->nport_handle = nport->nport_hdl;
3086 		it_del_args->cb_ctx = (void *)cb_data;
3087 		it_del_args->rpi = rport_iter->rpi;
3088 		it_del_args->s_id = rport_iter->s_id;
3089 
3090 		nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3091 					     nvmf_fc_adm_delete_nport_cb);
3092 	}
3093 
3094 out:
3095 	/* On failure, execute the callback function now */
3096 	if ((err != 0) || (rc != 0)) {
3097 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3098 			    "rport_cnt:%d rc:%d.\n",
3099 			    args->nport_handle, err, args->port_handle,
3100 			    rport_cnt, rc);
3101 		if (cb_data) {
3102 			free(cb_data);
3103 		}
3104 		if (api_data->cb_func != NULL) {
3105 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3106 		}
3107 
3108 	} else {
3109 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3110 			      "NPort %d delete done succesfully, fc port:%d. "
3111 			      "rport_cnt:%d\n",
3112 			      args->nport_handle, args->port_handle, rport_cnt);
3113 	}
3114 
3115 	free(arg);
3116 }
3117 
3118 /*
3119  * Process an PRLI/IT add.
3120  */
3121 static void
3122 nvmf_fc_adm_evnt_i_t_add(void *arg)
3123 {
3124 	ASSERT_SPDK_FC_MASTER_THREAD();
3125 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3126 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3127 			api_data->api_args;
3128 	struct spdk_nvmf_fc_nport *nport = NULL;
3129 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3130 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3131 	int err = 0;
3132 
3133 	/*
3134 	 * Make sure the nport port exists.
3135 	 */
3136 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3137 	if (nport == NULL) {
3138 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3139 		err = -EINVAL;
3140 		goto out;
3141 	}
3142 
3143 	/*
3144 	 * Check for duplicate i_t_add.
3145 	 */
3146 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3147 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3148 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3149 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3150 			err = -EEXIST;
3151 			goto out;
3152 		}
3153 	}
3154 
3155 	/*
3156 	 * Get the memory to instantiate the remote port
3157 	 */
3158 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3159 	if (rport == NULL) {
3160 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3161 		err = -ENOMEM;
3162 		goto out;
3163 	}
3164 
3165 	/*
3166 	 * Initialize the contents for the rport
3167 	 */
3168 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3169 	rport->s_id = args->s_id;
3170 	rport->rpi = args->rpi;
3171 	rport->fc_nodename = args->fc_nodename;
3172 	rport->fc_portname = args->fc_portname;
3173 
3174 	/*
3175 	 * Add remote port to nport
3176 	 */
3177 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3178 		DEV_VERIFY(!"Error while adding rport to list");
3179 	};
3180 
3181 	/*
3182 	 * TODO: Do we validate the initiators service parameters?
3183 	 */
3184 
3185 	/*
3186 	 * Get the targets service parameters from the library
3187 	 * to return back to the driver.
3188 	 */
3189 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3190 
3191 out:
3192 	if (api_data->cb_func != NULL) {
3193 		/*
3194 		 * Passing pointer to the args struct as the first argument.
3195 		 * The cb_func should handle this appropriately.
3196 		 */
3197 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3198 	}
3199 
3200 	free(arg);
3201 
3202 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3203 		      "IT add on nport %d done, rc = %d.\n",
3204 		      args->nport_handle, err);
3205 }
3206 
3207 /**
3208  * Process a IT delete.
3209  */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	/*
	 * Handle an SPDK_FC_IT_DELETE event: locate the rport matching
	 * (s_id, rpi) in CREATED state, transition it to TO_BE_DELETED and
	 * kick off association teardown via nvmf_fc_adm_i_t_delete_assoc().
	 * On success, ownership of cb_data (and responsibility for invoking
	 * the user callback) passes to nvmf_fc_adm_i_t_delete_cb; on any
	 * failure the callback is invoked directly here.
	 */
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;
	char log_str[256];

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port).
	 */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}

	free(arg);
}
3336 
3337 /*
3338  * Process ABTS received
3339  */
3340 static void
3341 nvmf_fc_adm_evnt_abts_recv(void *arg)
3342 {
3343 	ASSERT_SPDK_FC_MASTER_THREAD();
3344 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3345 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3346 	struct spdk_nvmf_fc_nport *nport = NULL;
3347 	int err = 0;
3348 
3349 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3350 		      args->oxid, args->rxid);
3351 
3352 	/*
3353 	 * 1. Make sure the nport port exists.
3354 	 */
3355 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3356 	if (nport == NULL) {
3357 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3358 		err = -EINVAL;
3359 		goto out;
3360 	}
3361 
3362 	/*
3363 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3364 	 */
3365 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3366 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3367 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3368 			      args->rpi, args->oxid, args->rxid);
3369 		err = 0;
3370 		goto out;
3371 
3372 	}
3373 
3374 	/*
3375 	 * 3. Pass the received ABTS-LS to the library for handling.
3376 	 */
3377 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3378 
3379 out:
3380 	if (api_data->cb_func != NULL) {
3381 		/*
3382 		 * Passing pointer to the args struct as the first argument.
3383 		 * The cb_func should handle this appropriately.
3384 		 */
3385 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3386 	} else {
3387 		/* No callback set, free the args */
3388 		free(args);
3389 	}
3390 
3391 	free(arg);
3392 }
3393 
3394 /*
3395  * Callback function for hw port quiesce.
3396  */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	/*
	 * Continuation of a HW port reset after the port has been quiesced.
	 * If queue dumping was requested, allocates a dump buffer (handed
	 * off to the driver via *args->dump_buf), writes the dump reason and
	 * all queue state into it, then invokes the saved reset callback.
	 */
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	*args->dump_buf  = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqp.
	 */
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, &dump_info);

out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}
3472 
/*
 * HW port reset.
 */
3477 static void
3478 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3479 {
3480 	ASSERT_SPDK_FC_MASTER_THREAD();
3481 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3482 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3483 			api_data->api_args;
3484 	struct spdk_nvmf_fc_port *fc_port = NULL;
3485 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3486 	int err = 0;
3487 
3488 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3489 
3490 	/*
3491 	 * Make sure the physical port exists.
3492 	 */
3493 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3494 	if (fc_port == NULL) {
3495 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3496 		err = -EINVAL;
3497 		goto out;
3498 	}
3499 
3500 	/*
3501 	 * Save the reset event args and the callback in a context struct.
3502 	 */
3503 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3504 
3505 	if (ctx == NULL) {
3506 		err = -ENOMEM;
3507 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3508 		goto fail;
3509 	}
3510 
3511 	ctx->reset_args = arg;
3512 	ctx->reset_cb_func = api_data->cb_func;
3513 
3514 	/*
3515 	 * Quiesce the hw port.
3516 	 */
3517 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3518 	if (err != 0) {
3519 		goto fail;
3520 	}
3521 
3522 	/*
3523 	 * Once the ports are successfully quiesced the reset processing
3524 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3525 	 */
3526 	return;
3527 fail:
3528 	free(ctx);
3529 
3530 out:
3531 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3532 		      err);
3533 
3534 	if (api_data->cb_func != NULL) {
3535 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3536 	}
3537 
3538 	free(arg);
3539 }
3540 
3541 static inline void
3542 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3543 {
3544 	if (nvmf_fc_get_master_thread()) {
3545 		spdk_thread_send_msg(nvmf_fc_get_master_thread(), fn, args);
3546 	}
3547 }
3548 
3549 /*
3550  * Queue up an event in the SPDK masters event queue.
3551  * Used by the FC driver to notify the SPDK master of FC related events.
3552  */
3553 int
3554 nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3555 			     spdk_nvmf_fc_callback cb_func)
3556 {
3557 	int err = 0;
3558 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3559 	spdk_msg_fn event_fn = NULL;
3560 
3561 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3562 
3563 	if (event_type >= SPDK_FC_EVENT_MAX) {
3564 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3565 		err = -EINVAL;
3566 		goto done;
3567 	}
3568 
3569 	if (args == NULL) {
3570 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3571 		err = -EINVAL;
3572 		goto done;
3573 	}
3574 
3575 	api_data = calloc(1, sizeof(*api_data));
3576 
3577 	if (api_data == NULL) {
3578 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3579 		err = -ENOMEM;
3580 		goto done;
3581 	}
3582 
3583 	api_data->api_args = args;
3584 	api_data->cb_func = cb_func;
3585 
3586 	switch (event_type) {
3587 	case SPDK_FC_HW_PORT_INIT:
3588 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3589 		break;
3590 
3591 	case SPDK_FC_HW_PORT_ONLINE:
3592 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3593 		break;
3594 
3595 	case SPDK_FC_HW_PORT_OFFLINE:
3596 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3597 		break;
3598 
3599 	case SPDK_FC_NPORT_CREATE:
3600 		event_fn = nvmf_fc_adm_evnt_nport_create;
3601 		break;
3602 
3603 	case SPDK_FC_NPORT_DELETE:
3604 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3605 		break;
3606 
3607 	case SPDK_FC_IT_ADD:
3608 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3609 		break;
3610 
3611 	case SPDK_FC_IT_DELETE:
3612 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3613 		break;
3614 
3615 	case SPDK_FC_ABTS_RECV:
3616 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3617 		break;
3618 
3619 	case SPDK_FC_HW_PORT_RESET:
3620 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3621 		break;
3622 
3623 	case SPDK_FC_UNRECOVERABLE_ERR:
3624 	default:
3625 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3626 		err = -EINVAL;
3627 		break;
3628 	}
3629 
3630 done:
3631 
3632 	if (err == 0) {
3633 		assert(event_fn != NULL);
3634 		nvmf_fc_adm_run_on_master_thread(event_fn, (void *)api_data);
3635 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3636 	} else {
3637 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3638 		if (api_data) {
3639 			free(api_data);
3640 		}
3641 	}
3642 
3643 	return err;
3644 }
3645 
/* Register the FC transport ops table and the log components used in this file. */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3649