xref: /spdk/lib/nvmf/fc.c (revision 9889ab2dc80e40dae92dcef361d53dcba722043d)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf.h"
41 #include "spdk/nvmf_spec.h"
42 #include "spdk/string.h"
43 #include "spdk/trace.h"
44 #include "spdk/util.h"
45 #include "spdk/event.h"
46 #include "spdk/likely.h"
47 #include "spdk/endian.h"
48 #include "spdk/log.h"
49 #include "spdk/io_channel.h"
50 
51 #include "spdk_internal/log.h"
52 
53 #include "nvmf_internal.h"
54 #include "transport.h"
55 #include "nvmf_fc.h"
56 #include "fc_lld.h"
57 
/* Development-time verification hook; defaults to assert() unless the LLD
 * (low-level driver) headers already provided one. */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/* Asserts that the caller is running on the FC master thread (where all
 * FC admin-path operations must execute). Note: the expansion carries its
 * own ';', so invocations end up as a harmless empty statement. */
#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
        DEV_VERIFY(spdk_get_thread() == spdk_nvmf_fc_get_master_thread());
#endif
66 
67 /*
68  * PRLI service parameters
69  */
/* Bit flags advertised/parsed in the FC PRLI service-parameter page.
 * NOTE(review): bit positions presumably follow the FC-NVMe PRLI service
 * parameter definitions — confirm against the FC-NVMe specification. */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
77 
/* Human-readable names for the FC request states.
 * IMPORTANT: the order and count must exactly match
 * enum spdk_nvmf_fc_request_state (declared in nvmf_fc.h) since the enum
 * value is used as a direct index into this array. */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
95 
/* Trace object id for an FC I/O; registered with the SPDK trace framework
 * in nvmf_fc_trace() below. */
#define OBJECT_NVMF_FC_IO				0xA0

/* Trace group for the FC transport; each tracepoint below corresponds to
 * one spdk_nvmf_fc_request_state transition recorded in
 * nvmf_fc_record_req_trace_point(). */
#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
114 
/*
 * Registers the FC I/O trace object and a description for every FC request
 * state tracepoint with the SPDK trace framework. Invoked automatically via
 * the SPDK_TRACE_REGISTER_FN constructor mechanism.
 */
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	/* Lifecycle tracepoints: the "new" point starts an I/O trace object,
	 * intermediate points attach to it, and terminal points
	 * (SUCCESS/FAILED/ABORTED/PENDING) carry no object. */
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
164 
165 /**
166  * The structure used by all fc adm functions
167  */
168 struct spdk_nvmf_fc_adm_api_data {
169 	void *api_args;
170 	spdk_nvmf_fc_callback cb_func;
171 };
172 
173 /**
174  * The callback structure for nport-delete
175  */
176 struct spdk_nvmf_fc_adm_nport_del_cb_data {
177 	struct spdk_nvmf_fc_nport *nport;
178 	uint8_t port_handle;
179 	spdk_nvmf_fc_callback fc_cb_func;
180 	void *fc_cb_ctx;
181 };
182 
183 /**
184  * The callback structure for it-delete
185  */
186 struct spdk_nvmf_fc_adm_i_t_del_cb_data {
187 	struct spdk_nvmf_fc_nport *nport;
188 	struct spdk_nvmf_fc_remote_port_info *rport;
189 	uint8_t port_handle;
190 	spdk_nvmf_fc_callback fc_cb_func;
191 	void *fc_cb_ctx;
192 };
193 
194 
195 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);
196 
197 /**
198  * The callback structure for the it-delete-assoc callback
199  */
200 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
201 	struct spdk_nvmf_fc_nport *nport;
202 	struct spdk_nvmf_fc_remote_port_info *rport;
203 	uint8_t port_handle;
204 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
205 	void *cb_ctx;
206 };
207 
208 /*
209  * Call back function pointer for HW port quiesce.
210  */
211 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);
212 
213 /**
214  * Context structure for quiescing a hardware port
215  */
216 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
217 	int quiesce_count;
218 	void *ctx;
219 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
220 };
221 
222 /**
223  * Context structure used to reset a hardware port
224  */
225 struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
226 	void *reset_args;
227 	spdk_nvmf_fc_callback reset_cb_func;
228 };
229 
230 /**
231  * The callback structure for HW port link break event
232  */
233 struct spdk_nvmf_fc_adm_port_link_break_cb_data {
234 	struct spdk_nvmf_hw_port_link_break_args *args;
235 	struct spdk_nvmf_fc_nport_delete_args nport_del_args;
236 	spdk_nvmf_fc_callback cb_func;
237 };
238 
239 struct spdk_nvmf_fc_transport {
240 	struct spdk_nvmf_transport transport;
241 	pthread_mutex_t lock;
242 };
243 
/* Singleton FC transport instance created by the transport ops. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* All registered FC HW ports. */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which all FC admin-path work runs; see ASSERT_SPDK_FC_MASTER_THREAD. */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* Poll groups available for hwqp assignment (see nvmf_fc_get_idlest_poll_group). */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
254 
/* Returns the FC master thread (set when the transport is created). */
struct spdk_thread *
spdk_nvmf_fc_get_master_thread(void)
{
	return g_nvmf_fc_master_thread;
}
260 
261 static inline void
262 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
263 			       enum spdk_nvmf_fc_request_state state)
264 {
265 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
266 
267 	switch (state) {
268 	case SPDK_NVMF_FC_REQ_INIT:
269 		/* Start IO tracing */
270 		tpoint_id = TRACE_FC_REQ_INIT;
271 		break;
272 	case SPDK_NVMF_FC_REQ_READ_BDEV:
273 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
274 		break;
275 	case SPDK_NVMF_FC_REQ_READ_XFER:
276 		tpoint_id = TRACE_FC_REQ_READ_XFER;
277 		break;
278 	case SPDK_NVMF_FC_REQ_READ_RSP:
279 		tpoint_id = TRACE_FC_REQ_READ_RSP;
280 		break;
281 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
282 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
283 		break;
284 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
285 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
286 		break;
287 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
288 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
289 		break;
290 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
291 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
292 		break;
293 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
294 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
295 		break;
296 	case SPDK_NVMF_FC_REQ_NONE_RSP:
297 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
298 		break;
299 	case SPDK_NVMF_FC_REQ_SUCCESS:
300 		tpoint_id = TRACE_FC_REQ_SUCCESS;
301 		break;
302 	case SPDK_NVMF_FC_REQ_FAILED:
303 		tpoint_id = TRACE_FC_REQ_FAILED;
304 		break;
305 	case SPDK_NVMF_FC_REQ_ABORTED:
306 		tpoint_id = TRACE_FC_REQ_ABORTED;
307 		break;
308 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
309 		tpoint_id = TRACE_FC_REQ_ABORTED;
310 		break;
311 	case SPDK_NVMF_FC_REQ_PENDING:
312 		tpoint_id = TRACE_FC_REQ_PENDING;
313 		break;
314 	default:
315 		assert(0);
316 		break;
317 	}
318 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
319 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
320 				  (uint64_t)(&fc_req->req), 0);
321 	}
322 }
323 
/*
 * Poller-thread message handler: complete a failed connection create by
 * rejecting the pending LS request. No-op if there is no in-flight
 * create operation (create_opd) attached to the connection.
 */
static void
nvmf_fc_handle_connection_failure(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

	if (!fc_conn->create_opd) {
		return;
	}
	api_data = &fc_conn->create_opd->u.add_conn;

	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
				    api_data->args.fc_conn, api_data->aq_conn);
}
338 
/*
 * Poller-thread message handler: tear down the whole association that the
 * given connection belongs to (send_abts=false, backend_initiated=true,
 * no completion callback).
 */
static void
nvmf_fc_handle_assoc_deletion(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;

	spdk_nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
					fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
}
347 
348 static int
349 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
350 {
351 	uint32_t i;
352 	struct spdk_nvmf_fc_request *fc_req;
353 
354 	TAILQ_INIT(&hwqp->free_reqs);
355 	TAILQ_INIT(&hwqp->in_use_reqs);
356 
357 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
358 	if (hwqp->fc_reqs_buf == NULL) {
359 		SPDK_ERRLOG("create fc request pool failed\n");
360 		return -ENOMEM;
361 	}
362 
363 	for (i = 0; i < hwqp->rq_size; i++) {
364 		fc_req = hwqp->fc_reqs_buf + i;
365 
366 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
367 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
368 	}
369 
370 	return 0;
371 }
372 
373 static inline struct spdk_nvmf_fc_request *
374 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
375 {
376 	struct spdk_nvmf_fc_request *fc_req;
377 
378 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
379 		SPDK_ERRLOG("Alloc request buffer failed\n");
380 		return NULL;
381 	}
382 
383 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
384 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
385 
386 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
387 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
388 	TAILQ_INIT(&fc_req->abort_cbs);
389 	return fc_req;
390 }
391 
392 static inline void
393 nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
394 {
395 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
396 		/* Log an error for debug purpose. */
397 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
398 	}
399 
400 	/* set the magic to mark req as no longer valid. */
401 	fc_req->magic = 0xDEADBEEF;
402 
403 	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
404 	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
405 }
406 
407 static inline bool
408 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
409 {
410 	switch (fc_req->state) {
411 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
412 		return true;
413 	default:
414 		return false;
415 	}
416 }
417 
/* Initialize the receive-queue buffers of a hwqp (thin LLD wrapper). */
void
spdk_nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	nvmf_fc_init_rqpair_buffers(hwqp);
}
423 
424 struct spdk_nvmf_fc_conn *
425 spdk_nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
426 {
427 	struct spdk_nvmf_fc_conn *fc_conn;
428 
429 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
430 		if (fc_conn->conn_id == conn_id) {
431 			return fc_conn;
432 		}
433 	}
434 
435 	return NULL;
436 }
437 
/*
 * Re-initialize a hwqp's queues after a port reset/dump.
 *
 * Any queue-sync callbacks still pending on the hwqp are drained first;
 * the shared abts ctx is freed only by the last hwqp to respond
 * (hwqps_responded reaching num_hwqps).
 */
void
spdk_nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			/* Last responder owns freeing the shared context. */
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}
459 
/*
 * One-time initialization of a hwqp: bind it to its fc_port, reset error
 * counters, set up queue buffers, and (for IO queues only, not the LS
 * queue) allocate the request pool.
 */
void
spdk_nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	spdk_nvmf_fc_init_poller_queues(hwqp);
	/* The LS queue does not carry NVMe commands, so it needs no request pool. */
	if (&fc_port->ls_queue != hwqp) {
		nvmf_fc_create_req_mempool(hwqp);
	}

	nvmf_fc_init_q(hwqp);
	TAILQ_INIT(&hwqp->connection_list);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);
}
478 
479 static struct spdk_nvmf_fc_poll_group *
480 nvmf_fc_get_idlest_poll_group(void)
481 {
482 	uint32_t max_count = UINT32_MAX;
483 	struct spdk_nvmf_fc_poll_group *fgroup;
484 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
485 
486 	/* find poll group with least number of hwqp's assigned to it */
487 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
488 		if (fgroup->hwqp_count < max_count) {
489 			ret_fgroup = fgroup;
490 			max_count = fgroup->hwqp_count;
491 		}
492 	}
493 
494 	return ret_fgroup;
495 }
496 
/*
 * Assign a hwqp to the least-loaded poll group and notify that group's
 * poller thread via the poller API. Logs and returns without assignment
 * if no poll group is available.
 */
void
spdk_nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_poll_group *fgroup = NULL;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	fgroup = nvmf_fc_get_idlest_poll_group();
	if (!fgroup) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	/* Bind the hwqp to the group's thread before handing it to the poller. */
	hwqp->thread = fgroup->group.group->thread;
	hwqp->fgroup = fgroup;
	fgroup->hwqp_count++;
	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}
521 
/*
 * Detach a hwqp from its poll group and notify the group's poller thread.
 * Logs an error (but does not crash) if the hwqp was never assigned.
 */
void
spdk_nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		hwqp->fgroup->hwqp_count--;
		spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
	}
}
538 
539 /*
540  * Note: This needs to be used only on master poller.
541  */
/*
 * Hand out a monotonically increasing id used to correlate ABTS queue-sync
 * operations. Not thread safe: must only be called on the master poller
 * (as noted above).
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	u_id++;
	return (uint64_t)u_id;
}
549 
/*
 * Poller API callback: one hwqp finished its queue-sync. When the last
 * hwqp responds, free the sync args, mark the abts ctx as synced, reset
 * its bookkeeping, and re-issue the ABTS to every involved hwqp poller.
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}
}
586 
/*
 * The ABTS OXID was not found on any hwqp. If the driver supports queue
 * sync, issue a sync to all involved hwqps (plus a marker on the LS queue)
 * so that any in-flight frames drain before the ABTS is retried.
 *
 * Returns 0 when the sync was started, -EPERM if the driver lacks queue
 * sync support, -EINVAL/-ENOMEM on bad ctx or allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Build one queue-sync arg per hwqp, mirroring the existing ABTS args,
	 * all completing back to nvmf_fc_queue_synced_cb on this thread. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					     poller_arg);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
641 
642 static void
643 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
644 {
645 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
646 	struct spdk_nvmf_fc_nport *nport  = NULL;
647 
648 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
649 		ctx->handled = true;
650 	}
651 
652 	ctx->hwqps_responded++;
653 
654 	if (ctx->hwqps_responded < ctx->num_hwqps) {
655 		/* Wait for all pollers to complete. */
656 		return;
657 	}
658 
659 	nport = spdk_nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
660 
661 	if (ctx->nport != nport) {
662 		/* Nport can be deleted while this abort is being
663 		 * processed by the pollers.
664 		 */
665 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
666 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
667 	} else {
668 		if (!ctx->handled) {
669 			/* Try syncing the queues and try one more time */
670 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
671 				SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
672 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
673 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
674 				return;
675 			} else {
676 				/* Send Reject */
677 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
678 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
679 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
680 			}
681 		} else {
682 			/* Send Accept */
683 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
684 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
685 					    0, NULL, NULL);
686 		}
687 	}
688 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
689 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
690 
691 	free(ctx->abts_poller_args);
692 	free(ctx);
693 }
694 
/*
 * Entry point for a received ABTS frame.
 *
 * Collects the distinct set of hwqps that carry connections for the
 * originating rpi on this nport, builds a shared abts ctx plus one
 * poller arg per hwqp, and asks each hwqp's poller to look for (and
 * abort) the exchange. Completion funnels into nvmf_fc_abts_handled_cb.
 *
 * On any failure (no matching connection, or allocation failure) a
 * BLS_RJT is transmitted immediately.
 */
void
spdk_nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			       uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate: each hwqp is added once, no matter how many of the
	 * rpi's connections it hosts. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	/* No connection for this rpi: nothing to abort, reject the ABTS. */
	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	/* Handles are saved separately so the completion path can identify
	 * the nport even if it gets deleted mid-processing. */
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}

	/* args/ctx ownership passed to the poller completion path. */
	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
795 
796 /*** Accessor functions for the FC structures - BEGIN */
797 /*
798  * Returns true if the port is in offline state.
799  */
800 bool
801 spdk_nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
802 {
803 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
804 		return true;
805 	}
806 
807 	return false;
808 }
809 
810 /*
811  * Returns true if the port is in online state.
812  */
813 bool
814 spdk_nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
815 {
816 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
817 		return true;
818 	}
819 
820 	return false;
821 }
822 
823 int
824 spdk_nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
825 {
826 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
827 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
828 		return 0;
829 	}
830 
831 	return -EPERM;
832 }
833 
834 int
835 spdk_nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
836 {
837 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
838 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
839 		return 0;
840 	}
841 
842 	return -EPERM;
843 }
844 
845 int
846 spdk_nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
847 {
848 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
849 		hwqp->state = SPDK_FC_HWQP_ONLINE;
850 		/* reset some queue counters */
851 		hwqp->num_conns = 0;
852 		return nvmf_fc_set_q_online_state(hwqp, true);
853 	}
854 
855 	return -EPERM;
856 }
857 
858 int
859 spdk_nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
860 {
861 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
862 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
863 		return nvmf_fc_set_q_online_state(hwqp, false);
864 	}
865 
866 	return -EPERM;
867 }
868 
/* Register an FC HW port on the global port list. */
void
spdk_nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}
874 
875 struct spdk_nvmf_fc_port *
876 spdk_nvmf_fc_port_lookup(uint8_t port_hdl)
877 {
878 	struct spdk_nvmf_fc_port *fc_port = NULL;
879 
880 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
881 		if (fc_port->port_hdl == port_hdl) {
882 			return fc_port;
883 		}
884 	}
885 	return NULL;
886 }
887 
888 static void
889 nvmf_fc_port_cleanup(void)
890 {
891 	struct spdk_nvmf_fc_port *fc_port, *tmp;
892 	struct spdk_nvmf_fc_hwqp *hwqp;
893 	uint32_t i;
894 
895 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
896 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
897 		for (i = 0; i < fc_port->num_io_queues; i++) {
898 			hwqp = &fc_port->io_queues[i];
899 			if (hwqp->fc_reqs_buf) {
900 				free(hwqp->fc_reqs_buf);
901 			}
902 		}
903 		free(fc_port);
904 	}
905 }
906 
/* PRLI service parameters this target advertises: discovery service and
 * target function (see enum spdk_nvmf_fc_service_parameters above). */
uint32_t
spdk_nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}
912 
913 int
914 spdk_nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
915 			    struct spdk_nvmf_fc_nport *nport)
916 {
917 	if (fc_port) {
918 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
919 		fc_port->num_nports++;
920 		return 0;
921 	}
922 
923 	return -EINVAL;
924 }
925 
926 int
927 spdk_nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
928 			       struct spdk_nvmf_fc_nport *nport)
929 {
930 	if (fc_port && nport) {
931 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
932 		fc_port->num_nports--;
933 		return 0;
934 	}
935 
936 	return -EINVAL;
937 }
938 
939 static struct spdk_nvmf_fc_nport *
940 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
941 {
942 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
943 
944 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
945 		if (fc_nport->nport_hdl == nport_hdl) {
946 			return fc_nport;
947 		}
948 	}
949 
950 	return NULL;
951 }
952 
/*
 * Resolve (port handle, nport handle) to an nport by first looking up the
 * HW port and then scanning its nport list. NULL when either lookup fails.
 */
struct spdk_nvmf_fc_nport *
spdk_nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = spdk_nvmf_fc_port_lookup(port_hdl);

	if (fc_port == NULL) {
		return NULL;
	}

	return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
}
965 
/*
 * Resolve the (d_id, s_id) pair of a received frame to the local nport and
 * remote port on this hwqp's HW port.
 *
 * On success both *nport and *rport are set and 0 is returned. Returns
 * -EINVAL for NULL arguments, -ENOENT when no match exists. Output
 * pointers are left untouched on failure.
 */
static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	/* d_id identifies the local nport; within it, s_id identifies the
	 * remote port. d_id is unique per HW port, so stop after the first
	 * matching nport even if the s_id search fails. */
	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}
1005 
1006 /* Returns true if the Nport is empty of all rem_ports */
1007 bool
1008 spdk_nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1009 {
1010 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1011 		assert(nport->rport_count == 0);
1012 		return true;
1013 	} else {
1014 		return false;
1015 	}
1016 }
1017 
1018 int
1019 spdk_nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1020 			     enum spdk_nvmf_fc_object_state state)
1021 {
1022 	if (nport) {
1023 		nport->nport_state = state;
1024 		return 0;
1025 	} else {
1026 		return -EINVAL;
1027 	}
1028 }
1029 
/* Link a remote port into the nport's remote-port list and bump the
 * cached count.
 *
 * NOTE(review): the declared return type is bool, but the body returns
 * 0 on success and -EINVAL on failure — i.e. success converts to false
 * and failure to true.  Callers apparently treat this as "0 == ok";
 * confirm all call sites before changing the return convention.
 */
bool
spdk_nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
				struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1042 
/* Unlink a remote port from the nport's remote-port list and decrement
 * the cached count.  The caller retains ownership of rem_port's memory.
 *
 * NOTE(review): like the add counterpart, this returns 0/-EINVAL from a
 * bool-typed function, so success reads as false — confirm call sites
 * before changing.
 */
bool
spdk_nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
				   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1055 
1056 int
1057 spdk_nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1058 			     enum spdk_nvmf_fc_object_state state)
1059 {
1060 	if (rport) {
1061 		rport->rport_state = state;
1062 		return 0;
1063 	} else {
1064 		return -EINVAL;
1065 	}
1066 }
1067 int
1068 spdk_nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1069 			     enum spdk_nvmf_fc_object_state state)
1070 {
1071 	if (assoc) {
1072 		assoc->assoc_state = state;
1073 		return 0;
1074 	} else {
1075 		return -EINVAL;
1076 	}
1077 }
1078 
/* Map an NVMf controller to its FC association by walking from the
 * controller's admin qpair to the containing FC connection.  Returns
 * NULL when the controller has no admin qpair (and hence no
 * association to report).
 */
static struct spdk_nvmf_fc_association *
spdk_nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	/* The generic qpair is embedded inside the FC connection. */
	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}
1094 
/* Check whether the given controller's association targets the nport
 * identified by (port_hdl, nport_hdl).  Returns false when the
 * controller is NULL, the nport cannot be resolved, or the association
 * targets a different nport.
 */
bool
spdk_nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
			    struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = spdk_nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	/* Compare the association's target port against the resolved nport. */
	assoc = spdk_nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}
1121 
1122 static inline bool
1123 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1124 {
1125 	switch (fc_req->state) {
1126 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1127 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1128 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1129 		return true;
1130 	default:
1131 		return false;
1132 	}
1133 }
1134 
1135 static inline bool
1136 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1137 {
1138 	struct spdk_nvmf_request *tmp = NULL;
1139 
1140 	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
1141 		if (tmp == &fc_req->req) {
1142 			return true;
1143 		}
1144 	}
1145 	return false;
1146 }
1147 
/* Thread-message handler (runs on the hwqp's SPDK thread) to abort a
 * request currently owned by the bdev/NVMf layer.  Only an outstanding
 * AER is actually cancelled here; the notes below explain why other
 * admin/fabric commands are simply left to run to completion.
 */
static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	if (ctrlr->aer_req == &fc_req->req) {
		SPDK_NOTICELOG("Abort AER request\n");
		spdk_nvmf_qpair_free_aer(fc_req->req.qpair);
	}
}
1176 
1177 void
1178 spdk_nvmf_fc_request_abort_complete(void *arg1)
1179 {
1180 	struct spdk_nvmf_fc_request *fc_req =
1181 		(struct spdk_nvmf_fc_request *)arg1;
1182 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1183 
1184 	/* Request abort completed. Notify all the callbacks */
1185 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1186 		/* Notify */
1187 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1188 		/* Remove */
1189 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1190 		/* free */
1191 		free(ctx);
1192 	}
1193 
1194 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1195 		       fc_req_state_strs[fc_req->state]);
1196 
1197 	spdk_nvmf_fc_request_free(fc_req);
1198 }
1199 
/* Begin aborting an FC request.  Optionally registers a caller callback
 * (cb/cb_args) that fires when the abort completes — see
 * spdk_nvmf_fc_request_abort_complete().  The action taken depends on
 * the request's current state: bdev-owned requests are aborted via a
 * message to the hwqp thread, in-transfer requests via an abort issued
 * to the HBA, and buffered/pending requests are completed immediately.
 * send_abts asks the hardware to also send an ABTS on the wire when an
 * exchange is allocated.
 */
void
spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
			   spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = spdk_nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && spdk_nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch on the request's current owner/state. */
	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (spdk_nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				     (void *)fc_req);
}
1276 
1277 static int
1278 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1279 {
1280 	uint32_t length = fc_req->req.length;
1281 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1282 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1283 	struct spdk_nvmf_transport *transport = group->transport;
1284 
1285 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1286 		return -ENOMEM;
1287 	}
1288 
1289 	return 0;
1290 }
1291 
1292 static int
1293 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1294 {
1295 	/* Allocate an XCHG if we dont use send frame for this command. */
1296 	if (!spdk_nvmf_fc_use_send_frame(&fc_req->req)) {
1297 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1298 		if (!fc_req->xchg) {
1299 			fc_req->hwqp->counters.no_xchg++;
1300 			printf("NO XCHGs!\n");
1301 			goto pending;
1302 		}
1303 	}
1304 
1305 	if (fc_req->req.length) {
1306 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1307 			fc_req->hwqp->counters.buf_alloc_err++;
1308 			goto pending;
1309 		}
1310 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1311 	}
1312 
1313 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1314 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "WRITE CMD.\n");
1315 
1316 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1317 
1318 		if (nvmf_fc_recv_data(fc_req)) {
1319 			/* Dropped return success to caller */
1320 			fc_req->hwqp->counters.unexpected_err++;
1321 			spdk_nvmf_fc_request_free(fc_req);
1322 		}
1323 	} else {
1324 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "READ/NONE CMD\n");
1325 
1326 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1327 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1328 		} else {
1329 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1330 		}
1331 		spdk_nvmf_request_exec(&fc_req->req);
1332 	}
1333 
1334 	return 0;
1335 
1336 pending:
1337 	if (fc_req->xchg) {
1338 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1339 		fc_req->xchg = NULL;
1340 	}
1341 
1342 	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1343 
1344 	return -EAGAIN;
1345 }
1346 
/* Validate an inbound FC-NVMe command IU frame and hand it to the
 * request execution path.  Returns 0 on success (the transport owns the
 * request from here on, possibly via the pending queue) or a negative
 * errno when the frame is rejected/dropped; the relevant error counter
 * is incremented for each rejection class.
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported by this transport. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = spdk_nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	/* Populate the request from the command IU and frame header.
	 * ox_id/s_id/d_id arrive big-endian; they are byte-swapped via
	 * temporaries because from_be*() needs an addressable object. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	/* On resource exhaustion the request is parked on the poll group's
	 * pending queue and retried later. */
	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}
1437 
1438 /*
1439  * These functions are called from the FC LLD
1440  */
1441 
1442 void
1443 spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1444 {
1445 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1446 	struct spdk_nvmf_fc_poll_group *fgroup = hwqp->fgroup;
1447 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1448 	struct spdk_nvmf_transport *transport = group->transport;
1449 
1450 	if (!fc_req) {
1451 		return;
1452 	}
1453 
1454 	if (fc_req->xchg) {
1455 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1456 		fc_req->xchg = NULL;
1457 	}
1458 
1459 	/* Release IO buffers */
1460 	if (fc_req->req.data_from_pool) {
1461 		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
1462 	}
1463 	fc_req->req.data = NULL;
1464 	fc_req->req.iovcnt  = 0;
1465 
1466 	/* Release Q buffer */
1467 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1468 
1469 	/* Free Fc request */
1470 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1471 }
1472 
/* Transition an FC request to a new state, logging the old/new pair and
 * recording a trace point for the transition.
 */
void
spdk_nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	/* 0xDEADBEEF presumably marks a freed/poisoned request — guards
	 * against state changes on a released object (TODO confirm where
	 * the magic is set). */
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      spdk_nvmf_fc_request_get_state_str(fc_req->state),
		      spdk_nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1486 
1487 char *
1488 spdk_nvmf_fc_request_get_state_str(int state)
1489 {
1490 	static char *unk_str = "unknown";
1491 
1492 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1493 		fc_req_state_strs[state] : unk_str);
1494 }
1495 
/* Entry point for a received FC frame on this hwqp.  Resolves the
 * nport/rport pair from the frame's D_ID/S_ID and validates their
 * state, then dispatches by frame type: LS request frames are handed to
 * the LS module (or queued on ls_pending_queue when no exchange is
 * available), NVMe command frames go to nvmf_fc_hwqp_handle_request(),
 * and anything else is dropped.  Returns 0 on success or a negative
 * errno; the matching drop counter is incremented on each failure path.
 */
int
spdk_nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
				uint32_t buff_idx,
				struct spdk_nvmf_fc_frame_hdr *frame,
				struct spdk_nvmf_fc_buffer_desc *buffer,
				uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* FC IDs are 24-bit, big-endian in the header; shift out the pad byte. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	/* Both ports must be fully created (not mid-teardown) to accept frames. */
	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1600 
1601 void
1602 spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1603 {
1604 	struct spdk_nvmf_request *req = NULL, *tmp;
1605 	struct spdk_nvmf_fc_request *fc_req;
1606 	int budget = 64;
1607 
1608 	if (!hwqp->fgroup) {
1609 		/* LS queue is tied to acceptor_poll group and LS pending requests
1610 		 * are stagged and processed using hwqp->ls_pending_queue.
1611 		 */
1612 		return;
1613 	}
1614 
1615 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1616 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1617 		if (!nvmf_fc_request_execute(fc_req)) {
1618 			/* Succesfuly posted, Delete from pending. */
1619 			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
1620 		}
1621 
1622 		if (budget) {
1623 			budget--;
1624 		} else {
1625 			return;
1626 		}
1627 	}
1628 }
1629 
/* Retry LS requests that were queued because no exchange (XRI) was
 * available.  Each entry is re-validated first — its nport/rport may
 * have gone away or changed state while queued, in which case the entry
 * is dropped and its RQ buffer returned to the hardware.  Processing
 * stops at the first exchange-allocation failure, since later entries
 * would fail for the same reason.
 */
void
spdk_nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1679 
/* Transmit the response for a completed request.  Depending on
 * spdk_nvmf_fc_send_ersp_required() this posts either a full extended
 * response (ERSP IU carrying sequence number and transferred length) or
 * a minimal RSP.  Also advances the connection's SQ head and response
 * counters.  Returns the low-level transmit result.
 */
int
spdk_nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = spdk_nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (spdk_nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
					    fc_req->transfered_len)) {
		/* Fill ERSP Len */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1720 
/* Public wrapper: transmit an LS response via the low-level driver. */
int
spdk_nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *tgtport,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	return nvmf_fc_xmt_ls_rsp(tgtport, ls_rqst);
}
1727 
/* Public wrapper: transmit a send-request/receive-response (SRSR)
 * exchange via the low-level driver; cb/cb_args fire on completion. */
int
spdk_nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
			  spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	return nvmf_fc_xmt_srsr_req(hwqp, srsr_bufs, cb, cb_args);
}
1735 
/* Decide whether a full extended response (ERSP) must be sent instead
 * of a minimal RSP for this completion.  See the numbered criteria in
 * the comment below.
 */
bool
spdk_nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
				uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 *
	 * NOTE(review): case 4 (SQ 90% full) is not implemented by the
	 * condition below — confirm whether it is intentionally omitted
	 * or handled elsewhere.
	 */

	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1764 
/* Public wrapper: dump the LS queue and every IO queue of the given FC
 * port into dump_info (diagnostics aid). */
void
spdk_nvmf_fc_dump_all_queues(struct spdk_nvmf_fc_port *fc_port,
			     struct spdk_nvmf_fc_queue_dump_info *dump_info)
{
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, dump_info);
}
1772 
/* NVMf transport req_complete callback for FC.  Aborted requests are
 * finished via a deferred poller-API message (to avoid cleaning up in
 * this call context); successful controller-to-host reads start the
 * data transmit; everything else sends the response directly.  Always
 * returns 0 — on transmit error the request is freed here.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					     (void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		/* Pick the *_RSP state matching the transfer direction. */
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = spdk_nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		spdk_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1808 
1809 struct spdk_nvmf_tgt *
1810 spdk_nvmf_fc_get_tgt(void)
1811 {
1812 	if (g_nvmf_ftransport) {
1813 		return g_nvmf_ftransport->transport.tgt;
1814 	}
1815 	return NULL;
1816 }
1817 
1818 /*
1819  * FC Transport Public API begins here
1820  */
1821 
1822 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1823 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1824 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1825 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1826 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1827 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1828 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1829 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1830 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1831 
1832 static void
1833 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1834 {
1835 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1836 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1837 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1838 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1839 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1840 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1841 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1842 }
1843 
/* Create the (singleton) FC transport: validates options and core
 * availability, allocates the transport object and its lock, records
 * the master thread, and initializes the low-level FC driver.  Returns
 * the embedded generic transport, or NULL on any failure.
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only one FC transport instance is supported. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	/* Need at least two cores: one for the acceptor, one for pollers. */
	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* The IO unit size must allow max_io_size within the SGE limit. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
1897 
1898 static int
1899 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1900 {
1901 	if (transport) {
1902 		struct spdk_nvmf_fc_transport *ftransport;
1903 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
1904 
1905 		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1906 
1907 		free(ftransport);
1908 
1909 		/* clean up any FC poll groups still around */
1910 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
1911 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1912 			free(fgroup);
1913 		}
1914 		g_nvmf_fgroup_count = 0;
1915 
1916 		/* low level FC driver clean up */
1917 		nvmf_fc_lld_fini();
1918 
1919 		nvmf_fc_port_cleanup();
1920 	}
1921 
1922 	return 0;
1923 }
1924 
/* Transport listen callback.  No-op for FC: ports come online through
 * low-level-driver events rather than listen addresses (presumably —
 * see nvmf_fc_accept(); confirm against the transport-ops contract).
 */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}
1931 
/* Transport stop-listen callback.  No-op for FC, mirroring
 * nvmf_fc_listen() above. */
static int
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
	return 0;
}
1938 
/* Acceptor poll callback.  Starts the low-level driver exactly once
 * (on the first invocation), then services the LS queue of every
 * online FC port; new qpairs are delivered through cb_fn, which is
 * stashed on each port for the queue-processing path.
 * NOTE(review): cb_arg is unused here.
 */
static void
nvmf_fc_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn, void *cb_arg)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;
	/* One-shot latch: the LLD is started on the first accept poll. */
	static bool start_lld = false;

	if (spdk_unlikely(!start_lld)) {
		start_lld  = true;
		nvmf_fc_lld_start();
	}

	/* poll the LS queue on each port */
	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
			fc_port->new_qp_cb = cb_fn;
			nvmf_fc_process_queue(&fc_port->ls_queue);
		}
	}
}
1958 
/* Fill one discovery-log-page entry for an FC listen address: transport
 * type, address family, and space-padded traddr/trsvcid strings. */
static void
nvmf_fc_discover(struct spdk_nvmf_transport *transport,
		 struct spdk_nvme_transport_id *trid,
		 struct spdk_nvmf_discovery_log_page_entry *entry)
{
	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
	entry->adrfam = trid->adrfam;
	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;

	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
}
1971 
/* Allocate an FC poll group and register it on the global list (under
 * the transport lock).  Returns the embedded generic poll group, or
 * NULL on allocation failure.
 */
static struct spdk_nvmf_transport_poll_group *
nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_transport *ftransport =
		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);

	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
	if (!fgroup) {
		SPDK_ERRLOG("Unable to alloc FC poll group\n");
		return NULL;
	}

	TAILQ_INIT(&fgroup->hwqp_list);

	/* Global registration is serialized with the transport lock. */
	pthread_mutex_lock(&ftransport->lock);
	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
	g_nvmf_fgroup_count++;
	pthread_mutex_unlock(&ftransport->lock);

	return &fgroup->group;
}
1994 
1995 static void
1996 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1997 {
1998 	struct spdk_nvmf_fc_poll_group *fgroup;
1999 	struct spdk_nvmf_fc_transport *ftransport =
2000 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2001 
2002 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2003 	pthread_mutex_lock(&ftransport->lock);
2004 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2005 	g_nvmf_fgroup_count--;
2006 	pthread_mutex_unlock(&ftransport->lock);
2007 
2008 	free(fgroup);
2009 }
2010 
2011 static int
2012 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2013 		       struct spdk_nvmf_qpair *qpair)
2014 {
2015 	struct spdk_nvmf_fc_poll_group *fgroup;
2016 	struct spdk_nvmf_fc_conn *fc_conn;
2017 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2018 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2019 	bool hwqp_found = false;
2020 
2021 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2022 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2023 
2024 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2025 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2026 			hwqp_found = true;
2027 			break;
2028 		}
2029 	}
2030 
2031 	if (!hwqp_found) {
2032 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2033 		goto err;
2034 	}
2035 
2036 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2037 					 &fc_conn->conn_id,
2038 					 fc_conn->max_queue_depth)) {
2039 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2040 		goto err;
2041 	}
2042 
2043 	fc_conn->hwqp = hwqp;
2044 
2045 	/* If this is for ADMIN connection, then update assoc ID. */
2046 	if (fc_conn->qpair.qid == 0) {
2047 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2048 	}
2049 
2050 	api_data = &fc_conn->create_opd->u.add_conn;
2051 	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2052 	return 0;
2053 err:
2054 	return -1;
2055 }
2056 
2057 static int
2058 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2059 {
2060 	uint32_t count = 0;
2061 	struct spdk_nvmf_fc_poll_group *fgroup;
2062 	struct spdk_nvmf_fc_hwqp *hwqp;
2063 
2064 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2065 
2066 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2067 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2068 			count += nvmf_fc_process_queue(hwqp);
2069 		}
2070 	}
2071 
2072 	return (int) count;
2073 }
2074 
2075 static int
2076 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2077 {
2078 	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
2079 
2080 	if (!fc_req->is_aborted) {
2081 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2082 		spdk_nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2083 	} else {
2084 		spdk_nvmf_fc_request_abort_complete(fc_req);
2085 	}
2086 	return 0;
2087 }
2088 
2089 
/*
 * qpair_fini hook: hands connection teardown off to the FC master thread.
 * A qpair that never got a valid conn_id is a failed creation; an admin
 * qpair (conn_id == assoc_id) triggers deletion of the whole association.
 */
static void
nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		/* QP creation failure in FC transport. Cleanup. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_connection_failure, fc_conn);
	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/* Admin connection: delete the association unless that
		 * deletion is already in progress. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_assoc_deletion, fc_conn);
	}
}
2108 
2109 static int
2110 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2111 			    struct spdk_nvme_transport_id *trid)
2112 {
2113 	struct spdk_nvmf_fc_conn *fc_conn;
2114 
2115 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2116 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2117 	return 0;
2118 }
2119 
2120 static int
2121 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2122 			     struct spdk_nvme_transport_id *trid)
2123 {
2124 	struct spdk_nvmf_fc_conn *fc_conn;
2125 
2126 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2127 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2128 	return 0;
2129 }
2130 
2131 static int
2132 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2133 			      struct spdk_nvme_transport_id *trid)
2134 {
2135 	struct spdk_nvmf_fc_conn *fc_conn;
2136 
2137 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2138 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2139 	return 0;
2140 }
2141 
/* FC transport ops vector registered with the generic NVMe-oF layer. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* listen/stop_listen are no-ops for FC (both just return 0). */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
};
2166 
2167 /*
2168  * Re-initialize the FC-Port after an offline event.
2169  * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains
2170  * unchanged after the first initialization.
2171  *
2172  */
static int
nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port,
				    struct spdk_nvmf_fc_hw_port_init_args *args)
{
	uint32_t i;

	/* Verify that the port was previously in offline or quiesced state */
	if (spdk_nvmf_fc_port_is_online(fc_port)) {
		SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle);
		return -EINVAL;
	}

	/* Reinit information in new LS queue from previous queue */
	spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue);

	fc_port->fcp_rq_id = args->fcp_rq_id;

	/* Initialize the LS queue */
	fc_port->ls_queue.queues = args->ls_queue;
	spdk_nvmf_fc_init_poller_queues(fc_port->ls_queue.queues);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		/* Reinit information in new IO queue from previous queue */
		spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i],
						       args->io_queues[i]);
		fc_port->io_queues[i].queues = args->io_queues[i];
		/* Initialize the IO queues */
		spdk_nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues);
	}

	/* Leave the port offline; a separate online event brings it up. */
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;

	/* Validate the port information */
	DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) {
		/* Re-init is only valid once every nport has been removed. */
		return -EINVAL;
	}

	return 0;
}
2214 
2215 /* Initializes the data for the creation of a FC-Port object in the SPDK
2216  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2217  * the API to the library. The contents added to this well defined structure
2218  * is private to each vendors implementation.
2219  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	fc_port->port_hdl       = args->port_handle;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	fc_port->ls_queue.thread = spdk_nvmf_fc_get_master_thread();
	/* NOTE: relies on num_io_queues having been set just above. */
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;

	/*
	 * Initialize the LS queue.
	 */
	spdk_nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);

	/*
	 * Initialize the IO queues.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->rq_size = args->io_queue_size;
		spdk_nvmf_fc_init_hwqp(fc_port, hwqp);
	}

	/*
	 * Initialize the LS processing for port
	 */
	spdk_nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	/* Always succeeds (return value kept for interface symmetry). */
	return 0;
}
2275 
2276 static void
2277 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port)
2278 {
2279 	struct spdk_nvmf_fc_hwqp *hwqp    = NULL;
2280 	int i = 0;
2281 
2282 	hwqp = &fc_port->ls_queue;
2283 	(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2284 
2285 	/*  Remove poller for all the io queues. */
2286 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2287 		hwqp = &fc_port->io_queues[i];
2288 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2289 		spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
2290 	}
2291 }
2292 
2293 /*
2294  * Callback function for HW port link break operation.
2295  *
2296  * Notice that this callback is being triggered when spdk_fc_nport_delete()
2297  * completes, if that spdk_fc_nport_delete() called is issued by
2298  * nvmf_fc_adm_evnt_hw_port_link_break().
2299  *
2300  * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple
2301  * times (one per nport in the HW port's nport_list), a single call to
2302  * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function.
2303  *
2304  * As a result, this function only invokes a callback to the caller of
2305  * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty.
2306  */
static void
nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle,
				  enum spdk_fc_event event_type, void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args;
	struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL;
	spdk_nvmf_fc_callback cb_func = NULL;
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int num_nports = 0;
	char log_str[256];

	/* Validate every input; each failure is unexpected (DEV_VERIFY). */
	if (0 != spdk_err) {
		DEV_VERIFY(!"port link break cb: spdk_err not success.");
		SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err);
		goto out;
	}

	if (!offline_cb_args) {
		DEV_VERIFY(!"port link break cb: port_offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	offline_args = offline_cb_args->args;
	if (!offline_args) {
		DEV_VERIFY(!"port link break cb: offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	if (port_handle != offline_args->port_handle) {
		DEV_VERIFY(!"port link break cb: port_handle mismatch.");
		err = -EINVAL;
		goto out;
	}

	cb_func = offline_cb_args->cb_func;
	if (!cb_func) {
		DEV_VERIFY(!"port link break cb: cb_func is NULL.");
		err = -EINVAL;
		goto out;
	}

	fc_port = spdk_nvmf_fc_port_lookup(port_handle);
	if (!fc_port) {
		DEV_VERIFY(!"port link break cb: fc_port is NULL.");
		SPDK_ERRLOG("port link break cb: Unable to find port:%d\n",
			    offline_args->port_handle);
		err = -EINVAL;
		goto out;
	}

	num_nports = fc_port->num_nports;
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		/*
		 * Don't call the callback unless all nports have been deleted.
		 */
		goto out;
	}

	if (num_nports != 0) {
		/* nport_list is empty, so a non-zero count is inconsistent. */
		DEV_VERIFY(!"port link break cb: num_nports in non-zero.");
		SPDK_ERRLOG("port link break cb: # of ports should be 0. Instead, num_nports:%d\n",
			    num_nports);
		err = -EINVAL;
	}

	/*
	 * Mark the hwqps as offline and unregister the pollers.
	 */
	(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);

	/*
	 * Since there are no more nports, execute the callback(s).
	 */
	(void)cb_func(port_handle, SPDK_FC_LINK_BREAK,
		      (void *)offline_args->cb_ctx, spdk_err);

out:
	/* The cb_data wrapper is owned by this callback; always release it. */
	free(offline_cb_args);

	snprintf(log_str, sizeof(log_str),
		 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n",
		 port_handle, event_type, num_nports, err, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
	return;
}
2401 
2402 /*
2403  * FC port must have all its nports deleted before transitioning to offline state.
2404  */
2405 static void
2406 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2407 {
2408 	struct spdk_nvmf_fc_nport *nport = NULL;
2409 	/* All nports must have been deleted at this point for this fc port */
2410 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2411 	DEV_VERIFY(fc_port->num_nports == 0);
2412 	/* Mark the nport states to be zombie, if they exist */
2413 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2414 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2415 			(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2416 		}
2417 	}
2418 }
2419 
2420 static void
2421 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2422 {
2423 	ASSERT_SPDK_FC_MASTER_THREAD();
2424 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2425 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2426 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2427 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2428 	int spdk_err = 0;
2429 	uint8_t port_handle = cb_data->port_handle;
2430 	uint32_t s_id = rport->s_id;
2431 	uint32_t rpi = rport->rpi;
2432 	uint32_t assoc_count = rport->assoc_count;
2433 	uint32_t nport_hdl = nport->nport_hdl;
2434 	uint32_t d_id = nport->d_id;
2435 	char log_str[256];
2436 
2437 	/*
2438 	 * Assert on any delete failure.
2439 	 */
2440 	if (0 != err) {
2441 		DEV_VERIFY(!"Error in IT Delete callback.");
2442 		goto out;
2443 	}
2444 
2445 	if (cb_func != NULL) {
2446 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2447 	}
2448 
2449 out:
2450 	free(cb_data);
2451 
2452 	snprintf(log_str, sizeof(log_str),
2453 		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2454 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2455 
2456 	if (err != 0) {
2457 		SPDK_ERRLOG("%s", log_str);
2458 	} else {
2459 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
2460 	}
2461 }
2462 
/*
 * Per-association completion callback used by nvmf_fc_adm_i_t_delete_assoc().
 * Invoked once for each association delete; when the rport's association
 * count drains to zero it removes the rport, runs the caller's callback,
 * and frees both the rport and the shared cb_data (args).
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot fields for logging; rport/args may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* Decrement manually so the count still drains on error. */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (spdk_nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		free(rport);
		/* args == cb_data; shared across all per-assoc callbacks and
		 * freed only here, on the final one. */
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2524 
2525 /**
2526  * Process a IT delete.
2527  */
/*
 * Schedule deletion of every association on 'nport' that belongs to the
 * remote port 'rport' (matched by s_id). cb_func/cb_ctx are invoked from
 * nvmf_fc_adm_i_t_delete_assoc_cb() once the last delete completes; if no
 * delete could be scheduled the callback is invoked synchronously here.
 */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = spdk_nvmf_fc_delete_association(nport,
					assoc->assoc_id,
					false /* send abts */, false,
					nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)spdk_nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2610 
/*
 * Poller-API completion for a single hwqp quiesce. Decrements the shared
 * port_quiesce_ctx count; when the last queue (LS + all IO queues) reports
 * in, marks the port quiesced, runs the caller's callback, and frees the
 * shared context.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* Per-queue args were allocated by nvmf_fc_adm_hw_queue_quiesce(). */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the caller of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2663 
2664 static int
2665 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2666 			     spdk_nvmf_fc_poller_api_cb cb_func)
2667 {
2668 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2669 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2670 	int err = 0;
2671 
2672 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2673 
2674 	if (args == NULL) {
2675 		err = -ENOMEM;
2676 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2677 		goto done;
2678 	}
2679 	args->hwqp = fc_hwqp;
2680 	args->ctx = ctx;
2681 	args->cb_info.cb_func = cb_func;
2682 	args->cb_info.cb_data = args;
2683 	args->cb_info.cb_thread = spdk_get_thread();
2684 
2685 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2686 	rc = spdk_nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2687 	if (rc) {
2688 		free(args);
2689 		err = -EINVAL;
2690 	}
2691 
2692 done:
2693 	return err;
2694 }
2695 
2696 /*
2697  * Hw port Quiesce
2698  */
2699 static int
2700 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2701 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2702 {
2703 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2704 	uint32_t i = 0;
2705 	int err = 0;
2706 
2707 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2708 
2709 	/*
2710 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2711 	 * and execute the callback.
2712 	 */
2713 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2714 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2715 	}
2716 
2717 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2718 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Port %d already in quiesced state.\n",
2719 			      fc_port->port_hdl);
2720 		/*
2721 		 * Execute the callback function directly.
2722 		 */
2723 		cb_func(ctx, err);
2724 		goto out;
2725 	}
2726 
2727 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2728 
2729 	if (port_quiesce_ctx == NULL) {
2730 		err = -ENOMEM;
2731 		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
2732 			    fc_port->port_hdl);
2733 		goto out;
2734 	}
2735 
2736 	port_quiesce_ctx->quiesce_count = 0;
2737 	port_quiesce_ctx->ctx = ctx;
2738 	port_quiesce_ctx->cb_func = cb_func;
2739 
2740 	/*
2741 	 * Quiesce the LS queue.
2742 	 */
2743 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2744 					   nvmf_fc_adm_queue_quiesce_cb);
2745 	if (err != 0) {
2746 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2747 		goto out;
2748 	}
2749 	port_quiesce_ctx->quiesce_count++;
2750 
2751 	/*
2752 	 * Quiesce the IO queues.
2753 	 */
2754 	for (i = 0; i < fc_port->num_io_queues; i++) {
2755 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2756 						   port_quiesce_ctx,
2757 						   nvmf_fc_adm_queue_quiesce_cb);
2758 		if (err != 0) {
2759 			DEV_VERIFY(0);
2760 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2761 		}
2762 		port_quiesce_ctx->quiesce_count++;
2763 	}
2764 
2765 out:
2766 	if (port_quiesce_ctx && err != 0) {
2767 		free(port_quiesce_ctx);
2768 	}
2769 	return err;
2770 }
2771 
2772 /*
2773  * Initialize and add a HW port entry to the global
2774  * HW port list.
2775  */
2776 static void
2777 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2778 {
2779 	ASSERT_SPDK_FC_MASTER_THREAD();
2780 	struct spdk_nvmf_fc_port *fc_port = NULL;
2781 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2782 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2783 			api_data->api_args;
2784 	int err = 0;
2785 
2786 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2787 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2788 		err = EINVAL;
2789 		goto abort_port_init;
2790 	}
2791 
2792 	/*
2793 	 * 1. Check for duplicate initialization.
2794 	 */
2795 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2796 	if (fc_port != NULL) {
2797 		/* Port already exists, check if it has to be re-initialized */
2798 		err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args);
2799 		if (err) {
2800 			/*
2801 			 * In case of an error we do not want to free the fc_port
2802 			 * so we set that pointer to NULL.
2803 			 */
2804 			fc_port = NULL;
2805 		}
2806 		goto abort_port_init;
2807 	}
2808 
2809 	/*
2810 	 * 2. Get the memory to instantiate a fc port.
2811 	 */
2812 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2813 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2814 	if (fc_port == NULL) {
2815 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2816 		err = -ENOMEM;
2817 		goto abort_port_init;
2818 	}
2819 
2820 	/* assign the io_queues array */
2821 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2822 				     struct spdk_nvmf_fc_port));
2823 
2824 	/*
2825 	 * 3. Initialize the contents for the FC-port
2826 	 */
2827 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2828 
2829 	if (err != 0) {
2830 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2831 		DEV_VERIFY(!"Data initialization failed for fc_port");
2832 		goto abort_port_init;
2833 	}
2834 
2835 	/*
2836 	 * 4. Add this port to the global fc port list in the library.
2837 	 */
2838 	spdk_nvmf_fc_port_add(fc_port);
2839 
2840 abort_port_init:
2841 	if (err && fc_port) {
2842 		free(fc_port);
2843 	}
2844 	if (api_data->cb_func != NULL) {
2845 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2846 	}
2847 
2848 	free(arg);
2849 
2850 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d initialize done, rc = %d.\n",
2851 		      args->port_handle, err);
2852 }
2853 
2854 /*
2855  * Online a HW port.
2856  */
2857 static void
2858 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2859 {
2860 	ASSERT_SPDK_FC_MASTER_THREAD();
2861 	struct spdk_nvmf_fc_port *fc_port = NULL;
2862 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2863 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2864 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2865 			api_data->api_args;
2866 	int i = 0;
2867 	int err = 0;
2868 
2869 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2870 	if (fc_port) {
2871 		/* Set the port state to online */
2872 		err = spdk_nvmf_fc_port_set_online(fc_port);
2873 		if (err != 0) {
2874 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2875 			DEV_VERIFY(!"Hw port online failed");
2876 			goto out;
2877 		}
2878 
2879 		hwqp = &fc_port->ls_queue;
2880 		hwqp->context = NULL;
2881 		(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2882 
2883 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2884 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2885 			hwqp = &fc_port->io_queues[i];
2886 			hwqp->context = NULL;
2887 			(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2888 			spdk_nvmf_fc_poll_group_add_hwqp(hwqp);
2889 		}
2890 	} else {
2891 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2892 		err = -EINVAL;
2893 	}
2894 
2895 out:
2896 	if (api_data->cb_func != NULL) {
2897 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2898 	}
2899 
2900 	free(arg);
2901 
2902 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d online done, rc = %d.\n", args->port_handle,
2903 		      err);
2904 }
2905 
2906 /*
2907  * Offline a HW port.
2908  */
2909 static void
2910 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
2911 {
2912 	ASSERT_SPDK_FC_MASTER_THREAD();
2913 	struct spdk_nvmf_fc_port *fc_port = NULL;
2914 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2915 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2916 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
2917 			api_data->api_args;
2918 	int i = 0;
2919 	int err = 0;
2920 
2921 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2922 	if (fc_port) {
2923 		/* Set the port state to offline, if it is not already. */
2924 		err = spdk_nvmf_fc_port_set_offline(fc_port);
2925 		if (err != 0) {
2926 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
2927 			err = 0;
2928 			goto out;
2929 		}
2930 
2931 		hwqp = &fc_port->ls_queue;
2932 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2933 
2934 		/* Remove poller for all the io queues. */
2935 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2936 			hwqp = &fc_port->io_queues[i];
2937 			(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2938 			spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
2939 		}
2940 
2941 		/*
2942 		 * Delete all the nports. Ideally, the nports should have been purged
2943 		 * before the offline event, in which case, only a validation is required.
2944 		 */
2945 		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
2946 	} else {
2947 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2948 		err = -EINVAL;
2949 	}
2950 out:
2951 	if (api_data->cb_func != NULL) {
2952 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
2953 	}
2954 
2955 	free(arg);
2956 
2957 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d offline done, rc = %d.\n", args->port_handle,
2958 		      err);
2959 }
2960 
/*
 * Context carried through a subsystem pause/resume cycle when adding or
 * removing an nport's FC listening address; freed by the resume callback.
 */
struct nvmf_fc_add_rem_listener_ctx {
	bool add_listener;	/* true: add the listener; false: remove it */
	struct spdk_nvme_transport_id trid;	/* transport ID built from the nport WWNs */
};
2965 
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();

	/* Pause/resume cycle complete: release the listener context. */
	free(cb_arg);
}
2973 
2974 static void
2975 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
2976 {
2977 	ASSERT_SPDK_FC_MASTER_THREAD();
2978 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
2979 
2980 	if (ctx->add_listener) {
2981 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid);
2982 	} else {
2983 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
2984 	}
2985 	if (spdk_nvmf_subsystem_resume(subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
2986 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", subsystem->subnqn);
2987 		free(ctx);
2988 	}
2989 }
2990 
2991 static int
2992 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
2993 {
2994 	struct spdk_nvmf_tgt *tgt = spdk_nvmf_fc_get_tgt();
2995 	struct spdk_nvmf_subsystem *subsystem;
2996 
2997 	if (!tgt) {
2998 		SPDK_ERRLOG("No nvmf target defined\n");
2999 		return -EINVAL;
3000 	}
3001 
3002 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3003 	while (subsystem) {
3004 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3005 
3006 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3007 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3008 			if (ctx) {
3009 				ctx->add_listener = add;
3010 				spdk_nvmf_fc_create_trid(&ctx->trid,
3011 							 nport->fc_nodename.u.wwn,
3012 							 nport->fc_portname.u.wwn);
3013 				if (spdk_nvmf_subsystem_pause(subsystem,
3014 							      nvmf_fc_adm_subsystem_paused_cb,
3015 							      ctx)) {
3016 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3017 						    subsystem->subnqn);
3018 					free(ctx);
3019 				}
3020 			}
3021 		}
3022 
3023 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3024 	}
3025 
3026 	return 0;
3027 }
3028 
3029 /*
3030  * Create a Nport.
3031  */
3032 static void
3033 nvmf_fc_adm_evnt_nport_create(void *arg)
3034 {
3035 	ASSERT_SPDK_FC_MASTER_THREAD();
3036 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3037 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3038 			api_data->api_args;
3039 	struct spdk_nvmf_fc_nport *nport = NULL;
3040 	struct spdk_nvmf_fc_port *fc_port = NULL;
3041 	int err = 0;
3042 
3043 	/*
3044 	 * Get the physical port.
3045 	 */
3046 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3047 	if (fc_port == NULL) {
3048 		err = -EINVAL;
3049 		goto out;
3050 	}
3051 
3052 	/*
3053 	 * Check for duplicate initialization.
3054 	 */
3055 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3056 	if (nport != NULL) {
3057 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3058 			    args->port_handle);
3059 		err = -EINVAL;
3060 		goto out;
3061 	}
3062 
3063 	/*
3064 	 * Get the memory to instantiate a fc nport.
3065 	 */
3066 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3067 	if (nport == NULL) {
3068 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3069 			    args->nport_handle);
3070 		err = -ENOMEM;
3071 		goto out;
3072 	}
3073 
3074 	/*
3075 	 * Initialize the contents for the nport
3076 	 */
3077 	nport->nport_hdl    = args->nport_handle;
3078 	nport->port_hdl     = args->port_handle;
3079 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3080 	nport->fc_nodename  = args->fc_nodename;
3081 	nport->fc_portname  = args->fc_portname;
3082 	nport->d_id         = args->d_id;
3083 	nport->fc_port      = spdk_nvmf_fc_port_lookup(args->port_handle);
3084 
3085 	(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3086 	TAILQ_INIT(&nport->rem_port_list);
3087 	nport->rport_count = 0;
3088 	TAILQ_INIT(&nport->fc_associations);
3089 	nport->assoc_count = 0;
3090 
3091 	/*
3092 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3093 	 */
3094 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3095 
3096 	(void)spdk_nvmf_fc_port_add_nport(fc_port, nport);
3097 out:
3098 	if (err && nport) {
3099 		free(nport);
3100 	}
3101 
3102 	if (api_data->cb_func != NULL) {
3103 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3104 	}
3105 
3106 	free(arg);
3107 }
3108 
/*
 * Completion callback for each SPDK_FC_IT_DELETE event issued while deleting
 * an nport.  Once the last remote port is gone, removes the nport from its
 * FC port, frees it, invokes the caller's callback, and releases cb_data.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should have been torn down by now. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = spdk_nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		/* cb_data is released only on this last-rport path; earlier
		 * invocations must keep it alive for the remaining IT deletes. */
		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
3173 
3174 /*
3175  * Delete Nport.
3176  */
static void
nvmf_fc_adm_evnt_nport_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	int err = 0;		/* errors detected in this function */
	uint32_t rport_cnt = 0;	/* number of IT-delete events enqueued */
	int rc = 0;		/* result of the listener-removal step */

	/*
	 * Make sure that the nport exists.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
		err = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Begin nport tear down
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this nport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		err = -ENODEV;
		goto out;
	} else {
		/* nport partially created/deleted */
		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(0 != "Nport in zombie state");
		err = -ENODEV;
		goto out;
	}

	/*
	 * Remove this nport from listening addresses across subsystems
	 */
	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);

	if (0 != rc) {
		/* Listener removal failed: park the nport in zombie state. */
		err = spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
			    nport->nport_hdl);
		goto out;
	}

	/*
	 * Delete all the remote ports (if any) for the nport
	 */
	/* TODO - Need to do this with a "first" and a "next" accessor function
	 * for completeness. Look at app-subsystem as examples.
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* No rports to delete. Complete the nport deletion. */
		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
		goto out;
	}

	/* Enqueue one IT-delete per remote port; each event shares cb_data,
	 * and the last completion frees the nport and cb_data. */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));

		if (it_del_args == NULL) {
			err = -ENOMEM;
			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
				    rport_iter->rpi, rport_iter->s_id);
			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
			goto out;
		}

		rport_cnt++;
		it_del_args->port_handle = nport->port_hdl;
		it_del_args->nport_handle = nport->nport_hdl;
		it_del_args->cb_ctx = (void *)cb_data;
		it_del_args->rpi = rport_iter->rpi;
		it_del_args->s_id = rport_iter->s_id;

		spdk_nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
						  nvmf_fc_adm_delete_nport_cb);
	}

out:
	/* On failure, execute the callback function now */
	if ((err != 0) || (rc != 0)) {
		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
			    "rport_cnt:%d rc:%d.\n",
			    args->nport_handle, err, args->port_handle,
			    rport_cnt, rc);
		/* NOTE(review): if the it_del_args allocation failed after some
		 * IT-delete events were already enqueued with cb_data as their
		 * context, freeing cb_data here leaves those events with a
		 * dangling pointer — verify whether that path is reachable. */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
		}

	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
			      "NPort %d delete done succesfully, fc port:%d. "
			      "rport_cnt:%d\n",
			      args->nport_handle, args->port_handle, rport_cnt);
	}

	free(arg);
}
3308 
3309 /*
3310  * Process an PRLI/IT add.
3311  */
3312 static void
3313 nvmf_fc_adm_evnt_i_t_add(void *arg)
3314 {
3315 	ASSERT_SPDK_FC_MASTER_THREAD();
3316 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3317 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3318 			api_data->api_args;
3319 	struct spdk_nvmf_fc_nport *nport = NULL;
3320 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3321 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3322 	int err = 0;
3323 
3324 	/*
3325 	 * Make sure the nport port exists.
3326 	 */
3327 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3328 	if (nport == NULL) {
3329 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3330 		err = -EINVAL;
3331 		goto out;
3332 	}
3333 
3334 	/*
3335 	 * Check for duplicate i_t_add.
3336 	 */
3337 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3338 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3339 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3340 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3341 			err = -EEXIST;
3342 			goto out;
3343 		}
3344 	}
3345 
3346 	/*
3347 	 * Get the memory to instantiate the remote port
3348 	 */
3349 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3350 	if (rport == NULL) {
3351 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3352 		err = -ENOMEM;
3353 		goto out;
3354 	}
3355 
3356 	/*
3357 	 * Initialize the contents for the rport
3358 	 */
3359 	(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3360 	rport->s_id = args->s_id;
3361 	rport->rpi = args->rpi;
3362 	rport->fc_nodename = args->fc_nodename;
3363 	rport->fc_portname = args->fc_portname;
3364 
3365 	/*
3366 	 * Add remote port to nport
3367 	 */
3368 	if (spdk_nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3369 		DEV_VERIFY(!"Error while adding rport to list");
3370 	};
3371 
3372 	/*
3373 	 * TODO: Do we validate the initiators service parameters?
3374 	 */
3375 
3376 	/*
3377 	 * Get the targets service parameters from the library
3378 	 * to return back to the driver.
3379 	 */
3380 	args->target_prli_info = spdk_nvmf_fc_get_prli_service_params();
3381 
3382 out:
3383 	if (api_data->cb_func != NULL) {
3384 		/*
3385 		 * Passing pointer to the args struct as the first argument.
3386 		 * The cb_func should handle this appropriately.
3387 		 */
3388 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3389 	}
3390 
3391 	free(arg);
3392 
3393 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3394 		      "IT add on nport %d done, rc = %d.\n",
3395 		      args->nport_handle, err);
3396 }
3397 
3398 /**
3399  * Process a IT delete.
3400  */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;	/* number of rports scanned, for logging only */
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port).
	 */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	free(arg);
}
3527 
3528 /*
3529  * Process ABTS received
3530  */
3531 static void
3532 nvmf_fc_adm_evnt_abts_recv(void *arg)
3533 {
3534 	ASSERT_SPDK_FC_MASTER_THREAD();
3535 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3536 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3537 	struct spdk_nvmf_fc_nport *nport = NULL;
3538 	int err = 0;
3539 
3540 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3541 		      args->oxid, args->rxid);
3542 
3543 	/*
3544 	 * 1. Make sure the nport port exists.
3545 	 */
3546 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3547 	if (nport == NULL) {
3548 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3549 		err = -EINVAL;
3550 		goto out;
3551 	}
3552 
3553 	/*
3554 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3555 	 */
3556 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3557 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3558 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3559 			      args->rpi, args->oxid, args->rxid);
3560 		err = 0;
3561 		goto out;
3562 
3563 	}
3564 
3565 	/*
3566 	 * 3. Pass the received ABTS-LS to the library for handling.
3567 	 */
3568 	spdk_nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3569 
3570 out:
3571 	if (api_data->cb_func != NULL) {
3572 		/*
3573 		 * Passing pointer to the args struct as the first argument.
3574 		 * The cb_func should handle this appropriately.
3575 		 */
3576 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3577 	} else {
3578 		/* No callback set, free the args */
3579 		free(args);
3580 	}
3581 
3582 	free(arg);
3583 }
3584 
3585 /*
3586  * Callback function for hw port quiesce.
3587  */
3588 static void
3589 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3590 {
3591 	ASSERT_SPDK_FC_MASTER_THREAD();
3592 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3593 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3594 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3595 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3596 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3597 	struct spdk_nvmf_fc_port *fc_port = NULL;
3598 	char *dump_buf = NULL;
3599 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3600 
3601 	/*
3602 	 * Free the callback context struct.
3603 	 */
3604 	free(ctx);
3605 
3606 	if (err != 0) {
3607 		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
3608 		goto out;
3609 	}
3610 
3611 	if (args->dump_queues == false) {
3612 		/*
3613 		 * Queues need not be dumped.
3614 		 */
3615 		goto out;
3616 	}
3617 
3618 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3619 
3620 	/*
3621 	 * Get the fc port.
3622 	 */
3623 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3624 	if (fc_port == NULL) {
3625 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3626 		err = -EINVAL;
3627 		goto out;
3628 	}
3629 
3630 	/*
3631 	 * Allocate memory for the dump buffer.
3632 	 * This memory will be freed by FCT.
3633 	 */
3634 	dump_buf = (char *)calloc(1, dump_buf_size);
3635 	if (dump_buf == NULL) {
3636 		err = -ENOMEM;
3637 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3638 		goto out;
3639 	}
3640 	*args->dump_buf  = (uint32_t *)dump_buf;
3641 	dump_info.buffer = dump_buf;
3642 	dump_info.offset = 0;
3643 
3644 	/*
3645 	 * Add the dump reason to the top of the buffer.
3646 	 */
3647 	spdk_nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3648 
3649 	/*
3650 	 * Dump the hwqp.
3651 	 */
3652 	spdk_nvmf_fc_dump_all_queues(fc_port, &dump_info);
3653 
3654 out:
3655 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3656 		      args->port_handle, args->dump_queues, err);
3657 
3658 	if (cb_func != NULL) {
3659 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3660 	}
3661 }
3662 
3663 /*
3664  * HW port reset
3665 
3666  */
3667 static void
3668 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3669 {
3670 	ASSERT_SPDK_FC_MASTER_THREAD();
3671 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3672 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3673 			api_data->api_args;
3674 	struct spdk_nvmf_fc_port *fc_port = NULL;
3675 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3676 	int err = 0;
3677 
3678 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump\n", args->port_handle);
3679 
3680 	/*
3681 	 * Make sure the physical port exists.
3682 	 */
3683 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3684 	if (fc_port == NULL) {
3685 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3686 		err = -EINVAL;
3687 		goto out;
3688 	}
3689 
3690 	/*
3691 	 * Save the reset event args and the callback in a context struct.
3692 	 */
3693 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3694 
3695 	if (ctx == NULL) {
3696 		err = -ENOMEM;
3697 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3698 		goto fail;
3699 	}
3700 
3701 	ctx->reset_args = arg;
3702 	ctx->reset_cb_func = api_data->cb_func;
3703 
3704 	/*
3705 	 * Quiesce the hw port.
3706 	 */
3707 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3708 	if (err != 0) {
3709 		goto fail;
3710 	}
3711 
3712 	/*
3713 	 * Once the ports are successfully quiesced the reset processing
3714 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3715 	 */
3716 	return;
3717 fail:
3718 	free(ctx);
3719 
3720 out:
3721 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump done, rc = %d.\n", args->port_handle,
3722 		      err);
3723 
3724 	if (api_data->cb_func != NULL) {
3725 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3726 	}
3727 
3728 	free(arg);
3729 }
3730 
3731 /*
3732  * Process a link break event on a HW port.
3733  */
static void
nvmf_fc_adm_evnt_hw_port_link_break(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_nport *nport = NULL;
	uint32_t nport_deletes_sent = 0;	/* nport-delete events enqueued */
	uint32_t nport_deletes_skipped = 0;	/* nports not in CREATED state */
	struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL;
	char log_str[256];

	/*
	 * Get the fc port using the port handle.
	 */
	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n",
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Set the port state to offline, if it is not already.
	 */
	err = spdk_nvmf_fc_port_set_offline(fc_port);
	if (err != 0) {
		SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n",
			    fc_port->port_hdl, err);
		err = 0;
		goto out;
	}

	/*
	 * Delete all the nports, if any.
	 */
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			/* Skipped the nports that are not in CREATED state */
			if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
				nport_deletes_skipped++;
				continue;
			}

			/* Allocate memory for callback data.
			 * Each enqueued delete gets its own cb_data, freed by
			 * nvmf_fc_adm_hw_port_link_break_cb on completion. */
			cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data));
			if (NULL == cb_data) {
				SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n",
					    args->port_handle);
				err = -ENOMEM;
				goto out;
			}
			cb_data->args = args;
			cb_data->cb_func = api_data->cb_func;
			nport_del_args = &cb_data->nport_del_args;
			nport_del_args->port_handle = args->port_handle;
			nport_del_args->nport_handle = nport->nport_hdl;
			nport_del_args->cb_ctx = cb_data;

			spdk_nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE,
							  (void *)nport_del_args,
							  nvmf_fc_adm_hw_port_link_break_cb);

			nport_deletes_sent++;
		}
	}

	if (nport_deletes_sent == 0 && err == 0) {
		/*
		 * Mark the hwqps as offline and unregister the pollers.
		 */
		(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);
	}

out:
	snprintf(log_str, sizeof(log_str),
		 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n",
		 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) {
		/*
		 * No nport_deletes are sent, which would have eventually
		 * called the port_link_break callback. Therefore, call the
		 * port_link_break callback here.
		 */
		(void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err);
	}

	free(arg);
}
3835 
3836 static inline void
3837 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3838 {
3839 	if (spdk_nvmf_fc_get_master_thread()) {
3840 		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(), fn, args);
3841 	}
3842 }
3843 
3844 /*
3845  * Queue up an event in the SPDK masters event queue.
3846  * Used by the FC driver to notify the SPDK master of FC related events.
3847  */
3848 int
3849 spdk_nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3850 				  spdk_nvmf_fc_callback cb_func)
3851 {
3852 	int err = 0;
3853 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3854 
3855 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d.\n", event_type);
3856 
3857 	if (event_type >= SPDK_FC_EVENT_MAX) {
3858 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3859 		err = -EINVAL;
3860 		goto done;
3861 	}
3862 
3863 	if (args == NULL) {
3864 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3865 		err = -EINVAL;
3866 		goto done;
3867 	}
3868 
3869 	api_data = calloc(1, sizeof(*api_data));
3870 
3871 	if (api_data == NULL) {
3872 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3873 		err = -ENOMEM;
3874 		goto done;
3875 	}
3876 
3877 	api_data->api_args = args;
3878 	api_data->cb_func = cb_func;
3879 
3880 	switch (event_type) {
3881 	case SPDK_FC_HW_PORT_INIT:
3882 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_init,
3883 						 (void *)api_data);
3884 		break;
3885 
3886 	case SPDK_FC_HW_PORT_ONLINE:
3887 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_online,
3888 						 (void *)api_data);
3889 		break;
3890 
3891 	case SPDK_FC_HW_PORT_OFFLINE:
3892 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_offline,
3893 						 (void *)api_data);
3894 		break;
3895 
3896 	case SPDK_FC_NPORT_CREATE:
3897 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_create,
3898 						 (void *)api_data);
3899 		break;
3900 
3901 	case SPDK_FC_NPORT_DELETE:
3902 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_delete,
3903 						 (void *)api_data);
3904 		break;
3905 
3906 	case SPDK_FC_IT_ADD:
3907 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_add,
3908 						 (void *)api_data);
3909 		break;
3910 
3911 	case SPDK_FC_IT_DELETE:
3912 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_delete,
3913 						 (void *)api_data);
3914 		break;
3915 
3916 	case SPDK_FC_ABTS_RECV:
3917 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_abts_recv,
3918 						 (void *)api_data);
3919 		break;
3920 
3921 	case SPDK_FC_LINK_BREAK:
3922 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_link_break,
3923 						 (void *)api_data);
3924 		break;
3925 
3926 	case SPDK_FC_HW_PORT_RESET:
3927 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_reset,
3928 						 (void *)api_data);
3929 		break;
3930 
3931 	case SPDK_FC_UNRECOVERABLE_ERR:
3932 	default:
3933 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3934 		err = -EINVAL;
3935 		break;
3936 	}
3937 
3938 done:
3939 
3940 	if (err == 0) {
3941 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d done successfully\n", event_type);
3942 	} else {
3943 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3944 		if (api_data) {
3945 			free(api_data);
3946 		}
3947 	}
3948 
3949 	return err;
3950 }
3951 
/* Register the debug-log components used by the FC admin API and transport. */
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc_adm_api", SPDK_LOG_NVMF_FC_ADM_API);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc", SPDK_LOG_NVMF_FC)
3954