xref: /spdk/lib/nvmf/fc.c (revision 06b537bfdb4393dea857e204b85d8df46a351d8a)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf_transport.h"
41 #include "spdk/string.h"
42 #include "spdk/trace.h"
43 #include "spdk/util.h"
44 #include "spdk/likely.h"
45 #include "spdk/endian.h"
46 #include "spdk/log.h"
47 #include "spdk/thread.h"
48 
49 #include "spdk_internal/log.h"
50 
51 #include "nvmf_fc.h"
52 #include "fc_lld.h"
53 
54 #ifndef DEV_VERIFY
55 #define DEV_VERIFY assert
56 #endif
57 
58 #ifndef ASSERT_SPDK_FC_MASTER_THREAD
59 #define ASSERT_SPDK_FC_MASTER_THREAD() \
60         DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_master_thread());
61 #endif
62 
/*
 * PRLI service parameters.
 * Bit values advertised/parsed in the FC-NVMe PRLI service parameter page.
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
73 
/* Printable names for FC request states.
 * NOTE(review): entry order must mirror enum spdk_nvmf_fc_request_state
 * (declared in nvmf_fc.h) — verify against the header when adding states. */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
91 
92 #define OBJECT_NVMF_FC_IO				0xA0
93 
94 #define TRACE_GROUP_NVMF_FC				0x8
95 #define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
96 #define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
97 #define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
98 #define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
99 #define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
100 #define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
101 #define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
102 #define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
103 #define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
104 #define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
105 #define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
106 #define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
107 #define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
108 #define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
109 #define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
110 
/* Register the FC I/O object and one trace-point description per request
 * state with the SPDK trace framework. The descriptions here pair with the
 * TRACE_FC_REQ_* IDs recorded in nvmf_fc_record_req_trace_point(). */
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
160 
/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;			/* opaque per-API argument block */
	spdk_nvmf_fc_callback cb_func;	/* caller's completion callback */
};
168 
/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;	/* nport being deleted */
	uint8_t port_handle;			/* owning HW port handle */
	spdk_nvmf_fc_callback fc_cb_func;	/* caller's completion callback */
	void *fc_cb_ctx;			/* context handed back to fc_cb_func */
};
178 
/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the I-T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote (initiator) port */
	uint8_t port_handle;				/* owning HW port handle */
	spdk_nvmf_fc_callback fc_cb_func;		/* caller's completion callback */
	void *fc_cb_ctx;				/* context handed back to fc_cb_func */
};
189 
190 
/* Completion callback used when deleting all associations of an I-T nexus. */
typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;		/* local nport of the I-T nexus */
	struct spdk_nvmf_fc_remote_port_info *rport;	/* remote (initiator) port */
	uint8_t port_handle;				/* owning HW port handle */
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;	/* invoked when all assocs are gone */
	void *cb_ctx;					/* context handed back to cb_func */
};
203 
/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;				/* queues still pending quiesce */
	void *ctx;					/* caller context for cb_func */
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;	/* invoked when quiesce completes */
};
217 
/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;			/* opaque reset arguments */
	spdk_nvmf_fc_callback reset_cb_func;	/* invoked when the reset completes */
};
225 
/**
 * The callback structure for HW port link break event
 */
struct spdk_nvmf_fc_adm_port_link_break_cb_data {
	struct spdk_nvmf_hw_port_link_break_args *args;		/* original event arguments */
	struct spdk_nvmf_fc_nport_delete_args nport_del_args;	/* reused for nport teardown */
	spdk_nvmf_fc_callback cb_func;				/* caller's completion callback */
};
234 
/* FC-specific transport object wrapping the generic SPDK NVMe-oF transport. */
struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;	/* embedded generic transport */
	pthread_mutex_t lock;			/* protects transport-wide state */
};
239 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* All registered FC HW ports. */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which all FC admin/LS work runs. */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* Poll groups available for hwqp assignment, and their count. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
250 
struct spdk_thread *
nvmf_fc_get_master_thread(void)
{
	/* Returns the FC master thread global; NULL until it has been set. */
	return g_nvmf_fc_master_thread;
}
256 
257 static inline void
258 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
259 			       enum spdk_nvmf_fc_request_state state)
260 {
261 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
262 
263 	switch (state) {
264 	case SPDK_NVMF_FC_REQ_INIT:
265 		/* Start IO tracing */
266 		tpoint_id = TRACE_FC_REQ_INIT;
267 		break;
268 	case SPDK_NVMF_FC_REQ_READ_BDEV:
269 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
270 		break;
271 	case SPDK_NVMF_FC_REQ_READ_XFER:
272 		tpoint_id = TRACE_FC_REQ_READ_XFER;
273 		break;
274 	case SPDK_NVMF_FC_REQ_READ_RSP:
275 		tpoint_id = TRACE_FC_REQ_READ_RSP;
276 		break;
277 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
278 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
279 		break;
280 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
281 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
282 		break;
283 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
284 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
285 		break;
286 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
287 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
288 		break;
289 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
290 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
291 		break;
292 	case SPDK_NVMF_FC_REQ_NONE_RSP:
293 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
294 		break;
295 	case SPDK_NVMF_FC_REQ_SUCCESS:
296 		tpoint_id = TRACE_FC_REQ_SUCCESS;
297 		break;
298 	case SPDK_NVMF_FC_REQ_FAILED:
299 		tpoint_id = TRACE_FC_REQ_FAILED;
300 		break;
301 	case SPDK_NVMF_FC_REQ_ABORTED:
302 		tpoint_id = TRACE_FC_REQ_ABORTED;
303 		break;
304 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
305 		tpoint_id = TRACE_FC_REQ_ABORTED;
306 		break;
307 	case SPDK_NVMF_FC_REQ_PENDING:
308 		tpoint_id = TRACE_FC_REQ_PENDING;
309 		break;
310 	default:
311 		assert(0);
312 		break;
313 	}
314 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
315 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
316 				  (uint64_t)(&fc_req->req), 0);
317 	}
318 }
319 
320 static void
321 nvmf_fc_handle_connection_failure(void *arg)
322 {
323 	struct spdk_nvmf_fc_conn *fc_conn = arg;
324 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
325 
326 	if (!fc_conn->create_opd) {
327 		return;
328 	}
329 	api_data = &fc_conn->create_opd->u.add_conn;
330 
331 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
332 				    api_data->args.fc_conn, api_data->aq_conn);
333 }
334 
335 static void
336 nvmf_fc_handle_assoc_deletion(void *arg)
337 {
338 	struct spdk_nvmf_fc_conn *fc_conn = arg;
339 
340 	nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
341 				   fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
342 }
343 
344 static int
345 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
346 {
347 	uint32_t i;
348 	struct spdk_nvmf_fc_request *fc_req;
349 
350 	TAILQ_INIT(&hwqp->free_reqs);
351 	TAILQ_INIT(&hwqp->in_use_reqs);
352 
353 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
354 	if (hwqp->fc_reqs_buf == NULL) {
355 		SPDK_ERRLOG("create fc request pool failed\n");
356 		return -ENOMEM;
357 	}
358 
359 	for (i = 0; i < hwqp->rq_size; i++) {
360 		fc_req = hwqp->fc_reqs_buf + i;
361 
362 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
363 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
364 	}
365 
366 	return 0;
367 }
368 
369 static inline struct spdk_nvmf_fc_request *
370 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
371 {
372 	struct spdk_nvmf_fc_request *fc_req;
373 
374 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
375 		SPDK_ERRLOG("Alloc request buffer failed\n");
376 		return NULL;
377 	}
378 
379 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
380 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
381 
382 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
383 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
384 	TAILQ_INIT(&fc_req->abort_cbs);
385 	return fc_req;
386 }
387 
388 static inline void
389 nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
390 {
391 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
392 		/* Log an error for debug purpose. */
393 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
394 	}
395 
396 	/* set the magic to mark req as no longer valid. */
397 	fc_req->magic = 0xDEADBEEF;
398 
399 	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
400 	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
401 }
402 
403 static inline bool
404 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
405 {
406 	switch (fc_req->state) {
407 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
408 		return true;
409 	default:
410 		return false;
411 	}
412 }
413 
void
nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	/* Delegates to the low-level driver (fc_lld.h) — presumably (re)arms
	 * the receive-queue pair buffers for this hwqp; confirm in the LLD. */
	nvmf_fc_init_rqpair_buffers(hwqp);
}
419 
420 struct spdk_nvmf_fc_conn *
421 nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
422 {
423 	struct spdk_nvmf_fc_conn *fc_conn;
424 
425 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
426 		if (fc_conn->conn_id == conn_id) {
427 			return fc_conn;
428 		}
429 	}
430 
431 	return NULL;
432 }
433 
434 void
435 nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
436 {
437 	struct spdk_nvmf_fc_abts_ctx *ctx;
438 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
439 
440 	/* Clean up any pending sync callbacks */
441 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
442 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
443 		ctx = args->cb_info.cb_data;
444 		if (ctx) {
445 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
446 				free(ctx->sync_poller_args);
447 				free(ctx->abts_poller_args);
448 				free(ctx);
449 			}
450 		}
451 	}
452 
453 	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
454 }
455 
456 void
457 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
458 {
459 	hwqp->fc_port = fc_port;
460 
461 	/* clear counters */
462 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
463 
464 	nvmf_fc_init_poller_queues(hwqp);
465 	if (&fc_port->ls_queue != hwqp) {
466 		nvmf_fc_create_req_mempool(hwqp);
467 	}
468 
469 	nvmf_fc_init_q(hwqp);
470 	TAILQ_INIT(&hwqp->connection_list);
471 	TAILQ_INIT(&hwqp->sync_cbs);
472 	TAILQ_INIT(&hwqp->ls_pending_queue);
473 }
474 
475 static struct spdk_nvmf_fc_poll_group *
476 nvmf_fc_get_idlest_poll_group(void)
477 {
478 	uint32_t max_count = UINT32_MAX;
479 	struct spdk_nvmf_fc_poll_group *fgroup;
480 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
481 
482 	/* find poll group with least number of hwqp's assigned to it */
483 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
484 		if (fgroup->hwqp_count < max_count) {
485 			ret_fgroup = fgroup;
486 			max_count = fgroup->hwqp_count;
487 		}
488 	}
489 
490 	return ret_fgroup;
491 }
492 
493 void
494 nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
495 {
496 	struct spdk_nvmf_fc_poll_group *fgroup = NULL;
497 
498 	assert(hwqp);
499 	if (hwqp == NULL) {
500 		SPDK_ERRLOG("Error: hwqp is NULL\n");
501 		return;
502 	}
503 
504 	assert(g_nvmf_fgroup_count);
505 
506 	fgroup = nvmf_fc_get_idlest_poll_group();
507 	if (!fgroup) {
508 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
509 		return;
510 	}
511 
512 	hwqp->thread = fgroup->group.group->thread;
513 	hwqp->fgroup = fgroup;
514 	fgroup->hwqp_count++;
515 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
516 }
517 
518 void
519 nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
520 {
521 	assert(hwqp);
522 
523 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
524 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
525 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
526 
527 	if (!hwqp->fgroup) {
528 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
529 	} else {
530 		hwqp->fgroup->hwqp_count--;
531 		nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
532 	}
533 }
534 
/*
 * Note: This needs to be used only on master poller.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	/* Monotonically increasing ID; not thread-safe — master thread only. */
	static uint32_t u_id = 0;

	u_id++;
	return (uint64_t)u_id;
}
545 
546 static void
547 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
548 {
549 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
550 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;
551 
552 	ctx->hwqps_responded++;
553 
554 	if (ctx->hwqps_responded < ctx->num_hwqps) {
555 		/* Wait for all pollers to complete. */
556 		return;
557 	}
558 
559 	/* Free the queue sync poller args. */
560 	free(ctx->sync_poller_args);
561 
562 	/* Mark as queue synced */
563 	ctx->queue_synced = true;
564 
565 	/* Reset the ctx values */
566 	ctx->hwqps_responded = 0;
567 	ctx->handled = false;
568 
569 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
570 		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
571 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
572 
573 	/* Resend ABTS to pollers */
574 	args = ctx->abts_poller_args;
575 	for (int i = 0; i < ctx->num_hwqps; i++) {
576 		poller_arg = args + i;
577 		nvmf_fc_poller_api_func(poller_arg->hwqp,
578 					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
579 					poller_arg);
580 	}
581 }
582 
583 static int
584 nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
585 {
586 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
587 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;
588 
589 	/* check if FC driver supports queue sync */
590 	if (!nvmf_fc_q_sync_available()) {
591 		return -EPERM;
592 	}
593 
594 	assert(ctx);
595 	if (!ctx) {
596 		SPDK_ERRLOG("NULL ctx pointer");
597 		return -EINVAL;
598 	}
599 
600 	/* Reset the ctx values */
601 	ctx->hwqps_responded = 0;
602 
603 	args = calloc(ctx->num_hwqps,
604 		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
605 	if (!args) {
606 		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
607 			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
608 		return -ENOMEM;
609 	}
610 	ctx->sync_poller_args = args;
611 
612 	abts_args = ctx->abts_poller_args;
613 	for (int i = 0; i < ctx->num_hwqps; i++) {
614 		abts_poller_arg = abts_args + i;
615 		poller_arg = args + i;
616 		poller_arg->u_id = ctx->u_id;
617 		poller_arg->hwqp = abts_poller_arg->hwqp;
618 		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
619 		poller_arg->cb_info.cb_data = ctx;
620 		poller_arg->cb_info.cb_thread = spdk_get_thread();
621 
622 		/* Send a Queue sync message to interested pollers */
623 		nvmf_fc_poller_api_func(poller_arg->hwqp,
624 					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
625 					poller_arg);
626 	}
627 
628 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
629 		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
630 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
631 
632 	/* Post Marker to queue to track aborted request */
633 	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);
634 
635 	return 0;
636 }
637 
638 static void
639 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
640 {
641 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
642 	struct spdk_nvmf_fc_nport *nport  = NULL;
643 
644 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
645 		ctx->handled = true;
646 	}
647 
648 	ctx->hwqps_responded++;
649 
650 	if (ctx->hwqps_responded < ctx->num_hwqps) {
651 		/* Wait for all pollers to complete. */
652 		return;
653 	}
654 
655 	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
656 
657 	if (ctx->nport != nport) {
658 		/* Nport can be deleted while this abort is being
659 		 * processed by the pollers.
660 		 */
661 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
662 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
663 	} else {
664 		if (!ctx->handled) {
665 			/* Try syncing the queues and try one more time */
666 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
667 				SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
668 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
669 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
670 				return;
671 			} else {
672 				/* Send Reject */
673 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
674 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
675 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
676 			}
677 		} else {
678 			/* Send Accept */
679 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
680 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
681 					    0, NULL, NULL);
682 		}
683 	}
684 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
685 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
686 
687 	free(ctx->abts_poller_args);
688 	free(ctx);
689 }
690 
/* Handle a received ABTS (abort) frame for the given nport/rpi/exchange.
 * Collects the set of hwqps that carry connections for this rpi, builds a
 * shared ABTS context, and fans the abort out to each hwqp's poller; the
 * responses are gathered by nvmf_fc_abts_handled_cb(). On any failure a
 * BLS reject is transmitted immediately. */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate: collect each hwqp that hosts a connection on this rpi
	 * exactly once, across all of the nport's associations. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	/* No connection on this rpi anywhere -> nothing to abort. */
	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	/* Cache identifying fields: the nport may be deleted while pollers
	 * work, so the callback relies on these copies. */
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	/* Fan the ABTS out to every involved hwqp's poller; responses are
	 * collected in nvmf_fc_abts_handled_cb on this thread. */
	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	/* ctx was not allocated (or allocation failed), so only args/hwqps
	 * need to be released here; free(NULL) is harmless. */
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
791 
792 /*** Accessor functions for the FC structures - BEGIN */
793 /*
794  * Returns true if the port is in offline state.
795  */
796 bool
797 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
798 {
799 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
800 		return true;
801 	}
802 
803 	return false;
804 }
805 
806 /*
807  * Returns true if the port is in online state.
808  */
809 bool
810 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
811 {
812 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
813 		return true;
814 	}
815 
816 	return false;
817 }
818 
819 int
820 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
821 {
822 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
823 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
824 		return 0;
825 	}
826 
827 	return -EPERM;
828 }
829 
830 int
831 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
832 {
833 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
834 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
835 		return 0;
836 	}
837 
838 	return -EPERM;
839 }
840 
841 int
842 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
843 {
844 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
845 		hwqp->state = SPDK_FC_HWQP_ONLINE;
846 		/* reset some queue counters */
847 		hwqp->num_conns = 0;
848 		return nvmf_fc_set_q_online_state(hwqp, true);
849 	}
850 
851 	return -EPERM;
852 }
853 
854 int
855 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
856 {
857 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
858 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
859 		return nvmf_fc_set_q_online_state(hwqp, false);
860 	}
861 
862 	return -EPERM;
863 }
864 
void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	/* Register the port on the global list; no duplicate check is done. */
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}
870 
871 struct spdk_nvmf_fc_port *
872 nvmf_fc_port_lookup(uint8_t port_hdl)
873 {
874 	struct spdk_nvmf_fc_port *fc_port = NULL;
875 
876 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
877 		if (fc_port->port_hdl == port_hdl) {
878 			return fc_port;
879 		}
880 	}
881 	return NULL;
882 }
883 
884 static void
885 nvmf_fc_port_cleanup(void)
886 {
887 	struct spdk_nvmf_fc_port *fc_port, *tmp;
888 	struct spdk_nvmf_fc_hwqp *hwqp;
889 	uint32_t i;
890 
891 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
892 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
893 		for (i = 0; i < fc_port->num_io_queues; i++) {
894 			hwqp = &fc_port->io_queues[i];
895 			if (hwqp->fc_reqs_buf) {
896 				free(hwqp->fc_reqs_buf);
897 			}
898 		}
899 		free(fc_port);
900 	}
901 }
902 
uint32_t
nvmf_fc_get_prli_service_params(void)
{
	/* Advertise discovery service + NVMe target function in PRLI. */
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}
908 
909 int
910 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
911 		       struct spdk_nvmf_fc_nport *nport)
912 {
913 	if (fc_port) {
914 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
915 		fc_port->num_nports++;
916 		return 0;
917 	}
918 
919 	return -EINVAL;
920 }
921 
922 int
923 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
924 			  struct spdk_nvmf_fc_nport *nport)
925 {
926 	if (fc_port && nport) {
927 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
928 		fc_port->num_nports--;
929 		return 0;
930 	}
931 
932 	return -EINVAL;
933 }
934 
935 static struct spdk_nvmf_fc_nport *
936 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
937 {
938 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
939 
940 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
941 		if (fc_nport->nport_hdl == nport_hdl) {
942 			return fc_nport;
943 		}
944 	}
945 
946 	return NULL;
947 }
948 
/* Resolve (port handle, nport handle) to an nport object, or NULL. */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	if (fc_port == NULL) {
		/* No such port, hence no nport. */
		return NULL;
	}

	return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
}
961 
962 static inline int
963 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
964 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
965 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
966 {
967 	struct spdk_nvmf_fc_nport *n_port;
968 	struct spdk_nvmf_fc_remote_port_info *r_port;
969 
970 	assert(hwqp);
971 	if (hwqp == NULL) {
972 		SPDK_ERRLOG("Error: hwqp is NULL\n");
973 		return -EINVAL;
974 	}
975 	assert(nport);
976 	if (nport == NULL) {
977 		SPDK_ERRLOG("Error: nport is NULL\n");
978 		return -EINVAL;
979 	}
980 	assert(rport);
981 	if (rport == NULL) {
982 		SPDK_ERRLOG("Error: rport is NULL\n");
983 		return -EINVAL;
984 	}
985 
986 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
987 		if (n_port->d_id == d_id) {
988 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
989 				if (r_port->s_id == s_id) {
990 					*nport = n_port;
991 					*rport = r_port;
992 					return 0;
993 				}
994 			}
995 			break;
996 		}
997 	}
998 
999 	return -ENOENT;
1000 }
1001 
1002 /* Returns true if the Nport is empty of all rem_ports */
1003 bool
1004 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1005 {
1006 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1007 		assert(nport->rport_count == 0);
1008 		return true;
1009 	} else {
1010 		return false;
1011 	}
1012 }
1013 
1014 int
1015 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1016 			enum spdk_nvmf_fc_object_state state)
1017 {
1018 	if (nport) {
1019 		nport->nport_state = state;
1020 		return 0;
1021 	} else {
1022 		return -EINVAL;
1023 	}
1024 }
1025 
bool
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	/* NOTE(review): declared bool but returns 0 / -EINVAL like an int API.
	 * -EINVAL converts to true, so a caller testing the bool sees
	 * "success" on the error path. The return type should likely be int,
	 * but that needs a matching change in nvmf_fc.h — flagged only. */
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1038 
/*
 * Unlink a remote port from the nport's rem_port_list and decrement
 * rport_count. The caller is responsible for freeing rem_port.
 *
 * NOTE(review): same bool-vs-int mismatch as nvmf_fc_nport_add_rem_port:
 * returns 0 (false) on success and -EINVAL (true) on failure. Verify
 * caller expectations before altering the return type.
 */
bool
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1051 
1052 int
1053 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1054 			enum spdk_nvmf_fc_object_state state)
1055 {
1056 	if (rport) {
1057 		rport->rport_state = state;
1058 		return 0;
1059 	} else {
1060 		return -EINVAL;
1061 	}
1062 }
1063 int
1064 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1065 			enum spdk_nvmf_fc_object_state state)
1066 {
1067 	if (assoc) {
1068 		assoc->assoc_state = state;
1069 		return 0;
1070 	} else {
1071 		return -EINVAL;
1072 	}
1073 }
1074 
1075 static struct spdk_nvmf_fc_association *
1076 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1077 {
1078 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1079 	struct spdk_nvmf_fc_conn *fc_conn;
1080 
1081 	if (!qpair) {
1082 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1083 		return NULL;
1084 	}
1085 
1086 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1087 
1088 	return fc_conn->fc_assoc;
1089 }
1090 
1091 bool
1092 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1093 		       struct spdk_nvmf_ctrlr *ctrlr)
1094 {
1095 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1096 	struct spdk_nvmf_fc_association *assoc = NULL;
1097 
1098 	if (!ctrlr) {
1099 		return false;
1100 	}
1101 
1102 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1103 	if (!fc_nport) {
1104 		return false;
1105 	}
1106 
1107 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1108 	if (assoc && assoc->tgtport == fc_nport) {
1109 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
1110 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1111 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1112 			      nport_hdl);
1113 		return true;
1114 	}
1115 	return false;
1116 }
1117 
1118 static inline bool
1119 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1120 {
1121 	switch (fc_req->state) {
1122 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1123 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1124 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1125 		return true;
1126 	default:
1127 		return false;
1128 	}
1129 }
1130 
1131 static inline bool
1132 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1133 {
1134 	struct spdk_nvmf_request *tmp = NULL;
1135 
1136 	STAILQ_FOREACH(tmp, &fc_req->hwqp->fgroup->group.pending_buf_queue, buf_link) {
1137 		if (tmp == &fc_req->req) {
1138 			return true;
1139 		}
1140 	}
1141 	return false;
1142 }
1143 
/*
 * Poller-thread message handler that "aborts" a request which is in the
 * bdev layer. Only AER requests are actively cancelled; every other
 * admin/fabric command supported here is short-lived and is simply
 * allowed to run to completion (see the notes below).
 */
static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	/* Scan the controller's outstanding AER slots for this request. */
	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
		if (ctrlr->aer_req[i] == &fc_req->req) {
			SPDK_NOTICELOG("Abort AER request\n");
			/* NOTE(review): this frees the qpair's AER state each time a
			 * slot matches; confirm nvmf_qpair_free_aer is safe to call
			 * more than once if multiple slots could match. */
			nvmf_qpair_free_aer(fc_req->req.qpair);
		}
	}
}
1175 
1176 void
1177 nvmf_fc_request_abort_complete(void *arg1)
1178 {
1179 	struct spdk_nvmf_fc_request *fc_req =
1180 		(struct spdk_nvmf_fc_request *)arg1;
1181 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1182 
1183 	/* Request abort completed. Notify all the callbacks */
1184 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1185 		/* Notify */
1186 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1187 		/* Remove */
1188 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1189 		/* free */
1190 		free(ctx);
1191 	}
1192 
1193 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1194 		       fc_req_state_strs[fc_req->state]);
1195 
1196 	_nvmf_fc_request_free(fc_req);
1197 }
1198 
/*
 * Begin aborting an FC request. Depending on where the request currently
 * lives (bdev layer, HBA data transfer, buffer allocation, pending
 * queue), the abort is either forwarded to that layer or completed
 * immediately via the poller API. An optional callback is queued on
 * fc_req->abort_cbs and fires from nvmf_fc_request_abort_complete().
 *
 * send_abts selects whether an ABTS should go on the wire when the
 * exchange is eventually torn down.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		/* A prior abort is in flight; the new callback (if any) is
		 * already queued and will fire when that abort completes. */
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch based on where the request currently is. */
	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
			      spdk_nvmf_request, buf_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("Request in invalid state\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1275 
1276 static int
1277 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1278 {
1279 	uint32_t length = fc_req->req.length;
1280 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1281 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1282 	struct spdk_nvmf_transport *transport = group->transport;
1283 
1284 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1285 		return -ENOMEM;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 static int
1292 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1293 {
1294 	/* Allocate an XCHG if we dont use send frame for this command. */
1295 	if (!nvmf_fc_use_send_frame(&fc_req->req)) {
1296 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1297 		if (!fc_req->xchg) {
1298 			fc_req->hwqp->counters.no_xchg++;
1299 			printf("NO XCHGs!\n");
1300 			goto pending;
1301 		}
1302 	}
1303 
1304 	if (fc_req->req.length) {
1305 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1306 			fc_req->hwqp->counters.buf_alloc_err++;
1307 			goto pending;
1308 		}
1309 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1310 	}
1311 
1312 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1313 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "WRITE CMD.\n");
1314 
1315 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1316 
1317 		if (nvmf_fc_recv_data(fc_req)) {
1318 			/* Dropped return success to caller */
1319 			fc_req->hwqp->counters.unexpected_err++;
1320 			_nvmf_fc_request_free(fc_req);
1321 		}
1322 	} else {
1323 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "READ/NONE CMD\n");
1324 
1325 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1326 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1327 		} else {
1328 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1329 		}
1330 		spdk_nvmf_request_exec(&fc_req->req);
1331 	}
1332 
1333 	return 0;
1334 
1335 pending:
1336 	if (fc_req->xchg) {
1337 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1338 		fc_req->xchg = NULL;
1339 	}
1340 
1341 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1342 
1343 	return -EAGAIN;
1344 }
1345 
/*
 * Validate an inbound NVMe command IU and start executing it.
 *
 * Checks IU format, transfer direction, connection id, association and
 * qpair state, and the MDTS bound before allocating an fc_request and
 * calling nvmf_fc_request_execute(). If execute defers (-EAGAIN), the
 * request is parked on the poll group's pending buffer queue.
 *
 * Returns 0 on accept/queue, or a negative errno describing why the
 * frame was rejected (a matching hwqp counter is bumped for most
 * rejections).
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported over FC-NVMe here. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	/* Populate the request from the IU and frame header. The on-wire
	 * fields are big-endian; ox_id/s_id/d_id are converted in place
	 * (24-bit FC addresses, hence the >> 8). */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		/* Resources exhausted; retry from the pending queue later. */
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
	}

	return 0;
}
1436 
1437 /*
1438  * These functions are called from the FC LLD
1439  */
1440 
1441 void
1442 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1443 {
1444 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1445 	struct spdk_nvmf_fc_poll_group *fgroup = hwqp->fgroup;
1446 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1447 	struct spdk_nvmf_transport *transport = group->transport;
1448 
1449 	if (!fc_req) {
1450 		return;
1451 	}
1452 
1453 	if (fc_req->xchg) {
1454 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1455 		fc_req->xchg = NULL;
1456 	}
1457 
1458 	/* Release IO buffers */
1459 	if (fc_req->req.data_from_pool) {
1460 		spdk_nvmf_request_free_buffers(&fc_req->req, group, transport);
1461 	}
1462 	fc_req->req.data = NULL;
1463 	fc_req->req.iovcnt  = 0;
1464 
1465 	/* Release Q buffer */
1466 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1467 
1468 	/* Free Fc request */
1469 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1470 }
1471 
/*
 * Transition an FC request to a new state, recording a trace point for
 * the transition and logging old/new state names at debug level.
 */
void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	/* Catch use of a request that has already been freed/poisoned. */
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1485 
1486 char *
1487 nvmf_fc_request_get_state_str(int state)
1488 {
1489 	static char *unk_str = "unknown";
1490 
1491 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1492 		fc_req_state_strs[state] : unk_str);
1493 }
1494 
/*
 * Entry point for a received FC frame on a hwqp. Resolves the nport and
 * rport from the frame's d_id/s_id, verifies both are in CREATED state,
 * then dispatches: LS requests go to the LS module (or the LS pending
 * queue when no XCHG is free), NVMe command frames go to
 * nvmf_fc_hwqp_handle_request(), and anything else is dropped.
 *
 * Returns 0 on success or a negative errno; counters record each drop
 * reason.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* FC addresses are 24-bit big-endian fields; convert via locals
	 * because from_be32() needs an addressable object. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		if (nport == NULL) {
			SPDK_ERRLOG("Nport not found. Dropping\n");
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("Rport not found. Dropping\n");
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	/* Both ports must be fully created (not mid-create/delete). */
	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s state not created. Dropping\n",
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1599 
1600 void
1601 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1602 {
1603 	struct spdk_nvmf_request *req = NULL, *tmp;
1604 	struct spdk_nvmf_fc_request *fc_req;
1605 	int budget = 64;
1606 
1607 	if (!hwqp->fgroup) {
1608 		/* LS queue is tied to acceptor_poll group and LS pending requests
1609 		 * are stagged and processed using hwqp->ls_pending_queue.
1610 		 */
1611 		return;
1612 	}
1613 
1614 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1615 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1616 		if (!nvmf_fc_request_execute(fc_req)) {
1617 			/* Succesfuly posted, Delete from pending. */
1618 			STAILQ_REMOVE_HEAD(&hwqp->fgroup->group.pending_buf_queue, buf_link);
1619 		}
1620 
1621 		if (budget) {
1622 			budget--;
1623 		} else {
1624 			return;
1625 		}
1626 	}
1627 }
1628 
/*
 * Retry LS requests that were queued because no XCHG was available.
 * Each entry's nport/rport is re-validated (they may have been deleted
 * while queued); invalid entries are dropped and their RQ buffer is
 * returned to the chip. Processing stops at the first XCHG shortage
 * since later entries would fail the same way.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1678 
/*
 * Transmit the response for a completed FC request. Sends a full
 * extended response (ERSP) IU when nvmf_fc_send_ersp_required() says
 * one is needed; otherwise sends the cheaper zero-length response.
 *
 * Returns the result of nvmf_fc_xmt_rsp() (0 on success).
 */
int
nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
{
	int rc = 0;
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	uint16_t ersp_len = 0;

	/* set sq head value in resp */
	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);

	/* Increment connection responses */
	fc_conn->rsp_count++;

	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
				       fc_req->transfered_len)) {
		/* Fill ERSP Len (in 32-bit words, big-endian on the wire) */
		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
				    sizeof(uint32_t)));
		fc_req->ersp.ersp_len = ersp_len;

		/* Fill RSN (response sequence number, per connection) */
		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
		fc_conn->rsn++;

		/* Fill transfer length */
		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting ERSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
				     sizeof(struct spdk_nvmf_fc_ersp_iu));
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting RSP.\n");
		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
	}

	return rc;
}
1719 
/*
 * Decide whether this completion needs a full extended response (ERSP)
 * IU instead of the cheap zero-length response.
 */
bool
nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
			   uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	/* Whole status field minus the phase bit (bit 0). */
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) Transfer length not equal to CMD IU length
	 *
	 * NOTE(review): an "SQ == 90% full" criterion was listed here
	 * historically but is not implemented in the check below.
	 */

	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1748 
/*
 * NVMf transport completion callback for an FC request. Routes the
 * completed request to: deferred abort-complete (if aborted), a read
 * data transfer (successful controller-to-host with data), or direct
 * response transmission. On any transmit error the request is freed.
 *
 * Always returns 0 per the transport ops contract.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		/* Read succeeded: ship the data; the response follows the
		 * data-transfer completion. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1784 
1785 struct spdk_nvmf_tgt *
1786 nvmf_fc_get_tgt(void)
1787 {
1788 	if (g_nvmf_ftransport) {
1789 		return g_nvmf_ftransport->transport.tgt;
1790 	}
1791 	return NULL;
1792 }
1793 
1794 /*
1795  * FC Transport Public API begins here
1796  */
1797 
1798 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1799 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1800 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1801 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1802 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1803 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1804 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1805 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1806 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1807 
1808 static void
1809 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1810 {
1811 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1812 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1813 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1814 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1815 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1816 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1817 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1818 }
1819 
/*
 * Create the (singleton) FC transport: validates options and core
 * availability, allocates the global transport object, initializes its
 * lock, and brings up the low level FC driver.
 *
 * Returns the embedded spdk_nvmf_transport on success, NULL on any
 * failure (duplicate create, too few cores, bad io_unit_size, OOM,
 * mutex init failure).
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr - 1,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* Only one FC transport instance is supported. */
	if (g_nvmf_ftransport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	/* The per-IO SGE count implied by the options must fit the limit. */
	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fgroup_count = 0;
	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));

	if (!g_nvmf_ftransport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_ftransport);
		g_nvmf_ftransport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_ftransport->transport;
}
1873 
1874 static int
1875 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1876 {
1877 	if (transport) {
1878 		struct spdk_nvmf_fc_transport *ftransport;
1879 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
1880 
1881 		ftransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1882 
1883 		free(ftransport);
1884 
1885 		/* clean up any FC poll groups still around */
1886 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
1887 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1888 			free(fgroup);
1889 		}
1890 		g_nvmf_fgroup_count = 0;
1891 
1892 		/* low level FC driver clean up */
1893 		nvmf_fc_lld_fini();
1894 
1895 		nvmf_fc_port_cleanup();
1896 	}
1897 
1898 	return 0;
1899 }
1900 
/*
 * Transport listen hook. No per-trid setup is performed here; always
 * reports success. NOTE(review): FC listeners appear to be managed
 * through the FC port path rather than this hook — confirm.
 */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	return 0;
}
1907 
/* Transport stop-listen hook; intentionally a no-op for FC. */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
1913 
1914 static uint32_t
1915 nvmf_fc_accept(struct spdk_nvmf_transport *transport)
1916 {
1917 	struct spdk_nvmf_fc_port *fc_port = NULL;
1918 	uint32_t count = 0;
1919 	static bool start_lld = false;
1920 
1921 	if (spdk_unlikely(!start_lld)) {
1922 		start_lld  = true;
1923 		nvmf_fc_lld_start();
1924 	}
1925 
1926 	/* poll the LS queue on each port */
1927 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
1928 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
1929 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
1930 		}
1931 	}
1932 
1933 	return count;
1934 }
1935 
1936 static void
1937 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
1938 		 struct spdk_nvme_transport_id *trid,
1939 		 struct spdk_nvmf_discovery_log_page_entry *entry)
1940 {
1941 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
1942 	entry->adrfam = trid->adrfam;
1943 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
1944 
1945 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
1946 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
1947 }
1948 
1949 static struct spdk_nvmf_transport_poll_group *
1950 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
1951 {
1952 	struct spdk_nvmf_fc_poll_group *fgroup;
1953 	struct spdk_nvmf_fc_transport *ftransport =
1954 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1955 
1956 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
1957 	if (!fgroup) {
1958 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
1959 		return NULL;
1960 	}
1961 
1962 	TAILQ_INIT(&fgroup->hwqp_list);
1963 
1964 	pthread_mutex_lock(&ftransport->lock);
1965 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
1966 	g_nvmf_fgroup_count++;
1967 	pthread_mutex_unlock(&ftransport->lock);
1968 
1969 	return &fgroup->group;
1970 }
1971 
1972 static void
1973 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
1974 {
1975 	struct spdk_nvmf_fc_poll_group *fgroup;
1976 	struct spdk_nvmf_fc_transport *ftransport =
1977 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
1978 
1979 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
1980 	pthread_mutex_lock(&ftransport->lock);
1981 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
1982 	g_nvmf_fgroup_count--;
1983 	pthread_mutex_unlock(&ftransport->lock);
1984 
1985 	free(fgroup);
1986 }
1987 
/*
 * Bind a new qpair (FC connection) to a hwqp within this poll group.
 * Picks the first hwqp belonging to the same FC port as the connection's
 * association, reserves a connection id on it, records the association
 * id for admin connections, and hands the add off to the poller API.
 *
 * Returns 0 on success, -1 when no suitable hwqp or connection id is
 * available.
 */
static int
nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
		       struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_conn *fc_conn;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
	bool hwqp_found = false;

	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	/* Find a hwqp in this group that lives on the connection's FC port. */
	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
			hwqp_found = true;
			break;
		}
	}

	if (!hwqp_found) {
		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
		goto err;
	}

	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
					 &fc_conn->conn_id,
					 fc_conn->max_queue_depth)) {
		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
		goto err;
	}

	fc_conn->hwqp = hwqp;

	/* If this is for ADMIN connection, then update assoc ID. */
	if (fc_conn->qpair.qid == 0) {
		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
	}

	/* Complete the add on the hwqp's poller thread. */
	api_data = &fc_conn->create_opd->u.add_conn;
	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
	return 0;
err:
	return -1;
}
2033 
2034 static int
2035 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2036 {
2037 	uint32_t count = 0;
2038 	struct spdk_nvmf_fc_poll_group *fgroup;
2039 	struct spdk_nvmf_fc_hwqp *hwqp;
2040 
2041 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2042 
2043 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2044 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2045 			count += nvmf_fc_process_queue(hwqp);
2046 		}
2047 	}
2048 
2049 	return (int) count;
2050 }
2051 
2052 static int
2053 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2054 {
2055 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2056 
2057 	if (!fc_req->is_aborted) {
2058 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2059 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2060 	} else {
2061 		nvmf_fc_request_abort_complete(fc_req);
2062 	}
2063 	return 0;
2064 }
2065 
2066 
2067 static void
2068 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
2069 {
2070 	struct spdk_nvmf_fc_conn *fc_conn;
2071 
2072 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2073 
2074 	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
2075 		/* QP creation failure in FC tranport. Cleanup. */
2076 		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
2077 				     nvmf_fc_handle_connection_failure, fc_conn);
2078 	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
2079 		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
2080 		/* Admin connection */
2081 		spdk_thread_send_msg(nvmf_fc_get_master_thread(),
2082 				     nvmf_fc_handle_assoc_deletion, fc_conn);
2083 	}
2084 }
2085 
2086 static int
2087 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2088 			    struct spdk_nvme_transport_id *trid)
2089 {
2090 	struct spdk_nvmf_fc_conn *fc_conn;
2091 
2092 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2093 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2094 	return 0;
2095 }
2096 
2097 static int
2098 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2099 			     struct spdk_nvme_transport_id *trid)
2100 {
2101 	struct spdk_nvmf_fc_conn *fc_conn;
2102 
2103 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2104 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2105 	return 0;
2106 }
2107 
2108 static int
2109 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2110 			      struct spdk_nvme_transport_id *trid)
2111 {
2112 	struct spdk_nvmf_fc_conn *fc_conn;
2113 
2114 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2115 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2116 	return 0;
2117 }
2118 
static void
nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
			    struct spdk_nvmf_request *req)
{
	/* No transport-level abort is attempted: the request is simply
	 * completed.  NOTE(review): per the transport-ops contract, req here
	 * appears to be the Abort command itself — confirm against the
	 * generic nvmf layer before relying on this.
	 */
	spdk_nvmf_request_complete(req);
}
2125 
/* FC transport operations table registered with the generic nvmf layer. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	/* SPDK_NVMF_TRTYPE_FC lives in a different enum, hence the cast. */
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* Listener management. */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	/* Discovery log entry generation. */
	.listener_discover = nvmf_fc_discover,

	/* Poll group lifecycle. */
	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	/* Request and qpair handling. */
	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
	.qpair_abort_request = nvmf_fc_qpair_abort_request,
};
2152 
2153 /*
2154  * Re-initialize the FC-Port after an offline event.
2155  * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains
2156  * unchanged after the first initialization.
2157  *
2158  */
2159 static int
2160 nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port,
2161 				    struct spdk_nvmf_fc_hw_port_init_args *args)
2162 {
2163 	uint32_t i;
2164 
2165 	/* Verify that the port was previously in offline or quiesced state */
2166 	if (nvmf_fc_port_is_online(fc_port)) {
2167 		SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle);
2168 		return -EINVAL;
2169 	}
2170 
2171 	/* Reinit information in new LS queue from previous queue */
2172 	nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue);
2173 
2174 	fc_port->fcp_rq_id = args->fcp_rq_id;
2175 
2176 	/* Initialize the LS queue */
2177 	fc_port->ls_queue.queues = args->ls_queue;
2178 	nvmf_fc_init_poller_queues(fc_port->ls_queue.queues);
2179 
2180 	for (i = 0; i < fc_port->num_io_queues; i++) {
2181 		/* Reinit information in new IO queue from previous queue */
2182 		nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i],
2183 						  args->io_queues[i]);
2184 		fc_port->io_queues[i].queues = args->io_queues[i];
2185 		/* Initialize the IO queues */
2186 		nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues);
2187 	}
2188 
2189 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2190 
2191 	/* Validate the port information */
2192 	DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list));
2193 	DEV_VERIFY(fc_port->num_nports == 0);
2194 	if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) {
2195 		return -EINVAL;
2196 	}
2197 
2198 	return 0;
2199 }
2200 
2201 /* Initializes the data for the creation of a FC-Port object in the SPDK
2202  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2203  * the API to the library. The contents added to this well defined structure
2204  * is private to each vendors implementation.
2205  */
2206 static int
2207 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2208 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2209 {
2210 	/* Used a high number for the LS HWQP so that it does not clash with the
2211 	 * IO HWQP's and immediately shows a LS queue during tracing.
2212 	 */
2213 	uint32_t i;
2214 
2215 	fc_port->port_hdl       = args->port_handle;
2216 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2217 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2218 	fc_port->num_io_queues  = args->io_queue_cnt;
2219 
2220 	/*
2221 	 * Set port context from init args. Used for FCP port stats.
2222 	 */
2223 	fc_port->port_ctx = args->port_ctx;
2224 
2225 	/*
2226 	 * Initialize the LS queue wherever needed.
2227 	 */
2228 	fc_port->ls_queue.queues = args->ls_queue;
2229 	fc_port->ls_queue.thread = nvmf_fc_get_master_thread();
2230 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2231 
2232 	/*
2233 	 * Initialize the LS queue.
2234 	 */
2235 	nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2236 
2237 	/*
2238 	 * Initialize the IO queues.
2239 	 */
2240 	for (i = 0; i < args->io_queue_cnt; i++) {
2241 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2242 		hwqp->hwqp_id = i;
2243 		hwqp->queues = args->io_queues[i];
2244 		hwqp->rq_size = args->io_queue_size;
2245 		nvmf_fc_init_hwqp(fc_port, hwqp);
2246 	}
2247 
2248 	/*
2249 	 * Initialize the LS processing for port
2250 	 */
2251 	nvmf_fc_ls_init(fc_port);
2252 
2253 	/*
2254 	 * Initialize the list of nport on this HW port.
2255 	 */
2256 	TAILQ_INIT(&fc_port->nport_list);
2257 	fc_port->num_nports = 0;
2258 
2259 	return 0;
2260 }
2261 
2262 static void
2263 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port)
2264 {
2265 	struct spdk_nvmf_fc_hwqp *hwqp    = NULL;
2266 	int i = 0;
2267 
2268 	hwqp = &fc_port->ls_queue;
2269 	(void)nvmf_fc_hwqp_set_offline(hwqp);
2270 
2271 	/*  Remove poller for all the io queues. */
2272 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2273 		hwqp = &fc_port->io_queues[i];
2274 		(void)nvmf_fc_hwqp_set_offline(hwqp);
2275 		nvmf_fc_poll_group_remove_hwqp(hwqp);
2276 	}
2277 }
2278 
2279 /*
2280  * Callback function for HW port link break operation.
2281  *
2282  * Notice that this callback is being triggered when spdk_fc_nport_delete()
2283  * completes, if that spdk_fc_nport_delete() called is issued by
2284  * nvmf_fc_adm_evnt_hw_port_link_break().
2285  *
2286  * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple
2287  * times (one per nport in the HW port's nport_list), a single call to
2288  * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function.
2289  *
2290  * As a result, this function only invokes a callback to the caller of
2291  * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty.
2292  */
static void
nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle,
				  enum spdk_fc_event event_type, void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args;
	struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL;
	spdk_nvmf_fc_callback cb_func = NULL;
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int num_nports = 0;
	char log_str[256];

	/* A failed nport delete is treated as a programming error. */
	if (0 != spdk_err) {
		DEV_VERIFY(!"port link break cb: spdk_err not success.");
		SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err);
		goto out;
	}

	/* Validate the callback-data chain before dereferencing any of it. */
	if (!offline_cb_args) {
		DEV_VERIFY(!"port link break cb: port_offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	offline_args = offline_cb_args->args;
	if (!offline_args) {
		DEV_VERIFY(!"port link break cb: offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	if (port_handle != offline_args->port_handle) {
		DEV_VERIFY(!"port link break cb: port_handle mismatch.");
		err = -EINVAL;
		goto out;
	}

	cb_func = offline_cb_args->cb_func;
	if (!cb_func) {
		DEV_VERIFY(!"port link break cb: cb_func is NULL.");
		err = -EINVAL;
		goto out;
	}

	fc_port = nvmf_fc_port_lookup(port_handle);
	if (!fc_port) {
		DEV_VERIFY(!"port link break cb: fc_port is NULL.");
		SPDK_ERRLOG("port link break cb: Unable to find port:%d\n",
			    offline_args->port_handle);
		err = -EINVAL;
		goto out;
	}

	num_nports = fc_port->num_nports;
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		/*
		 * Don't call the callback unless all nports have been deleted.
		 */
		goto out;
	}

	/* An empty nport_list implies the nport count must be zero as well. */
	if (num_nports != 0) {
		DEV_VERIFY(!"port link break cb: num_nports in non-zero.");
		SPDK_ERRLOG("port link break cb: # of ports should be 0. Instead, num_nports:%d\n",
			    num_nports);
		err = -EINVAL;
	}

	/*
	 * Mark the hwqps as offline and unregister the pollers.
	 */
	(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);

	/*
	 * Since there are no more nports, execute the callback(s).
	 */
	(void)cb_func(port_handle, SPDK_FC_LINK_BREAK,
		      (void *)offline_args->cb_ctx, spdk_err);

out:
	/* This per-nport-delete context is consumed on every invocation,
	 * including the early-exit paths (free(NULL) is a no-op).
	 */
	free(offline_cb_args);

	snprintf(log_str, sizeof(log_str),
		 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n",
		 port_handle, event_type, num_nports, err, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
	return;
}
2387 
2388 /*
2389  * FC port must have all its nports deleted before transitioning to offline state.
2390  */
2391 static void
2392 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2393 {
2394 	struct spdk_nvmf_fc_nport *nport = NULL;
2395 	/* All nports must have been deleted at this point for this fc port */
2396 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2397 	DEV_VERIFY(fc_port->num_nports == 0);
2398 	/* Mark the nport states to be zombie, if they exist */
2399 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2400 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2401 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2402 		}
2403 	}
2404 }
2405 
2406 static void
2407 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2408 {
2409 	ASSERT_SPDK_FC_MASTER_THREAD();
2410 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2411 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2412 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2413 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2414 	int spdk_err = 0;
2415 	uint8_t port_handle = cb_data->port_handle;
2416 	uint32_t s_id = rport->s_id;
2417 	uint32_t rpi = rport->rpi;
2418 	uint32_t assoc_count = rport->assoc_count;
2419 	uint32_t nport_hdl = nport->nport_hdl;
2420 	uint32_t d_id = nport->d_id;
2421 	char log_str[256];
2422 
2423 	/*
2424 	 * Assert on any delete failure.
2425 	 */
2426 	if (0 != err) {
2427 		DEV_VERIFY(!"Error in IT Delete callback.");
2428 		goto out;
2429 	}
2430 
2431 	if (cb_func != NULL) {
2432 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2433 	}
2434 
2435 out:
2436 	free(cb_data);
2437 
2438 	snprintf(log_str, sizeof(log_str),
2439 		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2440 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2441 
2442 	if (err != 0) {
2443 		SPDK_ERRLOG("%s", log_str);
2444 	} else {
2445 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
2446 	}
2447 }
2448 
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot the fields used in the final log line now; rport and
	 * cb_data may be freed below before the message is emitted.
	 */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* Keep the counters consistent despite the failure so the
		 * last-association check below still converges to zero.
		 */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		/* Last association: release the rport and the shared cb_data
		 * allocated in nvmf_fc_adm_i_t_delete_assoc().
		 */
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2510 
2511 /**
2512  * Process a IT delete.
2513  */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function
	 * (nvmf_fc_adm_i_t_delete_assoc_cb, once the rport's association
	 * count reaches zero), or below if no delete was ever scheduled.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 * All scheduled deletes share the single cb_data allocated above.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = nvmf_fc_delete_association(nport,
							       assoc->assoc_id,
							       false /* send abts */, false,
							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2596 
2597 static void
2598 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
2599 {
2600 	ASSERT_SPDK_FC_MASTER_THREAD();
2601 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
2602 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2603 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2604 	struct spdk_nvmf_fc_port *fc_port = NULL;
2605 	int err = 0;
2606 
2607 	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
2608 	hwqp = quiesce_api_data->hwqp;
2609 	fc_port = hwqp->fc_port;
2610 	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
2611 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;
2612 
2613 	/*
2614 	 * Decrement the callback/quiesced queue count.
2615 	 */
2616 	port_quiesce_ctx->quiesce_count--;
2617 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);
2618 
2619 	free(quiesce_api_data);
2620 	/*
2621 	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
2622 	 */
2623 	if (port_quiesce_ctx->quiesce_count > 0) {
2624 		return;
2625 	}
2626 
2627 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2628 		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
2629 	} else {
2630 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesced.\n", fc_port->port_hdl);
2631 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2632 	}
2633 
2634 	if (cb_func) {
2635 		/*
2636 		 * Callback function for the called of quiesce.
2637 		 */
2638 		cb_func(port_quiesce_ctx->ctx, err);
2639 	}
2640 
2641 	/*
2642 	 * Free the context structure.
2643 	 */
2644 	free(port_quiesce_ctx);
2645 
2646 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
2647 		      err);
2648 }
2649 
2650 static int
2651 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2652 			     spdk_nvmf_fc_poller_api_cb cb_func)
2653 {
2654 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2655 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2656 	int err = 0;
2657 
2658 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2659 
2660 	if (args == NULL) {
2661 		err = -ENOMEM;
2662 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2663 		goto done;
2664 	}
2665 	args->hwqp = fc_hwqp;
2666 	args->ctx = ctx;
2667 	args->cb_info.cb_func = cb_func;
2668 	args->cb_info.cb_data = args;
2669 	args->cb_info.cb_thread = spdk_get_thread();
2670 
2671 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2672 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2673 	if (rc) {
2674 		free(args);
2675 		err = -EINVAL;
2676 	}
2677 
2678 done:
2679 	return err;
2680 }
2681 
2682 /*
2683  * Hw port Quiesce
2684  */
2685 static int
2686 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2687 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2688 {
2689 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2690 	uint32_t i = 0;
2691 	int err = 0;
2692 
2693 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2694 
2695 	/*
2696 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2697 	 * and execute the callback.
2698 	 */
2699 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2700 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2701 	}
2702 
2703 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2704 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Port %d already in quiesced state.\n",
2705 			      fc_port->port_hdl);
2706 		/*
2707 		 * Execute the callback function directly.
2708 		 */
2709 		cb_func(ctx, err);
2710 		goto out;
2711 	}
2712 
2713 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2714 
2715 	if (port_quiesce_ctx == NULL) {
2716 		err = -ENOMEM;
2717 		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
2718 			    fc_port->port_hdl);
2719 		goto out;
2720 	}
2721 
2722 	port_quiesce_ctx->quiesce_count = 0;
2723 	port_quiesce_ctx->ctx = ctx;
2724 	port_quiesce_ctx->cb_func = cb_func;
2725 
2726 	/*
2727 	 * Quiesce the LS queue.
2728 	 */
2729 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2730 					   nvmf_fc_adm_queue_quiesce_cb);
2731 	if (err != 0) {
2732 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2733 		goto out;
2734 	}
2735 	port_quiesce_ctx->quiesce_count++;
2736 
2737 	/*
2738 	 * Quiesce the IO queues.
2739 	 */
2740 	for (i = 0; i < fc_port->num_io_queues; i++) {
2741 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2742 						   port_quiesce_ctx,
2743 						   nvmf_fc_adm_queue_quiesce_cb);
2744 		if (err != 0) {
2745 			DEV_VERIFY(0);
2746 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2747 		}
2748 		port_quiesce_ctx->quiesce_count++;
2749 	}
2750 
2751 out:
2752 	if (port_quiesce_ctx && err != 0) {
2753 		free(port_quiesce_ctx);
2754 	}
2755 	return err;
2756 }
2757 
2758 /*
2759  * Initialize and add a HW port entry to the global
2760  * HW port list.
2761  */
2762 static void
2763 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2764 {
2765 	ASSERT_SPDK_FC_MASTER_THREAD();
2766 	struct spdk_nvmf_fc_port *fc_port = NULL;
2767 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2768 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2769 			api_data->api_args;
2770 	int err = 0;
2771 
2772 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2773 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2774 		err = EINVAL;
2775 		goto abort_port_init;
2776 	}
2777 
2778 	/*
2779 	 * 1. Check for duplicate initialization.
2780 	 */
2781 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2782 	if (fc_port != NULL) {
2783 		/* Port already exists, check if it has to be re-initialized */
2784 		err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args);
2785 		if (err) {
2786 			/*
2787 			 * In case of an error we do not want to free the fc_port
2788 			 * so we set that pointer to NULL.
2789 			 */
2790 			fc_port = NULL;
2791 		}
2792 		goto abort_port_init;
2793 	}
2794 
2795 	/*
2796 	 * 2. Get the memory to instantiate a fc port.
2797 	 */
2798 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2799 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2800 	if (fc_port == NULL) {
2801 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2802 		err = -ENOMEM;
2803 		goto abort_port_init;
2804 	}
2805 
2806 	/* assign the io_queues array */
2807 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2808 				     struct spdk_nvmf_fc_port));
2809 
2810 	/*
2811 	 * 3. Initialize the contents for the FC-port
2812 	 */
2813 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2814 
2815 	if (err != 0) {
2816 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2817 		DEV_VERIFY(!"Data initialization failed for fc_port");
2818 		goto abort_port_init;
2819 	}
2820 
2821 	/*
2822 	 * 4. Add this port to the global fc port list in the library.
2823 	 */
2824 	nvmf_fc_port_add(fc_port);
2825 
2826 abort_port_init:
2827 	if (err && fc_port) {
2828 		free(fc_port);
2829 	}
2830 	if (api_data->cb_func != NULL) {
2831 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2832 	}
2833 
2834 	free(arg);
2835 
2836 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d initialize done, rc = %d.\n",
2837 		      args->port_handle, err);
2838 }
2839 
2840 /*
2841  * Online a HW port.
2842  */
2843 static void
2844 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2845 {
2846 	ASSERT_SPDK_FC_MASTER_THREAD();
2847 	struct spdk_nvmf_fc_port *fc_port = NULL;
2848 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2849 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2850 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2851 			api_data->api_args;
2852 	int i = 0;
2853 	int err = 0;
2854 
2855 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2856 	if (fc_port) {
2857 		/* Set the port state to online */
2858 		err = nvmf_fc_port_set_online(fc_port);
2859 		if (err != 0) {
2860 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2861 			DEV_VERIFY(!"Hw port online failed");
2862 			goto out;
2863 		}
2864 
2865 		hwqp = &fc_port->ls_queue;
2866 		hwqp->context = NULL;
2867 		(void)nvmf_fc_hwqp_set_online(hwqp);
2868 
2869 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2870 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2871 			hwqp = &fc_port->io_queues[i];
2872 			hwqp->context = NULL;
2873 			(void)nvmf_fc_hwqp_set_online(hwqp);
2874 			nvmf_fc_poll_group_add_hwqp(hwqp);
2875 		}
2876 	} else {
2877 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2878 		err = -EINVAL;
2879 	}
2880 
2881 out:
2882 	if (api_data->cb_func != NULL) {
2883 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2884 	}
2885 
2886 	free(arg);
2887 
2888 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d online done, rc = %d.\n", args->port_handle,
2889 		      err);
2890 }
2891 
2892 /*
2893  * Offline a HW port.
2894  */
2895 static void
2896 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
2897 {
2898 	ASSERT_SPDK_FC_MASTER_THREAD();
2899 	struct spdk_nvmf_fc_port *fc_port = NULL;
2900 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2901 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2902 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
2903 			api_data->api_args;
2904 	int i = 0;
2905 	int err = 0;
2906 
2907 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2908 	if (fc_port) {
2909 		/* Set the port state to offline, if it is not already. */
2910 		err = nvmf_fc_port_set_offline(fc_port);
2911 		if (err != 0) {
2912 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
2913 			err = 0;
2914 			goto out;
2915 		}
2916 
2917 		hwqp = &fc_port->ls_queue;
2918 		(void)nvmf_fc_hwqp_set_offline(hwqp);
2919 
2920 		/* Remove poller for all the io queues. */
2921 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2922 			hwqp = &fc_port->io_queues[i];
2923 			(void)nvmf_fc_hwqp_set_offline(hwqp);
2924 			nvmf_fc_poll_group_remove_hwqp(hwqp);
2925 		}
2926 
2927 		/*
2928 		 * Delete all the nports. Ideally, the nports should have been purged
2929 		 * before the offline event, in which case, only a validation is required.
2930 		 */
2931 		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
2932 	} else {
2933 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2934 		err = -EINVAL;
2935 	}
2936 out:
2937 	if (api_data->cb_func != NULL) {
2938 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
2939 	}
2940 
2941 	free(arg);
2942 
2943 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d offline done, rc = %d.\n", args->port_handle,
2944 		      err);
2945 }
2946 
/* Context carried through the pause -> add/remove listener -> resume
 * sequence for a single subsystem.
 */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;
	bool add_listener;	/* true: add the listener; false: remove it */
	struct spdk_nvme_transport_id trid;	/* built from the nport's WWNN/WWPN */
};
2952 
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
	/* Listener add/remove sequence is complete; release its context. */
	free(ctx);
}
2960 
2961 static void
2962 nvmf_fc_adm_listen_done(void *cb_arg, int status)
2963 {
2964 	ASSERT_SPDK_FC_MASTER_THREAD();
2965 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
2966 
2967 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
2968 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
2969 		free(ctx);
2970 	}
2971 }
2972 
2973 static void
2974 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
2975 {
2976 	ASSERT_SPDK_FC_MASTER_THREAD();
2977 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
2978 
2979 	if (ctx->add_listener) {
2980 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
2981 	} else {
2982 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
2983 		nvmf_fc_adm_listen_done(ctx, 0);
2984 	}
2985 }
2986 
2987 static int
2988 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
2989 {
2990 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
2991 	struct spdk_nvmf_subsystem *subsystem;
2992 
2993 	if (!tgt) {
2994 		SPDK_ERRLOG("No nvmf target defined\n");
2995 		return -EINVAL;
2996 	}
2997 
2998 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
2999 	while (subsystem) {
3000 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3001 
3002 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3003 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3004 			if (ctx) {
3005 				ctx->add_listener = add;
3006 				ctx->subsystem = subsystem;
3007 				nvmf_fc_create_trid(&ctx->trid,
3008 						    nport->fc_nodename.u.wwn,
3009 						    nport->fc_portname.u.wwn);
3010 
3011 				if (spdk_nvmf_tgt_listen(subsystem->tgt, &ctx->trid)) {
3012 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3013 						    ctx->trid.traddr);
3014 					free(ctx);
3015 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3016 								     nvmf_fc_adm_subsystem_paused_cb,
3017 								     ctx)) {
3018 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3019 						    subsystem->subnqn);
3020 					free(ctx);
3021 				}
3022 			}
3023 		}
3024 
3025 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3026 	}
3027 
3028 	return 0;
3029 }
3030 
3031 /*
3032  * Create a Nport.
3033  */
3034 static void
3035 nvmf_fc_adm_evnt_nport_create(void *arg)
3036 {
3037 	ASSERT_SPDK_FC_MASTER_THREAD();
3038 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3039 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3040 			api_data->api_args;
3041 	struct spdk_nvmf_fc_nport *nport = NULL;
3042 	struct spdk_nvmf_fc_port *fc_port = NULL;
3043 	int err = 0;
3044 
3045 	/*
3046 	 * Get the physical port.
3047 	 */
3048 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3049 	if (fc_port == NULL) {
3050 		err = -EINVAL;
3051 		goto out;
3052 	}
3053 
3054 	/*
3055 	 * Check for duplicate initialization.
3056 	 */
3057 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3058 	if (nport != NULL) {
3059 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3060 			    args->port_handle);
3061 		err = -EINVAL;
3062 		goto out;
3063 	}
3064 
3065 	/*
3066 	 * Get the memory to instantiate a fc nport.
3067 	 */
3068 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3069 	if (nport == NULL) {
3070 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3071 			    args->nport_handle);
3072 		err = -ENOMEM;
3073 		goto out;
3074 	}
3075 
3076 	/*
3077 	 * Initialize the contents for the nport
3078 	 */
3079 	nport->nport_hdl    = args->nport_handle;
3080 	nport->port_hdl     = args->port_handle;
3081 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3082 	nport->fc_nodename  = args->fc_nodename;
3083 	nport->fc_portname  = args->fc_portname;
3084 	nport->d_id         = args->d_id;
3085 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3086 
3087 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3088 	TAILQ_INIT(&nport->rem_port_list);
3089 	nport->rport_count = 0;
3090 	TAILQ_INIT(&nport->fc_associations);
3091 	nport->assoc_count = 0;
3092 
3093 	/*
3094 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3095 	 */
3096 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3097 
3098 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3099 out:
3100 	if (err && nport) {
3101 		free(nport);
3102 	}
3103 
3104 	if (api_data->cb_func != NULL) {
3105 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3106 	}
3107 
3108 	free(arg);
3109 }
3110 
/*
 * Completion callback for each SPDK_FC_IT_DELETE event enqueued by
 * nvmf_fc_adm_evnt_nport_delete (also called directly when an nport has
 * no rports). Once the last rport is gone, it removes the nport from
 * its FC port, frees the nport, invokes the caller's callback, and
 * frees the shared cb_data.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	/* Capture the handle before the nport may be freed below. */
	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations should already have been torn down. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	/* Build the message once; severity depends on err below. */
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
3175 
3176 /*
3177  * Delete Nport.
3178  */
3179 static void
3180 nvmf_fc_adm_evnt_nport_delete(void *arg)
3181 {
3182 	ASSERT_SPDK_FC_MASTER_THREAD();
3183 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3184 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3185 			api_data->api_args;
3186 	struct spdk_nvmf_fc_nport *nport = NULL;
3187 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3188 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3189 	int err = 0;
3190 	uint32_t rport_cnt = 0;
3191 	int rc = 0;
3192 
3193 	/*
3194 	 * Make sure that the nport exists.
3195 	 */
3196 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3197 	if (nport == NULL) {
3198 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3199 			    args->port_handle);
3200 		err = -EINVAL;
3201 		goto out;
3202 	}
3203 
3204 	/*
3205 	 * Allocate memory for callback data.
3206 	 */
3207 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3208 	if (NULL == cb_data) {
3209 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3210 		err = -ENOMEM;
3211 		goto out;
3212 	}
3213 
3214 	cb_data->nport = nport;
3215 	cb_data->port_handle = args->port_handle;
3216 	cb_data->fc_cb_func = api_data->cb_func;
3217 	cb_data->fc_cb_ctx = args->cb_ctx;
3218 
3219 	/*
3220 	 * Begin nport tear down
3221 	 */
3222 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3223 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3224 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3225 		/*
3226 		 * Deletion of this nport already in progress. Register callback
3227 		 * and return.
3228 		 */
3229 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3230 		err = -ENODEV;
3231 		goto out;
3232 	} else {
3233 		/* nport partially created/deleted */
3234 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3235 		DEV_VERIFY(0 != "Nport in zombie state");
3236 		err = -ENODEV;
3237 		goto out;
3238 	}
3239 
3240 	/*
3241 	 * Remove this nport from listening addresses across subsystems
3242 	 */
3243 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3244 
3245 	if (0 != rc) {
3246 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3247 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3248 			    nport->nport_hdl);
3249 		goto out;
3250 	}
3251 
3252 	/*
3253 	 * Delete all the remote ports (if any) for the nport
3254 	 */
3255 	/* TODO - Need to do this with a "first" and a "next" accessor function
3256 	 * for completeness. Look at app-subsystem as examples.
3257 	 */
3258 	if (nvmf_fc_nport_has_no_rport(nport)) {
3259 		/* No rports to delete. Complete the nport deletion. */
3260 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3261 		goto out;
3262 	}
3263 
3264 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3265 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3266 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3267 
3268 		if (it_del_args == NULL) {
3269 			err = -ENOMEM;
3270 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3271 				    rport_iter->rpi, rport_iter->s_id);
3272 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3273 			goto out;
3274 		}
3275 
3276 		rport_cnt++;
3277 		it_del_args->port_handle = nport->port_hdl;
3278 		it_del_args->nport_handle = nport->nport_hdl;
3279 		it_del_args->cb_ctx = (void *)cb_data;
3280 		it_del_args->rpi = rport_iter->rpi;
3281 		it_del_args->s_id = rport_iter->s_id;
3282 
3283 		nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3284 					     nvmf_fc_adm_delete_nport_cb);
3285 	}
3286 
3287 out:
3288 	/* On failure, execute the callback function now */
3289 	if ((err != 0) || (rc != 0)) {
3290 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3291 			    "rport_cnt:%d rc:%d.\n",
3292 			    args->nport_handle, err, args->port_handle,
3293 			    rport_cnt, rc);
3294 		if (cb_data) {
3295 			free(cb_data);
3296 		}
3297 		if (api_data->cb_func != NULL) {
3298 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3299 		}
3300 
3301 	} else {
3302 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3303 			      "NPort %d delete done succesfully, fc port:%d. "
3304 			      "rport_cnt:%d\n",
3305 			      args->nport_handle, args->port_handle, rport_cnt);
3306 	}
3307 
3308 	free(arg);
3309 }
3310 
3311 /*
3312  * Process an PRLI/IT add.
3313  */
3314 static void
3315 nvmf_fc_adm_evnt_i_t_add(void *arg)
3316 {
3317 	ASSERT_SPDK_FC_MASTER_THREAD();
3318 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3319 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3320 			api_data->api_args;
3321 	struct spdk_nvmf_fc_nport *nport = NULL;
3322 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3323 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3324 	int err = 0;
3325 
3326 	/*
3327 	 * Make sure the nport port exists.
3328 	 */
3329 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3330 	if (nport == NULL) {
3331 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3332 		err = -EINVAL;
3333 		goto out;
3334 	}
3335 
3336 	/*
3337 	 * Check for duplicate i_t_add.
3338 	 */
3339 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3340 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3341 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3342 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3343 			err = -EEXIST;
3344 			goto out;
3345 		}
3346 	}
3347 
3348 	/*
3349 	 * Get the memory to instantiate the remote port
3350 	 */
3351 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3352 	if (rport == NULL) {
3353 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3354 		err = -ENOMEM;
3355 		goto out;
3356 	}
3357 
3358 	/*
3359 	 * Initialize the contents for the rport
3360 	 */
3361 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3362 	rport->s_id = args->s_id;
3363 	rport->rpi = args->rpi;
3364 	rport->fc_nodename = args->fc_nodename;
3365 	rport->fc_portname = args->fc_portname;
3366 
3367 	/*
3368 	 * Add remote port to nport
3369 	 */
3370 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3371 		DEV_VERIFY(!"Error while adding rport to list");
3372 	};
3373 
3374 	/*
3375 	 * TODO: Do we validate the initiators service parameters?
3376 	 */
3377 
3378 	/*
3379 	 * Get the targets service parameters from the library
3380 	 * to return back to the driver.
3381 	 */
3382 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3383 
3384 out:
3385 	if (api_data->cb_func != NULL) {
3386 		/*
3387 		 * Passing pointer to the args struct as the first argument.
3388 		 * The cb_func should handle this appropriately.
3389 		 */
3390 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3391 	}
3392 
3393 	free(arg);
3394 
3395 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3396 		      "IT add on nport %d done, rc = %d.\n",
3397 		      args->nport_handle, err);
3398 }
3399 
3400 /**
3401  * Process a IT delete.
3402  */
3403 static void
3404 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3405 {
3406 	ASSERT_SPDK_FC_MASTER_THREAD();
3407 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3408 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3409 			api_data->api_args;
3410 	int rc = 0;
3411 	struct spdk_nvmf_fc_nport *nport = NULL;
3412 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3413 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3414 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3415 	uint32_t num_rport = 0;
3416 	char log_str[256];
3417 
3418 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete on nport:%d begin.\n", args->nport_handle);
3419 
3420 	/*
3421 	 * Make sure the nport port exists. If it does not, error out.
3422 	 */
3423 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3424 	if (nport == NULL) {
3425 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3426 		rc = -EINVAL;
3427 		goto out;
3428 	}
3429 
3430 	/*
3431 	 * Find this ITN / rport (remote port).
3432 	 */
3433 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3434 		num_rport++;
3435 		if ((rport_iter->s_id == args->s_id) &&
3436 		    (rport_iter->rpi == args->rpi) &&
3437 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3438 			rport = rport_iter;
3439 			break;
3440 		}
3441 	}
3442 
3443 	/*
3444 	 * We should find either zero or exactly one rport.
3445 	 *
3446 	 * If we find zero rports, that means that a previous request has
3447 	 * removed the rport by the time we reached here. In this case,
3448 	 * simply return out.
3449 	 */
3450 	if (rport == NULL) {
3451 		rc = -ENODEV;
3452 		goto out;
3453 	}
3454 
3455 	/*
3456 	 * We have found exactly one rport. Allocate memory for callback data.
3457 	 */
3458 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3459 	if (NULL == cb_data) {
3460 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3461 		rc = -ENOMEM;
3462 		goto out;
3463 	}
3464 
3465 	cb_data->nport = nport;
3466 	cb_data->rport = rport;
3467 	cb_data->port_handle = args->port_handle;
3468 	cb_data->fc_cb_func = api_data->cb_func;
3469 	cb_data->fc_cb_ctx = args->cb_ctx;
3470 
3471 	/*
3472 	 * Validate rport object state.
3473 	 */
3474 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3475 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3476 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3477 		/*
3478 		 * Deletion of this rport already in progress. Register callback
3479 		 * and return.
3480 		 */
3481 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3482 		rc = -ENODEV;
3483 		goto out;
3484 	} else {
3485 		/* rport partially created/deleted */
3486 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3487 		DEV_VERIFY(!"Invalid rport_state");
3488 		rc = -ENODEV;
3489 		goto out;
3490 	}
3491 
3492 	/*
3493 	 * We have successfully found a rport to delete. Call
3494 	 * nvmf_fc_i_t_delete_assoc(), which will perform further
3495 	 * IT-delete processing as well as free the cb_data.
3496 	 */
3497 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3498 				     (void *)cb_data);
3499 
3500 out:
3501 	if (rc != 0) {
3502 		/*
3503 		 * We have entered here because either we encountered an
3504 		 * error, or we did not find a rport to delete.
3505 		 * As a result, we will not call the function
3506 		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
3507 		 * processing. Therefore, execute the callback function now.
3508 		 */
3509 		if (cb_data) {
3510 			free(cb_data);
3511 		}
3512 		if (api_data->cb_func != NULL) {
3513 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3514 		}
3515 	}
3516 
3517 	snprintf(log_str, sizeof(log_str),
3518 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3519 		 args->nport_handle, num_rport, rc);
3520 
3521 	if (rc != 0) {
3522 		SPDK_ERRLOG("%s", log_str);
3523 	} else {
3524 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
3525 	}
3526 
3527 	free(arg);
3528 }
3529 
3530 /*
3531  * Process ABTS received
3532  */
3533 static void
3534 nvmf_fc_adm_evnt_abts_recv(void *arg)
3535 {
3536 	ASSERT_SPDK_FC_MASTER_THREAD();
3537 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3538 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3539 	struct spdk_nvmf_fc_nport *nport = NULL;
3540 	int err = 0;
3541 
3542 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3543 		      args->oxid, args->rxid);
3544 
3545 	/*
3546 	 * 1. Make sure the nport port exists.
3547 	 */
3548 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3549 	if (nport == NULL) {
3550 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3551 		err = -EINVAL;
3552 		goto out;
3553 	}
3554 
3555 	/*
3556 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3557 	 */
3558 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3559 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3560 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3561 			      args->rpi, args->oxid, args->rxid);
3562 		err = 0;
3563 		goto out;
3564 
3565 	}
3566 
3567 	/*
3568 	 * 3. Pass the received ABTS-LS to the library for handling.
3569 	 */
3570 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3571 
3572 out:
3573 	if (api_data->cb_func != NULL) {
3574 		/*
3575 		 * Passing pointer to the args struct as the first argument.
3576 		 * The cb_func should handle this appropriately.
3577 		 */
3578 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3579 	} else {
3580 		/* No callback set, free the args */
3581 		free(args);
3582 	}
3583 
3584 	free(arg);
3585 }
3586 
3587 /*
3588  * Callback function for hw port quiesce.
3589  */
3590 static void
3591 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3592 {
3593 	ASSERT_SPDK_FC_MASTER_THREAD();
3594 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3595 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3596 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3597 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3598 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3599 	struct spdk_nvmf_fc_port *fc_port = NULL;
3600 	char *dump_buf = NULL;
3601 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3602 
3603 	/*
3604 	 * Free the callback context struct.
3605 	 */
3606 	free(ctx);
3607 
3608 	if (err != 0) {
3609 		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
3610 		goto out;
3611 	}
3612 
3613 	if (args->dump_queues == false) {
3614 		/*
3615 		 * Queues need not be dumped.
3616 		 */
3617 		goto out;
3618 	}
3619 
3620 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3621 
3622 	/*
3623 	 * Get the fc port.
3624 	 */
3625 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3626 	if (fc_port == NULL) {
3627 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3628 		err = -EINVAL;
3629 		goto out;
3630 	}
3631 
3632 	/*
3633 	 * Allocate memory for the dump buffer.
3634 	 * This memory will be freed by FCT.
3635 	 */
3636 	dump_buf = (char *)calloc(1, dump_buf_size);
3637 	if (dump_buf == NULL) {
3638 		err = -ENOMEM;
3639 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3640 		goto out;
3641 	}
3642 	*args->dump_buf  = (uint32_t *)dump_buf;
3643 	dump_info.buffer = dump_buf;
3644 	dump_info.offset = 0;
3645 
3646 	/*
3647 	 * Add the dump reason to the top of the buffer.
3648 	 */
3649 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3650 
3651 	/*
3652 	 * Dump the hwqp.
3653 	 */
3654 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3655 				fc_port->num_io_queues, &dump_info);
3656 
3657 out:
3658 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3659 		      args->port_handle, args->dump_queues, err);
3660 
3661 	if (cb_func != NULL) {
3662 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3663 	}
3664 }
3665 
3666 /*
3667  * HW port reset
3668 
3669  */
3670 static void
3671 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3672 {
3673 	ASSERT_SPDK_FC_MASTER_THREAD();
3674 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3675 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3676 			api_data->api_args;
3677 	struct spdk_nvmf_fc_port *fc_port = NULL;
3678 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3679 	int err = 0;
3680 
3681 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump\n", args->port_handle);
3682 
3683 	/*
3684 	 * Make sure the physical port exists.
3685 	 */
3686 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3687 	if (fc_port == NULL) {
3688 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3689 		err = -EINVAL;
3690 		goto out;
3691 	}
3692 
3693 	/*
3694 	 * Save the reset event args and the callback in a context struct.
3695 	 */
3696 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3697 
3698 	if (ctx == NULL) {
3699 		err = -ENOMEM;
3700 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3701 		goto fail;
3702 	}
3703 
3704 	ctx->reset_args = arg;
3705 	ctx->reset_cb_func = api_data->cb_func;
3706 
3707 	/*
3708 	 * Quiesce the hw port.
3709 	 */
3710 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3711 	if (err != 0) {
3712 		goto fail;
3713 	}
3714 
3715 	/*
3716 	 * Once the ports are successfully quiesced the reset processing
3717 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3718 	 */
3719 	return;
3720 fail:
3721 	free(ctx);
3722 
3723 out:
3724 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump done, rc = %d.\n", args->port_handle,
3725 		      err);
3726 
3727 	if (api_data->cb_func != NULL) {
3728 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3729 	}
3730 
3731 	free(arg);
3732 }
3733 
3734 /*
3735  * Process a link break event on a HW port.
3736  */
3737 static void
3738 nvmf_fc_adm_evnt_hw_port_link_break(void *arg)
3739 {
3740 	ASSERT_SPDK_FC_MASTER_THREAD();
3741 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3742 	struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *)
3743 			api_data->api_args;
3744 	struct spdk_nvmf_fc_port *fc_port = NULL;
3745 	int err = 0;
3746 	struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL;
3747 	struct spdk_nvmf_fc_nport *nport = NULL;
3748 	uint32_t nport_deletes_sent = 0;
3749 	uint32_t nport_deletes_skipped = 0;
3750 	struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL;
3751 	char log_str[256];
3752 
3753 	/*
3754 	 * Get the fc port using the port handle.
3755 	 */
3756 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3757 	if (!fc_port) {
3758 		SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n",
3759 			    args->port_handle);
3760 		err = -EINVAL;
3761 		goto out;
3762 	}
3763 
3764 	/*
3765 	 * Set the port state to offline, if it is not already.
3766 	 */
3767 	err = nvmf_fc_port_set_offline(fc_port);
3768 	if (err != 0) {
3769 		SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n",
3770 			    fc_port->port_hdl, err);
3771 		err = 0;
3772 		goto out;
3773 	}
3774 
3775 	/*
3776 	 * Delete all the nports, if any.
3777 	 */
3778 	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
3779 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
3780 			/* Skipped the nports that are not in CREATED state */
3781 			if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
3782 				nport_deletes_skipped++;
3783 				continue;
3784 			}
3785 
3786 			/* Allocate memory for callback data. */
3787 			cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data));
3788 			if (NULL == cb_data) {
3789 				SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n",
3790 					    args->port_handle);
3791 				err = -ENOMEM;
3792 				goto out;
3793 			}
3794 			cb_data->args = args;
3795 			cb_data->cb_func = api_data->cb_func;
3796 			nport_del_args = &cb_data->nport_del_args;
3797 			nport_del_args->port_handle = args->port_handle;
3798 			nport_del_args->nport_handle = nport->nport_hdl;
3799 			nport_del_args->cb_ctx = cb_data;
3800 
3801 			nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE,
3802 						     (void *)nport_del_args,
3803 						     nvmf_fc_adm_hw_port_link_break_cb);
3804 
3805 			nport_deletes_sent++;
3806 		}
3807 	}
3808 
3809 	if (nport_deletes_sent == 0 && err == 0) {
3810 		/*
3811 		 * Mark the hwqps as offline and unregister the pollers.
3812 		 */
3813 		(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);
3814 	}
3815 
3816 out:
3817 	snprintf(log_str, sizeof(log_str),
3818 		 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n",
3819 		 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err);
3820 
3821 	if (err != 0) {
3822 		SPDK_ERRLOG("%s", log_str);
3823 	} else {
3824 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
3825 	}
3826 
3827 	if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) {
3828 		/*
3829 		 * No nport_deletes are sent, which would have eventually
3830 		 * called the port_link_break callback. Therefore, call the
3831 		 * port_link_break callback here.
3832 		 */
3833 		(void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err);
3834 	}
3835 
3836 	free(arg);
3837 }
3838 
3839 static inline void
3840 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3841 {
3842 	if (nvmf_fc_get_master_thread()) {
3843 		spdk_thread_send_msg(nvmf_fc_get_master_thread(), fn, args);
3844 	}
3845 }
3846 
3847 /*
3848  * Queue up an event in the SPDK masters event queue.
3849  * Used by the FC driver to notify the SPDK master of FC related events.
3850  */
3851 int
3852 nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3853 			     spdk_nvmf_fc_callback cb_func)
3854 {
3855 	int err = 0;
3856 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3857 	spdk_msg_fn event_fn = NULL;
3858 
3859 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d.\n", event_type);
3860 
3861 	if (event_type >= SPDK_FC_EVENT_MAX) {
3862 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3863 		err = -EINVAL;
3864 		goto done;
3865 	}
3866 
3867 	if (args == NULL) {
3868 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3869 		err = -EINVAL;
3870 		goto done;
3871 	}
3872 
3873 	api_data = calloc(1, sizeof(*api_data));
3874 
3875 	if (api_data == NULL) {
3876 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3877 		err = -ENOMEM;
3878 		goto done;
3879 	}
3880 
3881 	api_data->api_args = args;
3882 	api_data->cb_func = cb_func;
3883 
3884 	switch (event_type) {
3885 	case SPDK_FC_HW_PORT_INIT:
3886 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3887 		break;
3888 
3889 	case SPDK_FC_HW_PORT_ONLINE:
3890 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3891 		break;
3892 
3893 	case SPDK_FC_HW_PORT_OFFLINE:
3894 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3895 		break;
3896 
3897 	case SPDK_FC_NPORT_CREATE:
3898 		event_fn = nvmf_fc_adm_evnt_nport_create;
3899 		break;
3900 
3901 	case SPDK_FC_NPORT_DELETE:
3902 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3903 		break;
3904 
3905 	case SPDK_FC_IT_ADD:
3906 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3907 		break;
3908 
3909 	case SPDK_FC_IT_DELETE:
3910 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3911 		break;
3912 
3913 	case SPDK_FC_ABTS_RECV:
3914 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3915 		break;
3916 
3917 	case SPDK_FC_LINK_BREAK:
3918 		event_fn = nvmf_fc_adm_evnt_hw_port_link_break;
3919 		break;
3920 
3921 	case SPDK_FC_HW_PORT_RESET:
3922 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3923 		break;
3924 
3925 	case SPDK_FC_UNRECOVERABLE_ERR:
3926 	default:
3927 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3928 		err = -EINVAL;
3929 		break;
3930 	}
3931 
3932 done:
3933 
3934 	if (err == 0) {
3935 		assert(event_fn != NULL);
3936 		nvmf_fc_adm_run_on_master_thread(event_fn, (void *)api_data);
3937 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d done successfully\n", event_type);
3938 	} else {
3939 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3940 		if (api_data) {
3941 			free(api_data);
3942 		}
3943 	}
3944 
3945 	return err;
3946 }
3947 
/* Register the FC transport ops with the nvmf layer, and register the
 * log components used by the admin API and the FC data path.
 */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc_adm_api", SPDK_LOG_NVMF_FC_ADM_API);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc", SPDK_LOG_NVMF_FC)
3951