xref: /spdk/lib/nvmf/fc.c (revision 510f4c134a21b45ff3a5add9ebc6c6cf7e49aeab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
3  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
4  *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 /*
8  * NVMe_FC transport functions.
9  */
10 
11 #include "spdk/env.h"
12 #include "spdk/assert.h"
13 #include "spdk/nvmf_transport.h"
14 #include "spdk/string.h"
15 #include "spdk/trace.h"
16 #include "spdk/util.h"
17 #include "spdk/likely.h"
18 #include "spdk/endian.h"
19 #include "spdk/log.h"
20 #include "spdk/thread.h"
21 
22 #include "nvmf_fc.h"
23 #include "fc_lld.h"
24 
25 #include "spdk_internal/trace_defs.h"
26 
27 #ifndef DEV_VERIFY
28 #define DEV_VERIFY assert
29 #endif
30 
31 #ifndef ASSERT_SPDK_FC_MAIN_THREAD
32 #define ASSERT_SPDK_FC_MAIN_THREAD() \
33         DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
34 #endif
35 
36 /*
37  * PRLI service parameters
38  */
39 enum spdk_nvmf_fc_service_parameters {
40 	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
41 	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
42 	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
43 	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
44 	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
45 };
46 
/*
 * Human-readable names for fc request states, used for debug logging.
 * Order must match enum spdk_nvmf_fc_request_state (declared in nvmf_fc.h) —
 * the enum value is used directly as the index into this array.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};
65 
66 #define HWQP_CONN_TABLE_SIZE			8192
67 #define HWQP_RPI_TABLE_SIZE			4096
68 
69 SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
70 {
71 	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
72 	spdk_trace_register_description("FC_NEW",
73 					TRACE_FC_REQ_INIT,
74 					OWNER_NONE, OBJECT_NVMF_FC_IO, 1,
75 					SPDK_TRACE_ARG_TYPE_INT, "");
76 	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
77 					TRACE_FC_REQ_READ_BDEV,
78 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
79 					SPDK_TRACE_ARG_TYPE_INT, "");
80 	spdk_trace_register_description("FC_READ_XFER_DATA",
81 					TRACE_FC_REQ_READ_XFER,
82 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
83 					SPDK_TRACE_ARG_TYPE_INT, "");
84 	spdk_trace_register_description("FC_READ_RSP",
85 					TRACE_FC_REQ_READ_RSP,
86 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
87 					SPDK_TRACE_ARG_TYPE_INT, "");
88 	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
89 					TRACE_FC_REQ_WRITE_BUFFS,
90 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
91 					SPDK_TRACE_ARG_TYPE_INT, "");
92 	spdk_trace_register_description("FC_WRITE_XFER_DATA",
93 					TRACE_FC_REQ_WRITE_XFER,
94 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
95 					SPDK_TRACE_ARG_TYPE_INT, "");
96 	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
97 					TRACE_FC_REQ_WRITE_BDEV,
98 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
99 					SPDK_TRACE_ARG_TYPE_INT, "");
100 	spdk_trace_register_description("FC_WRITE_RSP",
101 					TRACE_FC_REQ_WRITE_RSP,
102 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
103 					SPDK_TRACE_ARG_TYPE_INT, "");
104 	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
105 					TRACE_FC_REQ_NONE_BDEV,
106 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
107 					SPDK_TRACE_ARG_TYPE_INT, "");
108 	spdk_trace_register_description("FC_NONE_RSP",
109 					TRACE_FC_REQ_NONE_RSP,
110 					OWNER_NONE, OBJECT_NVMF_FC_IO, 0,
111 					SPDK_TRACE_ARG_TYPE_INT, "");
112 	spdk_trace_register_description("FC_SUCCESS",
113 					TRACE_FC_REQ_SUCCESS,
114 					OWNER_NONE, OBJECT_NONE, 0,
115 					SPDK_TRACE_ARG_TYPE_INT, "");
116 	spdk_trace_register_description("FC_FAILED",
117 					TRACE_FC_REQ_FAILED,
118 					OWNER_NONE, OBJECT_NONE, 0,
119 					SPDK_TRACE_ARG_TYPE_INT, "");
120 	spdk_trace_register_description("FC_ABRT",
121 					TRACE_FC_REQ_ABORTED,
122 					OWNER_NONE, OBJECT_NONE, 0,
123 					SPDK_TRACE_ARG_TYPE_INT, "");
124 	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
125 					TRACE_FC_REQ_BDEV_ABORTED,
126 					OWNER_NONE, OBJECT_NONE, 0,
127 					SPDK_TRACE_ARG_TYPE_INT, "");
128 	spdk_trace_register_description("FC_PENDING",
129 					TRACE_FC_REQ_PENDING,
130 					OWNER_NONE, OBJECT_NONE, 0,
131 					SPDK_TRACE_ARG_TYPE_INT, "");
132 	spdk_trace_register_description("FC_FUSED_WAITING",
133 					TRACE_FC_REQ_FUSED_WAITING,
134 					OWNER_NONE, OBJECT_NONE, 0,
135 					SPDK_TRACE_ARG_TYPE_INT, "");
136 }
137 
138 /**
139  * The structure used by all fc adm functions
140  */
141 struct spdk_nvmf_fc_adm_api_data {
142 	void *api_args;
143 	spdk_nvmf_fc_callback cb_func;
144 };
145 
146 /**
147  * The callback structure for nport-delete
148  */
149 struct spdk_nvmf_fc_adm_nport_del_cb_data {
150 	struct spdk_nvmf_fc_nport *nport;
151 	uint8_t port_handle;
152 	spdk_nvmf_fc_callback fc_cb_func;
153 	void *fc_cb_ctx;
154 };
155 
156 /**
157  * The callback structure for it-delete
158  */
159 struct spdk_nvmf_fc_adm_i_t_del_cb_data {
160 	struct spdk_nvmf_fc_nport *nport;
161 	struct spdk_nvmf_fc_remote_port_info *rport;
162 	uint8_t port_handle;
163 	spdk_nvmf_fc_callback fc_cb_func;
164 	void *fc_cb_ctx;
165 };
166 
167 
168 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);
169 
170 /**
171  * The callback structure for the it-delete-assoc callback
172  */
173 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
174 	struct spdk_nvmf_fc_nport *nport;
175 	struct spdk_nvmf_fc_remote_port_info *rport;
176 	uint8_t port_handle;
177 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
178 	void *cb_ctx;
179 };
180 
181 /*
182  * Call back function pointer for HW port quiesce.
183  */
184 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);
185 
186 /**
187  * Context structure for quiescing a hardware port
188  */
189 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
190 	int quiesce_count;
191 	void *ctx;
192 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
193 };
194 
195 /**
196  * Context structure used to reset a hardware port
197  */
198 struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
199 	void *reset_args;
200 	spdk_nvmf_fc_callback reset_cb_func;
201 };
202 
203 struct spdk_nvmf_fc_transport {
204 	struct spdk_nvmf_transport transport;
205 	struct spdk_poller *accept_poller;
206 	pthread_mutex_t lock;
207 };
208 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

/* Saved destroy-done callback, invoked once transport teardown finishes. */
static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

/* List of all FC HW ports known to the transport. */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread on which administrative (main-poller) work runs. */
static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

/* FC poll groups and their count; both protected by g_nvmf_ftransport->lock. */
static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);
221 
/* Return the thread used for FC main-poller (administrative) work. */
struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}
227 
228 static inline void
229 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
230 			       enum spdk_nvmf_fc_request_state state)
231 {
232 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
233 
234 	switch (state) {
235 	case SPDK_NVMF_FC_REQ_INIT:
236 		/* Start IO tracing */
237 		tpoint_id = TRACE_FC_REQ_INIT;
238 		break;
239 	case SPDK_NVMF_FC_REQ_READ_BDEV:
240 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
241 		break;
242 	case SPDK_NVMF_FC_REQ_READ_XFER:
243 		tpoint_id = TRACE_FC_REQ_READ_XFER;
244 		break;
245 	case SPDK_NVMF_FC_REQ_READ_RSP:
246 		tpoint_id = TRACE_FC_REQ_READ_RSP;
247 		break;
248 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
249 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
250 		break;
251 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
252 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
253 		break;
254 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
255 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
256 		break;
257 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
258 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
259 		break;
260 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
261 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
262 		break;
263 	case SPDK_NVMF_FC_REQ_NONE_RSP:
264 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
265 		break;
266 	case SPDK_NVMF_FC_REQ_SUCCESS:
267 		tpoint_id = TRACE_FC_REQ_SUCCESS;
268 		break;
269 	case SPDK_NVMF_FC_REQ_FAILED:
270 		tpoint_id = TRACE_FC_REQ_FAILED;
271 		break;
272 	case SPDK_NVMF_FC_REQ_ABORTED:
273 		tpoint_id = TRACE_FC_REQ_ABORTED;
274 		break;
275 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
276 		tpoint_id = TRACE_FC_REQ_ABORTED;
277 		break;
278 	case SPDK_NVMF_FC_REQ_PENDING:
279 		tpoint_id = TRACE_FC_REQ_PENDING;
280 		break;
281 	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
282 		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
283 		break;
284 	default:
285 		assert(0);
286 		break;
287 	}
288 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
289 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
290 				  (uint64_t)(&fc_req->req));
291 	}
292 }
293 
294 static struct rte_hash *
295 nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
296 {
297 	struct rte_hash_parameters hash_params = { 0 };
298 
299 	hash_params.entries = num_entries;
300 	hash_params.key_len = key_len;
301 	hash_params.name = name;
302 
303 	return rte_hash_create(&hash_params);
304 }
305 
/* Release the per-connection request pool backing memory (safe to call
 * with an already-freed/NULL pool; free(NULL) is a no-op). */
void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;
}
312 
313 int
314 nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
315 {
316 	uint32_t i, qd;
317 	struct spdk_nvmf_fc_pooled_request *req;
318 
319 	/*
320 	 * Create number of fc-requests to be more than the actual SQ size.
321 	 * This is to handle race conditions where the target driver may send
322 	 * back a RSP and before the target driver gets to process the CQE
323 	 * for the RSP, the initiator may have sent a new command.
324 	 * Depending on the load on the HWQP, there is a slim possibility
325 	 * that the target reaps the RQE corresponding to the new
326 	 * command before processing the CQE corresponding to the RSP.
327 	 */
328 	qd = fc_conn->max_queue_depth * 2;
329 
330 	STAILQ_INIT(&fc_conn->pool_queue);
331 	fc_conn->pool_memory = calloc((fc_conn->max_queue_depth * 2),
332 				      sizeof(struct spdk_nvmf_fc_request));
333 	if (!fc_conn->pool_memory) {
334 		SPDK_ERRLOG("create fc req ring objects failed\n");
335 		goto error;
336 	}
337 	fc_conn->pool_size = qd;
338 	fc_conn->pool_free_elems = qd;
339 
340 	/* Initialise value in ring objects and link the objects */
341 	for (i = 0; i < qd; i++) {
342 		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
343 				i * sizeof(struct spdk_nvmf_fc_request));
344 
345 		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
346 	}
347 	return 0;
348 error:
349 	nvmf_fc_free_conn_reqpool(fc_conn);
350 	return -1;
351 }
352 
353 static inline struct spdk_nvmf_fc_request *
354 nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
355 {
356 	struct spdk_nvmf_fc_request *fc_req;
357 	struct spdk_nvmf_fc_pooled_request *pooled_req;
358 	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;
359 
360 	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
361 	if (!pooled_req) {
362 		SPDK_ERRLOG("Alloc request buffer failed\n");
363 		return NULL;
364 	}
365 	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
366 	fc_conn->pool_free_elems -= 1;
367 
368 	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
369 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
370 	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
371 
372 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
373 	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
374 	TAILQ_INIT(&fc_req->abort_cbs);
375 	return fc_req;
376 }
377 
378 static inline void
379 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
380 {
381 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
382 		/* Log an error for debug purpose. */
383 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
384 	}
385 
386 	/* set the magic to mark req as no longer valid. */
387 	fc_req->magic = 0xDEADBEEF;
388 
389 	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
390 	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
391 
392 	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
393 	fc_conn->pool_free_elems += 1;
394 }
395 
/* Unlink the generic nvmf request from the poll group's pending-buffer
 * queue (used when a buffered request is aborted or serviced). */
static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}
402 
403 int
404 nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
405 {
406 	char name[64];
407 
408 	hwqp->fc_port = fc_port;
409 
410 	/* clear counters */
411 	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));
412 
413 	TAILQ_INIT(&hwqp->in_use_reqs);
414 	TAILQ_INIT(&hwqp->sync_cbs);
415 	TAILQ_INIT(&hwqp->ls_pending_queue);
416 
417 	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
418 	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
419 				     sizeof(uint64_t));
420 	if (!hwqp->connection_list_hash) {
421 		SPDK_ERRLOG("Failed to create connection hash table.\n");
422 		return -ENOMEM;
423 	}
424 
425 	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
426 	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
427 	if (!hwqp->rport_list_hash) {
428 		SPDK_ERRLOG("Failed to create rpi hash table.\n");
429 		rte_hash_free(hwqp->connection_list_hash);
430 		return -ENOMEM;
431 	}
432 
433 	/* Init low level driver queues */
434 	nvmf_fc_init_q(hwqp);
435 	return 0;
436 }
437 
438 static struct spdk_nvmf_fc_poll_group *
439 nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
440 {
441 	uint32_t max_count = UINT32_MAX;
442 	struct spdk_nvmf_fc_poll_group *fgroup;
443 	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;
444 
445 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
446 	/* find poll group with least number of hwqp's assigned to it */
447 	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
448 		if (fgroup->hwqp_count < max_count) {
449 			ret_fgroup = fgroup;
450 			max_count = fgroup->hwqp_count;
451 		}
452 	}
453 
454 	if (ret_fgroup) {
455 		ret_fgroup->hwqp_count++;
456 		hwqp->thread = ret_fgroup->group.group->thread;
457 		hwqp->fgroup = ret_fgroup;
458 	}
459 
460 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
461 
462 	return ret_fgroup;
463 }
464 
465 bool
466 nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
467 {
468 	struct spdk_nvmf_fc_poll_group *tmp;
469 	bool rc = false;
470 
471 	pthread_mutex_lock(&g_nvmf_ftransport->lock);
472 	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
473 		if (tmp == fgroup) {
474 			rc = true;
475 			break;
476 		}
477 	}
478 	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
479 	return rc;
480 }
481 
/*
 * Assign a hwqp to the least-loaded poll group and hand it to that group's
 * poller via the ADD_HWQP poller API message. Logs and returns on failure;
 * no status is reported to the caller.
 */
void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	/* At least one poll group must exist before hwqps are added. */
	assert(g_nvmf_fgroup_count);

	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}
500 
501 static void
502 nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
503 {
504 	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;
505 
506 	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
507 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
508 			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
509 	} else {
510 		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
511 	}
512 
513 	if (args->cb_fn) {
514 		args->cb_fn(args->cb_ctx, 0);
515 	}
516 
517 	free(args);
518 }
519 
/*
 * Detach a hwqp from its poll group.
 *
 * Decrements the group's hwqp count (under the transport lock), then sends
 * a REMOVE_HWQP poller API message; the completion callback invokes cb_fn.
 * On any early-out path (no fgroup, group already gone, alloc or send
 * failure) cb_fn is invoked synchronously with the error code.
 */
void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		/* Verify the fgroup is still on the global list before
		 * touching its count; it may have been destroyed already. */
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		if (tmp != hwqp->fgroup) {
			/* Pollgroup was already removed. Dont bother. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		/* args are freed by nvmf_fc_poll_group_remove_hwqp_cb. */
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		/* Completion path will run cb_fn; do not call it here. */
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}
579 
580 /*
581  * Note: This needs to be used only on main poller.
582  */
583 static uint64_t
584 nvmf_fc_get_abts_unique_id(void)
585 {
586 	static uint32_t u_id = 0;
587 
588 	return (uint64_t)(++u_id);
589 }
590 
591 static void
592 nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
593 {
594 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
595 	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;
596 
597 	ctx->hwqps_responded++;
598 
599 	if (ctx->hwqps_responded < ctx->num_hwqps) {
600 		/* Wait for all pollers to complete. */
601 		return;
602 	}
603 
604 	/* Free the queue sync poller args. */
605 	free(ctx->sync_poller_args);
606 
607 	/* Mark as queue synced */
608 	ctx->queue_synced = true;
609 
610 	/* Reset the ctx values */
611 	ctx->hwqps_responded = 0;
612 	ctx->handled = false;
613 
614 	SPDK_DEBUGLOG(nvmf_fc,
615 		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
616 		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
617 
618 	/* Resend ABTS to pollers */
619 	args = ctx->abts_poller_args;
620 	for (int i = 0; i < ctx->num_hwqps; i++) {
621 		poller_arg = args + i;
622 		nvmf_fc_poller_api_func(poller_arg->hwqp,
623 					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
624 					poller_arg);
625 	}
626 }
627 
/*
 * Fallback when no poller found the OX_ID named in an ABTS: sync the
 * queues and retry.
 *
 * Sends a QUEUE_SYNC poller API message to every hwqp involved in the ABTS
 * and posts a sync marker on the LS queue; nvmf_fc_queue_synced_cb()
 * resubmits the ABTS once all pollers respond.
 *
 * Returns 0 on success, -EPERM if the LLD lacks queue-sync support,
 * -EINVAL for a NULL ctx, -ENOMEM on allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	/* One sync-args entry per involved hwqp; freed in the sync callback. */
	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* Target the same hwqps that received the original ABTS. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
682 
683 static void
684 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
685 {
686 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
687 	struct spdk_nvmf_fc_nport *nport  = NULL;
688 
689 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
690 		ctx->handled = true;
691 	}
692 
693 	ctx->hwqps_responded++;
694 
695 	if (ctx->hwqps_responded < ctx->num_hwqps) {
696 		/* Wait for all pollers to complete. */
697 		return;
698 	}
699 
700 	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
701 
702 	if (ctx->nport != nport) {
703 		/* Nport can be deleted while this abort is being
704 		 * processed by the pollers.
705 		 */
706 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
707 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
708 	} else {
709 		if (!ctx->handled) {
710 			/* Try syncing the queues and try one more time */
711 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
712 				SPDK_DEBUGLOG(nvmf_fc,
713 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
714 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
715 				return;
716 			} else {
717 				/* Send Reject */
718 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
719 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
720 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
721 			}
722 		} else {
723 			/* Send Accept */
724 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
725 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
726 					    0, NULL, NULL);
727 		}
728 	}
729 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
730 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
731 
732 	free(ctx->abts_poller_args);
733 	free(ctx);
734 }
735 
/*
 * Entry point for a received ABTS frame.
 *
 * Finds every hwqp that hosts a connection with the given RPI on this
 * nport, then fans the ABTS out to those hwqps' pollers via ABTS_RECEIVED
 * messages; nvmf_fc_abts_handled_cb() collects responses and sends the BLS
 * accept/reject. If no hwqp matches or any allocation fails, a BLS reject
 * is sent immediately.
 */
void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Collect the distinct hwqps serving connections with this RPI. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	/* One poller-args entry per hwqp; owned by ctx and freed in
	 * nvmf_fc_abts_handled_cb(). */
	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	/* Handles are cached so the callback can detect nport deletion. */
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	/* Temporary tracking array is no longer needed once fanned out. */
	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
836 
837 /*** Accessor functions for the FC structures - BEGIN */
838 /*
839  * Returns true if the port is in offline state.
840  */
841 bool
842 nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
843 {
844 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
845 		return true;
846 	}
847 
848 	return false;
849 }
850 
851 /*
852  * Returns true if the port is in online state.
853  */
854 bool
855 nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
856 {
857 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
858 		return true;
859 	}
860 
861 	return false;
862 }
863 
864 int
865 nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
866 {
867 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
868 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
869 		return 0;
870 	}
871 
872 	return -EPERM;
873 }
874 
875 int
876 nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
877 {
878 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
879 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
880 		return 0;
881 	}
882 
883 	return -EPERM;
884 }
885 
886 int
887 nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
888 {
889 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
890 		hwqp->state = SPDK_FC_HWQP_ONLINE;
891 		/* reset some queue counters */
892 		hwqp->num_conns = 0;
893 		return nvmf_fc_set_q_online_state(hwqp, true);
894 	}
895 
896 	return -EPERM;
897 }
898 
899 int
900 nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
901 {
902 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
903 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
904 		return nvmf_fc_set_q_online_state(hwqp, false);
905 	}
906 
907 	return -EPERM;
908 }
909 
/* Register a new FC port: append it to the global port list and notify the
 * low-level driver. */
void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}
920 
/* Unregister an FC port: remove it from the global port list and notify
 * the low-level driver. */
static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}
931 
932 struct spdk_nvmf_fc_port *
933 nvmf_fc_port_lookup(uint8_t port_hdl)
934 {
935 	struct spdk_nvmf_fc_port *fc_port = NULL;
936 
937 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
938 		if (fc_port->port_hdl == port_hdl) {
939 			return fc_port;
940 		}
941 	}
942 	return NULL;
943 }
944 
/* PRLI service parameter bits this target advertises: discovery service
 * plus target function. */
uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}
950 
951 int
952 nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
953 		       struct spdk_nvmf_fc_nport *nport)
954 {
955 	if (fc_port) {
956 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
957 		fc_port->num_nports++;
958 		return 0;
959 	}
960 
961 	return -EINVAL;
962 }
963 
964 int
965 nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
966 			  struct spdk_nvmf_fc_nport *nport)
967 {
968 	if (fc_port && nport) {
969 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
970 		fc_port->num_nports--;
971 		return 0;
972 	}
973 
974 	return -EINVAL;
975 }
976 
977 static struct spdk_nvmf_fc_nport *
978 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
979 {
980 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
981 
982 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
983 		if (fc_nport->nport_hdl == nport_hdl) {
984 			return fc_nport;
985 		}
986 	}
987 
988 	return NULL;
989 }
990 
/* Resolve (port handle, nport handle) to an nport; NULL if either the port
 * or the nport does not exist. */
struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = nvmf_fc_port_lookup(port_hdl);

	return fc_port != NULL ? nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl) : NULL;
}
1003 
/*
 * Resolve (d_id, s_id) to the hwqp's nport and remote port.
 *
 * Searches the owning fc_port's nport list for a matching destination id,
 * then that nport's remote-port list for a matching source id. On success
 * writes both out-params and returns 0; returns -EINVAL on NULL arguments
 * and -ENOENT when no match is found.
 */
static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			/* d_id matched but no rport with this s_id exists;
			 * d_id is unique per nport, so stop searching. */
			break;
		}
	}

	return -ENOENT;
}
1043 
1044 /* Returns true if the Nport is empty of all rem_ports */
1045 bool
1046 nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1047 {
1048 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1049 		assert(nport->rport_count == 0);
1050 		return true;
1051 	} else {
1052 		return false;
1053 	}
1054 }
1055 
1056 int
1057 nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1058 			enum spdk_nvmf_fc_object_state state)
1059 {
1060 	if (nport) {
1061 		nport->nport_state = state;
1062 		return 0;
1063 	} else {
1064 		return -EINVAL;
1065 	}
1066 }
1067 
1068 bool
1069 nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
1070 			   struct spdk_nvmf_fc_remote_port_info *rem_port)
1071 {
1072 	if (nport && rem_port) {
1073 		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
1074 		nport->rport_count++;
1075 		return 0;
1076 	} else {
1077 		return -EINVAL;
1078 	}
1079 }
1080 
1081 bool
1082 nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
1083 			      struct spdk_nvmf_fc_remote_port_info *rem_port)
1084 {
1085 	if (nport && rem_port) {
1086 		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
1087 		nport->rport_count--;
1088 		return 0;
1089 	} else {
1090 		return -EINVAL;
1091 	}
1092 }
1093 
1094 int
1095 nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1096 			enum spdk_nvmf_fc_object_state state)
1097 {
1098 	if (rport) {
1099 		rport->rport_state = state;
1100 		return 0;
1101 	} else {
1102 		return -EINVAL;
1103 	}
1104 }
1105 int
1106 nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1107 			enum spdk_nvmf_fc_object_state state)
1108 {
1109 	if (assoc) {
1110 		assoc->assoc_state = state;
1111 		return 0;
1112 	} else {
1113 		return -EINVAL;
1114 	}
1115 }
1116 
1117 static struct spdk_nvmf_fc_association *
1118 nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1119 {
1120 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1121 	struct spdk_nvmf_fc_conn *fc_conn;
1122 
1123 	if (!qpair) {
1124 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1125 		return NULL;
1126 	}
1127 
1128 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1129 
1130 	return fc_conn->fc_assoc;
1131 }
1132 
1133 bool
1134 nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1135 		       struct spdk_nvmf_ctrlr *ctrlr)
1136 {
1137 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1138 	struct spdk_nvmf_fc_association *assoc = NULL;
1139 
1140 	if (!ctrlr) {
1141 		return false;
1142 	}
1143 
1144 	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
1145 	if (!fc_nport) {
1146 		return false;
1147 	}
1148 
1149 	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
1150 	if (assoc && assoc->tgtport == fc_nport) {
1151 		SPDK_DEBUGLOG(nvmf_fc,
1152 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1153 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1154 			      nport_hdl);
1155 		return true;
1156 	}
1157 	return false;
1158 }
1159 
/*
 * Drop a pending LS request: unlink it from the hwqp's ls_pending_queue and
 * return its receive-queue buffer to the hardware.  The ls_rqst structure
 * itself lives inside that buffer, so it must not be touched afterwards.
 */
static void
nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	assert(ls_rqst);

	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);

	/* Return buffer to chip */
	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
}
1171 
1172 static int
1173 nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
1174 			  struct spdk_nvmf_fc_nport *nport,
1175 			  struct spdk_nvmf_fc_remote_port_info *rport)
1176 {
1177 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1178 	int num_deleted = 0;
1179 
1180 	assert(hwqp);
1181 	assert(nport);
1182 	assert(rport);
1183 
1184 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1185 		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
1186 			num_deleted++;
1187 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1188 		}
1189 	}
1190 	return num_deleted;
1191 }
1192 
1193 static void
1194 nvmf_fc_req_bdev_abort(void *arg1)
1195 {
1196 	struct spdk_nvmf_fc_request *fc_req = arg1;
1197 	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1198 	int i;
1199 
1200 	/* Initial release - we don't have to abort Admin Queue or
1201 	 * Fabric commands. The AQ commands supported at this time are
1202 	 * Get-Log-Page,
1203 	 * Identify
1204 	 * Set Features
1205 	 * Get Features
1206 	 * AER -> Special case and handled differently.
1207 	 * Every one of the above Admin commands (except AER) run
1208 	 * to completion and so an Abort of such commands doesn't
1209 	 * make sense.
1210 	 */
1211 	/* The Fabric commands supported are
1212 	 * Property Set
1213 	 * Property Get
1214 	 * Connect -> Special case (async. handling). Not sure how to
1215 	 * handle at this point. Let it run to completion.
1216 	 */
1217 	for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
1218 		if (ctrlr->aer_req[i] == &fc_req->req) {
1219 			SPDK_NOTICELOG("Abort AER request\n");
1220 			nvmf_qpair_free_aer(fc_req->req.qpair);
1221 		}
1222 	}
1223 }
1224 
1225 void
1226 nvmf_fc_request_abort_complete(void *arg1)
1227 {
1228 	struct spdk_nvmf_fc_request *fc_req =
1229 		(struct spdk_nvmf_fc_request *)arg1;
1230 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1231 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1232 	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;
1233 
1234 	/* Make a copy of the cb list from fc_req */
1235 	TAILQ_INIT(&abort_cbs);
1236 	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);
1237 
1238 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1239 		       fc_req_state_strs[fc_req->state]);
1240 
1241 	_nvmf_fc_request_free(fc_req);
1242 
1243 	/* Request abort completed. Notify all the callbacks */
1244 	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
1245 		/* Notify */
1246 		ctx->cb(hwqp, 0, ctx->cb_args);
1247 		/* Remove */
1248 		TAILQ_REMOVE(&abort_cbs, ctx, link);
1249 		/* free */
1250 		free(ctx);
1251 	}
1252 }
1253 
/*
 * Abort an outstanding FC request.
 *
 * send_abts   - whether an ABTS should be sent on the wire for the exchange.
 * cb/cb_args  - optional completion callback; queued on fc_req->abort_cbs and
 *               invoked later from nvmf_fc_request_abort_complete().
 *
 * The action depends on fc_req->state: requests in the bdev layer are aborted
 * via a message to the hwqp thread, requests in data transfer or response
 * phases get a hardware abort issued, and pending/fused-waiting requests are
 * unlinked and completed as aborted immediately.
 */
void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion.
	 * (The callback above was still queued, so it will fire when the
	 * earlier abort completes.) */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		goto complete;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}
1340 
1341 static int
1342 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1343 {
1344 	uint32_t length = fc_req->req.length;
1345 	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1346 	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
1347 	struct spdk_nvmf_transport *transport = group->transport;
1348 
1349 	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1350 		return -ENOMEM;
1351 	}
1352 
1353 	return 0;
1354 }
1355 
/*
 * Start executing a newly arrived (or previously pending) FC request.
 *
 * Acquires an exchange (unless this command responds via send-frame) and any
 * required data buffers, then either kicks off the host->controller data pull
 * (writes) or hands the request to the generic NVMf layer (reads / non-data
 * commands).
 *
 * Returns 0 on success; -EAGAIN when an exchange or buffers are temporarily
 * unavailable, in which case the caller re-queues the request.
 */
static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we dont use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			/* Give back the exchange taken above so it isn't leaked. */
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
		fc_req->req.data = fc_req->req.iov[0].iov_base;
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped return success to caller */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
1403 
1404 static void
1405 nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
1406 			  struct spdk_nvmf_fc_frame_hdr *fchdr)
1407 {
1408 	uint8_t df_ctl = fchdr->df_ctl;
1409 	uint32_t f_ctl = fchdr->f_ctl;
1410 
1411 	/* VMID */
1412 	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
1413 		struct spdk_nvmf_fc_vm_header *vhdr;
1414 		uint32_t vmhdr_offset = 0;
1415 
1416 		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
1417 			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
1418 		}
1419 
1420 		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
1421 			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
1422 		}
1423 
1424 		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
1425 				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
1426 		fc_req->app_id = from_be32(&vhdr->src_vmid);
1427 	}
1428 
1429 	/* Priority */
1430 	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
1431 		fc_req->csctl = fchdr->cs_ctl;
1432 	}
1433 }
1434 
/*
 * Validate and admit an NVMe command frame received on a hwqp.
 *
 * Checks the CMD IU format, the data-transfer direction, the connection
 * lookup (by conn_id hash), the frame's s_id/d_id against the connection,
 * association/connection/qpair states, and the transfer length against MDTS.
 * On success a request object is allocated, populated from the frame and
 * CMD IU, and either executed immediately or queued on the poll group's
 * pending buffer queue.
 *
 * Returns 0 on success (caller may release the RQ buffer) or a negative
 * errno describing the validation failure (frame is dropped, counters
 * updated).
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	/* s_id/d_id are 24-bit big-endian fields in the frame header. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Resolve the connection this command belongs to. */
	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (fc_conn->qpair.state != SPDK_NVMF_QPAIR_ACTIVE) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	/* Populate the request from the frame and CMD IU. */
	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	/* Preserve ordering: if anything is already pending, queue behind it;
	 * also queue when immediate execution cannot get resources. */
	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}
1553 
1554 /*
1555  * These functions are called from the FC LLD
1556  */
1557 
1558 void
1559 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1560 {
1561 	struct spdk_nvmf_fc_hwqp *hwqp;
1562 	struct spdk_nvmf_transport_poll_group *group;
1563 
1564 	if (!fc_req) {
1565 		return;
1566 	}
1567 	hwqp = fc_req->hwqp;
1568 
1569 	if (fc_req->xchg) {
1570 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1571 		fc_req->xchg = NULL;
1572 	}
1573 
1574 	/* Release IO buffers */
1575 	if (fc_req->req.data_from_pool) {
1576 		group = &hwqp->fgroup->group;
1577 		spdk_nvmf_request_free_buffers(&fc_req->req, group,
1578 					       group->transport);
1579 	}
1580 	fc_req->req.data = NULL;
1581 	fc_req->req.iovcnt  = 0;
1582 
1583 	/* Free Fc request */
1584 	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
1585 }
1586 
1587 void
1588 nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
1589 			  enum spdk_nvmf_fc_request_state state)
1590 {
1591 	assert(fc_req->magic != 0xDEADBEEF);
1592 
1593 	SPDK_DEBUGLOG(nvmf_fc,
1594 		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
1595 		      nvmf_fc_request_get_state_str(fc_req->state),
1596 		      nvmf_fc_request_get_state_str(state));
1597 	nvmf_fc_record_req_trace_point(fc_req, state);
1598 	fc_req->state = state;
1599 }
1600 
1601 char *
1602 nvmf_fc_request_get_state_str(int state)
1603 {
1604 	static char *unk_str = "unknown";
1605 
1606 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1607 		fc_req_state_strs[state] : unk_str);
1608 }
1609 
/*
 * Dispatch a received FC frame on a hwqp.
 *
 * LS request frames (r_ctl LS_REQUEST, type NVMF_DATA) are validated against
 * the nport/rport tables, packaged into an ls_rqst carved out of the RQ
 * buffer itself, and either handed to the LS module (if an exchange is
 * available and nothing is already pending) or queued on ls_pending_queue.
 * NVMe command frames (r_ctl CMD_REQ, type FC_EXCHANGE) go to
 * nvmf_fc_hwqp_handle_request(); on its success the RQ buffer is released
 * here.  Anything else is counted and dropped.
 *
 * Returns 0 on success, negative errno on drop.  Note: for the LS path the
 * RQ buffer stays owned by the ls_rqst until it completes or is released.
 */
int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* s_id/d_id are 24-bit big-endian fields in the frame header. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		/* Only fully-created ports may accept LS requests. */
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		/* Preserve ordering: only take an exchange when nothing is
		 * already waiting in the pending queue. */
		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			/* Command was copied out; the RQ buffer can go back. */
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1718 
1719 void
1720 nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1721 {
1722 	struct spdk_nvmf_request *req = NULL, *tmp;
1723 	struct spdk_nvmf_fc_request *fc_req;
1724 	int budget = 64;
1725 
1726 	if (!hwqp->fgroup) {
1727 		/* LS queue is tied to acceptor_poll group and LS pending requests
1728 		 * are stagged and processed using hwqp->ls_pending_queue.
1729 		 */
1730 		return;
1731 	}
1732 
1733 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1734 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1735 		if (!nvmf_fc_request_execute(fc_req)) {
1736 			/* Successfully posted, Delete from pending. */
1737 			nvmf_fc_request_remove_from_pending(fc_req);
1738 		}
1739 
1740 		if (budget) {
1741 			budget--;
1742 		} else {
1743 			return;
1744 		}
1745 	}
1746 }
1747 
/*
 * Drain the hwqp's pending LS request queue.
 *
 * For each queued LS request, re-validate its nport/rport (they may have
 * gone away while the request waited) — invalid entries are released.  Valid
 * entries are handed to the LS module once an exchange can be allocated;
 * when the exchange pool runs dry, processing stops and the remainder stays
 * queued for the next poll.
 */
void
nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1793 
1794 int
1795 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1796 {
1797 	int rc = 0;
1798 	struct spdk_nvmf_request *req = &fc_req->req;
1799 	struct spdk_nvmf_qpair *qpair = req->qpair;
1800 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1801 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1802 	uint16_t ersp_len = 0;
1803 
1804 	/* set sq head value in resp */
1805 	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);
1806 
1807 	/* Increment connection responses */
1808 	fc_conn->rsp_count++;
1809 
1810 	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1811 				       fc_req->transferred_len)) {
1812 		/* Fill ERSP Len */
1813 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1814 				    sizeof(uint32_t)));
1815 		fc_req->ersp.ersp_len = ersp_len;
1816 
1817 		/* Fill RSN */
1818 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1819 		fc_conn->rsn++;
1820 
1821 		/* Fill transfer length */
1822 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);
1823 
1824 		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
1825 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1826 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1827 	} else {
1828 		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
1829 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1830 	}
1831 
1832 	return rc;
1833 }
1834 
1835 bool
1836 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1837 			   uint32_t rsp_cnt, uint32_t xfer_len)
1838 {
1839 	struct spdk_nvmf_request *req = &fc_req->req;
1840 	struct spdk_nvmf_qpair *qpair = req->qpair;
1841 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1842 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1843 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1844 	uint16_t status = *((uint16_t *)&rsp->status);
1845 
1846 	/*
1847 	 * Check if we need to send ERSP
1848 	 * 1) For every N responses where N == ersp_ratio
1849 	 * 2) Fabric commands.
1850 	 * 3) Completion status failed or Completion dw0 or dw1 valid.
1851 	 * 4) SQ == 90% full.
1852 	 * 5) Transfer length not equal to CMD IU length
1853 	 */
1854 
1855 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1856 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1857 	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
1858 	    (req->length != xfer_len)) {
1859 		return true;
1860 	}
1861 	return false;
1862 }
1863 
/*
 * Transport completion callback from the generic NVMf layer.
 *
 * Aborted requests are handed to the poller for deferred cleanup (avoids
 * freeing in this call context).  Successful reads start the data-out
 * transfer; everything else moves to the appropriate *_RSP state and posts
 * the response.  On a transmit/setup error the request is freed here.
 * Always returns 0.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					(void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		/* Successful read: send the data before the response. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1899 
1900 struct spdk_nvmf_tgt *
1901 nvmf_fc_get_tgt(void)
1902 {
1903 	if (g_nvmf_ftransport) {
1904 		return g_nvmf_ftransport->transport.tgt;
1905 	}
1906 	return NULL;
1907 }
1908 
1909 /*
1910  * FC Transport Public API begins here
1911  */
1912 
1913 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1914 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1915 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1916 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1917 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1918 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1919 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1920 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1921 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1922 
1923 static void
1924 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1925 {
1926 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1927 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1928 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1929 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1930 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1931 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1932 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1933 }
1934 
1935 static int nvmf_fc_accept(void *ctx);
1936 
1937 static struct spdk_nvmf_transport *
1938 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
1939 {
1940 	uint32_t sge_count;
1941 
1942 	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
1943 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1944 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1945 		     "  max_aq_depth=%d\n",
1946 		     opts->max_queue_depth,
1947 		     opts->max_io_size,
1948 		     opts->max_qpairs_per_ctrlr - 1,
1949 		     opts->io_unit_size,
1950 		     opts->max_aq_depth);
1951 
1952 	if (g_nvmf_ftransport) {
1953 		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
1954 		return NULL;
1955 	}
1956 
1957 	if (spdk_env_get_last_core() < 1) {
1958 		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
1959 			    spdk_env_get_last_core() + 1);
1960 		return NULL;
1961 	}
1962 
1963 	sge_count = opts->max_io_size / opts->io_unit_size;
1964 	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
1965 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
1966 		return NULL;
1967 	}
1968 
1969 	g_nvmf_fc_main_thread = spdk_get_thread();
1970 	g_nvmf_fgroup_count = 0;
1971 	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));
1972 
1973 	if (!g_nvmf_ftransport) {
1974 		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
1975 		return NULL;
1976 	}
1977 
1978 	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
1979 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
1980 		free(g_nvmf_ftransport);
1981 		g_nvmf_ftransport = NULL;
1982 		return NULL;
1983 	}
1984 
1985 	g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept,
1986 					   &g_nvmf_ftransport->transport, opts->acceptor_poll_rate);
1987 	if (!g_nvmf_ftransport->accept_poller) {
1988 		free(g_nvmf_ftransport);
1989 		g_nvmf_ftransport = NULL;
1990 		return NULL;
1991 	}
1992 
1993 	/* initialize the low level FC driver */
1994 	nvmf_fc_lld_init();
1995 
1996 	return &g_nvmf_ftransport->transport;
1997 }
1998 
1999 static void
2000 nvmf_fc_destroy_done_cb(void *cb_arg)
2001 {
2002 	free(g_nvmf_ftransport);
2003 	if (g_transport_destroy_done_cb) {
2004 		g_transport_destroy_done_cb(cb_arg);
2005 		g_transport_destroy_done_cb = NULL;
2006 	}
2007 }
2008 
/*
 * Tear down the FC transport: free any remaining poll groups, stop the
 * acceptor poller, then hand off to the low level driver for asynchronous
 * cleanup.  cb_fn is stashed globally and invoked from
 * nvmf_fc_destroy_done_cb() once the LLD finishes.  Always returns 0.
 */
static int
nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
{
	if (transport) {
		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;

		/* clean up any FC poll groups still around */
		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
			free(fgroup);
		}

		spdk_poller_unregister(&g_nvmf_ftransport->accept_poller);
		g_nvmf_fgroup_count = 0;
		g_transport_destroy_done_cb = cb_fn;

		/* low level FC driver clean up */
		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
	}

	return 0;
}
2032 
/*
 * Listen callback for the generic transport layer.  Intentionally a no-op
 * that reports success: this function does not record the trid or touch any
 * port state (FC ports are managed elsewhere in this transport).
 */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
	       struct spdk_nvmf_listen_opts *listen_opts)
{
	return 0;
}
2039 
/*
 * Stop-listen callback for the generic transport layer.  Intentionally a
 * no-op, mirroring nvmf_fc_listen().
 */
static void
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
}
2045 
2046 static int
2047 nvmf_fc_accept(void *ctx)
2048 {
2049 	struct spdk_nvmf_fc_port *fc_port = NULL;
2050 	uint32_t count = 0;
2051 	static bool start_lld = false;
2052 
2053 	if (spdk_unlikely(!start_lld)) {
2054 		start_lld  = true;
2055 		nvmf_fc_lld_start();
2056 	}
2057 
2058 	/* poll the LS queue on each port */
2059 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2060 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2061 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2062 		}
2063 	}
2064 
2065 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
2066 }
2067 
2068 static void
2069 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2070 		 struct spdk_nvme_transport_id *trid,
2071 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2072 {
2073 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2074 	entry->adrfam = trid->adrfam;
2075 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2076 
2077 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2078 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2079 }
2080 
2081 static struct spdk_nvmf_transport_poll_group *
2082 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport,
2083 			  struct spdk_nvmf_poll_group *group)
2084 {
2085 	struct spdk_nvmf_fc_poll_group *fgroup;
2086 	struct spdk_nvmf_fc_transport *ftransport =
2087 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2088 
2089 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2090 	if (!fgroup) {
2091 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2092 		return NULL;
2093 	}
2094 
2095 	TAILQ_INIT(&fgroup->hwqp_list);
2096 
2097 	pthread_mutex_lock(&ftransport->lock);
2098 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2099 	g_nvmf_fgroup_count++;
2100 	pthread_mutex_unlock(&ftransport->lock);
2101 
2102 	return &fgroup->group;
2103 }
2104 
2105 static void
2106 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2107 {
2108 	struct spdk_nvmf_fc_poll_group *fgroup;
2109 	struct spdk_nvmf_fc_transport *ftransport =
2110 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2111 
2112 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2113 	pthread_mutex_lock(&ftransport->lock);
2114 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2115 	g_nvmf_fgroup_count--;
2116 	pthread_mutex_unlock(&ftransport->lock);
2117 
2118 	free(fgroup);
2119 }
2120 
2121 static int
2122 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2123 		       struct spdk_nvmf_qpair *qpair)
2124 {
2125 	struct spdk_nvmf_fc_poll_group *fgroup;
2126 	struct spdk_nvmf_fc_conn *fc_conn;
2127 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2128 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2129 	bool hwqp_found = false;
2130 
2131 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2132 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2133 
2134 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2135 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2136 			hwqp_found = true;
2137 			break;
2138 		}
2139 	}
2140 
2141 	if (!hwqp_found) {
2142 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2143 		goto err;
2144 	}
2145 
2146 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2147 					 &fc_conn->conn_id,
2148 					 fc_conn->max_queue_depth)) {
2149 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2150 		goto err;
2151 	}
2152 
2153 	fc_conn->hwqp = hwqp;
2154 
2155 	/* If this is for ADMIN connection, then update assoc ID. */
2156 	if (fc_conn->qpair.qid == 0) {
2157 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2158 	}
2159 
2160 	api_data = &fc_conn->create_opd->u.add_conn;
2161 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2162 	return 0;
2163 err:
2164 	return -1;
2165 }
2166 
2167 static int
2168 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2169 {
2170 	uint32_t count = 0;
2171 	struct spdk_nvmf_fc_poll_group *fgroup;
2172 	struct spdk_nvmf_fc_hwqp *hwqp;
2173 
2174 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2175 
2176 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2177 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2178 			count += nvmf_fc_process_queue(hwqp);
2179 		}
2180 	}
2181 
2182 	return (int) count;
2183 }
2184 
2185 static int
2186 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2187 {
2188 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2189 
2190 	if (!fc_req->is_aborted) {
2191 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2192 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2193 	} else {
2194 		nvmf_fc_request_abort_complete(fc_req);
2195 	}
2196 
2197 	return 0;
2198 }
2199 
2200 static void
2201 nvmf_fc_connection_delete_done_cb(void *arg)
2202 {
2203 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2204 
2205 	if (fc_ctx->cb_fn) {
2206 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2207 	}
2208 	free(fc_ctx);
2209 }
2210 
/* Runs on the FC main thread (posted from nvmf_fc_close_qpair). Tears down
 * a qpair's FC connection:
 * - conn_id never assigned: the create flow failed mid-way, so report the
 *   LS add-connection failure if the create opd still exists.
 * - connection fully CREATED: start an async delete; the completion path
 *   invokes nvmf_fc_connection_delete_done_cb (which frees fc_ctx).
 * In every other case, or if the delete cannot be started, complete and
 * free the context immediately.
 */
static void
_nvmf_fc_close_qpair(void *arg)
{
	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
	struct spdk_nvmf_fc_conn *fc_conn;
	int rc;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;

		if (fc_conn->create_opd) {
			api_data = &fc_conn->create_opd->u.add_conn;

			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
						    api_data->args.fc_conn, api_data->aq_conn);
		}
	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		rc = nvmf_fc_delete_connection(fc_conn, false, true,
					       nvmf_fc_connection_delete_done_cb, fc_ctx);
		if (!rc) {
			/* Wait for transport to complete its work. */
			return;
		}

		SPDK_ERRLOG("%s: Delete FC connection failed.\n", __func__);
	}

	nvmf_fc_connection_delete_done_cb(fc_ctx);
}
2242 
2243 static void
2244 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2245 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2246 {
2247 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2248 
2249 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2250 	if (!fc_ctx) {
2251 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.");
2252 		if (cb_fn) {
2253 			cb_fn(cb_arg);
2254 		}
2255 		return;
2256 	}
2257 	fc_ctx->qpair = qpair;
2258 	fc_ctx->cb_fn = cb_fn;
2259 	fc_ctx->cb_ctx = cb_arg;
2260 	fc_ctx->qpair_thread = spdk_get_thread();
2261 
2262 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2263 }
2264 
2265 static int
2266 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2267 			    struct spdk_nvme_transport_id *trid)
2268 {
2269 	struct spdk_nvmf_fc_conn *fc_conn;
2270 
2271 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2272 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2273 	return 0;
2274 }
2275 
2276 static int
2277 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2278 			     struct spdk_nvme_transport_id *trid)
2279 {
2280 	struct spdk_nvmf_fc_conn *fc_conn;
2281 
2282 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2283 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2284 	return 0;
2285 }
2286 
2287 static int
2288 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2289 			      struct spdk_nvme_transport_id *trid)
2290 {
2291 	struct spdk_nvmf_fc_conn *fc_conn;
2292 
2293 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2294 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2295 	return 0;
2296 }
2297 
static void
nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
			    struct spdk_nvmf_request *req)
{
	/* Simply complete the abort admin command; no transport-level cancel
	 * of the target request is attempted here. */
	spdk_nvmf_request_complete(req);
}
2304 
/* FC transport operations, registered with the generic NVMe-oF target layer. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.name = "FC",
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	/* listen/stop_listen are no-ops for FC; ports come from HW admin events. */
	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
	.qpair_abort_request = nvmf_fc_qpair_abort_request,
};
2330 
2331 /* Initializes the data for the creation of a FC-Port object in the SPDK
2332  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2333  * the API to the library. The contents added to this well defined structure
2334  * is private to each vendors implementation.
2335  */
static int
nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
			      struct spdk_nvmf_fc_hw_port_init_args *args)
{
	int rc = 0;
	/* Used a high number for the LS HWQP so that it does not clash with the
	 * IO HWQP's and immediately shows a LS queue during tracing.
	 */
	uint32_t i;

	/* Copy the identifying fields supplied by the low level driver. */
	fc_port->port_hdl       = args->port_handle;
	fc_port->lld_fc_port	= args->lld_fc_port;
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
	fc_port->fcp_rq_id      = args->fcp_rq_id;
	fc_port->num_io_queues  = args->io_queue_cnt;

	/*
	 * Set port context from init args. Used for FCP port stats.
	 */
	fc_port->port_ctx = args->port_ctx;

	/*
	 * Initialize the LS queue wherever needed.
	 */
	fc_port->ls_queue.queues = args->ls_queue;
	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
	/* See comment above: hwqp_id chosen beyond any possible IO hwqp id. */
	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
	fc_port->ls_queue.is_ls_queue = true;

	/*
	 * Initialize the LS queue.
	 */
	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
	if (rc) {
		return rc;
	}

	/*
	 * Initialize the IO queues.
	 */
	for (i = 0; i < args->io_queue_cnt; i++) {
		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
		hwqp->hwqp_id = i;
		hwqp->queues = args->io_queues[i];
		hwqp->is_ls_queue = false;
		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
		if (rc) {
			/* Unwind: free the hash tables of every hwqp initialized
			 * so far - IO queues [0, i) plus the LS queue. */
			for (; i > 0; --i) {
				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
			}
			rte_hash_free(fc_port->ls_queue.connection_list_hash);
			rte_hash_free(fc_port->ls_queue.rport_list_hash);
			return rc;
		}
	}

	/*
	 * Initialize the LS processing for port
	 */
	nvmf_fc_ls_init(fc_port);

	/*
	 * Initialize the list of nport on this HW port.
	 */
	TAILQ_INIT(&fc_port->nport_list);
	fc_port->num_nports = 0;

	return 0;
}
2406 
2407 /*
2408  * FC port must have all its nports deleted before transitioning to offline state.
2409  */
static void
nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
{
	struct spdk_nvmf_fc_nport *nport = NULL;
	/* All nports must have been deleted at this point for this fc port */
	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	/* Mark the nport states to be zombie, if they exist */
	/* Defensive path for builds where DEV_VERIFY (assert) compiles out:
	 * any leftover nports are marked zombie rather than left active. */
	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		}
	}
}
2424 
/* Completion callback for an IT (initiator-target) delete. On success,
 * forwards the result to the caller's callback; on failure only logs.
 * Frees the callback context in both cases. Runs on the FC main thread.
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	/* Snapshot identifying fields for logging before cb_data is freed. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	free(cb_data);

	/* NOTE(review): message says "assoc_cb" although this is the IT-delete
	 * callback; looks copy-pasted from nvmf_fc_adm_i_t_delete_assoc_cb -
	 * confirm before changing the log text. */
	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2467 
/* Per-association completion callback for an ITN delete (scheduled by
 * nvmf_fc_adm_i_t_delete_assoc). When the rport's association count drains
 * to zero, the rport is unlinked from the nport, the caller's callback
 * fires, and both rport and the callback context are freed.
 * Runs on the FC main thread.
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshot fields for logging; rport/args may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* NOTE(review): presumably the successful delete path decrements
		 * these counts itself; compensate here on failure - confirm. */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
2529 
2530 /**
2531  * Process a IT delete.
2532  */
2533 static void
2534 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
2535 			     struct spdk_nvmf_fc_remote_port_info *rport,
2536 			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
2537 			     void *cb_ctx)
2538 {
2539 	int err = 0;
2540 	struct spdk_nvmf_fc_association *assoc = NULL;
2541 	int assoc_err = 0;
2542 	uint32_t num_assoc = 0;
2543 	uint32_t num_assoc_del_scheduled = 0;
2544 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
2545 	uint8_t port_hdl = nport->port_hdl;
2546 	uint32_t s_id = rport->s_id;
2547 	uint32_t rpi = rport->rpi;
2548 	uint32_t assoc_count = rport->assoc_count;
2549 	char log_str[256];
2550 
2551 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
2552 		      nport->nport_hdl);
2553 
2554 	/*
2555 	 * Allocate memory for callback data.
2556 	 * This memory will be freed by the callback function.
2557 	 */
2558 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
2559 	if (NULL == cb_data) {
2560 		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
2561 		err = -ENOMEM;
2562 		goto out;
2563 	}
2564 	cb_data->nport       = nport;
2565 	cb_data->rport       = rport;
2566 	cb_data->port_handle = port_hdl;
2567 	cb_data->cb_func     = cb_func;
2568 	cb_data->cb_ctx      = cb_ctx;
2569 
2570 	/*
2571 	 * Delete all associations, if any, related with this ITN/remote_port.
2572 	 */
2573 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
2574 		num_assoc++;
2575 		if (assoc->s_id == s_id) {
2576 			assoc_err = nvmf_fc_delete_association(nport,
2577 							       assoc->assoc_id,
2578 							       false /* send abts */, false,
2579 							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
2580 			if (0 != assoc_err) {
2581 				/*
2582 				 * Mark this association as zombie.
2583 				 */
2584 				err = -EINVAL;
2585 				DEV_VERIFY(!"Error while deleting association");
2586 				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2587 			} else {
2588 				num_assoc_del_scheduled++;
2589 			}
2590 		}
2591 	}
2592 
2593 out:
2594 	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
2595 		/*
2596 		 * Since there are no association_delete calls
2597 		 * successfully scheduled, the association_delete
2598 		 * callback function will never be called.
2599 		 * In this case, call the callback function now.
2600 		 */
2601 		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
2602 	}
2603 
2604 	snprintf(log_str, sizeof(log_str),
2605 		 "IT delete associations on nport:%d end. "
2606 		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
2607 		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);
2608 
2609 	if (err == 0) {
2610 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2611 	} else {
2612 		SPDK_ERRLOG("%s", log_str);
2613 	}
2614 }
2615 
/* Poller-API completion for a single hwqp quiesce. Frees the per-queue args
 * and, when the last outstanding queue reports in, marks the port quiesced,
 * invokes the port-level callback and frees the shared quiesce context.
 * Runs on the FC main thread.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the called of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2668 
2669 static int
2670 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2671 			     spdk_nvmf_fc_poller_api_cb cb_func)
2672 {
2673 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2674 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2675 	int err = 0;
2676 
2677 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2678 
2679 	if (args == NULL) {
2680 		err = -ENOMEM;
2681 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2682 		goto done;
2683 	}
2684 	args->hwqp = fc_hwqp;
2685 	args->ctx = ctx;
2686 	args->cb_info.cb_func = cb_func;
2687 	args->cb_info.cb_data = args;
2688 	args->cb_info.cb_thread = spdk_get_thread();
2689 
2690 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2691 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2692 	if (rc) {
2693 		free(args);
2694 		err = -EINVAL;
2695 	}
2696 
2697 done:
2698 	return err;
2699 }
2700 
2701 /*
2702  * Hw port Quiesce
2703  */
2704 static int
2705 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2706 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2707 {
2708 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2709 	uint32_t i = 0;
2710 	int err = 0;
2711 
2712 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2713 
2714 	/*
2715 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2716 	 * and execute the callback.
2717 	 */
2718 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2719 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2720 	}
2721 
2722 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2723 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
2724 			      fc_port->port_hdl);
2725 		/*
2726 		 * Execute the callback function directly.
2727 		 */
2728 		cb_func(ctx, err);
2729 		goto out;
2730 	}
2731 
2732 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2733 
2734 	if (port_quiesce_ctx == NULL) {
2735 		err = -ENOMEM;
2736 		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
2737 			    fc_port->port_hdl);
2738 		goto out;
2739 	}
2740 
2741 	port_quiesce_ctx->quiesce_count = 0;
2742 	port_quiesce_ctx->ctx = ctx;
2743 	port_quiesce_ctx->cb_func = cb_func;
2744 
2745 	/*
2746 	 * Quiesce the LS queue.
2747 	 */
2748 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2749 					   nvmf_fc_adm_queue_quiesce_cb);
2750 	if (err != 0) {
2751 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2752 		goto out;
2753 	}
2754 	port_quiesce_ctx->quiesce_count++;
2755 
2756 	/*
2757 	 * Quiesce the IO queues.
2758 	 */
2759 	for (i = 0; i < fc_port->num_io_queues; i++) {
2760 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2761 						   port_quiesce_ctx,
2762 						   nvmf_fc_adm_queue_quiesce_cb);
2763 		if (err != 0) {
2764 			DEV_VERIFY(0);
2765 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2766 		}
2767 		port_quiesce_ctx->quiesce_count++;
2768 	}
2769 
2770 out:
2771 	if (port_quiesce_ctx && err != 0) {
2772 		free(port_quiesce_ctx);
2773 	}
2774 	return err;
2775 }
2776 
2777 /*
2778  * Initialize and add a HW port entry to the global
2779  * HW port list.
2780  */
2781 static void
2782 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2783 {
2784 	ASSERT_SPDK_FC_MAIN_THREAD();
2785 	struct spdk_nvmf_fc_port *fc_port = NULL;
2786 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2787 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2788 			api_data->api_args;
2789 	int err = 0;
2790 
2791 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2792 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2793 		err = EINVAL;
2794 		goto abort_port_init;
2795 	}
2796 
2797 	/*
2798 	 * 1. Check for duplicate initialization.
2799 	 */
2800 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2801 	if (fc_port != NULL) {
2802 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2803 		goto abort_port_init;
2804 	}
2805 
2806 	/*
2807 	 * 2. Get the memory to instantiate a fc port.
2808 	 */
2809 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2810 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2811 	if (fc_port == NULL) {
2812 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2813 		err = -ENOMEM;
2814 		goto abort_port_init;
2815 	}
2816 
2817 	/* assign the io_queues array */
2818 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2819 				     struct spdk_nvmf_fc_port));
2820 
2821 	/*
2822 	 * 3. Initialize the contents for the FC-port
2823 	 */
2824 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2825 
2826 	if (err != 0) {
2827 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2828 		DEV_VERIFY(!"Data initialization failed for fc_port");
2829 		goto abort_port_init;
2830 	}
2831 
2832 	/*
2833 	 * 4. Add this port to the global fc port list in the library.
2834 	 */
2835 	nvmf_fc_port_add(fc_port);
2836 
2837 abort_port_init:
2838 	if (err && fc_port) {
2839 		free(fc_port);
2840 	}
2841 	if (api_data->cb_func != NULL) {
2842 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2843 	}
2844 
2845 	free(arg);
2846 
2847 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2848 		      args->port_handle, err);
2849 }
2850 
2851 static void
2852 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2853 {
2854 	struct spdk_nvmf_fc_abts_ctx *ctx;
2855 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2856 
2857 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2858 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2859 		ctx = args->cb_info.cb_data;
2860 		if (ctx) {
2861 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2862 				free(ctx->sync_poller_args);
2863 				free(ctx->abts_poller_args);
2864 				free(ctx);
2865 			}
2866 		}
2867 	}
2868 }
2869 
/* Admin event: free an FC HW port. Fails with -EINVAL if the port is
 * unknown, or -EIO if it still has nports attached. Otherwise flushes the
 * pending hwqp sync callbacks, frees each hwqp's hash tables, unregisters
 * the port and frees it. Always reports the result via the callback.
 */
static void
nvmf_fc_adm_evnt_hw_port_free(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	int err = 0, i;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
			api_data->api_args;

	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		SPDK_ERRLOG("Hw port %d: nports not cleared up yet.\n", args->port_handle);
		err = -EIO;
		goto out;
	}

	/* Clean up and free fc_port */
	hwqp = &fc_port->ls_queue;
	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
	rte_hash_free(hwqp->connection_list_hash);
	rte_hash_free(hwqp->rport_list_hash);

	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
		hwqp = &fc_port->io_queues[i];

		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
		rte_hash_free(hwqp->connection_list_hash);
		rte_hash_free(hwqp->rport_list_hash);
	}

	nvmf_fc_port_remove(fc_port);
	free(fc_port);
out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
		      args->port_handle, err);
	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
	}

	free(arg);
}
2919 
2920 /*
2921  * Online a HW port.
2922  */
2923 static void
2924 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2925 {
2926 	ASSERT_SPDK_FC_MAIN_THREAD();
2927 	struct spdk_nvmf_fc_port *fc_port = NULL;
2928 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2929 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2930 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2931 			api_data->api_args;
2932 	int i = 0;
2933 	int err = 0;
2934 
2935 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2936 	if (fc_port) {
2937 		/* Set the port state to online */
2938 		err = nvmf_fc_port_set_online(fc_port);
2939 		if (err != 0) {
2940 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2941 			DEV_VERIFY(!"Hw port online failed");
2942 			goto out;
2943 		}
2944 
2945 		hwqp = &fc_port->ls_queue;
2946 		hwqp->context = NULL;
2947 		(void)nvmf_fc_hwqp_set_online(hwqp);
2948 
2949 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2950 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2951 			hwqp = &fc_port->io_queues[i];
2952 			hwqp->context = NULL;
2953 			(void)nvmf_fc_hwqp_set_online(hwqp);
2954 			nvmf_fc_poll_group_add_hwqp(hwqp);
2955 		}
2956 	} else {
2957 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2958 		err = -EINVAL;
2959 	}
2960 
2961 out:
2962 	if (api_data->cb_func != NULL) {
2963 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2964 	}
2965 
2966 	free(arg);
2967 
2968 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
2969 		      err);
2970 }
2971 
2972 static void
2973 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
2974 {
2975 	int err = 0;
2976 	struct spdk_nvmf_fc_port *fc_port = NULL;
2977 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
2978 	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;
2979 
2980 	if (--remove_hwqp_args->pending_remove_hwqp) {
2981 		return;
2982 	}
2983 
2984 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2985 	if (!fc_port) {
2986 		err = -EINVAL;
2987 		SPDK_ERRLOG("fc_port not found.\n");
2988 		goto out;
2989 	}
2990 
2991 	/*
2992 	 * Delete all the nports. Ideally, the nports should have been purged
2993 	 * before the offline event, in which case, only a validation is required.
2994 	 */
2995 	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
2996 out:
2997 	if (remove_hwqp_args->cb_fn) {
2998 		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
2999 	}
3000 
3001 	free(remove_hwqp_args);
3002 }
3003 
3004 /*
3005  * Offline a HW port.
3006  */
3007 static void
3008 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3009 {
3010 	ASSERT_SPDK_FC_MAIN_THREAD();
3011 	struct spdk_nvmf_fc_port *fc_port = NULL;
3012 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3013 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3014 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3015 			api_data->api_args;
3016 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3017 	int i = 0;
3018 	int err = 0;
3019 
3020 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3021 	if (fc_port) {
3022 		/* Set the port state to offline, if it is not already. */
3023 		err = nvmf_fc_port_set_offline(fc_port);
3024 		if (err != 0) {
3025 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3026 			err = 0;
3027 			goto out;
3028 		}
3029 
3030 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3031 		if (!remove_hwqp_args) {
3032 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3033 			err = -ENOMEM;
3034 			goto out;
3035 		}
3036 		remove_hwqp_args->cb_fn = api_data->cb_func;
3037 		remove_hwqp_args->cb_args = api_data->api_args;
3038 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
3039 
3040 		hwqp = &fc_port->ls_queue;
3041 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3042 
3043 		/* Remove poller for all the io queues. */
3044 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3045 			hwqp = &fc_port->io_queues[i];
3046 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3047 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3048 						       remove_hwqp_args);
3049 		}
3050 
3051 		free(arg);
3052 
3053 		/* Wait untill all the hwqps are removed from poll groups. */
3054 		return;
3055 	} else {
3056 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3057 		err = -EINVAL;
3058 	}
3059 out:
3060 	if (api_data->cb_func != NULL) {
3061 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3062 	}
3063 
3064 	free(arg);
3065 
3066 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3067 		      err);
3068 }
3069 
/* Per-subsystem context used while adding/removing an nport listener. */
struct nvmf_fc_add_rem_listener_ctx {
	struct spdk_nvmf_subsystem *subsystem;	/* subsystem being paused/resumed */
	bool add_listener;			/* true = add listener, false = remove */
	struct spdk_nvme_transport_id trid;	/* FC transport ID built from the nport WWNN/WWPN */
};
3075 
/*
 * Subsystem-resume completion: the listener add/remove sequence is done,
 * so release the context allocated in nvmf_fc_adm_add_rem_nport_listener().
 */
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	free(cb_arg);
}
3083 
3084 static void
3085 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3086 {
3087 	ASSERT_SPDK_FC_MAIN_THREAD();
3088 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3089 
3090 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3091 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3092 		free(ctx);
3093 	}
3094 }
3095 
3096 static void
3097 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3098 {
3099 	ASSERT_SPDK_FC_MAIN_THREAD();
3100 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3101 
3102 	if (ctx->add_listener) {
3103 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3104 	} else {
3105 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3106 		nvmf_fc_adm_listen_done(ctx, 0);
3107 	}
3108 }
3109 
3110 static int
3111 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3112 {
3113 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3114 	struct spdk_nvmf_subsystem *subsystem;
3115 	struct spdk_nvmf_listen_opts opts;
3116 
3117 	if (!tgt) {
3118 		SPDK_ERRLOG("No nvmf target defined\n");
3119 		return -EINVAL;
3120 	}
3121 
3122 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3123 
3124 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3125 	while (subsystem) {
3126 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3127 
3128 		if (spdk_nvmf_subsytem_any_listener_allowed(subsystem) == true) {
3129 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3130 			if (ctx) {
3131 				ctx->add_listener = add;
3132 				ctx->subsystem = subsystem;
3133 				nvmf_fc_create_trid(&ctx->trid,
3134 						    nport->fc_nodename.u.wwn,
3135 						    nport->fc_portname.u.wwn);
3136 
3137 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3138 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3139 						    ctx->trid.traddr);
3140 					free(ctx);
3141 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3142 								     0,
3143 								     nvmf_fc_adm_subsystem_paused_cb,
3144 								     ctx)) {
3145 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3146 						    subsystem->subnqn);
3147 					free(ctx);
3148 				}
3149 			}
3150 		}
3151 
3152 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3153 	}
3154 
3155 	return 0;
3156 }
3157 
3158 /*
3159  * Create a Nport.
3160  */
3161 static void
3162 nvmf_fc_adm_evnt_nport_create(void *arg)
3163 {
3164 	ASSERT_SPDK_FC_MAIN_THREAD();
3165 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3166 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3167 			api_data->api_args;
3168 	struct spdk_nvmf_fc_nport *nport = NULL;
3169 	struct spdk_nvmf_fc_port *fc_port = NULL;
3170 	int err = 0;
3171 
3172 	/*
3173 	 * Get the physical port.
3174 	 */
3175 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3176 	if (fc_port == NULL) {
3177 		err = -EINVAL;
3178 		goto out;
3179 	}
3180 
3181 	/*
3182 	 * Check for duplicate initialization.
3183 	 */
3184 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3185 	if (nport != NULL) {
3186 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3187 			    args->port_handle);
3188 		err = -EINVAL;
3189 		goto out;
3190 	}
3191 
3192 	/*
3193 	 * Get the memory to instantiate a fc nport.
3194 	 */
3195 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3196 	if (nport == NULL) {
3197 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3198 			    args->nport_handle);
3199 		err = -ENOMEM;
3200 		goto out;
3201 	}
3202 
3203 	/*
3204 	 * Initialize the contents for the nport
3205 	 */
3206 	nport->nport_hdl    = args->nport_handle;
3207 	nport->port_hdl     = args->port_handle;
3208 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3209 	nport->fc_nodename  = args->fc_nodename;
3210 	nport->fc_portname  = args->fc_portname;
3211 	nport->d_id         = args->d_id;
3212 	nport->fc_port      = nvmf_fc_port_lookup(args->port_handle);
3213 
3214 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3215 	TAILQ_INIT(&nport->rem_port_list);
3216 	nport->rport_count = 0;
3217 	TAILQ_INIT(&nport->fc_associations);
3218 	nport->assoc_count = 0;
3219 
3220 	/*
3221 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3222 	 */
3223 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3224 
3225 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3226 out:
3227 	if (err && nport) {
3228 		free(nport);
3229 	}
3230 
3231 	if (api_data->cb_func != NULL) {
3232 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3233 	}
3234 
3235 	free(arg);
3236 }
3237 
/*
 * Completion callback for each IT delete issued while tearing down an nport.
 * When the last rport is gone, unlinks the nport from its fc_port, frees it,
 * runs the user's SPDK_FC_NPORT_DELETE callback, and frees the shared cb_data.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations must be gone before the nport can be freed. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	/* Log exit on the error path, debug-log otherwise. */
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}
}
3302 
3303 /*
3304  * Delete Nport.
3305  */
3306 static void
3307 nvmf_fc_adm_evnt_nport_delete(void *arg)
3308 {
3309 	ASSERT_SPDK_FC_MAIN_THREAD();
3310 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3311 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3312 			api_data->api_args;
3313 	struct spdk_nvmf_fc_nport *nport = NULL;
3314 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3315 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3316 	int err = 0;
3317 	uint32_t rport_cnt = 0;
3318 	int rc = 0;
3319 
3320 	/*
3321 	 * Make sure that the nport exists.
3322 	 */
3323 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3324 	if (nport == NULL) {
3325 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3326 			    args->port_handle);
3327 		err = -EINVAL;
3328 		goto out;
3329 	}
3330 
3331 	/*
3332 	 * Allocate memory for callback data.
3333 	 */
3334 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3335 	if (NULL == cb_data) {
3336 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3337 		err = -ENOMEM;
3338 		goto out;
3339 	}
3340 
3341 	cb_data->nport = nport;
3342 	cb_data->port_handle = args->port_handle;
3343 	cb_data->fc_cb_func = api_data->cb_func;
3344 	cb_data->fc_cb_ctx = args->cb_ctx;
3345 
3346 	/*
3347 	 * Begin nport tear down
3348 	 */
3349 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3350 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3351 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3352 		/*
3353 		 * Deletion of this nport already in progress. Register callback
3354 		 * and return.
3355 		 */
3356 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3357 		err = -ENODEV;
3358 		goto out;
3359 	} else {
3360 		/* nport partially created/deleted */
3361 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3362 		DEV_VERIFY(0 != "Nport in zombie state");
3363 		err = -ENODEV;
3364 		goto out;
3365 	}
3366 
3367 	/*
3368 	 * Remove this nport from listening addresses across subsystems
3369 	 */
3370 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3371 
3372 	if (0 != rc) {
3373 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3374 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3375 			    nport->nport_hdl);
3376 		goto out;
3377 	}
3378 
3379 	/*
3380 	 * Delete all the remote ports (if any) for the nport
3381 	 */
3382 	/* TODO - Need to do this with a "first" and a "next" accessor function
3383 	 * for completeness. Look at app-subsystem as examples.
3384 	 */
3385 	if (nvmf_fc_nport_has_no_rport(nport)) {
3386 		/* No rports to delete. Complete the nport deletion. */
3387 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3388 		goto out;
3389 	}
3390 
3391 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3392 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3393 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3394 
3395 		if (it_del_args == NULL) {
3396 			err = -ENOMEM;
3397 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3398 				    rport_iter->rpi, rport_iter->s_id);
3399 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3400 			goto out;
3401 		}
3402 
3403 		rport_cnt++;
3404 		it_del_args->port_handle = nport->port_hdl;
3405 		it_del_args->nport_handle = nport->nport_hdl;
3406 		it_del_args->cb_ctx = (void *)cb_data;
3407 		it_del_args->rpi = rport_iter->rpi;
3408 		it_del_args->s_id = rport_iter->s_id;
3409 
3410 		nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3411 					   nvmf_fc_adm_delete_nport_cb);
3412 	}
3413 
3414 out:
3415 	/* On failure, execute the callback function now */
3416 	if ((err != 0) || (rc != 0)) {
3417 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3418 			    "rport_cnt:%d rc:%d.\n",
3419 			    args->nport_handle, err, args->port_handle,
3420 			    rport_cnt, rc);
3421 		if (cb_data) {
3422 			free(cb_data);
3423 		}
3424 		if (api_data->cb_func != NULL) {
3425 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3426 		}
3427 
3428 	} else {
3429 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3430 			      "NPort %d delete done successfully, fc port:%d. "
3431 			      "rport_cnt:%d\n",
3432 			      args->nport_handle, args->port_handle, rport_cnt);
3433 	}
3434 
3435 	free(arg);
3436 }
3437 
3438 /*
3439  * Process an PRLI/IT add.
3440  */
3441 static void
3442 nvmf_fc_adm_evnt_i_t_add(void *arg)
3443 {
3444 	ASSERT_SPDK_FC_MAIN_THREAD();
3445 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3446 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3447 			api_data->api_args;
3448 	struct spdk_nvmf_fc_nport *nport = NULL;
3449 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3450 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3451 	int err = 0;
3452 
3453 	/*
3454 	 * Make sure the nport port exists.
3455 	 */
3456 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3457 	if (nport == NULL) {
3458 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3459 		err = -EINVAL;
3460 		goto out;
3461 	}
3462 
3463 	/*
3464 	 * Check for duplicate i_t_add.
3465 	 */
3466 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3467 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3468 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3469 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3470 			err = -EEXIST;
3471 			goto out;
3472 		}
3473 	}
3474 
3475 	/*
3476 	 * Get the memory to instantiate the remote port
3477 	 */
3478 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3479 	if (rport == NULL) {
3480 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3481 		err = -ENOMEM;
3482 		goto out;
3483 	}
3484 
3485 	/*
3486 	 * Initialize the contents for the rport
3487 	 */
3488 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3489 	rport->s_id = args->s_id;
3490 	rport->rpi = args->rpi;
3491 	rport->fc_nodename = args->fc_nodename;
3492 	rport->fc_portname = args->fc_portname;
3493 
3494 	/*
3495 	 * Add remote port to nport
3496 	 */
3497 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3498 		DEV_VERIFY(!"Error while adding rport to list");
3499 	};
3500 
3501 	/*
3502 	 * TODO: Do we validate the initiators service parameters?
3503 	 */
3504 
3505 	/*
3506 	 * Get the targets service parameters from the library
3507 	 * to return back to the driver.
3508 	 */
3509 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3510 
3511 out:
3512 	if (api_data->cb_func != NULL) {
3513 		/*
3514 		 * Passing pointer to the args struct as the first argument.
3515 		 * The cb_func should handle this appropriately.
3516 		 */
3517 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3518 	}
3519 
3520 	free(arg);
3521 
3522 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3523 		      "IT add on nport %d done, rc = %d.\n",
3524 		      args->nport_handle, err);
3525 }
3526 
3527 /**
3528  * Process a IT delete.
3529  */
3530 static void
3531 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3532 {
3533 	ASSERT_SPDK_FC_MAIN_THREAD();
3534 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3535 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3536 			api_data->api_args;
3537 	int rc = 0;
3538 	struct spdk_nvmf_fc_nport *nport = NULL;
3539 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3540 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3541 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3542 	uint32_t num_rport = 0;
3543 	char log_str[256];
3544 
3545 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3546 
3547 	/*
3548 	 * Make sure the nport port exists. If it does not, error out.
3549 	 */
3550 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3551 	if (nport == NULL) {
3552 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3553 		rc = -EINVAL;
3554 		goto out;
3555 	}
3556 
3557 	/*
3558 	 * Find this ITN / rport (remote port).
3559 	 */
3560 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3561 		num_rport++;
3562 		if ((rport_iter->s_id == args->s_id) &&
3563 		    (rport_iter->rpi == args->rpi) &&
3564 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3565 			rport = rport_iter;
3566 			break;
3567 		}
3568 	}
3569 
3570 	/*
3571 	 * We should find either zero or exactly one rport.
3572 	 *
3573 	 * If we find zero rports, that means that a previous request has
3574 	 * removed the rport by the time we reached here. In this case,
3575 	 * simply return out.
3576 	 */
3577 	if (rport == NULL) {
3578 		rc = -ENODEV;
3579 		goto out;
3580 	}
3581 
3582 	/*
3583 	 * We have the rport slated for deletion. At this point clean up
3584 	 * any LS requests that are sitting in the pending list. Do this
3585 	 * first, then, set the states of the rport so that new LS requests
3586 	 * are not accepted. Then start the cleanup.
3587 	 */
3588 	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);
3589 
3590 	/*
3591 	 * We have found exactly one rport. Allocate memory for callback data.
3592 	 */
3593 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
3594 	if (NULL == cb_data) {
3595 		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
3596 		rc = -ENOMEM;
3597 		goto out;
3598 	}
3599 
3600 	cb_data->nport = nport;
3601 	cb_data->rport = rport;
3602 	cb_data->port_handle = args->port_handle;
3603 	cb_data->fc_cb_func = api_data->cb_func;
3604 	cb_data->fc_cb_ctx = args->cb_ctx;
3605 
3606 	/*
3607 	 * Validate rport object state.
3608 	 */
3609 	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3610 		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3611 	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3612 		/*
3613 		 * Deletion of this rport already in progress. Register callback
3614 		 * and return.
3615 		 */
3616 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3617 		rc = -ENODEV;
3618 		goto out;
3619 	} else {
3620 		/* rport partially created/deleted */
3621 		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3622 		DEV_VERIFY(!"Invalid rport_state");
3623 		rc = -ENODEV;
3624 		goto out;
3625 	}
3626 
3627 	/*
3628 	 * We have successfully found a rport to delete. Call
3629 	 * nvmf_fc_i_t_delete_assoc(), which will perform further
3630 	 * IT-delete processing as well as free the cb_data.
3631 	 */
3632 	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
3633 				     (void *)cb_data);
3634 
3635 out:
3636 	if (rc != 0) {
3637 		/*
3638 		 * We have entered here because either we encountered an
3639 		 * error, or we did not find a rport to delete.
3640 		 * As a result, we will not call the function
3641 		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
3642 		 * processing. Therefore, execute the callback function now.
3643 		 */
3644 		if (cb_data) {
3645 			free(cb_data);
3646 		}
3647 		if (api_data->cb_func != NULL) {
3648 			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
3649 		}
3650 	}
3651 
3652 	snprintf(log_str, sizeof(log_str),
3653 		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
3654 		 args->nport_handle, num_rport, rc);
3655 
3656 	if (rc != 0) {
3657 		SPDK_ERRLOG("%s", log_str);
3658 	} else {
3659 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3660 	}
3661 
3662 	free(arg);
3663 }
3664 
3665 /*
3666  * Process ABTS received
3667  */
3668 static void
3669 nvmf_fc_adm_evnt_abts_recv(void *arg)
3670 {
3671 	ASSERT_SPDK_FC_MAIN_THREAD();
3672 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3673 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3674 	struct spdk_nvmf_fc_nport *nport = NULL;
3675 	int err = 0;
3676 
3677 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3678 		      args->oxid, args->rxid);
3679 
3680 	/*
3681 	 * 1. Make sure the nport port exists.
3682 	 */
3683 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3684 	if (nport == NULL) {
3685 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3686 		err = -EINVAL;
3687 		goto out;
3688 	}
3689 
3690 	/*
3691 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3692 	 */
3693 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3694 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3695 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3696 			      args->rpi, args->oxid, args->rxid);
3697 		err = 0;
3698 		goto out;
3699 
3700 	}
3701 
3702 	/*
3703 	 * 3. Pass the received ABTS-LS to the library for handling.
3704 	 */
3705 	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3706 
3707 out:
3708 	if (api_data->cb_func != NULL) {
3709 		/*
3710 		 * Passing pointer to the args struct as the first argument.
3711 		 * The cb_func should handle this appropriately.
3712 		 */
3713 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3714 	} else {
3715 		/* No callback set, free the args */
3716 		free(args);
3717 	}
3718 
3719 	free(arg);
3720 }
3721 
3722 /*
3723  * Callback function for hw port quiesce.
3724  */
3725 static void
3726 nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
3727 {
3728 	ASSERT_SPDK_FC_MAIN_THREAD();
3729 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
3730 		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
3731 	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
3732 	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
3733 	struct spdk_nvmf_fc_queue_dump_info dump_info;
3734 	struct spdk_nvmf_fc_port *fc_port = NULL;
3735 	char *dump_buf = NULL;
3736 	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;
3737 
3738 	/*
3739 	 * Free the callback context struct.
3740 	 */
3741 	free(ctx);
3742 
3743 	if (err != 0) {
3744 		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
3745 		goto out;
3746 	}
3747 
3748 	if (args->dump_queues == false) {
3749 		/*
3750 		 * Queues need not be dumped.
3751 		 */
3752 		goto out;
3753 	}
3754 
3755 	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);
3756 
3757 	/*
3758 	 * Get the fc port.
3759 	 */
3760 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3761 	if (fc_port == NULL) {
3762 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3763 		err = -EINVAL;
3764 		goto out;
3765 	}
3766 
3767 	/*
3768 	 * Allocate memory for the dump buffer.
3769 	 * This memory will be freed by FCT.
3770 	 */
3771 	dump_buf = (char *)calloc(1, dump_buf_size);
3772 	if (dump_buf == NULL) {
3773 		err = -ENOMEM;
3774 		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
3775 		goto out;
3776 	}
3777 	*args->dump_buf  = (uint32_t *)dump_buf;
3778 	dump_info.buffer = dump_buf;
3779 	dump_info.offset = 0;
3780 
3781 	/*
3782 	 * Add the dump reason to the top of the buffer.
3783 	 */
3784 	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);
3785 
3786 	/*
3787 	 * Dump the hwqp.
3788 	 */
3789 	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
3790 				fc_port->num_io_queues, &dump_info);
3791 
3792 out:
3793 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
3794 		      args->port_handle, args->dump_queues, err);
3795 
3796 	if (cb_func != NULL) {
3797 		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3798 	}
3799 }
3800 
3801 /*
3802  * HW port reset
3803 
3804  */
3805 static void
3806 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3807 {
3808 	ASSERT_SPDK_FC_MAIN_THREAD();
3809 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3810 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3811 			api_data->api_args;
3812 	struct spdk_nvmf_fc_port *fc_port = NULL;
3813 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3814 	int err = 0;
3815 
3816 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);
3817 
3818 	/*
3819 	 * Make sure the physical port exists.
3820 	 */
3821 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3822 	if (fc_port == NULL) {
3823 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3824 		err = -EINVAL;
3825 		goto out;
3826 	}
3827 
3828 	/*
3829 	 * Save the reset event args and the callback in a context struct.
3830 	 */
3831 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3832 
3833 	if (ctx == NULL) {
3834 		err = -ENOMEM;
3835 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3836 		goto fail;
3837 	}
3838 
3839 	ctx->reset_args = args;
3840 	ctx->reset_cb_func = api_data->cb_func;
3841 
3842 	/*
3843 	 * Quiesce the hw port.
3844 	 */
3845 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3846 	if (err != 0) {
3847 		goto fail;
3848 	}
3849 
3850 	/*
3851 	 * Once the ports are successfully quiesced the reset processing
3852 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3853 	 */
3854 	return;
3855 fail:
3856 	free(ctx);
3857 
3858 out:
3859 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
3860 		      err);
3861 
3862 	if (api_data->cb_func != NULL) {
3863 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3864 	}
3865 
3866 	free(arg);
3867 }
3868 
3869 static inline void
3870 nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
3871 {
3872 	if (nvmf_fc_get_main_thread()) {
3873 		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
3874 	}
3875 }
3876 
3877 /*
3878  * Queue up an event in the SPDK main threads event queue.
3879  * Used by the FC driver to notify the SPDK main thread of FC related events.
3880  */
3881 int
3882 nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
3883 			   spdk_nvmf_fc_callback cb_func)
3884 {
3885 	int err = 0;
3886 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3887 	spdk_msg_fn event_fn = NULL;
3888 
3889 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);
3890 
3891 	if (event_type >= SPDK_FC_EVENT_MAX) {
3892 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3893 		err = -EINVAL;
3894 		goto done;
3895 	}
3896 
3897 	if (args == NULL) {
3898 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3899 		err = -EINVAL;
3900 		goto done;
3901 	}
3902 
3903 	api_data = calloc(1, sizeof(*api_data));
3904 
3905 	if (api_data == NULL) {
3906 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3907 		err = -ENOMEM;
3908 		goto done;
3909 	}
3910 
3911 	api_data->api_args = args;
3912 	api_data->cb_func = cb_func;
3913 
3914 	switch (event_type) {
3915 	case SPDK_FC_HW_PORT_INIT:
3916 		event_fn = nvmf_fc_adm_evnt_hw_port_init;
3917 		break;
3918 
3919 	case SPDK_FC_HW_PORT_FREE:
3920 		event_fn = nvmf_fc_adm_evnt_hw_port_free;
3921 		break;
3922 
3923 	case SPDK_FC_HW_PORT_ONLINE:
3924 		event_fn = nvmf_fc_adm_evnt_hw_port_online;
3925 		break;
3926 
3927 	case SPDK_FC_HW_PORT_OFFLINE:
3928 		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
3929 		break;
3930 
3931 	case SPDK_FC_NPORT_CREATE:
3932 		event_fn = nvmf_fc_adm_evnt_nport_create;
3933 		break;
3934 
3935 	case SPDK_FC_NPORT_DELETE:
3936 		event_fn = nvmf_fc_adm_evnt_nport_delete;
3937 		break;
3938 
3939 	case SPDK_FC_IT_ADD:
3940 		event_fn = nvmf_fc_adm_evnt_i_t_add;
3941 		break;
3942 
3943 	case SPDK_FC_IT_DELETE:
3944 		event_fn = nvmf_fc_adm_evnt_i_t_delete;
3945 		break;
3946 
3947 	case SPDK_FC_ABTS_RECV:
3948 		event_fn = nvmf_fc_adm_evnt_abts_recv;
3949 		break;
3950 
3951 	case SPDK_FC_HW_PORT_RESET:
3952 		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
3953 		break;
3954 
3955 	case SPDK_FC_UNRECOVERABLE_ERR:
3956 	default:
3957 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
3958 		err = -EINVAL;
3959 		break;
3960 	}
3961 
3962 done:
3963 
3964 	if (err == 0) {
3965 		assert(event_fn != NULL);
3966 		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
3967 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
3968 	} else {
3969 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
3970 		if (api_data) {
3971 			free(api_data);
3972 		}
3973 	}
3974 
3975 	return err;
3976 }
3977 
/* Register the FC transport with the nvmf library and declare the log
 * components used throughout this file. */
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)
3981