/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * NVMe_FC transport functions.
 */

#include "spdk/env.h"
#include "spdk/assert.h"
#include "spdk/nvmf_transport.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/likely.h"
#include "spdk/endian.h"
#include "spdk/log.h"
#include "spdk/thread.h"

#include "nvmf_fc.h"
#include "fc_lld.h"

#include "spdk_internal/trace_defs.h"

#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

#ifndef ASSERT_SPDK_FC_MAIN_THREAD
#define ASSERT_SPDK_FC_MAIN_THREAD() \
        DEV_VERIFY(spdk_get_thread() == nvmf_fc_get_main_thread());
#endif

/*
 * PRLI service parameters
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
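
/*
 * Of these bits, the target advertises only
 * (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
 * see nvmf_fc_get_prli_service_params() below.
 */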

static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING",
	"SPDK_NVMF_FC_REQ_FUSED_WAITING"
};

#define HWQP_CONN_TABLE_SIZE			8192
#define HWQP_RPI_TABLE_SIZE			4096
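
/*
 * These sizes bound the two per-HWQP rte_hash tables created in
 * nvmf_fc_init_hwqp(): the connection table (keyed by 64-bit connection
 * IDs) and the RPI table (keyed by 16-bit RPIs). They are presumably
 * chosen to comfortably exceed the number of live connections and
 * remote ports per hardware queue pair.
 */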

static void
nvmf_fc_trace(void)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 1,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_SBMT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_SBMT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_SBMT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_TYPE_NONE, OBJECT_NVMF_FC_IO, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT",
					TRACE_FC_REQ_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_ABRT_SBMT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
	spdk_trace_register_description("FC_FUSED_WAITING",
					TRACE_FC_REQ_FUSED_WAITING,
					OWNER_TYPE_NONE, OBJECT_NONE, 0,
					SPDK_TRACE_ARG_TYPE_INT, "");
}
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
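
/*
 * The trace points registered above are emitted from
 * nvmf_fc_record_req_trace_point() below as a request walks through the
 * FC request state machine.
 */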

/**
 * The structure used by all fc adm functions
 */
struct spdk_nvmf_fc_adm_api_data {
	void *api_args;
	spdk_nvmf_fc_callback cb_func;
};

/**
 * The callback structure for nport-delete
 */
struct spdk_nvmf_fc_adm_nport_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};

/**
 * The callback structure for it-delete
 */
struct spdk_nvmf_fc_adm_i_t_del_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_callback fc_cb_func;
	void *fc_cb_ctx;
};


typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);

/**
 * The callback structure for the it-delete-assoc callback
 */
struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
	struct spdk_nvmf_fc_nport *nport;
	struct spdk_nvmf_fc_remote_port_info *rport;
	uint8_t port_handle;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
	void *cb_ctx;
};

/*
 * Call back function pointer for HW port quiesce.
 */
typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);

/**
 * Context structure for quiescing a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
	int quiesce_count;
	void *ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
};

/**
 * Context structure used to reset a hardware port
 */
struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
	void *reset_args;
	spdk_nvmf_fc_callback reset_cb_func;
};

struct spdk_nvmf_fc_transport {
	struct spdk_nvmf_transport transport;
	struct spdk_poller *accept_poller;
	pthread_mutex_t lock;
};

static struct spdk_nvmf_fc_transport *g_nvmf_ftransport;

static spdk_nvmf_transport_destroy_done_cb g_transport_destroy_done_cb = NULL;

static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

static struct spdk_thread *g_nvmf_fc_main_thread = NULL;

static uint32_t g_nvmf_fgroup_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fgroups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fgroups);

struct spdk_thread *
nvmf_fc_get_main_thread(void)
{
	return g_nvmf_fc_main_thread;
}

static inline void
nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;

	switch (state) {
	case SPDK_NVMF_FC_REQ_INIT:
		/* Start IO tracing */
		tpoint_id = TRACE_FC_REQ_INIT;
		break;
	case SPDK_NVMF_FC_REQ_READ_BDEV:
		tpoint_id = TRACE_FC_REQ_READ_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_READ_XFER:
		tpoint_id = TRACE_FC_REQ_READ_XFER;
		break;
	case SPDK_NVMF_FC_REQ_READ_RSP:
		tpoint_id = TRACE_FC_REQ_READ_RSP;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
		break;
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		tpoint_id = TRACE_FC_REQ_NONE_RSP;
		break;
	case SPDK_NVMF_FC_REQ_SUCCESS:
		tpoint_id = TRACE_FC_REQ_SUCCESS;
		break;
	case SPDK_NVMF_FC_REQ_FAILED:
		tpoint_id = TRACE_FC_REQ_FAILED;
		break;
	case SPDK_NVMF_FC_REQ_ABORTED:
		tpoint_id = TRACE_FC_REQ_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		tpoint_id = TRACE_FC_REQ_BDEV_ABORTED;
		break;
	case SPDK_NVMF_FC_REQ_PENDING:
		tpoint_id = TRACE_FC_REQ_PENDING;
		break;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		tpoint_id = TRACE_FC_REQ_FUSED_WAITING;
		break;
	default:
		assert(0);
		break;
	}
	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
				  (uint64_t)(&fc_req->req));
	}
}

static struct rte_hash *
nvmf_fc_create_hash_table(const char *name, size_t num_entries, size_t key_len)
{
	struct rte_hash_parameters hash_params = { 0 };

	hash_params.entries = num_entries;
	hash_params.key_len = key_len;
	hash_params.name = name;

	return rte_hash_create(&hash_params);
}
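
/*
 * Thin wrapper around DPDK's rte_hash with fixed-length keys; the key
 * sizes used in this file are sizeof(uint64_t) for connection IDs and
 * sizeof(uint16_t) for RPIs (see nvmf_fc_init_hwqp()).
 */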

void
nvmf_fc_free_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	free(fc_conn->pool_memory);
	fc_conn->pool_memory = NULL;
}

int
nvmf_fc_create_conn_reqpool(struct spdk_nvmf_fc_conn *fc_conn)
{
	uint32_t i, qd;
	struct spdk_nvmf_fc_pooled_request *req;

	/*
	 * Create more fc-requests than the actual SQ size. This handles a
	 * race where the target driver sends back an RSP and, before the
	 * target driver gets to process the CQE for that RSP, the initiator
	 * has already sent a new command. Depending on the load on the HWQP,
	 * there is a slim possibility that the target reaps the RQE
	 * corresponding to the new command before processing the CQE
	 * corresponding to the RSP.
	 */
	qd = fc_conn->max_queue_depth * 2;

	STAILQ_INIT(&fc_conn->pool_queue);
	fc_conn->pool_memory = calloc(qd, sizeof(struct spdk_nvmf_fc_request));
	if (!fc_conn->pool_memory) {
		SPDK_ERRLOG("Failed to allocate fc request pool\n");
		goto error;
	}
	fc_conn->pool_size = qd;
	fc_conn->pool_free_elems = qd;

	/* Initialize the request objects and link them into the pool queue */
	for (i = 0; i < qd; i++) {
		req = (struct spdk_nvmf_fc_pooled_request *)((char *)fc_conn->pool_memory +
				i * sizeof(struct spdk_nvmf_fc_request));

		STAILQ_INSERT_TAIL(&fc_conn->pool_queue, req, pool_link);
	}
	return 0;
error:
	nvmf_fc_free_conn_reqpool(fc_conn);
	return -1;
}
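
/*
 * Note: the pool is a plain STAILQ with no locking; a connection's
 * requests are expected to be allocated and freed only from its HWQP
 * poller thread. With a connection queue depth of 128, for example,
 * the pool above holds 256 request objects.
 */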

static inline struct spdk_nvmf_fc_request *
nvmf_fc_conn_alloc_fc_request(struct spdk_nvmf_fc_conn *fc_conn)
{
	struct spdk_nvmf_fc_request *fc_req;
	struct spdk_nvmf_fc_pooled_request *pooled_req;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_conn->hwqp;

	pooled_req = STAILQ_FIRST(&fc_conn->pool_queue);
	if (!pooled_req) {
		SPDK_ERRLOG("Alloc request buffer failed\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&fc_conn->pool_queue, pool_link);
	fc_conn->pool_free_elems -= 1;

	fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);

	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
	TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
	TAILQ_INIT(&fc_req->abort_cbs);
	return fc_req;
}

static inline void
nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
{
	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
		/* Mark the request failed; the state transition is logged for debugging. */
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
	}

	/* set the magic to mark req as no longer valid. */
	fc_req->magic = 0xDEADBEEF;

	TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
	TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);

	STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
	fc_conn->pool_free_elems += 1;
}
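
/*
 * The 0xDEADBEEF sentinel set above is checked by the assert in
 * nvmf_fc_request_set_state() to catch use of a request after it has
 * been returned to the pool.
 */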

static inline void
nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
{
	STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
		      spdk_nvmf_request, buf_link);
}

int
nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	char name[64];

	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	TAILQ_INIT(&hwqp->in_use_reqs);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);

	snprintf(name, sizeof(name), "nvmf_fc_conn_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->connection_list_hash = nvmf_fc_create_hash_table(name, HWQP_CONN_TABLE_SIZE,
				     sizeof(uint64_t));
	if (!hwqp->connection_list_hash) {
		SPDK_ERRLOG("Failed to create connection hash table.\n");
		return -ENOMEM;
	}

	snprintf(name, sizeof(name), "nvmf_fc_rpi_hash:%d-%d", fc_port->port_hdl, hwqp->hwqp_id);
	hwqp->rport_list_hash = nvmf_fc_create_hash_table(name, HWQP_RPI_TABLE_SIZE, sizeof(uint16_t));
	if (!hwqp->rport_list_hash) {
		SPDK_ERRLOG("Failed to create rpi hash table.\n");
		rte_hash_free(hwqp->connection_list_hash);
		return -ENOMEM;
	}

	/* Init low level driver queues */
	nvmf_fc_init_q(hwqp);
	return 0;
}

static struct spdk_nvmf_fc_poll_group *
nvmf_fc_assign_idlest_poll_group(struct spdk_nvmf_fc_hwqp *hwqp)
{
	uint32_t min_count = UINT32_MAX;
	struct spdk_nvmf_fc_poll_group *fgroup;
	struct spdk_nvmf_fc_poll_group *ret_fgroup = NULL;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	/* find the poll group with the fewest hwqps assigned to it */
	TAILQ_FOREACH(fgroup, &g_nvmf_fgroups, link) {
		if (fgroup->hwqp_count < min_count) {
			ret_fgroup = fgroup;
			min_count = fgroup->hwqp_count;
		}
	}

	if (ret_fgroup) {
		ret_fgroup->hwqp_count++;
		hwqp->thread = ret_fgroup->group.group->thread;
		hwqp->fgroup = ret_fgroup;
	}

	pthread_mutex_unlock(&g_nvmf_ftransport->lock);

	return ret_fgroup;
}

bool
nvmf_fc_poll_group_valid(struct spdk_nvmf_fc_poll_group *fgroup)
{
	struct spdk_nvmf_fc_poll_group *tmp;
	bool rc = false;

	pthread_mutex_lock(&g_nvmf_ftransport->lock);
	TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
		if (tmp == fgroup) {
			rc = true;
			break;
		}
	}
	pthread_mutex_unlock(&g_nvmf_ftransport->lock);
	return rc;
}

void
nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
{
	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return;
	}

	assert(g_nvmf_fgroup_count);

	if (!nvmf_fc_assign_idlest_poll_group(hwqp)) {
		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
		return;
	}

	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
}

static void
nvmf_fc_poll_group_remove_hwqp_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args = cb_data;

	if (ret == SPDK_NVMF_FC_POLLER_API_SUCCESS) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "Remove hwqp%d from fgroup success\n", args->hwqp->hwqp_id);
	} else {
		SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", args->hwqp->hwqp_id);
	}

	if (args->cb_fn) {
		args->cb_fn(args->cb_ctx, 0);
	}

	free(args);
}

void
nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
			       spdk_nvmf_fc_remove_hwqp_cb cb_fn, void *cb_ctx)
{
	struct spdk_nvmf_fc_poller_api_remove_hwqp_args *args;
	struct spdk_nvmf_fc_poll_group *tmp;
	int rc = 0;

	assert(hwqp);

	SPDK_DEBUGLOG(nvmf_fc,
		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);

	if (!hwqp->fgroup) {
		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
	} else {
		pthread_mutex_lock(&g_nvmf_ftransport->lock);
		TAILQ_FOREACH(tmp, &g_nvmf_fgroups, link) {
			if (tmp == hwqp->fgroup) {
				hwqp->fgroup->hwqp_count--;
				break;
			}
		}
		pthread_mutex_unlock(&g_nvmf_ftransport->lock);

		if (tmp != hwqp->fgroup) {
			/* Poll group was already removed; nothing to do. */
			goto done;
		}

		args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_remove_hwqp_args));
		if (args == NULL) {
			rc = -ENOMEM;
			SPDK_ERRLOG("Failed to allocate memory for poller remove hwqp:%d\n", hwqp->hwqp_id);
			goto done;
		}

		args->hwqp   = hwqp;
		args->cb_fn  = cb_fn;
		args->cb_ctx = cb_ctx;
		args->cb_info.cb_func = nvmf_fc_poll_group_remove_hwqp_cb;
		args->cb_info.cb_data = args;
		args->cb_info.cb_thread = spdk_get_thread();

		rc = nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, args);
		if (rc) {
			rc = -EINVAL;
			SPDK_ERRLOG("Remove hwqp%d from fgroup failed.\n", hwqp->hwqp_id);
			free(args);
			goto done;
		}
		return;
	}
done:
	if (cb_fn) {
		cb_fn(cb_ctx, rc);
	}
}

/*
 * Note: must be called only from the main poller thread; the counter is
 * not atomic.
 */
static uint64_t
nvmf_fc_get_abts_unique_id(void)
{
	static uint32_t u_id = 0;

	return (uint64_t)(++u_id);
}
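
/*
 * The ID counter is a 32-bit static, so values repeat after 2^32
 * allocations; this is assumed to be safe given the small number of
 * ABTS contexts in flight at any time.
 */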

static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}
}

static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer\n");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					poller_arg);
	}

	SPDK_DEBUGLOG(nvmf_fc,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
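
/*
 * ABTS handling is two-phase: the first pass asks every involved HWQP
 * poller to look up the OXID. If no poller finds it (OXID_NOT_FOUND),
 * the function above posts a queue-sync marker to flush in-flight RQEs
 * and, once all pollers respond (nvmf_fc_queue_synced_cb), the ABTS is
 * replayed to the pollers one more time before a reject is sent.
 */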

static void
nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_nport *nport  = NULL;

	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
		ctx->handled = true;
	}

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	nport = nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);

	if (ctx->nport != nport) {
		/* Nport can be deleted while this abort is being
		 * processed by the pollers.
		 */
		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
	} else {
		if (!ctx->handled) {
			/* Try syncing the queues and try one more time */
			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
				SPDK_DEBUGLOG(nvmf_fc,
					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
				return;
			} else {
				/* Send Reject */
				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
						    ctx->oxid, ctx->rxid, ctx->rpi, true,
						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
			}
		} else {
			/* Send Accept */
			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
					    ctx->oxid, ctx->rxid, ctx->rpi, false,
					    0, NULL, NULL);
		}
	}
	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       (ctx->handled) ? "ACC" : "REJ", ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	free(ctx->abts_poller_args);
	free(ctx);
}

void
nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			  uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if ((conn->rpi != rpi) || !conn->hwqp) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		nvmf_fc_poller_api_func(poller_arg->hwqp,
					SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					poller_arg);
	}

	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}

/*** Accessor functions for the FC structures - BEGIN */
/*
 * Returns true if the port is in offline state.
 */
bool
nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the port is in online state.
 */
bool
nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
		return true;
	}

	return false;
}

int
nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
{
	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
		return 0;
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
		hwqp->state = SPDK_FC_HWQP_ONLINE;
		/* reset some queue counters */
		hwqp->num_conns = 0;
		return nvmf_fc_set_q_online_state(hwqp, true);
	}

	return -EPERM;
}

int
nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
{
	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
		hwqp->state = SPDK_FC_HWQP_OFFLINE;
		return nvmf_fc_set_q_online_state(hwqp, false);
	}

	return -EPERM;
}

void
nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD add the port to its list.
	 */
	nvmf_fc_lld_port_add(fc_port);
}

static void
nvmf_fc_port_remove(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list, fc_port, link);

	/*
	 * Let LLD remove the port from its list.
	 */
	nvmf_fc_lld_port_remove(fc_port);
}

struct spdk_nvmf_fc_port *
nvmf_fc_port_lookup(uint8_t port_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
		if (fc_port->port_hdl == port_hdl) {
			return fc_port;
		}
	}
	return NULL;
}

uint32_t
nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}

int
nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
		       struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port) {
		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
		fc_port->num_nports++;
		return 0;
	}

	return -EINVAL;
}

int
nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
			  struct spdk_nvmf_fc_nport *nport)
{
	if (fc_port && nport) {
		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
		fc_port->num_nports--;
		return 0;
	}

	return -EINVAL;
}

static struct spdk_nvmf_fc_nport *
nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;

	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
		if (fc_nport->nport_hdl == nport_hdl) {
			return fc_nport;
		}
	}

	return NULL;
}

struct spdk_nvmf_fc_nport *
nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
{
	struct spdk_nvmf_fc_port *fc_port = NULL;

	fc_port = nvmf_fc_port_lookup(port_hdl);
	if (fc_port) {
		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
	}

	return NULL;
}

static inline int
nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
{
	struct spdk_nvmf_fc_nport *n_port;
	struct spdk_nvmf_fc_remote_port_info *r_port;

	assert(hwqp);
	if (hwqp == NULL) {
		SPDK_ERRLOG("Error: hwqp is NULL\n");
		return -EINVAL;
	}
	assert(nport);
	if (nport == NULL) {
		SPDK_ERRLOG("Error: nport is NULL\n");
		return -EINVAL;
	}
	assert(rport);
	if (rport == NULL) {
		SPDK_ERRLOG("Error: rport is NULL\n");
		return -EINVAL;
	}

	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
		if (n_port->d_id == d_id) {
			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
				if (r_port->s_id == s_id) {
					*nport = n_port;
					*rport = r_port;
					return 0;
				}
			}
			break;
		}
	}

	return -ENOENT;
}

/* Returns true if the Nport is empty of all rem_ports */
bool
nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
{
	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
		assert(nport->rport_count == 0);
		return true;
	} else {
		return false;
	}
}

int
nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
			enum spdk_nvmf_fc_object_state state)
{
	if (nport) {
		nport->nport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
			   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
			      struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
			enum spdk_nvmf_fc_object_state state)
{
	if (rport) {
		rport->rport_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

int
nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
			enum spdk_nvmf_fc_object_state state)
{
	if (assoc) {
		assoc->assoc_state = state;
		return 0;
	} else {
		return -EINVAL;
	}
}

static struct spdk_nvmf_fc_association *
nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
	struct spdk_nvmf_fc_conn *fc_conn;

	if (!qpair) {
		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
		return NULL;
	}

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	return fc_conn->fc_assoc;
}

bool
nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
		       struct spdk_nvmf_ctrlr *ctrlr)
{
	struct spdk_nvmf_fc_nport *fc_nport = NULL;
	struct spdk_nvmf_fc_association *assoc = NULL;

	if (!ctrlr) {
		return false;
	}

	fc_nport = nvmf_fc_nport_find(port_hdl, nport_hdl);
	if (!fc_nport) {
		return false;
	}

	assoc = nvmf_ctrlr_get_fc_assoc(ctrlr);
	if (assoc && assoc->tgtport == fc_nport) {
		SPDK_DEBUGLOG(nvmf_fc,
			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
			      nport_hdl);
		return true;
	}
	return false;
}

static void
nvmf_fc_release_ls_rqst(struct spdk_nvmf_fc_hwqp *hwqp,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	assert(ls_rqst);

	TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);

	/* Return buffer to chip */
	nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
}

static int
nvmf_fc_delete_ls_pending(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_nport *nport,
			  struct spdk_nvmf_fc_remote_port_info *rport)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	int num_deleted = 0;

	assert(hwqp);
	assert(nport);
	assert(rport);

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		if ((ls_rqst->d_id == nport->d_id) && (ls_rqst->s_id == rport->s_id)) {
			num_deleted++;
			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
		}
	}
	return num_deleted;
}

static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
	int i;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	if (ctrlr) {
		for (i = 0; i < SPDK_NVMF_MAX_ASYNC_EVENTS; i++) {
			if (ctrlr->aer_req[i] == &fc_req->req) {
				SPDK_NOTICELOG("Abort AER request\n");
				nvmf_qpair_free_aer(fc_req->req.qpair);
			}
		}
	}
}

void
nvmf_fc_request_abort_complete(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req =
		(struct spdk_nvmf_fc_request *)arg1;
	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
	TAILQ_HEAD(, spdk_nvmf_fc_caller_ctx) abort_cbs;

	/* Make a copy of the cb list from fc_req */
	TAILQ_INIT(&abort_cbs);
	TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);

	SPDK_NOTICELOG("FC Request(%p) in state: %s aborted\n", fc_req,
		       fc_req_state_strs[fc_req->state]);

	_nvmf_fc_request_free(fc_req);

	/* Request abort completed. Notify all the callbacks */
	TAILQ_FOREACH_SAFE(ctx, &abort_cbs, link, tmp) {
		/* Notify */
		ctx->cb(hwqp, 0, ctx->cb_args);
		/* Remove */
		TAILQ_REMOVE(&abort_cbs, ctx, link);
		/* free */
		free(ctx);
	}
}

void
nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
		      spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("ctx alloc failed.\n");
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	switch (fc_req->state) {
	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
		/* Aborted by backend */
		_nvmf_fc_request_free(fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_BDEV:
	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
	case SPDK_NVMF_FC_REQ_NONE_BDEV:
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
		break;

	case SPDK_NVMF_FC_REQ_READ_XFER:
	case SPDK_NVMF_FC_REQ_READ_RSP:
	case SPDK_NVMF_FC_REQ_WRITE_XFER:
	case SPDK_NVMF_FC_REQ_WRITE_RSP:
	case SPDK_NVMF_FC_REQ_NONE_RSP:
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
		break;

	case SPDK_NVMF_FC_REQ_PENDING:
		/* Remove from pending */
		nvmf_fc_request_remove_from_pending(fc_req);
		goto complete;
	case SPDK_NVMF_FC_REQ_FUSED_WAITING:
		TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
		goto complete;
	default:
		SPDK_ERRLOG("Request in invalid state.\n");
		goto complete;
	}

	return;
complete:
	nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				(void *)fc_req);
}

static int
nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
{
	uint32_t length = fc_req->req.length;
	struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
	struct spdk_nvmf_transport_poll_group *group = &fgroup->group;
	struct spdk_nvmf_transport *transport = group->transport;

	if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
		return -ENOMEM;
	}

	return 0;
}

static int
nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
{
	/* Allocate an XCHG if we don't use send frame for this command. */
	if (!nvmf_fc_use_send_frame(fc_req)) {
		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
		if (!fc_req->xchg) {
			fc_req->hwqp->counters.no_xchg++;
			return -EAGAIN;
		}
	}

	if (fc_req->req.length) {
		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
			fc_req->hwqp->counters.buf_alloc_err++;
			if (fc_req->xchg) {
				nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
				fc_req->xchg = NULL;
			}
			return -EAGAIN;
		}
	}

	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
		SPDK_DEBUGLOG(nvmf_fc, "WRITE CMD.\n");

		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);

		if (nvmf_fc_recv_data(fc_req)) {
			/* Dropped. Return success to the caller. */
			fc_req->hwqp->counters.unexpected_err++;
			_nvmf_fc_request_free(fc_req);
		}
	} else {
		SPDK_DEBUGLOG(nvmf_fc, "READ/NONE CMD\n");

		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
		} else {
			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
		}
		spdk_nvmf_request_exec(&fc_req->req);
	}

	return 0;
}
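
/*
 * On -EAGAIN (no free exchange or no data buffers) the caller places
 * the request on the poll group's pending_buf_queue;
 * nvmf_fc_hwqp_process_pending_reqs() below retries execution as
 * resources are freed.
 */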

static void
nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
			  struct spdk_nvmf_fc_frame_hdr *fchdr)
{
	uint8_t df_ctl = fchdr->df_ctl;
	uint32_t f_ctl = fchdr->f_ctl;

	/* VMID */
	if (df_ctl & FCNVME_D_FCTL_DEVICE_HDR_16_MASK) {
		struct spdk_nvmf_fc_vm_header *vhdr;
		uint32_t vmhdr_offset = 0;

		if (df_ctl & FCNVME_D_FCTL_ESP_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_ESP_HDR_SIZE;
		}

		if (df_ctl & FCNVME_D_FCTL_NETWORK_HDR_MASK) {
			vmhdr_offset += FCNVME_D_FCTL_NETWORK_HDR_SIZE;
		}

		vhdr = (struct spdk_nvmf_fc_vm_header *)((char *)fchdr +
				sizeof(struct spdk_nvmf_fc_frame_hdr) + vmhdr_offset);
		fc_req->app_id = from_be32(&vhdr->src_vmid);
	}

	/* Priority */
	if ((from_be32(&f_ctl) >> 8) & FCNVME_F_CTL_PRIORITY_ENABLE) {
		fc_req->csctl = fchdr->cs_ctl;
	}
}

static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;
	uint32_t s_id, d_id;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	cmd_iu = buffer->virt;
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	if (rte_hash_lookup_data(hwqp->connection_list_hash,
				 (void *)&rqst_conn_id, (void **)&fc_conn) < 0) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* Validate s_id and d_id */
	if (s_id != fc_conn->s_id) {
		hwqp->counters.rport_invalid++;
		SPDK_ERRLOG("Frame s_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	if (d_id != fc_conn->d_id) {
		hwqp->counters.nport_invalid++;
		SPDK_ERRLOG("Frame d_id invalid for connection %ld\n", rqst_conn_id);
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association %ld state = %d not valid\n",
			    fc_conn->fc_assoc->assoc_id, fc_conn->fc_assoc->assoc_state);
		return -EACCES;
	}

	if (fc_conn->conn_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Connection %ld state = %d not valid\n",
			    rqst_conn_id, fc_conn->conn_state);
		return -EACCES;
	}

	if (!spdk_nvmf_qpair_is_active(&fc_conn->qpair)) {
		SPDK_ERRLOG("Connection %ld qpair state = %d not valid\n",
			    rqst_conn_id, fc_conn->qpair.state);
		return -EACCES;
	}

	/* Make sure the transfer length is within MDTS */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fgroup->group.transport->opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
	if (fc_req == NULL) {
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
	fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = s_id;
	fc_req->d_id = d_id;
	fc_req->csn  = from_be32(&cmd_iu->cmnd_seq_num);
	nvmf_fc_set_vmid_priority(fc_req, frame);

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);

	if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
		STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
	}

	return 0;
}

/*
 * These functions are called from the FC LLD
 */

void
_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
{
	struct spdk_nvmf_fc_hwqp *hwqp;
	struct spdk_nvmf_transport_poll_group *group;

	if (!fc_req) {
		return;
	}
	hwqp = fc_req->hwqp;

	if (fc_req->xchg) {
		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
		fc_req->xchg = NULL;
	}

	/* Release IO buffers */
	if (fc_req->req.data_from_pool) {
		group = &hwqp->fgroup->group;
		spdk_nvmf_request_free_buffers(&fc_req->req, group,
					       group->transport);
	}
	fc_req->req.iovcnt = 0;

	/* Free Fc request */
	nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
}

void
nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			  enum spdk_nvmf_fc_request_state state)
{
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(nvmf_fc,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      nvmf_fc_request_get_state_str(fc_req->state),
		      nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}

char *
nvmf_fc_request_get_state_str(int state)
{
	static char *unk_str = "unknown";

	return (state >= 0 && state < (int)SPDK_COUNTOF(fc_req_state_strs) ?
		fc_req_state_strs[state] : unk_str);
}

int
nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
			   uint32_t buff_idx,
			   struct spdk_nvmf_fc_frame_hdr *frame,
			   struct spdk_nvmf_fc_buffer_desc *buffer,
			   uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	SPDK_DEBUGLOG(nvmf_fc,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(nvmf_fc, "Process LS NVME frame\n");

		rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("Nport not found. Dropping\n");
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("Rport not found. Dropping\n");
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			return rc;
		}

		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s state not created. Dropping\n",
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			return -EACCES;
		}

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_ftransport->transport.tgt;

		if (TAILQ_EMPTY(&hwqp->ls_pending_queue)) {
			ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		} else {
			ls_rqst->xchg = NULL;
		}

		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(nvmf_fc, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buffer, plen);
		if (!rc) {
			nvmf_fc_rqpair_buffer_release(hwqp, buff_idx);
		}
	} else {

		SPDK_ERRLOG("Unknown frame received. Dropping\n");
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}

void
nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_request *req = NULL, *tmp;
	struct spdk_nvmf_fc_request *fc_req;
	int budget = 64;

	if (!hwqp->fgroup) {
		/* The LS queue is tied to the acceptor poll group; pending LS
		 * requests are staged and processed via hwqp->ls_pending_queue.
		 */
1734 		return;
1735 	}
1736 
1737 	STAILQ_FOREACH_SAFE(req, &hwqp->fgroup->group.pending_buf_queue, buf_link, tmp) {
1738 		fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1739 		if (!nvmf_fc_request_execute(fc_req)) {
1740 			/* Successfully posted, Delete from pending. */
1741 			nvmf_fc_request_remove_from_pending(fc_req);
1742 		}
1743 
1744 		if (budget) {
1745 			budget--;
1746 		} else {
1747 			return;
1748 		}
1749 	}
1750 }
1751 
1752 void
1753 nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
1754 {
1755 	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
1756 	struct spdk_nvmf_fc_nport *nport = NULL;
1757 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
1758 
1759 	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
1760 		/* lookup nport and rport again - make sure they are still valid */
1761 		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
1762 		if (rc) {
1763 			if (nport == NULL) {
1764 				SPDK_ERRLOG("Nport not found. Dropping\n");
1765 				/* increment invalid nport counter */
1766 				hwqp->counters.nport_invalid++;
1767 			} else if (rport == NULL) {
1768 				SPDK_ERRLOG("Rport not found. Dropping\n");
1769 				/* increment invalid rport counter */
1770 				hwqp->counters.rport_invalid++;
1771 			}
1772 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1773 			continue;
1774 		}
1775 		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
1776 		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
1777 			SPDK_ERRLOG("%s state not created. Dropping\n",
1778 				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
1779 				    "Nport" : "Rport");
1780 			nvmf_fc_release_ls_rqst(hwqp, ls_rqst);
1781 			continue;
1782 		}
1783 
1784 		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
1785 		if (ls_rqst->xchg) {
1786 			/* Got an XCHG */
1787 			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
1788 			/* Handover the request to LS module */
1789 			nvmf_fc_handle_ls_rqst(ls_rqst);
1790 		} else {
1791 			/* No more XCHGs. Stop processing. */
1792 			hwqp->counters.no_xchg++;
1793 			return;
1794 		}
1795 	}
1796 }
1797 
1798 int
1799 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1800 {
1801 	int rc = 0;
1802 	struct spdk_nvmf_request *req = &fc_req->req;
1803 	struct spdk_nvmf_qpair *qpair = req->qpair;
1804 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1805 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1806 	uint16_t ersp_len = 0;
1807 
1808 	/* set sq head value in resp */
1809 	rsp->sqhd = nvmf_fc_advance_conn_sqhead(qpair);
1810 
1811 	/* Increment connection responses */
1812 	fc_conn->rsp_count++;
1813 
1814 	if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1815 				       fc_req->transferred_len)) {
1816 		/* Fill ERSP Len */
1817 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1818 				    sizeof(uint32_t)));
1819 		fc_req->ersp.ersp_len = ersp_len;
1820 
1821 		/* Fill RSN */
1822 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1823 		fc_conn->rsn++;
1824 
1825 		/* Fill transfer length */
1826 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);
1827 
1828 		SPDK_DEBUGLOG(nvmf_fc, "Posting ERSP.\n");
1829 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1830 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1831 	} else {
1832 		SPDK_DEBUGLOG(nvmf_fc, "Posting RSP.\n");
1833 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1834 	}
1835 
1836 	return rc;
1837 }
1838 
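/*
 * Decide between the full ERSP and the lightweight zero-length RSP.
 * As a worked example, assuming an ersp_ratio of 8: responses 8, 16,
 * 24, ... on a connection always carry a full ERSP (refreshing SQHD
 * and the response sequence number at the initiator), while responses
 * in between may be collapsed to the short RSP, provided the command
 * succeeded cleanly and transferred exactly req->length bytes.
 */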
1839 bool
1840 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1841 			   uint32_t rsp_cnt, uint32_t xfer_len)
1842 {
1843 	struct spdk_nvmf_request *req = &fc_req->req;
1844 	struct spdk_nvmf_qpair *qpair = req->qpair;
1845 	struct spdk_nvmf_fc_conn *fc_conn = nvmf_fc_get_conn(qpair);
1846 	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
1847 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1848 	uint16_t status = *((uint16_t *)&rsp->status);
1849 
1850 	/*
1851 	 * An extended response (ERSP) must be sent when any of these hold:
1852 	 * 1) Every Nth response, where N == ersp_ratio.
1853 	 * 2) The command is a fabrics command.
1854 	 * 3) The completion failed, or completion dw0/dw1 are non-zero.
1855 	 * 4) The SQ is ~90% full.
1856 	 * 5) The transferred length differs from the command IU length.
1857 	 */
1858 
1859 	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
1860 	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
1861 	    (status & 0xFFFE) || rsp->cdw0 || rsp->cdw1 ||
1862 	    (req->length != xfer_len)) {
1863 		return true;
1864 	}
1865 	return false;
1866 }
1867 
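/*
 * Completion entry point from the generic NVMe-oF layer. Three paths:
 * aborted requests are deferred to the poller so that cleanup never
 * runs in the completing context; successful controller-to-host
 * transfers move to READ_XFER and start transmitting data; everything
 * else advances to the matching *_RSP state and posts a response.
 */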
1868 static int
1869 nvmf_fc_request_complete(struct spdk_nvmf_request *req)
1870 {
1871 	int rc = 0;
1872 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1873 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1874 
1875 	if (fc_req->is_aborted) {
1876 		/* Defer this to make sure we don't call I/O cleanup in the same context. */
1877 		nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1878 					(void *)fc_req);
1879 	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
1880 		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1881 
1882 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1883 
1884 		rc = nvmf_fc_send_data(fc_req);
1885 	} else {
1886 		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1887 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1888 		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1889 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1890 		} else {
1891 			nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1892 		}
1893 
1894 		rc = nvmf_fc_handle_rsp(fc_req);
1895 	}
1896 
1897 	if (rc) {
1898 		SPDK_ERRLOG("Error in request complete.\n");
1899 		_nvmf_fc_request_free(fc_req);
1900 	}
1901 	return 0;
1902 }
1903 
1904 struct spdk_nvmf_tgt *
1905 nvmf_fc_get_tgt(void)
1906 {
1907 	if (g_nvmf_ftransport) {
1908 		return g_nvmf_ftransport->transport.tgt;
1909 	}
1910 	return NULL;
1911 }
1912 
1913 /*
1914  * FC Transport Public API begins here
1915  */
1916 
1917 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1918 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1919 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1920 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1921 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1922 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1923 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1924 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1925 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1926 
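/*
 * The SGE limit above follows from the other two defaults:
 * SPDK_NVMF_FC_DEFAULT_MAX_SGE == 65536 / 4096 == 16, i.e. a maximum
 * sized I/O always fits in at most 16 io_unit_size buffers. The same
 * bound is enforced on user-supplied opts in nvmf_fc_create() below.
 */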
1927 static void
1928 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1929 {
1930 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1931 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1932 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1933 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1934 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1935 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1936 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1937 }
1938 
1939 static int nvmf_fc_accept(void *ctx);
1940 
1941 static struct spdk_nvmf_transport *
1942 nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
1943 {
1944 	uint32_t sge_count;
1945 
1946 	SPDK_INFOLOG(nvmf_fc, "*** FC Transport Init ***\n"
1947 		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
1948 		     "  max_io_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
1949 		     "  max_aq_depth=%d\n",
1950 		     opts->max_queue_depth,
1951 		     opts->max_io_size,
1952 		     opts->max_qpairs_per_ctrlr - 1,
1953 		     opts->io_unit_size,
1954 		     opts->max_aq_depth);
1955 
1956 	if (g_nvmf_ftransport) {
1957 		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
1958 		return NULL;
1959 	}
1960 
1961 	if (spdk_env_get_last_core() < 1) {
1962 		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
1963 			    spdk_env_get_last_core() + 1);
1964 		return NULL;
1965 	}
1966 
1967 	sge_count = opts->max_io_size / opts->io_unit_size;
1968 	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
1969 		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
1970 		return NULL;
1971 	}
1972 
1973 	g_nvmf_fc_main_thread = spdk_get_thread();
1974 	g_nvmf_fgroup_count = 0;
1975 	g_nvmf_ftransport = calloc(1, sizeof(*g_nvmf_ftransport));
1976 
1977 	if (!g_nvmf_ftransport) {
1978 		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
1979 		return NULL;
1980 	}
1981 
1982 	if (pthread_mutex_init(&g_nvmf_ftransport->lock, NULL)) {
1983 		SPDK_ERRLOG("pthread_mutex_init() failed\n");
1984 		free(g_nvmf_ftransport);
1985 		g_nvmf_ftransport = NULL;
1986 		return NULL;
1987 	}
1988 
1989 	g_nvmf_ftransport->accept_poller = SPDK_POLLER_REGISTER(nvmf_fc_accept,
1990 					   &g_nvmf_ftransport->transport, opts->acceptor_poll_rate);
1991 	if (!g_nvmf_ftransport->accept_poller) {
1992 		free(g_nvmf_ftransport);
1993 		g_nvmf_ftransport = NULL;
1994 		return NULL;
1995 	}
1996 
1997 	/* initialize the low level FC driver */
1998 	nvmf_fc_lld_init();
1999 
2000 	return &g_nvmf_ftransport->transport;
2001 }
2002 
2003 static void
2004 nvmf_fc_destroy_done_cb(void *cb_arg)
2005 {
2006 	free(g_nvmf_ftransport);
2007 	if (g_transport_destroy_done_cb) {
2008 		g_transport_destroy_done_cb(cb_arg);
2009 		g_transport_destroy_done_cb = NULL;
2010 	}
2011 }
2012 
2013 static int
2014 nvmf_fc_destroy(struct spdk_nvmf_transport *transport,
2015 		spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg)
2016 {
2017 	if (transport) {
2018 		struct spdk_nvmf_fc_poll_group *fgroup, *pg_tmp;
2019 
2020 		/* clean up any FC poll groups still around */
2021 		TAILQ_FOREACH_SAFE(fgroup, &g_nvmf_fgroups, link, pg_tmp) {
2022 			TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2023 			free(fgroup);
2024 		}
2025 
2026 		spdk_poller_unregister(&g_nvmf_ftransport->accept_poller);
2027 		g_nvmf_fgroup_count = 0;
2028 		g_transport_destroy_done_cb = cb_fn;
2029 
2030 		/* low level FC driver clean up */
2031 		nvmf_fc_lld_fini(nvmf_fc_destroy_done_cb, cb_arg);
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 static int
2038 nvmf_fc_listen(struct spdk_nvmf_transport *transport, const struct spdk_nvme_transport_id *trid,
2039 	       struct spdk_nvmf_listen_opts *listen_opts)
2040 {
2041 	return 0;
2042 }
2043 
2044 static void
2045 nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
2046 		    const struct spdk_nvme_transport_id *_trid)
2047 {
2048 }
2049 
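/*
 * Acceptor poller, registered in nvmf_fc_create() and run on the FC
 * main thread. The first invocation starts the low level driver; after
 * that, each tick services the LS queue of every online port. Returning
 * SPDK_POLLER_BUSY whenever work was done lets the framework track
 * whether the reactor is busy or idle.
 */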
2050 static int
2051 nvmf_fc_accept(void *ctx)
2052 {
2053 	struct spdk_nvmf_fc_port *fc_port = NULL;
2054 	uint32_t count = 0;
2055 	static bool start_lld = false;
2056 
2057 	if (spdk_unlikely(!start_lld)) {
2058 		start_lld  = true;
2059 		nvmf_fc_lld_start();
2060 	}
2061 
2062 	/* poll the LS queue on each port */
2063 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2064 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2065 			count += nvmf_fc_process_queue(&fc_port->ls_queue);
2066 		}
2067 	}
2068 
2069 	return count > 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE;
2070 }
2071 
2072 static void
2073 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2074 		 struct spdk_nvme_transport_id *trid,
2075 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2076 {
2077 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2078 	entry->adrfam = trid->adrfam;
2079 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2080 
2081 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2082 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2083 }
2084 
2085 static struct spdk_nvmf_transport_poll_group *
2086 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport,
2087 			  struct spdk_nvmf_poll_group *group)
2088 {
2089 	struct spdk_nvmf_fc_poll_group *fgroup;
2090 	struct spdk_nvmf_fc_transport *ftransport =
2091 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2092 
2093 	if (spdk_interrupt_mode_is_enabled()) {
2094 		SPDK_ERRLOG("FC transport does not support interrupt mode\n");
2095 		return NULL;
2096 	}
2097 
2098 	fgroup = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2099 	if (!fgroup) {
2100 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2101 		return NULL;
2102 	}
2103 
2104 	TAILQ_INIT(&fgroup->hwqp_list);
2105 
2106 	pthread_mutex_lock(&ftransport->lock);
2107 	TAILQ_INSERT_TAIL(&g_nvmf_fgroups, fgroup, link);
2108 	g_nvmf_fgroup_count++;
2109 	pthread_mutex_unlock(&ftransport->lock);
2110 
2111 	return &fgroup->group;
2112 }
2113 
2114 static void
2115 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2116 {
2117 	struct spdk_nvmf_fc_poll_group *fgroup;
2118 	struct spdk_nvmf_fc_transport *ftransport =
2119 		SPDK_CONTAINEROF(group->transport, struct spdk_nvmf_fc_transport, transport);
2120 
2121 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2122 	pthread_mutex_lock(&ftransport->lock);
2123 	TAILQ_REMOVE(&g_nvmf_fgroups, fgroup, link);
2124 	g_nvmf_fgroup_count--;
2125 	pthread_mutex_unlock(&ftransport->lock);
2126 
2127 	free(fgroup);
2128 }
2129 
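/*
 * Bind a new qpair to one of this poll group's hardware queue pairs:
 * pick a hwqp on this group that belongs to the association's FC port,
 * reserve a connection id on it, and hand the connection to the hwqp's
 * poller thread via the poller API.
 */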
2130 static int
2131 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2132 		       struct spdk_nvmf_qpair *qpair)
2133 {
2134 	struct spdk_nvmf_fc_poll_group *fgroup;
2135 	struct spdk_nvmf_fc_conn *fc_conn;
2136 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2137 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2138 	bool hwqp_found = false;
2139 
2140 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2141 	fc_conn  = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2142 
2143 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2144 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2145 			hwqp_found = true;
2146 			break;
2147 		}
2148 	}
2149 
2150 	if (!hwqp_found) {
2151 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2152 		goto err;
2153 	}
2154 
2155 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2156 					 &fc_conn->conn_id,
2157 					 fc_conn->max_queue_depth)) {
2158 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2159 		goto err;
2160 	}
2161 
2162 	fc_conn->hwqp = hwqp;
2163 
2164 	/* If this is for ADMIN connection, then update assoc ID. */
2165 	if (fc_conn->qpair.qid == 0) {
2166 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2167 	}
2168 
2169 	api_data = &fc_conn->create_opd->u.add_conn;
2170 	nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2171 	return 0;
2172 err:
2173 	return -1;
2174 }
2175 
2176 static int
2177 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2178 {
2179 	uint32_t count = 0;
2180 	struct spdk_nvmf_fc_poll_group *fgroup;
2181 	struct spdk_nvmf_fc_hwqp *hwqp;
2182 
2183 	fgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, group);
2184 
2185 	TAILQ_FOREACH(hwqp, &fgroup->hwqp_list, link) {
2186 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2187 			count += nvmf_fc_process_queue(hwqp);
2188 		}
2189 	}
2190 
2191 	return (int) count;
2192 }
2193 
2194 static int
2195 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2196 {
2197 	struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2198 
2199 	if (!fc_req->is_aborted) {
2200 		nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2201 		nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2202 	} else {
2203 		nvmf_fc_request_abort_complete(fc_req);
2204 	}
2205 
2206 	return 0;
2207 }
2208 
2209 static void
2210 nvmf_fc_connection_delete_done_cb(void *arg)
2211 {
2212 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2213 
2214 	if (fc_ctx->cb_fn) {
2215 		spdk_thread_send_msg(fc_ctx->qpair_thread, fc_ctx->cb_fn, fc_ctx->cb_ctx);
2216 	}
2217 	free(fc_ctx);
2218 }
2219 
2220 static void
2221 _nvmf_fc_close_qpair(void *arg)
2222 {
2223 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx = arg;
2224 	struct spdk_nvmf_qpair *qpair = fc_ctx->qpair;
2225 	struct spdk_nvmf_fc_conn *fc_conn;
2226 	int rc;
2227 
2228 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2229 
2230 	SPDK_NOTICELOG("Close qpair %p, fc_conn %p conn_state %d conn_id 0x%lx\n",
2231 		       qpair, fc_conn, fc_conn->conn_state, fc_conn->conn_id);
2232 	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
2233 		struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2234 
2235 		if (fc_conn->create_opd) {
2236 			api_data = &fc_conn->create_opd->u.add_conn;
2237 
2238 			nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
2239 						    api_data->args.fc_conn, api_data->aq_conn);
2240 		}
2241 	} else if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_CREATED) {
2242 		rc = nvmf_fc_delete_connection(fc_conn, false, true,
2243 					       nvmf_fc_connection_delete_done_cb, fc_ctx);
2244 		if (!rc) {
2245 			/* Wait for transport to complete its work. */
2246 			return;
2247 		}
2248 
2249 		SPDK_ERRLOG("Delete fc_conn %p failed.\n", fc_conn);
2250 	}
2251 
2252 	nvmf_fc_connection_delete_done_cb(fc_ctx);
2253 }
2254 
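/*
 * Transport qpair_fini entry point. Tearing a connection down touches
 * nport/rport/association state owned by the FC main thread, so the
 * work is marshalled there with spdk_thread_send_msg() and the caller's
 * cb_fn is bounced back to the originating thread once the delete
 * completes (see nvmf_fc_connection_delete_done_cb above).
 */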
2255 static void
2256 nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair,
2257 		    spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg)
2258 {
2259 	struct spdk_nvmf_fc_qpair_remove_ctx *fc_ctx;
2260 	struct spdk_nvmf_fc_conn *fc_conn;
2261 
2262 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2263 	fc_conn->qpair_fini_done = true;
2264 
2265 	if (fc_conn->conn_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
2266 		if (fc_conn->qpair_fini_done_cb) {
2267 			SPDK_NOTICELOG("Invoke qpair_fini_done_cb, fc_conn %p conn_id 0x%lx qpair %p conn_state %d\n",
2268 				       fc_conn, fc_conn->conn_id, qpair, fc_conn->conn_state);
2269 
2270 			fc_conn->qpair_fini_done_cb(fc_conn->hwqp, 0, fc_conn->qpair_fini_done_cb_args);
2271 		}
2272 
2273 		if (cb_fn) {
2274 			cb_fn(cb_arg);
2275 		}
2276 
2277 		return;
2278 	}
2279 
2280 	fc_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_qpair_remove_ctx));
2281 	if (!fc_ctx) {
2282 		SPDK_ERRLOG("Unable to allocate close_qpair ctx.\n");
2283 		if (cb_fn) {
2284 			cb_fn(cb_arg);
2285 		}
2286 
2287 		return;
2288 	}
2289 
2290 	fc_ctx->qpair = qpair;
2291 	fc_ctx->cb_fn = cb_fn;
2292 	fc_ctx->cb_ctx = cb_arg;
2293 	fc_ctx->qpair_thread = spdk_get_thread();
2294 
2295 	spdk_thread_send_msg(nvmf_fc_get_main_thread(), _nvmf_fc_close_qpair, fc_ctx);
2296 }
2297 
2298 static int
2299 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2300 			    struct spdk_nvme_transport_id *trid)
2301 {
2302 	struct spdk_nvmf_fc_conn *fc_conn;
2303 
2304 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2305 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2306 	return 0;
2307 }
2308 
2309 static int
2310 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2311 			     struct spdk_nvme_transport_id *trid)
2312 {
2313 	struct spdk_nvmf_fc_conn *fc_conn;
2314 
2315 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2316 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2317 	return 0;
2318 }
2319 
2320 static int
2321 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2322 			      struct spdk_nvme_transport_id *trid)
2323 {
2324 	struct spdk_nvmf_fc_conn *fc_conn;
2325 
2326 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2327 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2328 	return 0;
2329 }
2330 
2331 static void
2332 nvmf_fc_qpair_abort_request(struct spdk_nvmf_qpair *qpair,
2333 			    struct spdk_nvmf_request *req)
2334 {
2335 	spdk_nvmf_request_complete(req);
2336 }
2337 
2338 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
2339 	.name = "FC",
2340 	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
2341 	.opts_init = nvmf_fc_opts_init,
2342 	.create = nvmf_fc_create,
2343 	.destroy = nvmf_fc_destroy,
2344 
2345 	.listen = nvmf_fc_listen,
2346 	.stop_listen = nvmf_fc_stop_listen,
2347 
2348 	.listener_discover = nvmf_fc_discover,
2349 
2350 	.poll_group_create = nvmf_fc_poll_group_create,
2351 	.poll_group_destroy = nvmf_fc_poll_group_destroy,
2352 	.poll_group_add = nvmf_fc_poll_group_add,
2353 	.poll_group_poll = nvmf_fc_poll_group_poll,
2354 
2355 	.req_complete = nvmf_fc_request_complete,
2356 	.req_free = nvmf_fc_request_free,
2357 	.qpair_fini = nvmf_fc_close_qpair,
2358 	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
2359 	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
2360 	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
2361 	.qpair_abort_request = nvmf_fc_qpair_abort_request,
2362 };
2363 
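/*
 * An ops table like the one above is what the generic NVMe-oF layer
 * dispatches through. As a sketch of the hookup (hypothetical usage,
 * not part of this file): the table is registered via
 * SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc), after
 * which a target application can instantiate the transport by name,
 * e.g. spdk_nvmf_transport_create("FC", &opts).
 */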
2364 /* Initializes the data for the creation of an FC-Port object in the SPDK
2365  * library. The spdk_nvmf_fc_port is a well-defined structure that is part of
2366  * the API to the library. The contents added to this well-defined structure
2367  * are private to each vendor's implementation.
2368  */
2369 static int
2370 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2371 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2372 {
2373 	int rc = 0;
2374 	/* Use a high hwqp_id for the LS HWQP so that it does not clash with the
2375 	 * IO HWQPs and is immediately recognizable as an LS queue during tracing.
2376 	 */
2377 	uint32_t i;
2378 
2379 	fc_port->port_hdl       = args->port_handle;
2380 	fc_port->lld_fc_port	= args->lld_fc_port;
2381 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2382 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2383 	fc_port->num_io_queues  = args->io_queue_cnt;
2384 
2385 	/*
2386 	 * Set port context from init args. Used for FCP port stats.
2387 	 */
2388 	fc_port->port_ctx = args->port_ctx;
2389 
2390 	/*
2391 	 * Set up the LS queue parameters from the init args.
2392 	 */
2393 	fc_port->ls_queue.queues = args->ls_queue;
2394 	fc_port->ls_queue.thread = nvmf_fc_get_main_thread();
2395 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2396 	fc_port->ls_queue.is_ls_queue = true;
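	/*
	 * Illustration: with SPDK_MAX_NUM_OF_FC_PORTS == 4 and
	 * io_queue_cnt == 8 (hypothetical values), the IO hwqp_ids run
	 * 0..7 while the LS queue gets hwqp_id 32, keeping it clearly
	 * distinct from every possible IO queue id in traces.
	 */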
2397 
2398 	/*
2399 	 * Initialize the LS queue.
2400 	 */
2401 	rc = nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2402 	if (rc) {
2403 		return rc;
2404 	}
2405 
2406 	/*
2407 	 * Initialize the IO queues.
2408 	 */
2409 	for (i = 0; i < args->io_queue_cnt; i++) {
2410 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2411 		hwqp->hwqp_id = i;
2412 		hwqp->queues = args->io_queues[i];
2413 		hwqp->is_ls_queue = false;
2414 		rc = nvmf_fc_init_hwqp(fc_port, hwqp);
2415 		if (rc) {
2416 			for (; i > 0; --i) {
2417 				rte_hash_free(fc_port->io_queues[i - 1].connection_list_hash);
2418 				rte_hash_free(fc_port->io_queues[i - 1].rport_list_hash);
2419 			}
2420 			rte_hash_free(fc_port->ls_queue.connection_list_hash);
2421 			rte_hash_free(fc_port->ls_queue.rport_list_hash);
2422 			return rc;
2423 		}
2424 	}
2425 
2426 	/*
2427 	 * Initialize the LS processing for port
2428 	 */
2429 	nvmf_fc_ls_init(fc_port);
2430 
2431 	/*
2432 	 * Initialize the list of nport on this HW port.
2433 	 */
2434 	TAILQ_INIT(&fc_port->nport_list);
2435 	fc_port->num_nports = 0;
2436 
2437 	return 0;
2438 }
2439 
2440 /*
2441  * FC port must have all its nports deleted before transitioning to offline state.
2442  */
2443 static void
2444 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2445 {
2446 	struct spdk_nvmf_fc_nport *nport = NULL;
2447 	/* All nports must have been deleted at this point for this fc port */
2448 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2449 	DEV_VERIFY(fc_port->num_nports == 0);
2450 	/* In promoted builds, mark any nports that still exist as zombies. */
2451 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2452 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2453 			(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2454 		}
2455 	}
2456 }
2457 
2458 static void
2459 nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
2460 {
2461 	ASSERT_SPDK_FC_MAIN_THREAD();
2462 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
2463 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2464 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2465 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
2466 	int spdk_err = 0;
2467 	uint8_t port_handle = cb_data->port_handle;
2468 	uint32_t s_id = rport->s_id;
2469 	uint32_t rpi = rport->rpi;
2470 	uint32_t assoc_count = rport->assoc_count;
2471 	uint32_t nport_hdl = nport->nport_hdl;
2472 	uint32_t d_id = nport->d_id;
2473 	char log_str[256];
2474 
2475 	/*
2476 	 * Assert on any delete failure.
2477 	 */
2478 	if (0 != err) {
2479 		DEV_VERIFY(!"Error in IT Delete callback.");
2480 		goto out;
2481 	}
2482 
2483 	if (cb_func != NULL) {
2484 		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
2485 	}
2486 
2487 out:
2488 	free(cb_data);
2489 
2490 	snprintf(log_str, sizeof(log_str),
2491 		 "IT delete cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
2492 		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);
2493 
2494 	if (err != 0) {
2495 		SPDK_ERRLOG("%s", log_str);
2496 	} else {
2497 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2498 	}
2499 }
2500 
2501 static void
2502 nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
2503 {
2504 	ASSERT_SPDK_FC_MAIN_THREAD();
2505 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
2506 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
2507 	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
2508 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
2509 	uint32_t s_id = rport->s_id;
2510 	uint32_t rpi = rport->rpi;
2511 	uint32_t assoc_count = rport->assoc_count;
2512 	uint32_t nport_hdl = nport->nport_hdl;
2513 	uint32_t d_id = nport->d_id;
2514 	char log_str[256];
2515 
2516 	/*
2517 	 * Assert on any association delete failure. We continue to delete other
2518 	 * associations in promoted builds.
2519 	 */
2520 	if (0 != err) {
2521 		DEV_VERIFY(!"Nport's association delete callback returned error");
2522 		if (nport->assoc_count > 0) {
2523 			nport->assoc_count--;
2524 		}
2525 		if (rport->assoc_count > 0) {
2526 			rport->assoc_count--;
2527 		}
2528 	}
2529 
2530 	/*
2531 	 * If this is the last association being deleted for the ITN,
2532 	 * execute the callback(s).
2533 	 */
2534 	if (0 == rport->assoc_count) {
2535 		/* Remove the rport from the remote port list. */
2536 		if (nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
2537 			SPDK_ERRLOG("Error while removing rport from list.\n");
2538 			DEV_VERIFY(!"Error while removing rport from list.");
2539 		}
2540 
2541 		if (cb_func != NULL) {
2542 			/*
2543 			 * Callback function is provided by the caller
2544 			 * of nvmf_fc_adm_i_t_delete_assoc().
2545 			 */
2546 			(void)cb_func(cb_data->cb_ctx, 0);
2547 		}
2548 		free(rport);
2549 		free(args);
2550 	}
2551 
2552 	snprintf(log_str, sizeof(log_str),
2553 		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
2554 		 nport_hdl, s_id, d_id, rpi, assoc_count, err);
2555 
2556 	if (err != 0) {
2557 		SPDK_ERRLOG("%s", log_str);
2558 	} else {
2559 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2560 	}
2561 }
2562 
2563 /**
2564  * Process an IT delete: delete all associations for this I_T nexus.
2565  */
2566 static void
2567 nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
2568 			     struct spdk_nvmf_fc_remote_port_info *rport,
2569 			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
2570 			     void *cb_ctx)
2571 {
2572 	int err = 0;
2573 	struct spdk_nvmf_fc_association *assoc = NULL;
2574 	int assoc_err = 0;
2575 	uint32_t num_assoc = 0;
2576 	uint32_t num_assoc_del_scheduled = 0;
2577 	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
2578 	uint8_t port_hdl = nport->port_hdl;
2579 	uint32_t s_id = rport->s_id;
2580 	uint32_t rpi = rport->rpi;
2581 	uint32_t assoc_count = rport->assoc_count;
2582 	char log_str[256];
2583 
2584 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete associations on nport:%d begin.\n",
2585 		      nport->nport_hdl);
2586 
2587 	/*
2588 	 * Allocate memory for callback data.
2589 	 * This memory will be freed by the callback function.
2590 	 */
2591 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
2592 	if (NULL == cb_data) {
2593 		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
2594 		err = -ENOMEM;
2595 		goto out;
2596 	}
2597 	cb_data->nport       = nport;
2598 	cb_data->rport       = rport;
2599 	cb_data->port_handle = port_hdl;
2600 	cb_data->cb_func     = cb_func;
2601 	cb_data->cb_ctx      = cb_ctx;
2602 
2603 	/*
2604 	 * Delete all associations, if any, related with this ITN/remote_port.
2605 	 */
2606 	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
2607 		num_assoc++;
2608 		if (assoc->s_id == s_id) {
2609 			assoc_err = nvmf_fc_delete_association(nport,
2610 							       assoc->assoc_id,
2611 							       false /* send abts */, false,
2612 							       nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
2613 			if (0 != assoc_err) {
2614 				/*
2615 				 * Mark this association as zombie.
2616 				 */
2617 				err = -EINVAL;
2618 				DEV_VERIFY(!"Error while deleting association");
2619 				(void)nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2620 			} else {
2621 				num_assoc_del_scheduled++;
2622 			}
2623 		}
2624 	}
2625 
2626 out:
2627 	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
2628 		/*
2629 		 * Since no association_delete calls were
2630 		 * successfully scheduled, the association_delete
2631 		 * callback function will never be called.
2632 		 * In this case, call the callback function now.
2633 		 */
2634 		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
2635 	}
2636 
2637 	snprintf(log_str, sizeof(log_str),
2638 		 "IT delete associations on nport:%d end. "
2639 		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
2640 		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);
2641 
2642 	if (err == 0) {
2643 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
2644 	} else {
2645 		SPDK_ERRLOG("%s", log_str);
2646 	}
2647 }
2648 
2649 static void
2650 nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
2651 {
2652 	ASSERT_SPDK_FC_MAIN_THREAD();
2653 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
2654 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2655 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2656 	struct spdk_nvmf_fc_port *fc_port = NULL;
2657 	int err = 0;
2658 
2659 	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
2660 	hwqp = quiesce_api_data->hwqp;
2661 	fc_port = hwqp->fc_port;
2662 	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
2663 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;
2664 
2665 	/*
2666 	 * Decrement the callback/quiesced queue count.
2667 	 */
2668 	port_quiesce_ctx->quiesce_count--;
2669 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);
2670 
2671 	free(quiesce_api_data);
2672 	/*
2673 	 * Wait for all callbacks, i.e. one per IO queue plus the LS queue.
2674 	 */
2675 	if (port_quiesce_ctx->quiesce_count > 0) {
2676 		return;
2677 	}
2678 
2679 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2680 		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
2681 	} else {
2682 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesced.\n", fc_port->port_hdl);
2683 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2684 	}
2685 
2686 	if (cb_func) {
2687 		/*
2688 		 * Callback function for the caller of quiesce.
2689 		 */
2690 		cb_func(port_quiesce_ctx->ctx, err);
2691 	}
2692 
2693 	/*
2694 	 * Free the context structure.
2695 	 */
2696 	free(port_quiesce_ctx);
2697 
2698 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
2699 		      err);
2700 }
2701 
2702 static int
2703 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2704 			     spdk_nvmf_fc_poller_api_cb cb_func)
2705 {
2706 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2707 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2708 	int err = 0;
2709 
2710 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2711 
2712 	if (args == NULL) {
2713 		err = -ENOMEM;
2714 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2715 		goto done;
2716 	}
2717 	args->hwqp = fc_hwqp;
2718 	args->ctx = ctx;
2719 	args->cb_info.cb_func = cb_func;
2720 	args->cb_info.cb_data = args;
2721 	args->cb_info.cb_thread = spdk_get_thread();
2722 
2723 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2724 	rc = nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2725 	if (rc) {
2726 		free(args);
2727 		err = -EINVAL;
2728 	}
2729 
2730 done:
2731 	return err;
2732 }
2733 
2734 /*
2735  * Hw port Quiesce
2736  */
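/*
 * The accounting is plain reference counting: quiesce_count is bumped
 * once per successfully scheduled queue quiesce (the LS queue plus one
 * per IO queue), and nvmf_fc_adm_queue_quiesce_cb() decrements it as
 * each poller acknowledges. The port is declared quiesced and the
 * caller's callback runs only once the count reaches zero, e.g. for a
 * port with 4 IO queues the expected count is 1 + 4 == 5.
 */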
2737 static int
2738 nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
2739 			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
2740 {
2741 	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
2742 	uint32_t i = 0;
2743 	int err = 0;
2744 
2745 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port:%d is being quiesced.\n", fc_port->port_hdl);
2746 
2747 	/*
2748 	 * If the port is in an OFFLINE state, set the state to QUIESCED
2749 	 * and execute the callback.
2750 	 */
2751 	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
2752 		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
2753 	}
2754 
2755 	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
2756 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Port %d already in quiesced state.\n",
2757 			      fc_port->port_hdl);
2758 		/*
2759 		 * Execute the callback function directly.
2760 		 */
2761 		cb_func(ctx, err);
2762 		goto out;
2763 	}
2764 
2765 	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));
2766 
2767 	if (port_quiesce_ctx == NULL) {
2768 		err = -ENOMEM;
2769 		SPDK_ERRLOG("Failed to allocate memory for port quiesce ctx, port:%d\n",
2770 			    fc_port->port_hdl);
2771 		goto out;
2772 	}
2773 
2774 	port_quiesce_ctx->quiesce_count = 0;
2775 	port_quiesce_ctx->ctx = ctx;
2776 	port_quiesce_ctx->cb_func = cb_func;
2777 
2778 	/*
2779 	 * Quiesce the LS queue.
2780 	 */
2781 	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
2782 					   nvmf_fc_adm_queue_quiesce_cb);
2783 	if (err != 0) {
2784 		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
2785 		goto out;
2786 	}
2787 	port_quiesce_ctx->quiesce_count++;
2788 
2789 	/*
2790 	 * Quiesce the IO queues.
2791 	 */
2792 	for (i = 0; i < fc_port->num_io_queues; i++) {
2793 		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
2794 						   port_quiesce_ctx,
2795 						   nvmf_fc_adm_queue_quiesce_cb);
2796 		if (err != 0) {
2797 			DEV_VERIFY(0);
2798 			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
2799 		}
2800 		port_quiesce_ctx->quiesce_count++;
2801 	}
2802 
2803 out:
2804 	if (port_quiesce_ctx && err != 0) {
2805 		free(port_quiesce_ctx);
2806 	}
2807 	return err;
2808 }
2809 
2810 /*
2811  * Initialize and add a HW port entry to the global
2812  * HW port list.
2813  */
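/*
 * Note on the allocation below: the port and its IO hwqps come from a
 * single calloc() of sizeof(*fc_port) + io_queue_cnt * sizeof(hwqp),
 * with fc_port->io_queues pointed just past the port header, so a
 * single free(fc_port) releases everything.
 */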
2814 static void
2815 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2816 {
2817 	ASSERT_SPDK_FC_MAIN_THREAD();
2818 	struct spdk_nvmf_fc_port *fc_port = NULL;
2819 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2820 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2821 			api_data->api_args;
2822 	int err = 0;
2823 
2824 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2825 		SPDK_ERRLOG("IO queue count exceeds core count for port %d.\n", args->port_handle);
2826 		err = -EINVAL;
2827 		goto abort_port_init;
2828 	}
2829 
2830 	/*
2831 	 * 1. Check for duplicate initialization.
2832 	 */
2833 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2834 	if (fc_port != NULL) {
2835 		SPDK_ERRLOG("Duplicate port found %d.\n", args->port_handle);
2836 		goto abort_port_init;
2837 	}
2838 
2839 	/*
2840 	 * 2. Get the memory to instantiate a fc port.
2841 	 */
2842 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2843 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2844 	if (fc_port == NULL) {
2845 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2846 		err = -ENOMEM;
2847 		goto abort_port_init;
2848 	}
2849 
2850 	/* assign the io_queues array */
2851 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2852 				     struct spdk_nvmf_fc_port));
2853 
2854 	/*
2855 	 * 3. Initialize the contents for the FC-port
2856 	 */
2857 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2858 
2859 	if (err != 0) {
2860 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2861 		DEV_VERIFY(!"Data initialization failed for fc_port");
2862 		goto abort_port_init;
2863 	}
2864 
2865 	/*
2866 	 * 4. Add this port to the global fc port list in the library.
2867 	 */
2868 	nvmf_fc_port_add(fc_port);
2869 
2870 abort_port_init:
2871 	if (err && fc_port) {
2872 		free(fc_port);
2873 	}
2874 	if (api_data->cb_func != NULL) {
2875 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2876 	}
2877 
2878 	free(arg);
2879 
2880 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d initialize done, rc = %d.\n",
2881 		      args->port_handle, err);
2882 }
2883 
2884 static void
2885 nvmf_fc_adm_hwqp_clean_sync_cb(struct spdk_nvmf_fc_hwqp *hwqp)
2886 {
2887 	struct spdk_nvmf_fc_abts_ctx *ctx;
2888 	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;
2889 
2890 	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
2891 		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
2892 		ctx = args->cb_info.cb_data;
2893 		if (ctx) {
2894 			if (++ctx->hwqps_responded == ctx->num_hwqps) {
2895 				free(ctx->sync_poller_args);
2896 				free(ctx->abts_poller_args);
2897 				free(ctx);
2898 			}
2899 		}
2900 	}
2901 }
2902 
2903 static void
2904 nvmf_fc_adm_evnt_hw_port_free(void *arg)
2905 {
2906 	ASSERT_SPDK_FC_MAIN_THREAD();
2907 	int err = 0, i;
2908 	struct spdk_nvmf_fc_port *fc_port = NULL;
2909 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2910 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2911 	struct spdk_nvmf_fc_hw_port_free_args *args = (struct spdk_nvmf_fc_hw_port_free_args *)
2912 			api_data->api_args;
2913 
2914 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2915 	if (!fc_port) {
2916 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2917 		err = -EINVAL;
2918 		goto out;
2919 	}
2920 
2921 	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
2922 		SPDK_ERRLOG("Hw port %d: nports not yet cleared.\n", args->port_handle);
2923 		err = -EIO;
2924 		goto out;
2925 	}
2926 
2927 	/* Clean up and free fc_port */
2928 	hwqp = &fc_port->ls_queue;
2929 	nvmf_fc_adm_hwqp_clean_sync_cb(hwqp);
2930 	rte_hash_free(hwqp->connection_list_hash);
2931 	rte_hash_free(hwqp->rport_list_hash);
2932 
2933 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2934 		hwqp = &fc_port->io_queues[i];
2935 
2936 		nvmf_fc_adm_hwqp_clean_sync_cb(&fc_port->io_queues[i]);
2937 		rte_hash_free(hwqp->connection_list_hash);
2938 		rte_hash_free(hwqp->rport_list_hash);
2939 	}
2940 
2941 	nvmf_fc_port_remove(fc_port);
2942 	free(fc_port);
2943 out:
2944 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d free done, rc = %d.\n",
2945 		      args->port_handle, err);
2946 	if (api_data->cb_func != NULL) {
2947 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_FREE, args->cb_ctx, err);
2948 	}
2949 
2950 	free(arg);
2951 }
2952 
2953 /*
2954  * Online a HW port.
2955  */
2956 static void
2957 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2958 {
2959 	ASSERT_SPDK_FC_MAIN_THREAD();
2960 	struct spdk_nvmf_fc_port *fc_port = NULL;
2961 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2962 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2963 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2964 			api_data->api_args;
2965 	int i = 0;
2966 	int err = 0;
2967 
2968 	fc_port = nvmf_fc_port_lookup(args->port_handle);
2969 	if (fc_port) {
2970 		/* Set the port state to online */
2971 		err = nvmf_fc_port_set_online(fc_port);
2972 		if (err != 0) {
2973 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2974 			DEV_VERIFY(!"Hw port online failed");
2975 			goto out;
2976 		}
2977 
2978 		hwqp = &fc_port->ls_queue;
2979 		hwqp->context = NULL;
2980 		(void)nvmf_fc_hwqp_set_online(hwqp);
2981 
2982 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2983 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2984 			hwqp = &fc_port->io_queues[i];
2985 			hwqp->context = NULL;
2986 			(void)nvmf_fc_hwqp_set_online(hwqp);
2987 			nvmf_fc_poll_group_add_hwqp(hwqp);
2988 		}
2989 	} else {
2990 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2991 		err = -EINVAL;
2992 	}
2993 
2994 out:
2995 	if (api_data->cb_func != NULL) {
2996 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2997 	}
2998 
2999 	free(arg);
3000 
3001 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d online done, rc = %d.\n", args->port_handle,
3002 		      err);
3003 }
3004 
3005 static void
3006 nvmf_fc_adm_hw_port_offline_cb(void *ctx, int status)
3007 {
3008 	int err = 0;
3009 	struct spdk_nvmf_fc_port *fc_port = NULL;
3010 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args = ctx;
3011 	struct spdk_nvmf_fc_hw_port_offline_args *args = remove_hwqp_args->cb_args;
3012 
3013 	if (--remove_hwqp_args->pending_remove_hwqp) {
3014 		return;
3015 	}
3016 
3017 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3018 	if (!fc_port) {
3019 		err = -EINVAL;
3020 		SPDK_ERRLOG("fc_port not found.\n");
3021 		goto out;
3022 	}
3023 
3024 	/*
3025 	 * Delete all the nports. Ideally, the nports should have been purged
3026 	 * before the offline event, in which case, only a validation is required.
3027 	 */
3028 	nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
3029 out:
3030 	if (remove_hwqp_args->cb_fn) {
3031 		remove_hwqp_args->cb_fn(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3032 	}
3033 
3034 	free(remove_hwqp_args);
3035 }
3036 
3037 /*
3038  * Offline a HW port.
3039  */
3040 static void
3041 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
3042 {
3043 	ASSERT_SPDK_FC_MAIN_THREAD();
3044 	struct spdk_nvmf_fc_port *fc_port = NULL;
3045 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
3046 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3047 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
3048 			api_data->api_args;
3049 	struct spdk_nvmf_fc_remove_hwqp_cb_args *remove_hwqp_args;
3050 	int i = 0;
3051 	int err = 0;
3052 
3053 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3054 	if (fc_port) {
3055 		/* Set the port state to offline, if it is not already. */
3056 		err = nvmf_fc_port_set_offline(fc_port);
3057 		if (err != 0) {
3058 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3059 			err = 0;
3060 			goto out;
3061 		}
3062 
3063 		remove_hwqp_args = calloc(1, sizeof(struct spdk_nvmf_fc_remove_hwqp_cb_args));
3064 		if (!remove_hwqp_args) {
3065 			SPDK_ERRLOG("Failed to alloc memory for remove_hwqp_args\n");
3066 			err = -ENOMEM;
3067 			goto out;
3068 		}
3069 		remove_hwqp_args->cb_fn = api_data->cb_func;
3070 		remove_hwqp_args->cb_args = api_data->api_args;
3071 		remove_hwqp_args->pending_remove_hwqp = fc_port->num_io_queues;
3072 
3073 		hwqp = &fc_port->ls_queue;
3074 		(void)nvmf_fc_hwqp_set_offline(hwqp);
3075 
3076 		/* Remove poller for all the io queues. */
3077 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3078 			hwqp = &fc_port->io_queues[i];
3079 			(void)nvmf_fc_hwqp_set_offline(hwqp);
3080 			nvmf_fc_poll_group_remove_hwqp(hwqp, nvmf_fc_adm_hw_port_offline_cb,
3081 						       remove_hwqp_args);
3082 		}
3083 
3084 		free(arg);
3085 
3086 		/* Wait until all the hwqps are removed from poll groups. */
3087 		return;
3088 	} else {
3089 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3090 		err = -EINVAL;
3091 	}
3092 out:
3093 	if (api_data->cb_func != NULL) {
3094 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3095 	}
3096 
3097 	free(arg);
3098 
3099 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d offline done, rc = %d.\n", args->port_handle,
3100 		      err);
3101 }
3102 
3103 struct nvmf_fc_add_rem_listener_ctx {
3104 	struct spdk_nvmf_subsystem *subsystem;
3105 	bool add_listener;
3106 	struct spdk_nvme_transport_id trid;
3107 };
3108 
3109 static void
3110 nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3111 {
3112 	ASSERT_SPDK_FC_MAIN_THREAD();
3113 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3114 	free(ctx);
3115 }
3116 
3117 static void
3118 nvmf_fc_adm_listen_done(void *cb_arg, int status)
3119 {
3120 	ASSERT_SPDK_FC_MAIN_THREAD();
3121 	struct nvmf_fc_add_rem_listener_ctx *ctx = cb_arg;
3122 
3123 	if (spdk_nvmf_subsystem_resume(ctx->subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3124 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", ctx->subsystem->subnqn);
3125 		free(ctx);
3126 	}
3127 }
3128 
3129 static void
3130 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3131 {
3132 	ASSERT_SPDK_FC_MAIN_THREAD();
3133 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3134 
3135 	if (ctx->add_listener) {
3136 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid, nvmf_fc_adm_listen_done, ctx);
3137 	} else {
3138 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3139 		nvmf_fc_adm_listen_done(ctx, 0);
3140 	}
3141 }
3142 
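/*
 * Walk every subsystem on the target and add (or remove) this nport's
 * WWNN/WWPN pair as a listen address. Per subsystem the sequence is:
 * ask the target to listen on the trid, pause the subsystem, add or
 * remove the listener while paused, then resume; the ctx allocated
 * here is freed at the end of that callback chain.
 */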
3143 static int
3144 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3145 {
3146 	struct spdk_nvmf_tgt *tgt = nvmf_fc_get_tgt();
3147 	struct spdk_nvmf_subsystem *subsystem;
3148 	struct spdk_nvmf_listen_opts opts;
3149 
3150 	if (!tgt) {
3151 		SPDK_ERRLOG("No nvmf target defined\n");
3152 		return -EINVAL;
3153 	}
3154 
3155 	spdk_nvmf_listen_opts_init(&opts, sizeof(opts));
3156 
3157 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3158 	while (subsystem) {
3159 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3160 
3161 		if (spdk_nvmf_subsystem_any_listener_allowed(subsystem) == true) {
3162 			ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3163 			if (ctx) {
3164 				ctx->add_listener = add;
3165 				ctx->subsystem = subsystem;
3166 				nvmf_fc_create_trid(&ctx->trid,
3167 						    nport->fc_nodename.u.wwn,
3168 						    nport->fc_portname.u.wwn);
3169 
3170 				if (spdk_nvmf_tgt_listen_ext(subsystem->tgt, &ctx->trid, &opts)) {
3171 					SPDK_ERRLOG("Failed to add transport address %s to tgt listeners\n",
3172 						    ctx->trid.traddr);
3173 					free(ctx);
3174 				} else if (spdk_nvmf_subsystem_pause(subsystem,
3175 								     0,
3176 								     nvmf_fc_adm_subsystem_paused_cb,
3177 								     ctx)) {
3178 					SPDK_ERRLOG("Failed to pause subsystem: %s\n",
3179 						    subsystem->subnqn);
3180 					free(ctx);
3181 				}
3182 			}
3183 		}
3184 
3185 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3186 	}
3187 
3188 	return 0;
3189 }
3190 
3191 /*
3192  * Create a Nport.
3193  */
3194 static void
3195 nvmf_fc_adm_evnt_nport_create(void *arg)
3196 {
3197 	ASSERT_SPDK_FC_MAIN_THREAD();
3198 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3199 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3200 			api_data->api_args;
3201 	struct spdk_nvmf_fc_nport *nport = NULL;
3202 	struct spdk_nvmf_fc_port *fc_port = NULL;
3203 	int err = 0;
3204 
3205 	/*
3206 	 * Get the physical port.
3207 	 */
3208 	fc_port = nvmf_fc_port_lookup(args->port_handle);
3209 	if (fc_port == NULL) {
3210 		err = -EINVAL;
3211 		goto out;
3212 	}
3213 
3214 	/*
3215 	 * Check for duplicate initialization.
3216 	 */
3217 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3218 	if (nport != NULL) {
3219 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3220 			    args->port_handle);
3221 		err = -EINVAL;
3222 		goto out;
3223 	}
3224 
3225 	/*
3226 	 * Get the memory to instantiate a fc nport.
3227 	 */
3228 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3229 	if (nport == NULL) {
3230 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3231 			    args->nport_handle);
3232 		err = -ENOMEM;
3233 		goto out;
3234 	}
3235 
3236 	/*
3237 	 * Initialize the contents for the nport
3238 	 */
3239 	nport->nport_hdl    = args->nport_handle;
3240 	nport->port_hdl     = args->port_handle;
3241 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3242 	nport->fc_nodename  = args->fc_nodename;
3243 	nport->fc_portname  = args->fc_portname;
3244 	nport->d_id         = args->d_id;
3245 	nport->fc_port      = fc_port;
3246 
3247 	(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3248 	TAILQ_INIT(&nport->rem_port_list);
3249 	nport->rport_count = 0;
3250 	TAILQ_INIT(&nport->fc_associations);
3251 	nport->assoc_count = 0;
3252 
3253 	/*
3254 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3255 	 */
3256 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3257 
3258 	(void)nvmf_fc_port_add_nport(fc_port, nport);
3259 out:
3260 	if (err && nport) {
3261 		free(nport);
3262 	}
3263 
3264 	if (api_data->cb_func != NULL) {
3265 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3266 	}
3267 
3268 	free(arg);
3269 }
3270 
3271 static void
3272 nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
3273 			    void *cb_args, int spdk_err)
3274 {
3275 	ASSERT_SPDK_FC_MAIN_THREAD();
3276 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
3277 	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
3278 	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
3279 	int err = 0;
3280 	uint16_t nport_hdl = 0;
3281 	char log_str[256];
3282 
3283 	/*
3284 	 * Assert on any delete failure.
3285 	 */
3286 	if (nport == NULL) {
3287 		SPDK_ERRLOG("Nport delete callback returned null nport.\n");
3288 		DEV_VERIFY(!"nport is null.");
3289 		goto out;
3290 	}
3291 
3292 	nport_hdl = nport->nport_hdl;
3293 	if (0 != spdk_err) {
3294 		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
3295 			    "%d, Nport: %d\n",
3296 			    nport->port_hdl, nport->nport_hdl);
3297 		DEV_VERIFY(!"nport delete callback error.");
3298 	}
3299 
3300 	/*
3301 	 * Free the nport if this is the last rport being deleted and
3302 	 * execute the callback(s).
3303 	 */
3304 	if (nvmf_fc_nport_has_no_rport(nport)) {
3305 		if (0 != nport->assoc_count) {
3306 			SPDK_ERRLOG("association count != 0\n");
3307 			DEV_VERIFY(!"association count != 0");
3308 		}
3309 
3310 		err = nvmf_fc_port_remove_nport(nport->fc_port, nport);
3311 		if (0 != err) {
3312 			SPDK_ERRLOG("Nport delete callback: Failed to remove "
3313 				    "nport from nport list. FC Port:%d Nport:%d\n",
3314 				    nport->port_hdl, nport->nport_hdl);
3315 		}
3316 		/* Free the nport */
3317 		free(nport);
3318 
3319 		if (cb_func != NULL) {
3320 			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
3321 		}
3322 		free(cb_data);
3323 	}
3324 out:
3325 	snprintf(log_str, sizeof(log_str),
3326 		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
3327 		 port_handle, nport_hdl, event_type, spdk_err);
3328 
3329 	if (err != 0) {
3330 		SPDK_ERRLOG("%s", log_str);
3331 	} else {
3332 		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
3333 	}
3334 }
3335 
3336 /*
3337  * Delete Nport.
3338  */
3339 static void
3340 nvmf_fc_adm_evnt_nport_delete(void *arg)
3341 {
3342 	ASSERT_SPDK_FC_MAIN_THREAD();
3343 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3344 	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
3345 			api_data->api_args;
3346 	struct spdk_nvmf_fc_nport *nport = NULL;
3347 	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
3348 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3349 	int err = 0;
3350 	uint32_t rport_cnt = 0;
3351 	int rc = 0;
3352 
3353 	/*
3354 	 * Make sure that the nport exists.
3355 	 */
3356 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3357 	if (nport == NULL) {
3358 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
3359 			    args->port_handle);
3360 		err = -EINVAL;
3361 		goto out;
3362 	}
3363 
3364 	/*
3365 	 * Allocate memory for callback data.
3366 	 */
3367 	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
3368 	if (NULL == cb_data) {
3369 		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
3370 		err = -ENOMEM;
3371 		goto out;
3372 	}
3373 
3374 	cb_data->nport = nport;
3375 	cb_data->port_handle = args->port_handle;
3376 	cb_data->fc_cb_func = api_data->cb_func;
3377 	cb_data->fc_cb_ctx = args->cb_ctx;
3378 
3379 	/*
3380 	 * Begin nport tear down
3381 	 */
3382 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
3383 		(void)nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
3384 	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3385 		/*
3386 		 * Deletion of this nport already in progress. Register callback
3387 		 * and return.
3388 		 */
3389 		/* TODO: Register callback in callback vector. For now, set the error and return. */
3390 		err = -ENODEV;
3391 		goto out;
3392 	} else {
3393 		/* nport partially created/deleted */
3394 		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
3395 		DEV_VERIFY(0 != "Nport in zombie state");
3396 		err = -ENODEV;
3397 		goto out;
3398 	}
3399 
3400 	/*
3401 	 * Remove this nport from listening addresses across subsystems
3402 	 */
3403 	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);
3404 
3405 	if (0 != rc) {
3406 		err = nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
3407 		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
3408 			    nport->nport_hdl);
3409 		goto out;
3410 	}
3411 
3412 	/*
3413 	 * Delete all the remote ports (if any) for the nport
3414 	 */
3415 	/* TODO - Need to do this with a "first" and a "next" accessor function
3416 	 * for completeness. See app-subsystem for examples.
3417 	 */
3418 	if (nvmf_fc_nport_has_no_rport(nport)) {
3419 		/* No rports to delete. Complete the nport deletion. */
3420 		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
3421 		goto out;
3422 	}
3423 
3424 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3425 		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
3426 					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));
3427 
3428 		if (it_del_args == NULL) {
3429 			err = -ENOMEM;
3430 			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
3431 				    rport_iter->rpi, rport_iter->s_id);
3432 			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
3433 			goto out;
3434 		}
3435 
3436 		rport_cnt++;
3437 		it_del_args->port_handle = nport->port_hdl;
3438 		it_del_args->nport_handle = nport->nport_hdl;
3439 		it_del_args->cb_ctx = (void *)cb_data;
3440 		it_del_args->rpi = rport_iter->rpi;
3441 		it_del_args->s_id = rport_iter->s_id;
3442 
3443 		err = nvmf_fc_main_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
3444 						 nvmf_fc_adm_delete_nport_cb);
3445 		if (err) {
3446 			free(it_del_args);
3447 		}
3448 	}
3449 
3450 out:
3451 	/* On failure, execute the callback function now */
3452 	if ((err != 0) || (rc != 0)) {
3453 		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
3454 			    "rport_cnt:%d rc:%d.\n",
3455 			    args->nport_handle, err, args->port_handle,
3456 			    rport_cnt, rc);
3457 		if (cb_data) {
3458 			free(cb_data);
3459 		}
3460 		if (api_data->cb_func != NULL) {
3461 			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
3462 		}
3463 
3464 	} else {
3465 		SPDK_DEBUGLOG(nvmf_fc_adm_api,
3466 			      "NPort %d delete done successfully, fc port:%d. "
3467 			      "rport_cnt:%d\n",
3468 			      args->nport_handle, args->port_handle, rport_cnt);
3469 	}
3470 
3471 	free(arg);
3472 }
3473 
3474 /*
3475  * Process a PRLI/IT add.
3476  */
3477 static void
3478 nvmf_fc_adm_evnt_i_t_add(void *arg)
3479 {
3480 	ASSERT_SPDK_FC_MAIN_THREAD();
3481 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3482 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3483 			api_data->api_args;
3484 	struct spdk_nvmf_fc_nport *nport = NULL;
3485 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3486 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3487 	int err = 0;
3488 
3489 	/*
3490 	 * Make sure the nport exists.
3491 	 */
3492 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3493 	if (nport == NULL) {
3494 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3495 		err = -EINVAL;
3496 		goto out;
3497 	}
3498 
3499 	/*
3500 	 * Check for duplicate i_t_add.
3501 	 */
3502 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3503 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3504 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3505 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3506 			err = -EEXIST;
3507 			goto out;
3508 		}
3509 	}
3510 
3511 	/*
3512 	 * Get the memory to instantiate the remote port
3513 	 */
3514 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3515 	if (rport == NULL) {
3516 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3517 		err = -ENOMEM;
3518 		goto out;
3519 	}
3520 
3521 	/*
3522 	 * Initialize the contents for the rport
3523 	 */
3524 	(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3525 	rport->s_id = args->s_id;
3526 	rport->rpi = args->rpi;
3527 	rport->fc_nodename = args->fc_nodename;
3528 	rport->fc_portname = args->fc_portname;
3529 
3530 	/*
3531 	 * Add remote port to nport
3532 	 */
3533 	if (nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3534 		DEV_VERIFY(!"Error while adding rport to list");
3535 	}
3536 
3537 	/*
3538 	 * TODO: Do we validate the initiator's service parameters?
3539 	 */
3540 
3541 	/*
3542 	 * Get the target's service parameters from the library
3543 	 * to return to the driver.
3544 	 */
3545 	args->target_prli_info = nvmf_fc_get_prli_service_params();
3546 
3547 out:
3548 	if (api_data->cb_func != NULL) {
3549 		/*
3550 		 * Report completion to the caller: the port handle is passed
3551 		 * as the first argument and args->cb_ctx carries the context.
3552 		 */
3553 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3554 	}
3555 
3556 	free(arg);
3557 
3558 	SPDK_DEBUGLOG(nvmf_fc_adm_api,
3559 		      "IT add on nport %d done, rc = %d.\n",
3560 		      args->nport_handle, err);
3561 }
3562 
3563 /**
3564  * Process an IT delete.
3565  */
3566 static void
3567 nvmf_fc_adm_evnt_i_t_delete(void *arg)
3568 {
3569 	ASSERT_SPDK_FC_MAIN_THREAD();
3570 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3571 	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
3572 			api_data->api_args;
3573 	int rc = 0;
3574 	struct spdk_nvmf_fc_nport *nport = NULL;
3575 	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
3576 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3577 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3578 	uint32_t num_rport = 0;
3579 	char log_str[256];
3580 
3581 	SPDK_DEBUGLOG(nvmf_fc_adm_api, "IT delete on nport:%d begin.\n", args->nport_handle);
3582 
3583 	/*
3584 	 * Make sure the nport port exists. If it does not, error out.
3585 	 */
3586 	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3587 	if (nport == NULL) {
3588 		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
3589 		rc = -EINVAL;
3590 		goto out;
3591 	}
3592 
3593 	/*
3594 	 * Find this ITN / rport (remote port).
3595 	 */
3596 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3597 		num_rport++;
3598 		if ((rport_iter->s_id == args->s_id) &&
3599 		    (rport_iter->rpi == args->rpi) &&
3600 		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
3601 			rport = rport_iter;
3602 			break;
3603 		}
3604 	}
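	/* num_rport counts all remote ports on this nport; it is used only in the trailing log line. */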

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, a previous request has already removed the
	 * rport by the time we reached here. In this case, simply return.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have the rport slated for deletion. First clean up any LS
	 * requests sitting in the pending list, then set the rport state so
	 * that new LS requests are not accepted, and then start the cleanup.
	 */
	nvmf_fc_delete_ls_pending(&(nport->fc_port->ls_queue), nport, rport);

	/*
	 * We have found exactly one rport. Allocate memory for the callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (cb_data == NULL) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;
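	/*
	 * fc_cb_func/fc_cb_ctx are not invoked here; on the success path they
	 * are called later from nvmf_fc_adm_i_t_delete_cb once the rport
	 * teardown completes.
	 */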

	/*
	 * Validate the rport object state.
	 */
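	/*
	 * Expected lifecycle: CREATED -> TO_BE_DELETED while teardown is in
	 * flight; ZOMBIE marks a partially created/deleted object and should
	 * not be seen on this path.
	 */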
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport is already in progress. Register a
		 * callback and return.
		 */
		/* TODO: Register the callback in a callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found an rport to delete. Call
	 * nvmf_fc_adm_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We get here because either we encountered an error or we
		 * did not find an rport to delete. As a result, we will not
		 * call nvmf_fc_adm_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "%s", log_str);
	}

	free(arg);
}

/*
 * Process a received ABTS.
 */
static void
nvmf_fc_adm_evnt_abts_recv(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	int err = 0;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
		      args->oxid, args->rxid);

	/*
	 * 1. Make sure the nport exists.
	 */
	nport = nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * 2. If the nport is in the process of being deleted, drop the ABTS.
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		SPDK_DEBUGLOG(nvmf_fc_adm_api,
			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
			      args->rpi, args->oxid, args->rxid);
		err = 0;
		goto out;
	}

	/*
	 * 3. Pass the received ABTS-LS to the library for handling.
	 */
	nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);

out:
	if (api_data->cb_func != NULL) {
		/*
		 * The args struct itself is passed as the context argument, so
		 * the cb_func takes over ownership of it and is expected to
		 * free it.
		 */
		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
	} else {
		/* No callback set, free the args. */
		free(args);
	}

	free(arg);
}

/*
 * Callback function for HW port quiesce.
 */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	*args->dump_buf = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;
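	/*
	 * dump_info acts as a write cursor into dump_buf; nvmf_fc_dump_buf_print()
	 * appends formatted text at the current offset.
	 */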

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqps.
	 */
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, &dump_info);

out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}

/*
 * HW port reset.
 */
static void
nvmf_fc_adm_evnt_hw_port_reset(void *arg)
{
	ASSERT_SPDK_FC_MAIN_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
	int err = 0;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump\n", args->port_handle);

	/*
	 * Make sure the physical port exists.
	 */
	fc_port = nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Save the reset event args and the callback in a context struct.
	 */
	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
	if (ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
		goto fail;
	}

	ctx->reset_args = args;
	ctx->reset_cb_func = api_data->cb_func;

	/*
	 * Quiesce the HW port.
	 */
	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
	if (err != 0) {
		goto fail;
	}

	/*
	 * Once the port is successfully quiesced, the reset processing
	 * continues in the callback function nvmf_fc_adm_hw_port_quiesce_reset_cb().
	 */
	return;
fail:
	free(ctx);

out:
	SPDK_DEBUGLOG(nvmf_fc_adm_api, "HW port %d dump done, rc = %d.\n", args->port_handle,
		      err);

	if (api_data->cb_func != NULL) {
		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}

	free(arg);
}

static inline void
nvmf_fc_adm_run_on_main_thread(spdk_msg_fn fn, void *args)
{
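	/*
	 * If the main thread has not been set, the message is silently
	 * dropped (and the caller's args are not freed).
	 */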
	if (nvmf_fc_get_main_thread()) {
		spdk_thread_send_msg(nvmf_fc_get_main_thread(), fn, args);
	}
}

/*
 * Queue up an event in the SPDK main thread's event queue.
 * Used by the FC driver to notify the SPDK main thread of FC-related events.
 */
int
nvmf_fc_main_enqueue_event(enum spdk_fc_event event_type, void *args,
			   spdk_nvmf_fc_callback cb_func)
{
	int err = 0;
	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
	spdk_msg_fn event_fn = NULL;

	SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d.\n", event_type);

	if (event_type >= SPDK_FC_EVENT_MAX) {
		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
		err = -EINVAL;
		goto done;
	}

	if (args == NULL) {
		SPDK_ERRLOG("Null args for event %d.\n", event_type);
		err = -EINVAL;
		goto done;
	}

	api_data = calloc(1, sizeof(*api_data));
	if (api_data == NULL) {
		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
		err = -ENOMEM;
		goto done;
	}

	api_data->api_args = args;
	api_data->cb_func = cb_func;

	switch (event_type) {
	case SPDK_FC_HW_PORT_INIT:
		event_fn = nvmf_fc_adm_evnt_hw_port_init;
		break;

	case SPDK_FC_HW_PORT_FREE:
		event_fn = nvmf_fc_adm_evnt_hw_port_free;
		break;

	case SPDK_FC_HW_PORT_ONLINE:
		event_fn = nvmf_fc_adm_evnt_hw_port_online;
		break;

	case SPDK_FC_HW_PORT_OFFLINE:
		event_fn = nvmf_fc_adm_evnt_hw_port_offline;
		break;

	case SPDK_FC_NPORT_CREATE:
		event_fn = nvmf_fc_adm_evnt_nport_create;
		break;

	case SPDK_FC_NPORT_DELETE:
		event_fn = nvmf_fc_adm_evnt_nport_delete;
		break;

	case SPDK_FC_IT_ADD:
		event_fn = nvmf_fc_adm_evnt_i_t_add;
		break;

	case SPDK_FC_IT_DELETE:
		event_fn = nvmf_fc_adm_evnt_i_t_delete;
		break;

	case SPDK_FC_ABTS_RECV:
		event_fn = nvmf_fc_adm_evnt_abts_recv;
		break;

	case SPDK_FC_HW_PORT_RESET:
		event_fn = nvmf_fc_adm_evnt_hw_port_reset;
		break;

	case SPDK_FC_UNRECOVERABLE_ERR:
	default:
		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
		err = -EINVAL;
		break;
	}

done:
	if (err == 0) {
		assert(event_fn != NULL);
		nvmf_fc_adm_run_on_main_thread(event_fn, (void *)api_data);
		SPDK_DEBUGLOG(nvmf_fc_adm_api, "Enqueue event %d done successfully\n", event_type);
	} else {
		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
		if (api_data) {
			free(api_data);
		}
	}

	return err;
}

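/*
 * Usage sketch (illustrative; lld_it_add_done, lld_ctx and the field values
 * are hypothetical): a low-level driver that has accepted a PRLI could
 * enqueue an IT-add event like this. The event handler runs on the SPDK
 * main thread and invokes lld_it_add_done(port_handle, SPDK_FC_IT_ADD,
 * cb_ctx, err) when it is done; on a non-zero return the caller still owns
 * args.
 *
 *	struct spdk_nvmf_fc_hw_i_t_add_args *args = calloc(1, sizeof(*args));
 *
 *	args->port_handle = port_handle;
 *	args->nport_handle = nport_handle;
 *	args->s_id = s_id;
 *	args->rpi = rpi;
 *	args->cb_ctx = lld_ctx;
 *	if (nvmf_fc_main_enqueue_event(SPDK_FC_IT_ADD, args, lld_it_add_done) != 0) {
 *		free(args);
 *	}
 */
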
SPDK_NVMF_TRANSPORT_REGISTER(fc, &spdk_nvmf_transport_fc);
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc_adm_api)
SPDK_LOG_REGISTER_COMPONENT(nvmf_fc)