xref: /spdk/lib/nvmf/fc.c (revision 407e88fd2ab020d753e33014cf759353a9901b51)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * NVMe_FC transport functions.
36  */
37 
38 #include "spdk/env.h"
39 #include "spdk/assert.h"
40 #include "spdk/nvmf.h"
41 #include "spdk/nvmf_spec.h"
42 #include "spdk/string.h"
43 #include "spdk/trace.h"
44 #include "spdk/util.h"
45 #include "spdk/event.h"
46 #include "spdk/likely.h"
47 #include "spdk/endian.h"
48 #include "spdk/log.h"
49 #include "spdk/io_channel.h"
50 
51 #include "spdk_internal/log.h"
52 
53 #include "nvmf_internal.h"
54 #include "transport.h"
55 #include "nvmf_fc.h"
56 #include "fc_lld.h"
57 
/* Development-time assertion; allows builds to override with their own hook. */
#ifndef DEV_VERIFY
#define DEV_VERIFY assert
#endif

/* Asserts the caller is on the FC master thread (compiled out with NDEBUG). */
#ifndef ASSERT_SPDK_FC_MASTER_THREAD
#define ASSERT_SPDK_FC_MASTER_THREAD() \
        DEV_VERIFY(spdk_get_thread() == spdk_nvmf_fc_get_master_thread());
#endif

/*
 * PRLI service parameters
 * Bit flags advertised/parsed in the FC-NVMe PRLI service parameter page.
 */
enum spdk_nvmf_fc_service_parameters {
	SPDK_NVMF_FC_FIRST_BURST_SUPPORTED = 0x0001,
	SPDK_NVMF_FC_DISCOVERY_SERVICE = 0x0008,
	SPDK_NVMF_FC_TARGET_FUNCTION = 0x0010,
	SPDK_NVMF_FC_INITIATOR_FUNCTION = 0x0020,
	SPDK_NVMF_FC_CONFIRMED_COMPLETION_SUPPORTED = 0x0080,
};
77 
/*
 * Printable names for FC request states, indexed by the request state value.
 * NOTE(review): entry order must stay in sync with the
 * spdk_nvmf_fc_request_state enum declared in nvmf_fc.h — confirm when editing.
 */
static char *fc_req_state_strs[] = {
	"SPDK_NVMF_FC_REQ_INIT",
	"SPDK_NVMF_FC_REQ_READ_BDEV",
	"SPDK_NVMF_FC_REQ_READ_XFER",
	"SPDK_NVMF_FC_REQ_READ_RSP",
	"SPDK_NVMF_FC_REQ_WRITE_BUFFS",
	"SPDK_NVMF_FC_REQ_WRITE_XFER",
	"SPDK_NVMF_FC_REQ_WRITE_BDEV",
	"SPDK_NVMF_FC_REQ_WRITE_RSP",
	"SPDK_NVMF_FC_REQ_NONE_BDEV",
	"SPDK_NVMF_FC_REQ_NONE_RSP",
	"SPDK_NVMF_FC_REQ_SUCCESS",
	"SPDK_NVMF_FC_REQ_FAILED",
	"SPDK_NVMF_FC_REQ_ABORTED",
	"SPDK_NVMF_FC_REQ_BDEV_ABORTED",
	"SPDK_NVMF_FC_REQ_PENDING"
};
95 
/* Trace object type for FC IO requests. */
#define OBJECT_NVMF_FC_IO				0xA0

/* Tracepoint ids for each FC request state, all in the nvmf_fc trace group. */
#define TRACE_GROUP_NVMF_FC				0x8
#define TRACE_FC_REQ_INIT                       SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x01)
#define TRACE_FC_REQ_READ_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x02)
#define TRACE_FC_REQ_READ_XFER                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x03)
#define TRACE_FC_REQ_READ_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x04)
#define TRACE_FC_REQ_WRITE_BUFFS                SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x05)
#define TRACE_FC_REQ_WRITE_XFER                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x06)
#define TRACE_FC_REQ_WRITE_BDEV                 SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x07)
#define TRACE_FC_REQ_WRITE_RSP                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x08)
#define TRACE_FC_REQ_NONE_BDEV                  SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x09)
#define TRACE_FC_REQ_NONE_RSP                   SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0A)
#define TRACE_FC_REQ_SUCCESS                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0B)
#define TRACE_FC_REQ_FAILED                     SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0C)
#define TRACE_FC_REQ_ABORTED                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0D)
#define TRACE_FC_REQ_BDEV_ABORTED               SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0E)
#define TRACE_FC_REQ_PENDING                    SPDK_TPOINT_ID(TRACE_GROUP_NVMF_FC, 0x0F)
114 
/*
 * Register the FC IO trace object and one description per request-state
 * tracepoint with the SPDK trace framework.
 */
SPDK_TRACE_REGISTER_FN(nvmf_fc_trace, "nvmf_fc", TRACE_GROUP_NVMF_FC)
{
	spdk_trace_register_object(OBJECT_NVMF_FC_IO, 'r');
	spdk_trace_register_description("FC_REQ_NEW",
					TRACE_FC_REQ_INIT,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 1, 1, "");
	spdk_trace_register_description("FC_REQ_READ_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_READ_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_XFER_DATA",
					TRACE_FC_REQ_READ_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_READ_RSP",
					TRACE_FC_REQ_READ_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_NEED_BUFFER",
					TRACE_FC_REQ_WRITE_BUFFS,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_XFER_DATA",
					TRACE_FC_REQ_WRITE_XFER,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_WRITE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_WRITE_RSP",
					TRACE_FC_REQ_WRITE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_NONE_BDEV,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_NONE_RSP",
					TRACE_FC_REQ_NONE_RSP,
					OWNER_NONE, OBJECT_NVMF_FC_IO, 0, 1, "");
	spdk_trace_register_description("FC_REQ_SUCCESS",
					TRACE_FC_REQ_SUCCESS,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_FAILED",
					TRACE_FC_REQ_FAILED,
					OWNER_NONE, OBJECT_NONE, 0, 0, "");
	spdk_trace_register_description("FC_REQ_ABORTED",
					TRACE_FC_REQ_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_ABORTED_SUBMIT_TO_BDEV",
					TRACE_FC_REQ_BDEV_ABORTED,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
	spdk_trace_register_description("FC_REQ_PENDING",
					TRACE_FC_REQ_PENDING,
					OWNER_NONE, OBJECT_NONE, 0, 1, "");
}
164 
165 /**
166  * The structure used by all fc adm functions
167  */
168 struct spdk_nvmf_fc_adm_api_data {
169 	void *api_args;
170 	spdk_nvmf_fc_callback cb_func;
171 };
172 
173 /**
174  * The callback structure for nport-delete
175  */
176 struct spdk_nvmf_fc_adm_nport_del_cb_data {
177 	struct spdk_nvmf_fc_nport *nport;
178 	uint8_t port_handle;
179 	spdk_nvmf_fc_callback fc_cb_func;
180 	void *fc_cb_ctx;
181 };
182 
183 /**
184  * The callback structure for it-delete
185  */
186 struct spdk_nvmf_fc_adm_i_t_del_cb_data {
187 	struct spdk_nvmf_fc_nport *nport;
188 	struct spdk_nvmf_fc_remote_port_info *rport;
189 	uint8_t port_handle;
190 	spdk_nvmf_fc_callback fc_cb_func;
191 	void *fc_cb_ctx;
192 };
193 
194 
195 typedef void (*spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn)(void *arg, uint32_t err);
196 
197 /**
198  * The callback structure for the it-delete-assoc callback
199  */
200 struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data {
201 	struct spdk_nvmf_fc_nport *nport;
202 	struct spdk_nvmf_fc_remote_port_info *rport;
203 	uint8_t port_handle;
204 	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func;
205 	void *cb_ctx;
206 };
207 
208 /*
209  * Call back function pointer for HW port quiesce.
210  */
211 typedef void (*spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn)(void *ctx, int err);
212 
213 /**
214  * Context structure for quiescing a hardware port
215  */
216 struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx {
217 	int quiesce_count;
218 	void *ctx;
219 	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func;
220 };
221 
222 /**
223  * Context structure used to reset a hardware port
224  */
225 struct spdk_nvmf_fc_adm_hw_port_reset_ctx {
226 	void *reset_args;
227 	spdk_nvmf_fc_callback reset_cb_func;
228 };
229 
230 /**
231  * The callback structure for HW port link break event
232  */
233 struct spdk_nvmf_fc_adm_port_link_break_cb_data {
234 	struct spdk_nvmf_hw_port_link_break_args *args;
235 	struct spdk_nvmf_fc_nport_delete_args nport_del_args;
236 	spdk_nvmf_fc_callback cb_func;
237 };
238 
239 struct spdk_nvmf_fc_transport {
240 	struct spdk_nvmf_transport transport;
241 	pthread_mutex_t lock;
242 };
243 
/* Singleton FC transport instance. */
static struct spdk_nvmf_fc_transport *g_nvmf_fc_transport;

/* All known FC ports, added via spdk_nvmf_fc_port_add(). */
static TAILQ_HEAD(, spdk_nvmf_fc_port) g_spdk_nvmf_fc_port_list =
	TAILQ_HEAD_INITIALIZER(g_spdk_nvmf_fc_port_list);

/* Thread that owns FC admin/master operations. */
static struct spdk_thread *g_nvmf_fc_master_thread = NULL;

/* Poll groups available for hwqp assignment (see nvmf_fc_get_idlest_poll_group). */
static uint32_t g_nvmf_fc_poll_group_count = 0;
static TAILQ_HEAD(, spdk_nvmf_fc_poll_group) g_nvmf_fc_poll_groups =
	TAILQ_HEAD_INITIALIZER(g_nvmf_fc_poll_groups);
254 
/* Return the FC master thread (NULL until the transport is initialized). */
struct spdk_thread *
spdk_nvmf_fc_get_master_thread(void)
{
	return g_nvmf_fc_master_thread;
}
260 
261 static inline void
262 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
263 			       enum spdk_nvmf_fc_request_state state)
264 {
265 	uint16_t tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
266 
267 	switch (state) {
268 	case SPDK_NVMF_FC_REQ_INIT:
269 		/* Start IO tracing */
270 		tpoint_id = TRACE_FC_REQ_INIT;
271 		break;
272 	case SPDK_NVMF_FC_REQ_READ_BDEV:
273 		tpoint_id = TRACE_FC_REQ_READ_BDEV;
274 		break;
275 	case SPDK_NVMF_FC_REQ_READ_XFER:
276 		tpoint_id = TRACE_FC_REQ_READ_XFER;
277 		break;
278 	case SPDK_NVMF_FC_REQ_READ_RSP:
279 		tpoint_id = TRACE_FC_REQ_READ_RSP;
280 		break;
281 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
282 		tpoint_id = TRACE_FC_REQ_WRITE_BUFFS;
283 		break;
284 	case SPDK_NVMF_FC_REQ_WRITE_XFER:
285 		tpoint_id = TRACE_FC_REQ_WRITE_XFER;
286 		break;
287 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
288 		tpoint_id = TRACE_FC_REQ_WRITE_BDEV;
289 		break;
290 	case SPDK_NVMF_FC_REQ_WRITE_RSP:
291 		tpoint_id = TRACE_FC_REQ_WRITE_RSP;
292 		break;
293 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
294 		tpoint_id = TRACE_FC_REQ_NONE_BDEV;
295 		break;
296 	case SPDK_NVMF_FC_REQ_NONE_RSP:
297 		tpoint_id = TRACE_FC_REQ_NONE_RSP;
298 		break;
299 	case SPDK_NVMF_FC_REQ_SUCCESS:
300 		tpoint_id = TRACE_FC_REQ_SUCCESS;
301 		break;
302 	case SPDK_NVMF_FC_REQ_FAILED:
303 		tpoint_id = TRACE_FC_REQ_FAILED;
304 		break;
305 	case SPDK_NVMF_FC_REQ_ABORTED:
306 		tpoint_id = TRACE_FC_REQ_ABORTED;
307 		break;
308 	case SPDK_NVMF_FC_REQ_BDEV_ABORTED:
309 		tpoint_id = TRACE_FC_REQ_ABORTED;
310 		break;
311 	case SPDK_NVMF_FC_REQ_PENDING:
312 		tpoint_id = TRACE_FC_REQ_PENDING;
313 		break;
314 	default:
315 		assert(0);
316 		break;
317 	}
318 	if (tpoint_id != SPDK_TRACE_MAX_TPOINT_ID) {
319 		spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
320 				  (uint64_t)(&fc_req->req), 0);
321 	}
322 }
323 
324 static void
325 nvmf_fc_handle_connection_failure(void *arg)
326 {
327 	struct spdk_nvmf_fc_conn *fc_conn = arg;
328 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
329 
330 	if (!fc_conn->create_opd) {
331 		return;
332 	}
333 	api_data = &fc_conn->create_opd->u.add_conn;
334 
335 	nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
336 				    api_data->args.fc_conn, api_data->aq_conn);
337 }
338 
/*
 * Kick off deletion of the association the given connection belongs to.
 * send_abts=false, backend_initiated=true, no completion callback
 * (argument meanings per spdk_nvmf_fc_delete_association — confirm in nvmf_fc.h).
 */
static void
nvmf_fc_handle_assoc_deletion(void *arg)
{
	struct spdk_nvmf_fc_conn *fc_conn = arg;

	spdk_nvmf_fc_delete_association(fc_conn->fc_assoc->tgtport,
					fc_conn->fc_assoc->assoc_id, false, true, NULL, NULL);
}
347 
348 static int
349 nvmf_fc_create_req_mempool(struct spdk_nvmf_fc_hwqp *hwqp)
350 {
351 	uint32_t i;
352 	struct spdk_nvmf_fc_request *fc_req;
353 
354 	TAILQ_INIT(&hwqp->free_reqs);
355 	TAILQ_INIT(&hwqp->in_use_reqs);
356 
357 	hwqp->fc_reqs_buf = calloc(hwqp->rq_size, sizeof(struct spdk_nvmf_fc_request));
358 	if (hwqp->fc_reqs_buf == NULL) {
359 		SPDK_ERRLOG("create fc request pool failed\n");
360 		return -ENOMEM;
361 	}
362 
363 	for (i = 0; i < hwqp->rq_size; i++) {
364 		fc_req = hwqp->fc_reqs_buf + i;
365 
366 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
367 		TAILQ_INSERT_TAIL(&hwqp->free_reqs, fc_req, link);
368 	}
369 
370 	return 0;
371 }
372 
373 static inline struct spdk_nvmf_fc_request *
374 nvmf_fc_hwqp_alloc_fc_request(struct spdk_nvmf_fc_hwqp *hwqp)
375 {
376 	struct spdk_nvmf_fc_request *fc_req;
377 
378 	if (TAILQ_EMPTY(&hwqp->free_reqs)) {
379 		SPDK_ERRLOG("Alloc request buffer failed\n");
380 		return NULL;
381 	}
382 
383 	fc_req = TAILQ_FIRST(&hwqp->free_reqs);
384 	TAILQ_REMOVE(&hwqp->free_reqs, fc_req, link);
385 
386 	memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
387 	TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
388 	TAILQ_INIT(&fc_req->abort_cbs);
389 	return fc_req;
390 }
391 
392 static inline void
393 nvmf_fc_hwqp_free_fc_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_request *fc_req)
394 {
395 	if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
396 		/* Log an error for debug purpose. */
397 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
398 	}
399 
400 	/* set the magic to mark req as no longer valid. */
401 	fc_req->magic = 0xDEADBEEF;
402 
403 	TAILQ_REMOVE(&hwqp->in_use_reqs, fc_req, link);
404 	TAILQ_INSERT_HEAD(&hwqp->free_reqs, fc_req, link);
405 }
406 
407 static inline bool
408 nvmf_fc_req_in_get_buff(struct spdk_nvmf_fc_request *fc_req)
409 {
410 	switch (fc_req->state) {
411 	case SPDK_NVMF_FC_REQ_WRITE_BUFFS:
412 		return true;
413 	default:
414 		return false;
415 	}
416 }
417 
418 static void
419 nvmf_fc_request_free_buffers(struct spdk_nvmf_fc_request *fc_req,
420 			     struct spdk_nvmf_transport_poll_group *group,
421 			     struct spdk_nvmf_transport *transport,
422 			     uint32_t num_buffers)
423 {
424 	uint32_t i;
425 
426 	for (i = 0; i < num_buffers; i++) {
427 		if (group->buf_cache_count < group->buf_cache_size) {
428 			STAILQ_INSERT_HEAD(&group->buf_cache,
429 					   (struct spdk_nvmf_transport_pg_cache_buf *)fc_req->buffers[i],
430 					   link);
431 			group->buf_cache_count++;
432 		} else {
433 			spdk_mempool_put(transport->data_buf_pool, fc_req->buffers[i]);
434 		}
435 		fc_req->req.iov[i].iov_base = NULL;
436 		fc_req->buffers[i] = NULL;
437 	}
438 	fc_req->data_from_pool = false;
439 }
440 
/* Initialize the hwqp's receive-queue buffers via the low-level driver. */
void
spdk_nvmf_fc_init_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp)
{
	nvmf_fc_init_rqpair_buffers(hwqp);
}
446 
447 struct spdk_nvmf_fc_conn *
448 spdk_nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
449 {
450 	struct spdk_nvmf_fc_conn *fc_conn;
451 
452 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
453 		if (fc_conn->conn_id == conn_id) {
454 			return fc_conn;
455 		}
456 	}
457 
458 	return NULL;
459 }
460 
/*
 * Re-initialize the hwqp's queues against a new queue set (queues_curr).
 * Any queue-sync callbacks still pending on this hwqp are drained first;
 * their shared abts context is freed only once the last participating hwqp
 * has been accounted for.
 */
void
spdk_nvmf_fc_hwqp_reinit_poller_queues(struct spdk_nvmf_fc_hwqp *hwqp, void *queues_curr)
{
	struct spdk_nvmf_fc_abts_ctx *ctx;
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args = NULL, *tmp = NULL;

	/* Clean up any pending sync callbacks */
	TAILQ_FOREACH_SAFE(args, &hwqp->sync_cbs, link, tmp) {
		TAILQ_REMOVE(&hwqp->sync_cbs, args, link);
		ctx = args->cb_info.cb_data;
		if (ctx) {
			/* The ctx is shared across hwqps; free only when all responded. */
			if (++ctx->hwqps_responded == ctx->num_hwqps) {
				free(ctx->sync_poller_args);
				free(ctx->abts_poller_args);
				free(ctx);
			}
		}
	}

	nvmf_fc_reinit_q(hwqp->queues, queues_curr);
}
482 
/*
 * Initialize a hwqp for the given FC port: reset error counters, set up
 * poller queues, create the request pool (IO queues only; the LS queue does
 * not carry IO requests), and init the driver queue plus tracking lists.
 */
void
spdk_nvmf_fc_init_hwqp(struct spdk_nvmf_fc_port *fc_port, struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->fc_port = fc_port;

	/* clear counters */
	memset(&hwqp->counters, 0, sizeof(struct spdk_nvmf_fc_errors));

	spdk_nvmf_fc_init_poller_queues(hwqp);
	if (&fc_port->ls_queue != hwqp) {
		/* NOTE(review): -ENOMEM from nvmf_fc_create_req_mempool is ignored
		 * here (the callee logs it); confirm callers tolerate a hwqp with
		 * no request pool. */
		nvmf_fc_create_req_mempool(hwqp);
	}

	nvmf_fc_init_q(hwqp);
	TAILQ_INIT(&hwqp->connection_list);
	TAILQ_INIT(&hwqp->sync_cbs);
	TAILQ_INIT(&hwqp->ls_pending_queue);
}
501 
502 static struct spdk_nvmf_fc_poll_group *
503 nvmf_fc_get_idlest_poll_group(void)
504 {
505 	uint32_t max_count = UINT32_MAX;
506 	struct spdk_nvmf_fc_poll_group *fc_poll_group;
507 	struct spdk_nvmf_fc_poll_group *ret_fc_poll_group = NULL;
508 
509 	/* find poll group with least number of hwqp's assigned to it */
510 	TAILQ_FOREACH(fc_poll_group, &g_nvmf_fc_poll_groups, link) {
511 		if (fc_poll_group->hwqp_count < max_count) {
512 			ret_fc_poll_group = fc_poll_group;
513 			max_count = fc_poll_group->hwqp_count;
514 		}
515 	}
516 
517 	return ret_fc_poll_group;
518 }
519 
520 void
521 spdk_nvmf_fc_poll_group_add_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
522 {
523 	struct spdk_nvmf_fc_poll_group *fc_poll_group = NULL;
524 
525 	assert(hwqp);
526 	if (hwqp == NULL) {
527 		SPDK_ERRLOG("Error: hwqp is NULL\n");
528 		return;
529 	}
530 
531 	assert(g_nvmf_fc_poll_group_count);
532 
533 	fc_poll_group = nvmf_fc_get_idlest_poll_group();
534 	if (!fc_poll_group) {
535 		SPDK_ERRLOG("Could not assign poll group for hwqp (%d)\n", hwqp->hwqp_id);
536 		return;
537 	}
538 
539 	hwqp->thread = fc_poll_group->poll_group->thread;
540 	hwqp->fc_poll_group = fc_poll_group;
541 	fc_poll_group->hwqp_count++;
542 	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_HWQP, NULL);
543 }
544 
545 void
546 spdk_nvmf_fc_poll_group_remove_hwqp(struct spdk_nvmf_fc_hwqp *hwqp)
547 {
548 	assert(hwqp);
549 
550 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
551 		      "Remove hwqp from poller: for port: %d, hwqp: %d\n",
552 		      hwqp->fc_port->port_hdl, hwqp->hwqp_id);
553 
554 	if (!hwqp->fc_poll_group) {
555 		SPDK_ERRLOG("HWQP (%d) not assigned to poll group\n", hwqp->hwqp_id);
556 	} else {
557 		hwqp->fc_poll_group->hwqp_count--;
558 		spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_REMOVE_HWQP, NULL);
559 	}
560 }
561 
562 /*
563  * Note: This needs to be used only on master poller.
564  */
565 static uint64_t
566 nvmf_fc_get_abts_unique_id(void)
567 {
568 	static uint32_t u_id = 0;
569 
570 	return (uint64_t)(++u_id);
571 }
572 
/*
 * Poller API callback invoked as each hwqp completes a queue-sync request.
 * Once every participating hwqp has responded, the sync poller args are
 * freed, the shared ctx is reset (queue_synced = true), and the original
 * ABTS is re-sent to all pollers for a second lookup attempt.
 */
static void
nvmf_fc_queue_synced_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args, *poller_arg;

	ctx->hwqps_responded++;

	if (ctx->hwqps_responded < ctx->num_hwqps) {
		/* Wait for all pollers to complete. */
		return;
	}

	/* Free the queue sync poller args. */
	free(ctx->sync_poller_args);

	/* Mark as queue synced */
	ctx->queue_synced = true;

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;
	ctx->handled = false;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) completed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Resend ABTS to pollers */
	args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		poller_arg = args + i;
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}
}
609 
/*
 * Handle the case where no poller found the OX_ID named by an ABTS: request
 * a queue sync from every participating hwqp so in-flight frames drain, then
 * post a sync marker on the LS queue. nvmf_fc_queue_synced_cb() retries the
 * ABTS once all hwqps have synced.
 *
 * Returns 0 on success, -EPERM if the driver lacks queue-sync support,
 * -EINVAL on a NULL ctx, -ENOMEM on allocation failure.
 */
static int
nvmf_fc_handle_abts_notfound(struct spdk_nvmf_fc_abts_ctx *ctx)
{
	struct spdk_nvmf_fc_poller_api_queue_sync_args *args, *poller_arg;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *abts_args, *abts_poller_arg;

	/* check if FC driver supports queue sync */
	if (!nvmf_fc_q_sync_available()) {
		return -EPERM;
	}

	assert(ctx);
	if (!ctx) {
		SPDK_ERRLOG("NULL ctx pointer");
		return -EINVAL;
	}

	/* Reset the ctx values */
	ctx->hwqps_responded = 0;

	args = calloc(ctx->num_hwqps,
		      sizeof(struct spdk_nvmf_fc_poller_api_queue_sync_args));
	if (!args) {
		SPDK_ERRLOG("QueueSync(0x%lx) failed for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
			    ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
		return -ENOMEM;
	}
	ctx->sync_poller_args = args;

	/* One sync request per hwqp that received the original ABTS args. */
	abts_args = ctx->abts_poller_args;
	for (int i = 0; i < ctx->num_hwqps; i++) {
		abts_poller_arg = abts_args + i;
		poller_arg = args + i;
		poller_arg->u_id = ctx->u_id;
		poller_arg->hwqp = abts_poller_arg->hwqp;
		poller_arg->cb_info.cb_func = nvmf_fc_queue_synced_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();

		/* Send a Queue sync message to interested pollers */
		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_QUEUE_SYNC,
					     poller_arg);
	}

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "QueueSync(0x%lx) Sent for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);

	/* Post Marker to queue to track aborted request */
	nvmf_fc_issue_q_sync(ctx->ls_hwqp, ctx->u_id, ctx->fcp_rq_id);

	return 0;
}
664 
665 static void
666 nvmf_fc_abts_handled_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
667 {
668 	struct spdk_nvmf_fc_abts_ctx *ctx = cb_data;
669 	struct spdk_nvmf_fc_nport *nport  = NULL;
670 
671 	if (ret != SPDK_NVMF_FC_POLLER_API_OXID_NOT_FOUND) {
672 		ctx->handled = true;
673 	}
674 
675 	ctx->hwqps_responded++;
676 
677 	if (ctx->hwqps_responded < ctx->num_hwqps) {
678 		/* Wait for all pollers to complete. */
679 		return;
680 	}
681 
682 	nport = spdk_nvmf_fc_nport_find(ctx->port_hdl, ctx->nport_hdl);
683 
684 	if (ctx->nport != nport) {
685 		/* Nport can be deleted while this abort is being
686 		 * processed by the pollers.
687 		 */
688 		SPDK_NOTICELOG("nport_%d deleted while processing ABTS frame, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
689 			       ctx->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
690 	} else {
691 		if (!ctx->handled) {
692 			/* Try syncing the queues and try one more time */
693 			if (!ctx->queue_synced && (nvmf_fc_handle_abts_notfound(ctx) == 0)) {
694 				SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
695 					      "QueueSync(0x%lx) for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
696 					      ctx->u_id, ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
697 				return;
698 			} else {
699 				/* Send Reject */
700 				nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
701 						    ctx->oxid, ctx->rxid, ctx->rpi, true,
702 						    FCNVME_BLS_REJECT_EXP_INVALID_OXID, NULL, NULL);
703 			}
704 		} else {
705 			/* Send Accept */
706 			nvmf_fc_xmt_bls_rsp(&ctx->nport->fc_port->ls_queue,
707 					    ctx->oxid, ctx->rxid, ctx->rpi, false,
708 					    0, NULL, NULL);
709 		}
710 	}
711 	SPDK_NOTICELOG("BLS_%s sent for ABTS frame nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
712 		       (ctx->handled) ? "ACC" : "REJ", ctx->nport->nport_hdl, ctx->rpi, ctx->oxid, ctx->rxid);
713 
714 	free(ctx->abts_poller_args);
715 	free(ctx);
716 }
717 
/*
 * Entry point for a received ABTS frame. Collects the distinct set of hwqps
 * that carry connections for the given RPI, builds a shared abts ctx plus
 * one poller-arg per hwqp, and dispatches ABTS_RECEIVED to each poller
 * (results funnel into nvmf_fc_abts_handled_cb). If no connection matches,
 * or any allocation fails, a BLS reject is transmitted instead.
 */
void
spdk_nvmf_fc_handle_abts_frame(struct spdk_nvmf_fc_nport *nport, uint16_t rpi,
			       uint16_t oxid, uint16_t rxid)
{
	struct spdk_nvmf_fc_abts_ctx *ctx = NULL;
	struct spdk_nvmf_fc_poller_api_abts_recvd_args *args = NULL, *poller_arg;
	struct spdk_nvmf_fc_association *assoc = NULL;
	struct spdk_nvmf_fc_conn *conn = NULL;
	uint32_t hwqp_cnt = 0;
	bool skip_hwqp_cnt;
	struct spdk_nvmf_fc_hwqp **hwqps = NULL;
	uint32_t i;

	SPDK_NOTICELOG("Handle ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);

	/* Allocate memory to track hwqp's with at least 1 active connection. */
	hwqps = calloc(nport->fc_port->num_io_queues, sizeof(struct spdk_nvmf_fc_hwqp *));
	if (hwqps == NULL) {
		SPDK_ERRLOG("Unable to allocate temp. hwqp array for abts processing!\n");
		goto bls_rej;
	}

	/* Deduplicate hwqps: multiple connections with this RPI may share one. */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		TAILQ_FOREACH(conn, &assoc->fc_conns, assoc_link) {
			if (conn->rpi != rpi) {
				continue;
			}

			skip_hwqp_cnt = false;
			for (i = 0; i < hwqp_cnt; i++) {
				if (hwqps[i] == conn->hwqp) {
					/* Skip. This is already present */
					skip_hwqp_cnt = true;
					break;
				}
			}
			if (!skip_hwqp_cnt) {
				assert(hwqp_cnt < nport->fc_port->num_io_queues);
				hwqps[hwqp_cnt] = conn->hwqp;
				hwqp_cnt++;
			}
		}
	}

	if (!hwqp_cnt) {
		goto bls_rej;
	}

	args = calloc(hwqp_cnt,
		      sizeof(struct spdk_nvmf_fc_poller_api_abts_recvd_args));
	if (!args) {
		goto bls_rej;
	}

	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_abts_ctx));
	if (!ctx) {
		goto bls_rej;
	}
	ctx->rpi = rpi;
	ctx->oxid = oxid;
	ctx->rxid = rxid;
	ctx->nport = nport;
	ctx->nport_hdl = nport->nport_hdl;
	ctx->port_hdl = nport->fc_port->port_hdl;
	ctx->num_hwqps = hwqp_cnt;
	ctx->ls_hwqp = &nport->fc_port->ls_queue;
	ctx->fcp_rq_id = nport->fc_port->fcp_rq_id;
	ctx->abts_poller_args = args;

	/* Get a unique context for this ABTS */
	ctx->u_id = nvmf_fc_get_abts_unique_id();

	for (i = 0; i < hwqp_cnt; i++) {
		poller_arg = args + i;
		poller_arg->hwqp = hwqps[i];
		poller_arg->cb_info.cb_func = nvmf_fc_abts_handled_cb;
		poller_arg->cb_info.cb_data = ctx;
		poller_arg->cb_info.cb_thread = spdk_get_thread();
		poller_arg->ctx = ctx;

		spdk_nvmf_fc_poller_api_func(poller_arg->hwqp,
					     SPDK_NVMF_FC_POLLER_API_ABTS_RECEIVED,
					     poller_arg);
	}

	/* ctx and args are now owned by the completion path; drop the temp list. */
	free(hwqps);

	return;
bls_rej:
	free(args);
	free(hwqps);

	/* Send Reject */
	nvmf_fc_xmt_bls_rsp(&nport->fc_port->ls_queue, oxid, rxid, rpi,
			    true, FCNVME_BLS_REJECT_EXP_NOINFO, NULL, NULL);
	SPDK_NOTICELOG("BLS_RJT for ABTS frame for nport: %d, rpi: 0x%x, oxid: 0x%x, rxid: 0x%x\n",
		       nport->nport_hdl, rpi, oxid, rxid);
	return;
}
818 
819 /*** Accessor functions for the FC structures - BEGIN */
820 /*
821  * Returns true if the port is in offline state.
822  */
823 bool
824 spdk_nvmf_fc_port_is_offline(struct spdk_nvmf_fc_port *fc_port)
825 {
826 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE)) {
827 		return true;
828 	}
829 
830 	return false;
831 }
832 
833 /*
834  * Returns true if the port is in online state.
835  */
836 bool
837 spdk_nvmf_fc_port_is_online(struct spdk_nvmf_fc_port *fc_port)
838 {
839 	if (fc_port && (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE)) {
840 		return true;
841 	}
842 
843 	return false;
844 }
845 
846 int
847 spdk_nvmf_fc_port_set_online(struct spdk_nvmf_fc_port *fc_port)
848 {
849 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_ONLINE)) {
850 		fc_port->hw_port_status = SPDK_FC_PORT_ONLINE;
851 		return 0;
852 	}
853 
854 	return -EPERM;
855 }
856 
857 int
858 spdk_nvmf_fc_port_set_offline(struct spdk_nvmf_fc_port *fc_port)
859 {
860 	if (fc_port && (fc_port->hw_port_status != SPDK_FC_PORT_OFFLINE)) {
861 		fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
862 		return 0;
863 	}
864 
865 	return -EPERM;
866 }
867 
868 int
869 spdk_nvmf_fc_hwqp_set_online(struct spdk_nvmf_fc_hwqp *hwqp)
870 {
871 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_ONLINE)) {
872 		hwqp->state = SPDK_FC_HWQP_ONLINE;
873 		/* reset some queue counters */
874 		hwqp->num_conns = 0;
875 		return nvmf_fc_set_q_online_state(hwqp, true);
876 	}
877 
878 	return -EPERM;
879 }
880 
881 int
882 spdk_nvmf_fc_hwqp_set_offline(struct spdk_nvmf_fc_hwqp *hwqp)
883 {
884 	if (hwqp && (hwqp->state != SPDK_FC_HWQP_OFFLINE)) {
885 		hwqp->state = SPDK_FC_HWQP_OFFLINE;
886 		return nvmf_fc_set_q_online_state(hwqp, false);
887 	}
888 
889 	return -EPERM;
890 }
891 
/* Append the port to the global FC port list. */
void
spdk_nvmf_fc_port_add(struct spdk_nvmf_fc_port *fc_port)
{
	TAILQ_INSERT_TAIL(&g_spdk_nvmf_fc_port_list, fc_port, link);
}
897 
898 struct spdk_nvmf_fc_port *
899 spdk_nvmf_fc_port_lookup(uint8_t port_hdl)
900 {
901 	struct spdk_nvmf_fc_port *fc_port = NULL;
902 
903 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
904 		if (fc_port->port_hdl == port_hdl) {
905 			return fc_port;
906 		}
907 	}
908 	return NULL;
909 }
910 
911 static void
912 nvmf_fc_port_cleanup(void)
913 {
914 	struct spdk_nvmf_fc_port *fc_port, *tmp;
915 	struct spdk_nvmf_fc_hwqp *hwqp;
916 	uint32_t i;
917 
918 	TAILQ_FOREACH_SAFE(fc_port, &g_spdk_nvmf_fc_port_list, link, tmp) {
919 		TAILQ_REMOVE(&g_spdk_nvmf_fc_port_list,  fc_port, link);
920 		for (i = 0; i < fc_port->num_io_queues; i++) {
921 			hwqp = &fc_port->io_queues[i];
922 			if (hwqp->fc_reqs_buf) {
923 				free(hwqp->fc_reqs_buf);
924 			}
925 		}
926 		free(fc_port);
927 	}
928 }
929 
/* PRLI service parameters advertised by this target: discovery + target function. */
uint32_t
spdk_nvmf_fc_get_prli_service_params(void)
{
	return (SPDK_NVMF_FC_DISCOVERY_SERVICE | SPDK_NVMF_FC_TARGET_FUNCTION);
}
935 
936 int
937 spdk_nvmf_fc_port_add_nport(struct spdk_nvmf_fc_port *fc_port,
938 			    struct spdk_nvmf_fc_nport *nport)
939 {
940 	if (fc_port) {
941 		TAILQ_INSERT_TAIL(&fc_port->nport_list, nport, link);
942 		fc_port->num_nports++;
943 		return 0;
944 	}
945 
946 	return -EINVAL;
947 }
948 
949 int
950 spdk_nvmf_fc_port_remove_nport(struct spdk_nvmf_fc_port *fc_port,
951 			       struct spdk_nvmf_fc_nport *nport)
952 {
953 	if (fc_port && nport) {
954 		TAILQ_REMOVE(&fc_port->nport_list, nport, link);
955 		fc_port->num_nports--;
956 		return 0;
957 	}
958 
959 	return -EINVAL;
960 }
961 
962 static struct spdk_nvmf_fc_nport *
963 nvmf_fc_nport_hdl_lookup(struct spdk_nvmf_fc_port *fc_port, uint16_t nport_hdl)
964 {
965 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
966 
967 	TAILQ_FOREACH(fc_nport, &fc_port->nport_list, link) {
968 		if (fc_nport->nport_hdl == nport_hdl) {
969 			return fc_nport;
970 		}
971 	}
972 
973 	return NULL;
974 }
975 
976 struct spdk_nvmf_fc_nport *
977 spdk_nvmf_fc_nport_find(uint8_t port_hdl, uint16_t nport_hdl)
978 {
979 	struct spdk_nvmf_fc_port *fc_port = NULL;
980 
981 	fc_port = spdk_nvmf_fc_port_lookup(port_hdl);
982 	if (fc_port) {
983 		return nvmf_fc_nport_hdl_lookup(fc_port, nport_hdl);
984 	}
985 
986 	return NULL;
987 }
988 
989 static inline int
990 nvmf_fc_hwqp_find_nport_and_rport(struct spdk_nvmf_fc_hwqp *hwqp,
991 				  uint32_t d_id, struct spdk_nvmf_fc_nport **nport,
992 				  uint32_t s_id, struct spdk_nvmf_fc_remote_port_info **rport)
993 {
994 	struct spdk_nvmf_fc_nport *n_port;
995 	struct spdk_nvmf_fc_remote_port_info *r_port;
996 
997 	assert(hwqp);
998 	if (hwqp == NULL) {
999 		SPDK_ERRLOG("Error: hwqp is NULL\n");
1000 		return -EINVAL;
1001 	}
1002 	assert(nport);
1003 	if (nport == NULL) {
1004 		SPDK_ERRLOG("Error: nport is NULL\n");
1005 		return -EINVAL;
1006 	}
1007 	assert(rport);
1008 	if (rport == NULL) {
1009 		SPDK_ERRLOG("Error: rport is NULL\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	TAILQ_FOREACH(n_port, &hwqp->fc_port->nport_list, link) {
1014 		if (n_port->d_id == d_id) {
1015 			TAILQ_FOREACH(r_port, &n_port->rem_port_list, link) {
1016 				if (r_port->s_id == s_id) {
1017 					*nport = n_port;
1018 					*rport = r_port;
1019 					return 0;
1020 				}
1021 			}
1022 			break;
1023 		}
1024 	}
1025 
1026 	return -ENOENT;
1027 }
1028 
1029 /* Returns true if the Nport is empty of all rem_ports */
1030 bool
1031 spdk_nvmf_fc_nport_has_no_rport(struct spdk_nvmf_fc_nport *nport)
1032 {
1033 	if (nport && TAILQ_EMPTY(&nport->rem_port_list)) {
1034 		assert(nport->rport_count == 0);
1035 		return true;
1036 	} else {
1037 		return false;
1038 	}
1039 }
1040 
1041 int
1042 spdk_nvmf_fc_nport_set_state(struct spdk_nvmf_fc_nport *nport,
1043 			     enum spdk_nvmf_fc_object_state state)
1044 {
1045 	if (nport) {
1046 		nport->nport_state = state;
1047 		return 0;
1048 	} else {
1049 		return -EINVAL;
1050 	}
1051 }
1052 
/*
 * Attach a remote port to an nport's rem_port_list and bump rport_count.
 *
 * FIXME(review): the return type is bool but the body returns 0 / -EINVAL.
 * Converted to bool, success (0) becomes false and failure (-EINVAL)
 * becomes true — the inverse of what a boolean caller would expect.
 * The proper fix is to change the return type to int, which requires
 * updating the prototype in the header (outside this view) together with
 * spdk_nvmf_fc_nport_remove_rem_port().
 */
bool
spdk_nvmf_fc_nport_add_rem_port(struct spdk_nvmf_fc_nport *nport,
				struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_INSERT_TAIL(&nport->rem_port_list, rem_port, link);
		nport->rport_count++;
		return 0;
	} else {
		return -EINVAL;
	}
}
1065 
/*
 * Detach a remote port from an nport's rem_port_list and decrement
 * rport_count.
 *
 * FIXME(review): same defect as spdk_nvmf_fc_nport_add_rem_port() — the
 * return type is bool but the body returns 0 / -EINVAL, so the boolean
 * sense is inverted (failure reads as true). Fixing requires an int
 * return type in the header prototype (outside this view).
 */
bool
spdk_nvmf_fc_nport_remove_rem_port(struct spdk_nvmf_fc_nport *nport,
				   struct spdk_nvmf_fc_remote_port_info *rem_port)
{
	if (nport && rem_port) {
		TAILQ_REMOVE(&nport->rem_port_list, rem_port, link);
		nport->rport_count--;
		return 0;
	} else {
		return -EINVAL;
	}
}
1078 
1079 int
1080 spdk_nvmf_fc_rport_set_state(struct spdk_nvmf_fc_remote_port_info *rport,
1081 			     enum spdk_nvmf_fc_object_state state)
1082 {
1083 	if (rport) {
1084 		rport->rport_state = state;
1085 		return 0;
1086 	} else {
1087 		return -EINVAL;
1088 	}
1089 }
1090 int
1091 spdk_nvmf_fc_assoc_set_state(struct spdk_nvmf_fc_association *assoc,
1092 			     enum spdk_nvmf_fc_object_state state)
1093 {
1094 	if (assoc) {
1095 		assoc->assoc_state = state;
1096 		return 0;
1097 	} else {
1098 		return -EINVAL;
1099 	}
1100 }
1101 
1102 static struct spdk_nvmf_fc_association *
1103 spdk_nvmf_ctrlr_get_fc_assoc(struct spdk_nvmf_ctrlr *ctrlr)
1104 {
1105 	struct spdk_nvmf_qpair *qpair = ctrlr->admin_qpair;
1106 	struct spdk_nvmf_fc_conn *fc_conn;
1107 
1108 	if (!qpair) {
1109 		SPDK_ERRLOG("Controller %d has no associations\n", ctrlr->cntlid);
1110 		return NULL;
1111 	}
1112 
1113 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
1114 
1115 	return fc_conn->fc_assoc;
1116 }
1117 
1118 bool
1119 spdk_nvmf_ctrlr_is_on_nport(uint8_t port_hdl, uint16_t nport_hdl,
1120 			    struct spdk_nvmf_ctrlr *ctrlr)
1121 {
1122 	struct spdk_nvmf_fc_nport *fc_nport = NULL;
1123 	struct spdk_nvmf_fc_association *assoc = NULL;
1124 
1125 	if (!ctrlr) {
1126 		return false;
1127 	}
1128 
1129 	fc_nport = spdk_nvmf_fc_nport_find(port_hdl, nport_hdl);
1130 	if (!fc_nport) {
1131 		return false;
1132 	}
1133 
1134 	assoc = spdk_nvmf_ctrlr_get_fc_assoc(ctrlr);
1135 	if (assoc && assoc->tgtport == fc_nport) {
1136 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
1137 			      "Controller: %d corresponding to association: %p(%lu:%d) is on port: %d nport: %d\n",
1138 			      ctrlr->cntlid, assoc, assoc->assoc_id, assoc->assoc_state, port_hdl,
1139 			      nport_hdl);
1140 		return true;
1141 	}
1142 	return false;
1143 }
1144 
1145 static inline bool
1146 nvmf_fc_req_in_bdev(struct spdk_nvmf_fc_request *fc_req)
1147 {
1148 	switch (fc_req->state) {
1149 	case SPDK_NVMF_FC_REQ_READ_BDEV:
1150 	case SPDK_NVMF_FC_REQ_WRITE_BDEV:
1151 	case SPDK_NVMF_FC_REQ_NONE_BDEV:
1152 		return true;
1153 	default:
1154 		return false;
1155 	}
1156 }
1157 
1158 static inline bool
1159 nvmf_fc_req_in_pending(struct spdk_nvmf_fc_request *fc_req)
1160 {
1161 	struct spdk_nvmf_fc_request *tmp = NULL;
1162 
1163 	TAILQ_FOREACH(tmp, &fc_req->fc_conn->pending_queue, pending_link) {
1164 		if (tmp == fc_req) {
1165 			return true;
1166 		}
1167 	}
1168 	return false;
1169 }
1170 
/*
 * hwqp-thread message handler: abort a request that is currently with the
 * bdev layer. Only an outstanding AER is actually cancelled; all other
 * Admin/Fabric commands supported at this time are allowed to run to
 * completion (see the rationale below).
 */
static void
nvmf_fc_req_bdev_abort(void *arg1)
{
	struct spdk_nvmf_fc_request *fc_req = arg1;
	struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;

	/* Initial release - we don't have to abort Admin Queue or
	 * Fabric commands. The AQ commands supported at this time are
	 * Get-Log-Page,
	 * Identify
	 * Set Features
	 * Get Features
	 * AER -> Special case and handled differently.
	 * Every one of the above Admin commands (except AER) run
	 * to completion and so an Abort of such commands doesn't
	 * make sense.
	 */
	/* The Fabric commands supported are
	 * Property Set
	 * Property Get
	 * Connect -> Special case (async. handling). Not sure how to
	 * handle at this point. Let it run to completion.
	 */
	if (ctrlr->aer_req == &fc_req->req) {
		SPDK_NOTICELOG("Abort AER request\n");
		spdk_nvmf_qpair_free_aer(fc_req->req.qpair);
	}
}
1199 
1200 void
1201 spdk_nvmf_fc_request_abort_complete(void *arg1)
1202 {
1203 	struct spdk_nvmf_fc_request *fc_req =
1204 		(struct spdk_nvmf_fc_request *)arg1;
1205 	struct spdk_nvmf_fc_caller_ctx *ctx = NULL, *tmp = NULL;
1206 
1207 	/* Request abort completed. Notify all the callbacks */
1208 	TAILQ_FOREACH_SAFE(ctx, &fc_req->abort_cbs, link, tmp) {
1209 		/* Notify */
1210 		ctx->cb(fc_req->hwqp, 0, ctx->cb_args);
1211 		/* Remove */
1212 		TAILQ_REMOVE(&fc_req->abort_cbs, ctx, link);
1213 		/* free */
1214 		free(ctx);
1215 	}
1216 
1217 	SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1218 		       fc_req_state_strs[fc_req->state]);
1219 
1220 	spdk_nvmf_fc_request_free(fc_req);
1221 }
1222 
/*
 * Initiate the abort of an FC request.
 *
 * If @cb is provided it is queued on the request's abort_cbs list and
 * invoked from spdk_nvmf_fc_request_abort_complete() once the abort has
 * fully completed. The action taken depends on where the request
 * currently is: bdev layer, HBA data transfer, buffer allocation, or the
 * connection's pending queue. Calling this on an already-aborted request
 * only registers the additional callback.
 *
 * NOTE(review): if the callback context allocation fails, the function
 * returns without registering the callback AND without progressing the
 * abort — confirm this best-effort behavior is intended.
 */
void
spdk_nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
			   spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	struct spdk_nvmf_fc_caller_ctx *ctx = NULL;
	bool kill_req = false;

	/* Add the cb to list */
	if (cb) {
		ctx = calloc(1, sizeof(struct spdk_nvmf_fc_caller_ctx));
		if (!ctx) {
			SPDK_ERRLOG("%s: ctx alloc failed.\n", __func__);
			return;
		}
		ctx->cb = cb;
		ctx->cb_args = cb_args;

		TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
	}

	if (!fc_req->is_aborted) {
		/* Increment aborted command counter */
		fc_req->hwqp->counters.num_aborted++;
	}

	/* If port is dead, skip abort wqe */
	kill_req = spdk_nvmf_fc_is_port_dead(fc_req->hwqp);
	if (kill_req && spdk_nvmf_fc_req_in_xfer(fc_req)) {
		fc_req->is_aborted = true;
		goto complete;
	}

	/* Check if the request is already marked for deletion */
	if (fc_req->is_aborted) {
		return;
	}

	/* Mark request as aborted */
	fc_req->is_aborted = true;

	/* If xchg is allocated, then save if we need to send abts or not. */
	if (fc_req->xchg) {
		fc_req->xchg->send_abts = send_abts;
		fc_req->xchg->aborted	= true;
	}

	/* Dispatch on the request's current location in the pipeline. */
	if (fc_req->state == SPDK_NVMF_FC_REQ_BDEV_ABORTED) {
		/* Aborted by backend */
		goto complete;
	} else if (nvmf_fc_req_in_bdev(fc_req)) {
		/* Notify bdev */
		spdk_thread_send_msg(fc_req->hwqp->thread,
				     nvmf_fc_req_bdev_abort, (void *)fc_req);
	} else if (spdk_nvmf_fc_req_in_xfer(fc_req)) {
		/* Notify HBA to abort this exchange  */
		nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
	} else if (nvmf_fc_req_in_get_buff(fc_req)) {
		/* Will be completed by request_complete callback. */
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Abort req when getting buffers.\n");
	} else if (nvmf_fc_req_in_pending(fc_req)) {
		/* Remove from pending */
		TAILQ_REMOVE(&fc_req->fc_conn->pending_queue, fc_req, pending_link);
		goto complete;
	} else {
		/* Should never happen */
		SPDK_ERRLOG("%s: Request in invalid state\n", __func__);
		goto complete;
	}

	return;
complete:
	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
	spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
				     (void *)fc_req);
}
1298 
1299 static int
1300 nvmf_fc_request_get_buffers(struct spdk_nvmf_fc_request *fc_req,
1301 			    struct spdk_nvmf_transport_poll_group *group,
1302 			    struct spdk_nvmf_transport *transport,
1303 			    uint32_t num_buffers)
1304 {
1305 	uint32_t i = 0;
1306 
1307 	while (i < num_buffers) {
1308 		if (!(STAILQ_EMPTY(&group->buf_cache))) {
1309 			group->buf_cache_count--;
1310 			fc_req->buffers[i] = STAILQ_FIRST(&group->buf_cache);
1311 			STAILQ_REMOVE_HEAD(&group->buf_cache, link);
1312 			assert(fc_req->buffers[i] != NULL);
1313 			i++;
1314 		} else {
1315 			if (spdk_mempool_get_bulk(transport->data_buf_pool, &fc_req->buffers[i],
1316 						  num_buffers - i)) {
1317 				goto err_exit;
1318 			}
1319 			i += num_buffers - i;
1320 		}
1321 	}
1322 	return 0;
1323 
1324 err_exit:
1325 	nvmf_fc_request_free_buffers(fc_req, group, transport, i);
1326 	return -ENOMEM;
1327 }
1328 
1329 static void
1330 nvmf_fc_request_fill_buffers(struct spdk_nvmf_fc_request *fc_req,
1331 			     struct spdk_nvmf_transport *transport, uint32_t length)
1332 {
1333 	uint32_t i;
1334 
1335 	fc_req->req.iovcnt = 0;
1336 
1337 	while (length) {
1338 		i = fc_req->req.iovcnt;
1339 		fc_req->req.iov[i].iov_base = (void *)((uintptr_t)((char *)fc_req->buffers[i] +
1340 						       NVMF_DATA_BUFFER_MASK) &
1341 						       ~NVMF_DATA_BUFFER_MASK);
1342 		fc_req->req.iov[i].iov_len  = spdk_min(length, transport->opts.io_unit_size);
1343 		fc_req->req.iovcnt++;
1344 		length -= fc_req->req.iov[i].iov_len;
1345 	}
1346 	fc_req->data_from_pool = true;
1347 }
1348 
1349 static int
1350 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1351 {
1352 	uint32_t length = fc_req->req.length;
1353 	uint32_t num_buffers;
1354 	struct spdk_nvmf_fc_poll_group *fc_poll_group = fc_req->hwqp->fc_poll_group;
1355 	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
1356 	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;
1357 
1358 	num_buffers = SPDK_CEIL_DIV(length, transport->opts.io_unit_size);
1359 
1360 	if (nvmf_fc_request_get_buffers(fc_req, group, transport, num_buffers)) {
1361 		return -ENOMEM;
1362 	}
1363 
1364 	nvmf_fc_request_fill_buffers(fc_req, transport, length);
1365 
1366 	return 0;
1367 }
1368 
1369 static int
1370 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1371 {
1372 	/* Allocate an XCHG if we dont use send frame for this command. */
1373 	if (!spdk_nvmf_fc_use_send_frame(&fc_req->req)) {
1374 		fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1375 		if (!fc_req->xchg) {
1376 			fc_req->hwqp->counters.no_xchg++;
1377 			printf("NO XCHGs!\n");
1378 			goto pending;
1379 		}
1380 	}
1381 
1382 	if (fc_req->req.length) {
1383 		if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1384 			fc_req->hwqp->counters.buf_alloc_err++;
1385 			goto pending;
1386 		}
1387 		fc_req->req.data = fc_req->req.iov[0].iov_base;
1388 	}
1389 
1390 	if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1391 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "WRITE CMD.\n");
1392 
1393 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1394 
1395 		if (nvmf_fc_recv_data(fc_req)) {
1396 			/* Dropped return success to caller */
1397 			fc_req->hwqp->counters.unexpected_err++;
1398 			spdk_nvmf_fc_request_free(fc_req);
1399 		}
1400 	} else {
1401 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "READ/NONE CMD\n");
1402 
1403 		if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1404 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1405 		} else {
1406 			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1407 		}
1408 		spdk_nvmf_request_exec(&fc_req->req);
1409 	}
1410 
1411 	return 0;
1412 
1413 pending:
1414 	if (fc_req->xchg) {
1415 		nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1416 		fc_req->xchg = NULL;
1417 	}
1418 
1419 	spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
1420 
1421 	return -EAGAIN;
1422 }
1423 
/*
 * Parse and validate an NVMe command IU received on an hwqp, build an
 * spdk_nvmf_fc_request for it and start execution.
 *
 * Returns 0 when the request was either started or queued on the
 * connection's pending queue; a negative errno when the frame is dropped
 * (malformed IU, unsupported transfer direction, unknown connection,
 * association being deleted, length above MDTS, or request-pool
 * exhaustion).
 */
static int
nvmf_fc_hwqp_handle_request(struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_frame_hdr *frame,
			    uint32_t buf_idx, struct spdk_nvmf_fc_buffer_desc *buffer, uint32_t plen)
{
	uint16_t cmnd_len;
	uint64_t rqst_conn_id;
	struct spdk_nvmf_fc_request *fc_req = NULL;
	struct spdk_nvmf_fc_cmnd_iu *cmd_iu = NULL;
	struct spdk_nvmf_fc_conn *fc_conn = NULL;
	enum spdk_nvme_data_transfer xfer;

	cmd_iu = buffer->virt;
	/* cmnd_iu_len is big-endian; copy then convert since from_be16()
	 * needs an address. */
	cmnd_len = cmd_iu->cmnd_iu_len;
	cmnd_len = from_be16(&cmnd_len);

	/* check for a valid cmnd_iu format */
	if ((cmd_iu->fc_id != FCNVME_CMND_IU_FC_ID) ||
	    (cmd_iu->scsi_id != FCNVME_CMND_IU_SCSI_ID) ||
	    (cmnd_len != sizeof(struct spdk_nvmf_fc_cmnd_iu) / 4)) {
		SPDK_ERRLOG("IU CMD error\n");
		hwqp->counters.nvme_cmd_iu_err++;
		return -ENXIO;
	}

	/* Bidirectional transfers are not supported. */
	xfer = spdk_nvme_opc_get_data_transfer(cmd_iu->flags);
	if (xfer == SPDK_NVME_DATA_BIDIRECTIONAL) {
		SPDK_ERRLOG("IU CMD xfer error\n");
		hwqp->counters.nvme_cmd_xfer_err++;
		return -EPERM;
	}

	rqst_conn_id = from_be64(&cmd_iu->conn_id);

	/* Check if conn id is valid */
	fc_conn = spdk_nvmf_fc_hwqp_find_fc_conn(hwqp, rqst_conn_id);
	if (!fc_conn) {
		SPDK_ERRLOG("IU CMD conn(%ld) invalid\n", rqst_conn_id);
		hwqp->counters.invalid_conn_err++;
		return -ENODEV;
	}

	/* If association/connection is being deleted - return */
	if (fc_conn->fc_assoc->assoc_state !=  SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("Association state not valid\n");
		return -EACCES;
	}

	if (fc_conn->qpair.state == SPDK_NVMF_QPAIR_ERROR) {
		return -EACCES;
	}

	/* Make sure xfer len is according to mdts */
	if (from_be32(&cmd_iu->data_len) >
	    hwqp->fc_poll_group->fc_transport->transport.opts.max_io_size) {
		SPDK_ERRLOG("IO length requested is greater than MDTS\n");
		return -EINVAL;
	}

	/* allocate a request buffer */
	fc_req = nvmf_fc_hwqp_alloc_fc_request(hwqp);
	if (fc_req == NULL) {
		/* Should not happen. Since fc_reqs == RQ buffers */
		return -ENOMEM;
	}

	fc_req->req.length = from_be32(&cmd_iu->data_len);
	fc_req->req.qpair = &fc_conn->qpair;
	fc_req->req.cmd = (union nvmf_h2c_msg *)&cmd_iu->cmd;
	fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
	/* ox_id / s_id / d_id arrive big-endian; convert via a round-trip
	 * through the destination field because from_be*() takes an address. */
	fc_req->oxid = frame->ox_id;
	fc_req->oxid = from_be16(&fc_req->oxid);
	fc_req->rpi = fc_conn->rpi;
	fc_req->buf_index = buf_idx;
	fc_req->poller_lcore = hwqp->lcore_id;
	fc_req->poller_thread = hwqp->thread;
	fc_req->hwqp = hwqp;
	fc_req->fc_conn = fc_conn;
	fc_req->req.xfer = xfer;
	fc_req->s_id = (uint32_t)frame->s_id;
	fc_req->d_id = (uint32_t)frame->d_id;
	fc_req->s_id = from_be32(&fc_req->s_id) >> 8;
	fc_req->d_id = from_be32(&fc_req->d_id) >> 8;

	nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
	if (nvmf_fc_request_execute(fc_req)) {
		/* Resources exhausted; retried later from the pending queue. */
		TAILQ_INSERT_TAIL(&fc_conn->pending_queue, fc_req, pending_link);
	}

	return 0;
}
1514 
1515 /*
1516  * These functions are called from the FC LLD
1517  */
1518 
1519 void
1520 spdk_nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1521 {
1522 	struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1523 	struct spdk_nvmf_fc_poll_group *fc_poll_group = hwqp->fc_poll_group;
1524 	struct spdk_nvmf_transport_poll_group *group = &fc_poll_group->tp_poll_group;
1525 	struct spdk_nvmf_transport *transport = &fc_poll_group->fc_transport->transport;
1526 
1527 	if (!fc_req) {
1528 		return;
1529 	}
1530 
1531 	if (fc_req->xchg) {
1532 		nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1533 		fc_req->xchg = NULL;
1534 	}
1535 
1536 	/* Release IO buffers */
1537 	if (fc_req->data_from_pool) {
1538 		nvmf_fc_request_free_buffers(fc_req, group, transport, fc_req->req.iovcnt);
1539 	}
1540 	fc_req->req.data = NULL;
1541 	fc_req->req.iovcnt  = 0;
1542 
1543 	/* Release Q buffer */
1544 	nvmf_fc_rqpair_buffer_release(hwqp, fc_req->buf_index);
1545 
1546 	/* Free Fc request */
1547 	nvmf_fc_hwqp_free_fc_request(hwqp, fc_req);
1548 }
1549 
/*
 * Transition an FC request to a new state, recording a trace point and
 * logging the old/new state names at debug level.
 */
void
spdk_nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
			       enum spdk_nvmf_fc_request_state state)
{
	/* Guard against touching a request whose magic marks it freed. */
	assert(fc_req->magic != 0xDEADBEEF);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
		      spdk_nvmf_fc_request_get_state_str(fc_req->state),
		      spdk_nvmf_fc_request_get_state_str(state));
	nvmf_fc_record_req_trace_point(fc_req, state);
	fc_req->state = state;
}
1563 
1564 char *
1565 spdk_nvmf_fc_request_get_state_str(int state)
1566 {
1567 	static char *unk_str = "unknown";
1568 
1569 	return (state >= 0 && state < (int)(sizeof(fc_req_state_strs) / sizeof(char *)) ?
1570 		fc_req_state_strs[state] : unk_str);
1571 }
1572 
/*
 * Top-level dispatcher for a received FC frame on an hwqp.
 *
 * Resolves the (d_id, s_id) pair to an nport/rport, verifies both are in
 * the CREATED state, then routes the frame: LS requests are built in
 * place inside the RQ buffer and handed to the LS module (or queued if no
 * exchange is available); NVMe command frames go to
 * nvmf_fc_hwqp_handle_request(); anything else is counted and dropped.
 *
 * Returns 0 on success, a negative errno when the frame is dropped.
 */
int
spdk_nvmf_fc_hwqp_process_frame(struct spdk_nvmf_fc_hwqp *hwqp,
				uint32_t buff_idx,
				struct spdk_nvmf_fc_frame_hdr *frame,
				struct spdk_nvmf_fc_buffer_desc *buffer,
				uint32_t plen)
{
	int rc = 0;
	uint32_t s_id, d_id;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	/* s_id/d_id are 24-bit big-endian fields; shift out the pad byte. */
	s_id = (uint32_t)frame->s_id;
	d_id = (uint32_t)frame->d_id;
	s_id = from_be32(&s_id) >> 8;
	d_id = from_be32(&d_id) >> 8;

	/* Note: In tracelog below, we directly do endian conversion on rx_id and.
	 * ox_id Since these are fields, we can't pass address to from_be16().
	 * Since ox_id and rx_id are only needed for tracelog, assigning to local
	 * vars. and doing conversion is a waste of time in non-debug builds. */
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC,
		      "Process NVME frame s_id:0x%x d_id:0x%x oxid:0x%x rxid:0x%x.\n",
		      s_id, d_id,
		      ((frame->ox_id << 8) & 0xff00) | ((frame->ox_id >> 8) & 0xff),
		      ((frame->rx_id << 8) & 0xff00) | ((frame->rx_id >> 8) & 0xff));

	rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, d_id, &nport, s_id, &rport);
	if (rc) {
		/* nport/rport stay NULL when not found, which tells us which
		 * lookup failed. */
		if (nport == NULL) {
			SPDK_ERRLOG("%s: Nport not found. Dropping\n", __func__);
			/* increment invalid nport counter */
			hwqp->counters.nport_invalid++;
		} else if (rport == NULL) {
			SPDK_ERRLOG("%s: Rport not found. Dropping\n", __func__);
			/* increment invalid rport counter */
			hwqp->counters.rport_invalid++;
		}
		return rc;
	}

	if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
	    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
		SPDK_ERRLOG("%s: %s state not created. Dropping\n", __func__,
			    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
			    "Nport" : "Rport");
		return -EACCES;
	}

	if ((frame->r_ctl == FCNVME_R_CTL_LS_REQUEST) &&
	    (frame->type == FCNVME_TYPE_NVMF_DATA)) {
		struct spdk_nvmf_fc_rq_buf_ls_request *req_buf = buffer->virt;
		struct spdk_nvmf_fc_ls_rqst *ls_rqst;

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process LS NVME frame\n");

		/* Use the RQ buffer for holding LS request. */
		ls_rqst = (struct spdk_nvmf_fc_ls_rqst *)&req_buf->ls_rqst;

		/* Fill in the LS request structure */
		ls_rqst->rqstbuf.virt = (void *)&req_buf->rqst;
		ls_rqst->rqstbuf.phys = buffer->phys +
					offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, rqst);
		ls_rqst->rqstbuf.buf_index = buff_idx;
		ls_rqst->rqst_len = plen;

		ls_rqst->rspbuf.virt = (void *)&req_buf->resp;
		ls_rqst->rspbuf.phys = buffer->phys +
				       offsetof(struct spdk_nvmf_fc_rq_buf_ls_request, resp);
		ls_rqst->rsp_len = FCNVME_MAX_LS_RSP_SIZE;

		ls_rqst->private_data = (void *)hwqp;
		ls_rqst->rpi = rport->rpi;
		ls_rqst->oxid = (uint16_t)frame->ox_id;
		ls_rqst->oxid = from_be16(&ls_rqst->oxid);
		ls_rqst->s_id = s_id;
		ls_rqst->d_id = d_id;
		ls_rqst->nport = nport;
		ls_rqst->rport = rport;
		ls_rqst->nvmf_tgt = g_nvmf_fc_transport->transport.tgt;

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No XCHG available. Add to pending list. */
			hwqp->counters.no_xchg++;
			TAILQ_INSERT_TAIL(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
		}
	} else if ((frame->r_ctl == FCNVME_R_CTL_CMD_REQ) &&
		   (frame->type == FCNVME_TYPE_FC_EXCHANGE)) {

		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Process IO NVME frame\n");
		rc = nvmf_fc_hwqp_handle_request(hwqp, frame, buff_idx, buffer, plen);
	} else {

		SPDK_ERRLOG("%s Unknown frame received. Dropping\n", __func__);
		hwqp->counters.unknown_frame++;
		rc = -EINVAL;
	}

	return rc;
}
1677 
1678 void
1679 spdk_nvmf_fc_hwqp_process_pending_reqs(struct spdk_nvmf_fc_hwqp *hwqp)
1680 {
1681 	struct spdk_nvmf_fc_conn *fc_conn = NULL;
1682 	struct spdk_nvmf_fc_request *fc_req = NULL, *tmp;
1683 	int budget = 64;
1684 
1685 	TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
1686 		TAILQ_FOREACH_SAFE(fc_req, &fc_conn->pending_queue, pending_link, tmp) {
1687 			if (!nvmf_fc_request_execute(fc_req)) {
1688 				/* Succesfuly posted, Delete from pending. */
1689 				TAILQ_REMOVE(&fc_conn->pending_queue, fc_req, pending_link);
1690 			}
1691 
1692 			if (budget) {
1693 				budget--;
1694 			} else {
1695 				return;
1696 			}
1697 		}
1698 	}
1699 }
1700 
/*
 * Retry LS requests that were queued because no exchange (XCHG) was
 * available. The nport/rport are re-validated for each entry since they
 * may have gone away while the request waited; stale entries are dropped
 * and their RQ buffer returned to the chip. Processing stops as soon as
 * the exchange pool is empty again.
 */
void
spdk_nvmf_fc_hwqp_process_pending_ls_rqsts(struct spdk_nvmf_fc_hwqp *hwqp)
{
	struct spdk_nvmf_fc_ls_rqst *ls_rqst = NULL, *tmp;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;

	TAILQ_FOREACH_SAFE(ls_rqst, &hwqp->ls_pending_queue, ls_pending_link, tmp) {
		/* lookup nport and rport again - make sure they are still valid */
		int rc = nvmf_fc_hwqp_find_nport_and_rport(hwqp, ls_rqst->d_id, &nport, ls_rqst->s_id, &rport);
		if (rc) {
			if (nport == NULL) {
				SPDK_ERRLOG("%s: Nport not found. Dropping\n", __func__);
				/* increment invalid nport counter */
				hwqp->counters.nport_invalid++;
			} else if (rport == NULL) {
				SPDK_ERRLOG("%s: Rport not found. Dropping\n", __func__);
				/* increment invalid rport counter */
				hwqp->counters.rport_invalid++;
			}
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}
		if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ||
		    rport->rport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
			SPDK_ERRLOG("%s: %s state not created. Dropping\n", __func__,
				    nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED ?
				    "Nport" : "Rport");
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Return buffer to chip */
			nvmf_fc_rqpair_buffer_release(hwqp, ls_rqst->rqstbuf.buf_index);
			continue;
		}

		ls_rqst->xchg = nvmf_fc_get_xri(hwqp);
		if (ls_rqst->xchg) {
			/* Got an XCHG */
			TAILQ_REMOVE(&hwqp->ls_pending_queue, ls_rqst, ls_pending_link);
			/* Handover the request to LS module */
			spdk_nvmf_fc_handle_ls_rqst(ls_rqst);
		} else {
			/* No more XCHGs. Stop processing. */
			hwqp->counters.no_xchg++;
			return;
		}
	}
}
1750 
1751 int
1752 spdk_nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1753 {
1754 	int rc = 0;
1755 	struct spdk_nvmf_request *req = &fc_req->req;
1756 	struct spdk_nvmf_qpair *qpair = req->qpair;
1757 	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
1758 	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1759 	uint16_t ersp_len = 0;
1760 
1761 	/* set sq head value in resp */
1762 	rsp->sqhd = spdk_nvmf_fc_advance_conn_sqhead(qpair);
1763 
1764 	/* Increment connection responses */
1765 	fc_conn->rsp_count++;
1766 
1767 	if (spdk_nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1768 					    fc_req->transfered_len)) {
1769 		/* Fill ERSP Len */
1770 		to_be16(&ersp_len, (sizeof(struct spdk_nvmf_fc_ersp_iu) /
1771 				    sizeof(uint32_t)));
1772 		fc_req->ersp.ersp_len = ersp_len;
1773 
1774 		/* Fill RSN */
1775 		to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1776 		fc_conn->rsn++;
1777 
1778 		/* Fill transfer length */
1779 		to_be32(&fc_req->ersp.transferred_data_len, fc_req->transfered_len);
1780 
1781 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting ERSP.\n");
1782 		rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1783 				     sizeof(struct spdk_nvmf_fc_ersp_iu));
1784 	} else {
1785 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC, "Posting RSP.\n");
1786 		rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
1787 	}
1788 
1789 	return rc;
1790 }
1791 
/*
 * Transmit an LS response on the target port; thin wrapper over the
 * low-level driver's nvmf_fc_xmt_ls_rsp(). Returns its result.
 */
int
spdk_nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *tgtport,
			struct spdk_nvmf_fc_ls_rqst *ls_rqst)
{
	return nvmf_fc_xmt_ls_rsp(tgtport, ls_rqst);
}
1798 
/*
 * Transmit a send-request/receive-response (SRSR) exchange on the hwqp;
 * thin wrapper over the low-level driver's nvmf_fc_xmt_srsr_req().
 * The callback is invoked by the LLD on completion. Returns its result.
 */
int
spdk_nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
			  struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
			  spdk_nvmf_fc_caller_cb cb, void *cb_args)
{
	return nvmf_fc_xmt_srsr_req(hwqp, srsr_bufs, cb, cb_args);
}
1806 
/*
 * Decide whether a full Extended Response (ERSP) IU must be sent for
 * this completion instead of the lightweight response. See the numbered
 * criteria below.
 */
bool
spdk_nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
				uint32_t rsp_cnt, uint32_t xfer_len)
{
	struct spdk_nvmf_request *req = &fc_req->req;
	struct spdk_nvmf_qpair *qpair = req->qpair;
	struct spdk_nvmf_fc_conn *fc_conn = spdk_nvmf_fc_get_conn(qpair);
	struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
	/* Read the completion status field as its raw 16-bit encoding. */
	uint16_t status = *((uint16_t *)&rsp->status);

	/*
	 * Check if we need to send ERSP
	 * 1) For every N responses where N == ersp_ratio
	 * 2) Fabric commands.
	 * 3) Completion status failed or Completion dw0 or dw1 valid.
	 * 4) SQ == 90% full.
	 * 5) Transfer length not equal to CMD IU length
	 */

	/* 0xFFFE masks off bit 0 of the raw status word — presumably the
	 * phase tag, so only genuine error/status bits trigger an ERSP.
	 * NOTE(review): confirm against struct spdk_nvme_status bit layout. */
	if (!(rsp_cnt % fc_conn->esrp_ratio) ||
	    (cmd->opc == SPDK_NVME_OPC_FABRIC) ||
	    (status & 0xFFFE) || rsp->cdw0 || rsp->rsvd1 ||
	    (req->length != xfer_len)) {
		return true;
	}
	return false;
}
1835 
/*
 * Dump the LS queue and every IO queue of an FC port into dump_info;
 * thin wrapper over the low-level driver's nvmf_fc_dump_all_queues().
 */
void
spdk_nvmf_fc_dump_all_queues(struct spdk_nvmf_fc_port *fc_port,
			     struct spdk_nvmf_fc_queue_dump_info *dump_info)
{
	nvmf_fc_dump_all_queues(&fc_port->ls_queue, fc_port->io_queues,
				fc_port->num_io_queues, dump_info);
}
1843 
/*
 * Transport completion callback invoked by the NVMf layer when command
 * processing finishes.
 *
 * Routing: an aborted request is handed to the poller for deferred abort
 * completion; a successful controller-to-host command starts the read
 * data transfer; everything else transmits its response IU. Always
 * returns 0 — on a transmit error the request is freed here instead.
 */
static int
nvmf_fc_request_complete(struct spdk_nvmf_request *req)
{
	int rc = 0;
	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
	struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;

	if (fc_req->is_aborted) {
		/* Defer this to make sure we dont call io cleanup in same context. */
		spdk_nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
					     (void *)fc_req);
	} else if (rsp->status.sc == SPDK_NVME_SC_SUCCESS &&
		   req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {

		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);

		rc = nvmf_fc_send_data(fc_req);
	} else {
		/* Pick the response-phase state matching the transfer direction. */
		if (req->xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
		} else if (req->xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
		} else {
			spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
		}

		rc = spdk_nvmf_fc_handle_rsp(fc_req);
	}

	if (rc) {
		SPDK_ERRLOG("Error in request complete.\n");
		spdk_nvmf_fc_request_free(fc_req);
	}
	return 0;
}
1879 
1880 struct spdk_nvmf_tgt *
1881 spdk_nvmf_fc_get_tgt(void)
1882 {
1883 	if (g_nvmf_fc_transport) {
1884 		return g_nvmf_fc_transport->transport.tgt;
1885 	}
1886 	return NULL;
1887 }
1888 
1889 /*
1890  * FC Transport Public API begins here
1891  */
1892 
1893 #define SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH 128
1894 #define SPDK_NVMF_FC_DEFAULT_AQ_DEPTH 32
1895 #define SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR 5
1896 #define SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE 0
1897 #define SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE 65536
1898 #define SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE 4096
1899 #define SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS 8192
1900 #define SPDK_NVMF_FC_DEFAULT_MAX_SGE (SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE /	\
1901 				      SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE)
1902 
1903 static void
1904 nvmf_fc_opts_init(struct spdk_nvmf_transport_opts *opts)
1905 {
1906 	opts->max_queue_depth =      SPDK_NVMF_FC_DEFAULT_MAX_QUEUE_DEPTH;
1907 	opts->max_qpairs_per_ctrlr = SPDK_NVMF_FC_DEFAULT_MAX_QPAIRS_PER_CTRLR;
1908 	opts->in_capsule_data_size = SPDK_NVMF_FC_DEFAULT_IN_CAPSULE_DATA_SIZE;
1909 	opts->max_io_size =          SPDK_NVMF_FC_DEFAULT_MAX_IO_SIZE;
1910 	opts->io_unit_size =         SPDK_NVMF_FC_DEFAULT_IO_UNIT_SIZE;
1911 	opts->max_aq_depth =         SPDK_NVMF_FC_DEFAULT_AQ_DEPTH;
1912 	opts->num_shared_buffers =   SPDK_NVMF_FC_DEFAULT_NUM_SHARED_BUFFERS;
1913 }
1914 
/*
 * Create the (singleton) FC transport.
 *
 * Validates the option set (SGE count derived from max_io_size /
 * io_unit_size, at least two cores available), allocates the global
 * g_nvmf_fc_transport, records the master thread, and initializes the
 * low-level FC driver. Returns the embedded generic transport, or NULL
 * on any failure (duplicate create, insufficient cores, bad opts,
 * allocation or mutex-init failure).
 */
static struct spdk_nvmf_transport *
nvmf_fc_create(struct spdk_nvmf_transport_opts *opts)
{
	uint32_t sge_count;

	SPDK_INFOLOG(SPDK_LOG_NVMF_FC, "*** FC Transport Init ***\n"
		     "  Transport opts:  max_ioq_depth=%d, max_io_size=%d,\n"
		     "  max_qpairs_per_ctrlr=%d, io_unit_size=%d,\n"
		     "  max_aq_depth=%d\n",
		     opts->max_queue_depth,
		     opts->max_io_size,
		     opts->max_qpairs_per_ctrlr,
		     opts->io_unit_size,
		     opts->max_aq_depth);

	/* The transport is a process-wide singleton. */
	if (g_nvmf_fc_transport) {
		SPDK_ERRLOG("Duplicate NVMF-FC transport create request!\n");
		return NULL;
	}

	if (spdk_env_get_last_core() < 1) {
		SPDK_ERRLOG("Not enough cores/threads (%d) to run NVMF-FC transport!\n",
			    spdk_env_get_last_core() + 1);
		return NULL;
	}

	sge_count = opts->max_io_size / opts->io_unit_size;
	if (sge_count > SPDK_NVMF_FC_DEFAULT_MAX_SGE) {
		SPDK_ERRLOG("Unsupported IO Unit size specified, %d bytes\n", opts->io_unit_size);
		return NULL;
	}

	g_nvmf_fc_master_thread = spdk_get_thread();
	g_nvmf_fc_poll_group_count = 0;
	g_nvmf_fc_transport = calloc(1, sizeof(*g_nvmf_fc_transport));

	if (!g_nvmf_fc_transport) {
		SPDK_ERRLOG("Failed to allocate NVMF-FC transport\n");
		return NULL;
	}

	if (pthread_mutex_init(&g_nvmf_fc_transport->lock, NULL)) {
		SPDK_ERRLOG("pthread_mutex_init() failed\n");
		free(g_nvmf_fc_transport);
		g_nvmf_fc_transport = NULL;
		return NULL;
	}

	/* initialize the low level FC driver */
	nvmf_fc_lld_init();

	return &g_nvmf_fc_transport->transport;
}
1968 
1969 static int
1970 nvmf_fc_destroy(struct spdk_nvmf_transport *transport)
1971 {
1972 	if (transport) {
1973 		struct spdk_nvmf_fc_transport *fc_transport;
1974 		struct spdk_nvmf_fc_poll_group *fc_poll_group, *pg_tmp;
1975 
1976 		fc_transport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
1977 
1978 		free(fc_transport);
1979 
1980 		/* clean up any FC poll groups still around */
1981 		TAILQ_FOREACH_SAFE(fc_poll_group, &g_nvmf_fc_poll_groups, link, pg_tmp) {
1982 			TAILQ_REMOVE(&g_nvmf_fc_poll_groups, fc_poll_group, link);
1983 			free(fc_poll_group);
1984 		}
1985 		g_nvmf_fc_poll_group_count = 0;
1986 
1987 		/* low level FC driver clean up */
1988 		nvmf_fc_lld_fini();
1989 
1990 		nvmf_fc_port_cleanup();
1991 	}
1992 
1993 	return 0;
1994 }
1995 
/* No-op at this layer: nothing is set up here for an FC listen address.
 * Always reports success. */
static int
nvmf_fc_listen(struct spdk_nvmf_transport *transport,
	       const struct spdk_nvme_transport_id *trid)
{
	(void)transport;
	(void)trid;
	return 0;
}
2002 
/* No-op at this layer: nothing is torn down here when a listen address is
 * removed. Always reports success. */
static int
nvmf_fc_stop_listen(struct spdk_nvmf_transport *transport,
		    const struct spdk_nvme_transport_id *_trid)
{
	(void)transport;
	(void)_trid;
	return 0;
}
2009 
2010 static void
2011 nvmf_fc_accept(struct spdk_nvmf_transport *transport, new_qpair_fn cb_fn)
2012 {
2013 	struct spdk_nvmf_fc_port *fc_port = NULL;
2014 	static bool start_lld = false;
2015 
2016 	if (spdk_unlikely(!start_lld)) {
2017 		start_lld  = true;
2018 		nvmf_fc_lld_start();
2019 	}
2020 
2021 	/* poll the LS queue on each port */
2022 	TAILQ_FOREACH(fc_port, &g_spdk_nvmf_fc_port_list, link) {
2023 		if (fc_port->hw_port_status == SPDK_FC_PORT_ONLINE) {
2024 			fc_port->new_qp_cb = cb_fn;
2025 			nvmf_fc_process_queue(&fc_port->ls_queue);
2026 		}
2027 	}
2028 }
2029 
2030 static void
2031 nvmf_fc_discover(struct spdk_nvmf_transport *transport,
2032 		 struct spdk_nvme_transport_id *trid,
2033 		 struct spdk_nvmf_discovery_log_page_entry *entry)
2034 {
2035 	entry->trtype = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC;
2036 	entry->adrfam = trid->adrfam;
2037 	entry->treq.secure_channel = SPDK_NVMF_TREQ_SECURE_CHANNEL_NOT_SPECIFIED;
2038 
2039 	spdk_strcpy_pad(entry->trsvcid, trid->trsvcid, sizeof(entry->trsvcid), ' ');
2040 	spdk_strcpy_pad(entry->traddr, trid->traddr, sizeof(entry->traddr), ' ');
2041 }
2042 
2043 static struct spdk_nvmf_transport_poll_group *
2044 nvmf_fc_poll_group_create(struct spdk_nvmf_transport *transport)
2045 {
2046 	struct spdk_nvmf_fc_poll_group *fc_poll_group;
2047 	struct spdk_io_channel *ch;
2048 	struct spdk_nvmf_fc_transport *fc_transport =
2049 		SPDK_CONTAINEROF(transport, struct spdk_nvmf_fc_transport, transport);
2050 
2051 	fc_poll_group = calloc(1, sizeof(struct spdk_nvmf_fc_poll_group));
2052 	if (!fc_poll_group) {
2053 		SPDK_ERRLOG("Unable to alloc FC poll group\n");
2054 		return NULL;
2055 	}
2056 
2057 	TAILQ_INIT(&fc_poll_group->hwqp_list);
2058 	fc_poll_group->fc_transport = fc_transport;
2059 
2060 	pthread_mutex_lock(&fc_transport->lock);
2061 	TAILQ_INSERT_TAIL(&g_nvmf_fc_poll_groups, fc_poll_group, link);
2062 	g_nvmf_fc_poll_group_count++;
2063 	pthread_mutex_unlock(&fc_transport->lock);
2064 
2065 	ch = spdk_get_io_channel(g_nvmf_fc_transport->transport.tgt);
2066 	if (ch) {
2067 		fc_poll_group->poll_group = spdk_io_channel_get_ctx(ch);
2068 		spdk_put_io_channel(ch);
2069 	}
2070 
2071 	return &fc_poll_group->tp_poll_group;
2072 }
2073 
2074 static void
2075 nvmf_fc_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
2076 {
2077 	struct spdk_nvmf_fc_poll_group *fc_poll_group;
2078 
2079 	fc_poll_group = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, tp_poll_group);
2080 	pthread_mutex_lock(&fc_poll_group->fc_transport->lock);
2081 	TAILQ_REMOVE(&g_nvmf_fc_poll_groups, fc_poll_group, link);
2082 	g_nvmf_fc_poll_group_count--;
2083 	pthread_mutex_unlock(&fc_poll_group->fc_transport->lock);
2084 
2085 	free(fc_poll_group);
2086 }
2087 
2088 static int
2089 nvmf_fc_poll_group_add(struct spdk_nvmf_transport_poll_group *group,
2090 		       struct spdk_nvmf_qpair *qpair)
2091 {
2092 	struct spdk_nvmf_fc_poll_group *fc_poll_group;
2093 	struct spdk_nvmf_fc_conn *fc_conn;
2094 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2095 	struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
2096 	bool hwqp_found = false;
2097 
2098 	fc_poll_group   = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, tp_poll_group);
2099 	fc_conn         = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2100 
2101 	TAILQ_FOREACH(hwqp, &fc_poll_group->hwqp_list, link) {
2102 		if (fc_conn->fc_assoc->tgtport->fc_port == hwqp->fc_port) {
2103 			hwqp_found = true;
2104 			break;
2105 		}
2106 	}
2107 
2108 	if (!hwqp_found) {
2109 		SPDK_ERRLOG("No valid hwqp found for new QP.\n");
2110 		goto err;
2111 	}
2112 
2113 	if (!nvmf_fc_assign_conn_to_hwqp(hwqp,
2114 					 &fc_conn->conn_id,
2115 					 fc_conn->max_queue_depth)) {
2116 		SPDK_ERRLOG("Failed to get a connection id for new QP.\n");
2117 		goto err;
2118 	}
2119 
2120 	fc_conn->hwqp = hwqp;
2121 
2122 	/* If this is for ADMIN connection, then update assoc ID. */
2123 	if (fc_conn->qpair.qid == 0) {
2124 		fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
2125 	}
2126 
2127 	api_data = &fc_conn->create_opd->u.add_conn;
2128 	spdk_nvmf_fc_poller_api_func(hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
2129 	return 0;
2130 err:
2131 	return -1;
2132 }
2133 
2134 static int
2135 nvmf_fc_poll_group_poll(struct spdk_nvmf_transport_poll_group *group)
2136 {
2137 	uint32_t count = 0;
2138 	struct spdk_nvmf_fc_poll_group *fc_poll_group;
2139 	struct spdk_nvmf_fc_hwqp *hwqp;
2140 
2141 	fc_poll_group = SPDK_CONTAINEROF(group, struct spdk_nvmf_fc_poll_group, tp_poll_group);
2142 
2143 	TAILQ_FOREACH(hwqp, &fc_poll_group->hwqp_list, link) {
2144 		if (hwqp->state == SPDK_FC_HWQP_ONLINE) {
2145 			count += nvmf_fc_process_queue(hwqp);
2146 		}
2147 	}
2148 
2149 	return (int) count;
2150 }
2151 
2152 static int
2153 nvmf_fc_request_free(struct spdk_nvmf_request *req)
2154 {
2155 	struct spdk_nvmf_fc_request *fc_req = spdk_nvmf_fc_get_fc_req(req);
2156 
2157 	if (!fc_req->is_aborted) {
2158 		spdk_nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2159 		spdk_nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2160 	} else {
2161 		spdk_nvmf_fc_request_abort_complete(fc_req);
2162 	}
2163 	return 0;
2164 }
2165 
2166 
/*
 * Transport hook invoked when a qpair is being closed. Work is deferred to
 * the FC master thread via spdk_thread_send_msg().
 */
static void
nvmf_fc_close_qpair(struct spdk_nvmf_qpair *qpair)
{
	struct spdk_nvmf_fc_conn *fc_conn;

	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);

	if (fc_conn->conn_id == NVMF_FC_INVALID_CONN_ID) {
		/* QP creation failure in FC transport. Cleanup. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_connection_failure, fc_conn);
	} else if (fc_conn->fc_assoc->assoc_id == fc_conn->conn_id &&
		   fc_conn->fc_assoc->assoc_state != SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/* Admin connection: the association's assoc_id equals the admin
		 * connection's conn_id (see nvmf_fc_poll_group_add()). Closing it
		 * tears down the whole association, unless deletion is already
		 * in progress. */
		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(),
				     nvmf_fc_handle_assoc_deletion, fc_conn);
	}
}
2185 
2186 static int
2187 nvmf_fc_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair,
2188 			    struct spdk_nvme_transport_id *trid)
2189 {
2190 	struct spdk_nvmf_fc_conn *fc_conn;
2191 
2192 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2193 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2194 	return 0;
2195 }
2196 
2197 static int
2198 nvmf_fc_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair,
2199 			     struct spdk_nvme_transport_id *trid)
2200 {
2201 	struct spdk_nvmf_fc_conn *fc_conn;
2202 
2203 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2204 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2205 	return 0;
2206 }
2207 
2208 static int
2209 nvmf_fc_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair,
2210 			      struct spdk_nvme_transport_id *trid)
2211 {
2212 	struct spdk_nvmf_fc_conn *fc_conn;
2213 
2214 	fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
2215 	memcpy(trid, &fc_conn->trid, sizeof(struct spdk_nvme_transport_id));
2216 	return 0;
2217 }
2218 
/*
 * Operations vector that plugs the FC transport into the generic NVMe-oF
 * transport framework.
 */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
	.type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
	.opts_init = nvmf_fc_opts_init,
	.create = nvmf_fc_create,
	.destroy = nvmf_fc_destroy,

	.listen = nvmf_fc_listen,
	.stop_listen = nvmf_fc_stop_listen,
	.accept = nvmf_fc_accept,

	.listener_discover = nvmf_fc_discover,

	.poll_group_create = nvmf_fc_poll_group_create,
	.poll_group_destroy = nvmf_fc_poll_group_destroy,
	.poll_group_add = nvmf_fc_poll_group_add,
	.poll_group_poll = nvmf_fc_poll_group_poll,

	.req_complete = nvmf_fc_request_complete,
	.req_free = nvmf_fc_request_free,
	.qpair_fini = nvmf_fc_close_qpair,
	.qpair_get_peer_trid = nvmf_fc_qpair_get_peer_trid,
	.qpair_get_local_trid = nvmf_fc_qpair_get_local_trid,
	.qpair_get_listen_trid = nvmf_fc_qpair_get_listen_trid,
};
2243 
2244 /*
2245  * Re-initialize the FC-Port after an offline event.
2246  * Only the queue information needs to be populated. XCHG, lcore and other hwqp information remains
2247  * unchanged after the first initialization.
2248  *
2249  */
static int
nvmf_fc_adm_hw_port_reinit_validate(struct spdk_nvmf_fc_port *fc_port,
				    struct spdk_nvmf_fc_hw_port_init_args *args)
{
	uint32_t i;

	/* Verify that the port was previously in offline or quiesced state */
	if (spdk_nvmf_fc_port_is_online(fc_port)) {
		SPDK_ERRLOG("SPDK FC port %d already initialized and online.\n", args->port_handle);
		return -EINVAL;
	}

	/* Reinit information in new LS queue from previous queue */
	spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->ls_queue, args->ls_queue);

	fc_port->fcp_rq_id = args->fcp_rq_id;

	/* Initialize the LS queue */
	fc_port->ls_queue.queues = args->ls_queue;
	spdk_nvmf_fc_init_poller_queues(fc_port->ls_queue.queues);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		/* Reinit information in new IO queue from previous queue.
		 * Note the order: reinit from the old queue first, then point
		 * the hwqp at the new queue memory and initialize it. */
		spdk_nvmf_fc_hwqp_reinit_poller_queues(&fc_port->io_queues[i],
						       args->io_queues[i]);
		fc_port->io_queues[i].queues = args->io_queues[i];
		/* Initialize the IO queues */
		spdk_nvmf_fc_init_poller_queues(fc_port->io_queues[i].queues);
	}

	/* Port stays offline; a subsequent online event brings it up. */
	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;

	/* Validate the port information: all nports should have been removed
	 * before the port went offline. */
	DEV_VERIFY(TAILQ_EMPTY(&fc_port->nport_list));
	DEV_VERIFY(fc_port->num_nports == 0);
	if (!TAILQ_EMPTY(&fc_port->nport_list) || (fc_port->num_nports != 0)) {
		return -EINVAL;
	}

	return 0;
}
2291 
2292 /* Initializes the data for the creation of a FC-Port object in the SPDK
2293  * library. The spdk_nvmf_fc_port is a well defined structure that is part of
2294  * the API to the library. The contents added to this well defined structure
2295  * is private to each vendors implementation.
2296  */
2297 static int
2298 nvmf_fc_adm_hw_port_data_init(struct spdk_nvmf_fc_port *fc_port,
2299 			      struct spdk_nvmf_fc_hw_port_init_args *args)
2300 {
2301 	/* Used a high number for the LS HWQP so that it does not clash with the
2302 	 * IO HWQP's and immediately shows a LS queue during tracing.
2303 	 */
2304 	uint32_t i;
2305 
2306 	fc_port->port_hdl       = args->port_handle;
2307 	fc_port->hw_port_status = SPDK_FC_PORT_OFFLINE;
2308 	fc_port->fcp_rq_id      = args->fcp_rq_id;
2309 	fc_port->num_io_queues  = args->io_queue_cnt;
2310 
2311 	/*
2312 	 * Set port context from init args. Used for FCP port stats.
2313 	 */
2314 	fc_port->port_ctx = args->port_ctx;
2315 
2316 	/*
2317 	 * Initialize the LS queue wherever needed.
2318 	 */
2319 	fc_port->ls_queue.queues = args->ls_queue;
2320 	fc_port->ls_queue.thread = spdk_nvmf_fc_get_master_thread();
2321 	fc_port->ls_queue.hwqp_id = SPDK_MAX_NUM_OF_FC_PORTS * fc_port->num_io_queues;
2322 
2323 	/*
2324 	 * Initialize the LS queue.
2325 	 */
2326 	spdk_nvmf_fc_init_hwqp(fc_port, &fc_port->ls_queue);
2327 
2328 	/*
2329 	 * Initialize the IO queues.
2330 	 */
2331 	for (i = 0; i < args->io_queue_cnt; i++) {
2332 		struct spdk_nvmf_fc_hwqp *hwqp = &fc_port->io_queues[i];
2333 		hwqp->hwqp_id = i;
2334 		hwqp->queues = args->io_queues[i];
2335 		hwqp->rq_size = args->io_queue_size;
2336 		spdk_nvmf_fc_init_hwqp(fc_port, hwqp);
2337 	}
2338 
2339 	/*
2340 	 * Initialize the LS processing for port
2341 	 */
2342 	spdk_nvmf_fc_ls_init(fc_port);
2343 
2344 	/*
2345 	 * Initialize the list of nport on this HW port.
2346 	 */
2347 	TAILQ_INIT(&fc_port->nport_list);
2348 	fc_port->num_nports = 0;
2349 
2350 	return 0;
2351 }
2352 
2353 static void
2354 nvmf_fc_adm_port_hwqp_offline_del_poller(struct spdk_nvmf_fc_port *fc_port)
2355 {
2356 	struct spdk_nvmf_fc_hwqp *hwqp    = NULL;
2357 	int i = 0;
2358 
2359 	hwqp = &fc_port->ls_queue;
2360 	(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2361 
2362 	/*  Remove poller for all the io queues. */
2363 	for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2364 		hwqp = &fc_port->io_queues[i];
2365 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
2366 		spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
2367 	}
2368 }
2369 
2370 /*
2371  * Callback function for HW port link break operation.
2372  *
2373  * Notice that this callback is being triggered when spdk_fc_nport_delete()
2374  * completes, if that spdk_fc_nport_delete() called is issued by
2375  * nvmf_fc_adm_evnt_hw_port_link_break().
2376  *
2377  * Since nvmf_fc_adm_evnt_hw_port_link_break() can invoke spdk_fc_nport_delete() multiple
2378  * times (one per nport in the HW port's nport_list), a single call to
2379  * nvmf_fc_adm_evnt_hw_port_link_break() can result in multiple calls to this callback function.
2380  *
2381  * As a result, this function only invokes a callback to the caller of
2382  * nvmf_fc_adm_evnt_hw_port_link_break() only when the HW port's nport_list is empty.
2383  */
static void
nvmf_fc_adm_hw_port_link_break_cb(uint8_t port_handle,
				  enum spdk_fc_event event_type, void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *offline_cb_args = cb_args;
	struct spdk_nvmf_hw_port_link_break_args *offline_args = NULL;
	spdk_nvmf_fc_callback cb_func = NULL;
	int err = 0;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int num_nports = 0;
	char log_str[256];

	/* Validation chain: each check bails to 'out' with err set; the only
	 * allocation to release there is offline_cb_args itself. */
	if (0 != spdk_err) {
		DEV_VERIFY(!"port link break cb: spdk_err not success.");
		SPDK_ERRLOG("port link break cb: spdk_err:%d.\n", spdk_err);
		goto out;
	}

	if (!offline_cb_args) {
		DEV_VERIFY(!"port link break cb: port_offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	offline_args = offline_cb_args->args;
	if (!offline_args) {
		DEV_VERIFY(!"port link break cb: offline_args is NULL.");
		err = -EINVAL;
		goto out;
	}

	if (port_handle != offline_args->port_handle) {
		DEV_VERIFY(!"port link break cb: port_handle mismatch.");
		err = -EINVAL;
		goto out;
	}

	cb_func = offline_cb_args->cb_func;
	if (!cb_func) {
		DEV_VERIFY(!"port link break cb: cb_func is NULL.");
		err = -EINVAL;
		goto out;
	}

	fc_port = spdk_nvmf_fc_port_lookup(port_handle);
	if (!fc_port) {
		DEV_VERIFY(!"port link break cb: fc_port is NULL.");
		SPDK_ERRLOG("port link break cb: Unable to find port:%d\n",
			    offline_args->port_handle);
		err = -EINVAL;
		goto out;
	}

	num_nports = fc_port->num_nports;
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		/*
		 * Don't call the callback unless all nports have been deleted.
		 * This callback fires once per deleted nport (see the comment
		 * above this function), so earlier invocations simply return.
		 */
		goto out;
	}

	if (num_nports != 0) {
		/* The list is empty but the count disagrees - flag the
		 * inconsistency, then proceed with teardown anyway. */
		DEV_VERIFY(!"port link break cb: num_nports in non-zero.");
		SPDK_ERRLOG("port link break cb: # of ports should be 0. Instead, num_nports:%d\n",
			    num_nports);
		err = -EINVAL;
	}

	/*
	 * Mark the hwqps as offline and unregister the pollers.
	 */
	(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);

	/*
	 * Since there are no more nports, execute the callback(s).
	 */
	(void)cb_func(port_handle, SPDK_FC_LINK_BREAK,
		      (void *)offline_args->cb_ctx, spdk_err);

out:
	free(offline_cb_args);

	snprintf(log_str, sizeof(log_str),
		 "port link break cb: port:%d evt_type:%d num_nports:%d err:%d spdk_err:%d.\n",
		 port_handle, event_type, num_nports, err, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
	return;
}
2478 
2479 /*
2480  * FC port must have all its nports deleted before transitioning to offline state.
2481  */
2482 static void
2483 nvmf_fc_adm_hw_port_offline_nport_delete(struct spdk_nvmf_fc_port *fc_port)
2484 {
2485 	struct spdk_nvmf_fc_nport *nport = NULL;
2486 	/* All nports must have been deleted at this point for this fc port */
2487 	DEV_VERIFY(fc_port && TAILQ_EMPTY(&fc_port->nport_list));
2488 	DEV_VERIFY(fc_port->num_nports == 0);
2489 	/* Mark the nport states to be zombie, if they exist */
2490 	if (fc_port && !TAILQ_EMPTY(&fc_port->nport_list)) {
2491 		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
2492 			(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
2493 		}
2494 	}
2495 }
2496 
/*
 * Completion callback for an IT (initiator-target) delete. Snapshots the
 * identifying fields for logging before invoking the caller's callback and
 * freeing cb_data.
 *
 * NOTE(review): nport and rport are dereferenced unconditionally up front -
 * assumes both are valid whenever this callback fires; confirm against the
 * scheduling path in nvmf_fc_adm_i_t_delete_assoc().
 */
static void
nvmf_fc_adm_i_t_delete_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int spdk_err = 0;
	uint8_t port_handle = cb_data->port_handle;
	/* Captured before cb_data is freed so they can appear in the log. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Error in IT Delete callback.");
		goto out;
	}

	if (cb_func != NULL) {
		(void)cb_func(port_handle, SPDK_FC_IT_DELETE, cb_data->fc_cb_ctx, spdk_err);
	}

out:
	free(cb_data);

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, port_handle:%d s_id:%d d_id:%d rpi:%d rport_assoc_count:%d rc = %d.\n",
		 nport_hdl, port_handle, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2539 
/*
 * Per-association completion callback for an IT delete. Fires once for each
 * association deleted on the (nport, rport) pair; only when the rport's
 * assoc_count reaches zero does it remove the rport, invoke the caller's
 * callback, and free rport and the shared cb_data (args).
 */
static void
nvmf_fc_adm_i_t_delete_assoc_cb(void *args, uint32_t err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	struct spdk_nvmf_fc_remote_port_info *rport = cb_data->rport;
	spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func = cb_data->cb_func;
	/* Snapshots for logging; rport/args may be freed below. */
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	uint32_t nport_hdl = nport->nport_hdl;
	uint32_t d_id = nport->d_id;
	char log_str[256];

	/*
	 * Assert on any association delete failure. We continue to delete other
	 * associations in promoted builds.
	 */
	if (0 != err) {
		DEV_VERIFY(!"Nport's association delete callback returned error");
		/* Decrement the counts manually so the teardown can still
		 * converge to zero despite the failed delete. */
		if (nport->assoc_count > 0) {
			nport->assoc_count--;
		}
		if (rport->assoc_count > 0) {
			rport->assoc_count--;
		}
	}

	/*
	 * If this is the last association being deleted for the ITN,
	 * execute the callback(s).
	 */
	if (0 == rport->assoc_count) {
		/* Remove the rport from the remote port list. */
		if (spdk_nvmf_fc_nport_remove_rem_port(nport, rport) != 0) {
			SPDK_ERRLOG("Error while removing rport from list.\n");
			DEV_VERIFY(!"Error while removing rport from list.");
		}

		if (cb_func != NULL) {
			/*
			 * Callback function is provided by the caller
			 * of nvmf_fc_adm_i_t_delete_assoc().
			 */
			(void)cb_func(cb_data->cb_ctx, 0);
		}
		/* Last reference: release the rport and the shared cb_data. */
		free(rport);
		free(args);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete assoc_cb on nport %d done, s_id:%d d_id:%d rpi:%d rport_assoc_count:%d err = %d.\n",
		 nport_hdl, s_id, d_id, rpi, assoc_count, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
2601 
2602 /**
2603  * Process a IT delete.
2604  */
/*
 * Schedule deletion of every association on 'nport' whose s_id matches
 * 'rport'. Each scheduled delete completes through
 * nvmf_fc_adm_i_t_delete_assoc_cb(), which owns (and eventually frees)
 * cb_data. If nothing could be scheduled, the callback is invoked directly
 * so cb_func still fires exactly once.
 */
static void
nvmf_fc_adm_i_t_delete_assoc(struct spdk_nvmf_fc_nport *nport,
			     struct spdk_nvmf_fc_remote_port_info *rport,
			     spdk_nvmf_fc_adm_i_t_delete_assoc_cb_fn cb_func,
			     void *cb_ctx)
{
	int err = 0;
	struct spdk_nvmf_fc_association *assoc = NULL;
	int assoc_err = 0;
	uint32_t num_assoc = 0;
	uint32_t num_assoc_del_scheduled = 0;
	struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data *cb_data = NULL;
	uint8_t port_hdl = nport->port_hdl;
	uint32_t s_id = rport->s_id;
	uint32_t rpi = rport->rpi;
	uint32_t assoc_count = rport->assoc_count;
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete associations on nport:%d begin.\n",
		      nport->nport_hdl);

	/*
	 * Allocate memory for callback data.
	 * This memory will be freed by the callback function.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_assoc_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data on nport:%d.\n", nport->nport_hdl);
		err = -ENOMEM;
		/* NOTE(review): on this path cb_data is NULL, so the guard at
		 * 'out' skips the direct callback and cb_func is never
		 * invoked - confirm callers tolerate that. */
		goto out;
	}
	cb_data->nport       = nport;
	cb_data->rport       = rport;
	cb_data->port_handle = port_hdl;
	cb_data->cb_func     = cb_func;
	cb_data->cb_ctx      = cb_ctx;

	/*
	 * Delete all associations, if any, related with this ITN/remote_port.
	 */
	TAILQ_FOREACH(assoc, &nport->fc_associations, link) {
		num_assoc++;
		if (assoc->s_id == s_id) {
			assoc_err = spdk_nvmf_fc_delete_association(nport,
					assoc->assoc_id,
					false /* send abts */, false,
					nvmf_fc_adm_i_t_delete_assoc_cb, cb_data);
			if (0 != assoc_err) {
				/*
				 * Mark this association as zombie.
				 */
				err = -EINVAL;
				DEV_VERIFY(!"Error while deleting association");
				(void)spdk_nvmf_fc_assoc_set_state(assoc, SPDK_NVMF_FC_OBJECT_ZOMBIE);
			} else {
				num_assoc_del_scheduled++;
			}
		}
	}

out:
	if ((cb_data) && (num_assoc_del_scheduled == 0)) {
		/*
		 * Since there are no association_delete calls
		 * successfully scheduled, the association_delete
		 * callback function will never be called.
		 * In this case, call the callback function now.
		 */
		nvmf_fc_adm_i_t_delete_assoc_cb(cb_data, 0);
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete associations on nport:%d end. "
		 "s_id:%d rpi:%d assoc_count:%d assoc:%d assoc_del_scheduled:%d rc:%d.\n",
		 nport->nport_hdl, s_id, rpi, assoc_count, num_assoc, num_assoc_del_scheduled, err);

	if (err == 0) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	} else {
		SPDK_ERRLOG("%s", log_str);
	}
}
2687 
/*
 * Poller API callback invoked as each hwqp finishes quiescing. Decrements
 * the shared quiesce_count; when the last queue (LS + all IO queues)
 * reports in, marks the port quiesced, runs the caller's callback and frees
 * the shared quiesce context.
 */
static void
nvmf_fc_adm_queue_quiesce_cb(void *cb_data, enum spdk_nvmf_fc_poller_api_ret ret)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *quiesce_api_data = NULL;
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;

	quiesce_api_data = (struct spdk_nvmf_fc_poller_api_quiesce_queue_args *)cb_data;
	hwqp = quiesce_api_data->hwqp;
	fc_port = hwqp->fc_port;
	port_quiesce_ctx = (struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *)quiesce_api_data->ctx;
	spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func = port_quiesce_ctx->cb_func;

	/*
	 * Decrement the callback/quiesced queue count.
	 */
	port_quiesce_ctx->quiesce_count--;
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Queue%d Quiesced\n", quiesce_api_data->hwqp->hwqp_id);

	/* The per-queue args were allocated in nvmf_fc_adm_hw_queue_quiesce(). */
	free(quiesce_api_data);
	/*
	 * Wait for call backs i.e. max_ioq_queues + LS QUEUE.
	 */
	if (port_quiesce_ctx->quiesce_count > 0) {
		return;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_ERRLOG("Port %d already in quiesced state.\n", fc_port->port_hdl);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesced.\n", fc_port->port_hdl);
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (cb_func) {
		/*
		 * Callback function for the caller of quiesce.
		 */
		cb_func(port_quiesce_ctx->ctx, err);
	}

	/*
	 * Free the context structure.
	 */
	free(port_quiesce_ctx);

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d quiesce done, rc = %d.\n", fc_port->port_hdl,
		      err);
}
2740 
2741 static int
2742 nvmf_fc_adm_hw_queue_quiesce(struct spdk_nvmf_fc_hwqp *fc_hwqp, void *ctx,
2743 			     spdk_nvmf_fc_poller_api_cb cb_func)
2744 {
2745 	struct spdk_nvmf_fc_poller_api_quiesce_queue_args *args;
2746 	enum spdk_nvmf_fc_poller_api_ret rc = SPDK_NVMF_FC_POLLER_API_SUCCESS;
2747 	int err = 0;
2748 
2749 	args = calloc(1, sizeof(struct spdk_nvmf_fc_poller_api_quiesce_queue_args));
2750 
2751 	if (args == NULL) {
2752 		err = -ENOMEM;
2753 		SPDK_ERRLOG("Failed to allocate memory for poller quiesce args, hwqp:%d\n", fc_hwqp->hwqp_id);
2754 		goto done;
2755 	}
2756 	args->hwqp = fc_hwqp;
2757 	args->ctx = ctx;
2758 	args->cb_info.cb_func = cb_func;
2759 	args->cb_info.cb_data = args;
2760 	args->cb_info.cb_thread = spdk_get_thread();
2761 
2762 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Quiesce queue %d\n", fc_hwqp->hwqp_id);
2763 	rc = spdk_nvmf_fc_poller_api_func(fc_hwqp, SPDK_NVMF_FC_POLLER_API_QUIESCE_QUEUE, args);
2764 	if (rc) {
2765 		free(args);
2766 		err = -EINVAL;
2767 	}
2768 
2769 done:
2770 	return err;
2771 }
2772 
2773 /*
2774  * Hw port Quiesce
2775  */
static int
nvmf_fc_adm_hw_port_quiesce(struct spdk_nvmf_fc_port *fc_port, void *ctx,
			    spdk_nvmf_fc_adm_hw_port_quiesce_cb_fn cb_func)
{
	struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx *port_quiesce_ctx = NULL;
	uint32_t i = 0;
	int err = 0;

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port:%d is being quiesced.\n", fc_port->port_hdl);

	/*
	 * If the port is in an OFFLINE state, set the state to QUIESCED
	 * and execute the callback.
	 */
	if (fc_port->hw_port_status == SPDK_FC_PORT_OFFLINE) {
		fc_port->hw_port_status = SPDK_FC_PORT_QUIESCED;
	}

	if (fc_port->hw_port_status == SPDK_FC_PORT_QUIESCED) {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Port %d already in quiesced state.\n",
			      fc_port->port_hdl);
		/*
		 * Execute the callback function directly.
		 */
		cb_func(ctx, err);
		goto out;
	}

	/* Shared context tracked by quiesce_count; freed by the last
	 * invocation of nvmf_fc_adm_queue_quiesce_cb(). */
	port_quiesce_ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_quiesce_ctx));

	if (port_quiesce_ctx == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Failed to allocate memory for LS queue quiesce ctx, port:%d\n",
			    fc_port->port_hdl);
		goto out;
	}

	port_quiesce_ctx->quiesce_count = 0;
	port_quiesce_ctx->ctx = ctx;
	port_quiesce_ctx->cb_func = cb_func;

	/*
	 * Quiesce the LS queue.
	 */
	err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->ls_queue, port_quiesce_ctx,
					   nvmf_fc_adm_queue_quiesce_cb);
	if (err != 0) {
		SPDK_ERRLOG("Failed to quiesce the LS queue.\n");
		goto out;
	}
	port_quiesce_ctx->quiesce_count++;

	/*
	 * Quiesce the IO queues.
	 */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		err = nvmf_fc_adm_hw_queue_quiesce(&fc_port->io_queues[i],
						   port_quiesce_ctx,
						   nvmf_fc_adm_queue_quiesce_cb);
		if (err != 0) {
			DEV_VERIFY(0);
			SPDK_ERRLOG("Failed to quiesce the IO queue:%d.\n", fc_port->io_queues[i].hwqp_id);
		}
		port_quiesce_ctx->quiesce_count++;
	}

out:
	/* NOTE(review): if an IO-queue quiesce fails after earlier queues were
	 * scheduled, this frees port_quiesce_ctx while those callbacks may
	 * still reference it - looks like a potential use-after-free on the
	 * error path; confirm whether poller-API failures here are fatal. */
	if (port_quiesce_ctx && err != 0) {
		free(port_quiesce_ctx);
	}
	return err;
}
2848 
2849 /*
2850  * Initialize and add a HW port entry to the global
2851  * HW port list.
2852  */
2853 static void
2854 nvmf_fc_adm_evnt_hw_port_init(void *arg)
2855 {
2856 	ASSERT_SPDK_FC_MASTER_THREAD();
2857 	struct spdk_nvmf_fc_port *fc_port = NULL;
2858 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2859 	struct spdk_nvmf_fc_hw_port_init_args *args = (struct spdk_nvmf_fc_hw_port_init_args *)
2860 			api_data->api_args;
2861 	int err = 0;
2862 
2863 	if (args->io_queue_cnt > spdk_env_get_core_count()) {
2864 		SPDK_ERRLOG("IO queues count greater than cores for %d.\n", args->port_handle);
2865 		err = EINVAL;
2866 		goto abort_port_init;
2867 	}
2868 
2869 	/*
2870 	 * 1. Check for duplicate initialization.
2871 	 */
2872 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2873 	if (fc_port != NULL) {
2874 		/* Port already exists, check if it has to be re-initialized */
2875 		err = nvmf_fc_adm_hw_port_reinit_validate(fc_port, args);
2876 		if (err) {
2877 			/*
2878 			 * In case of an error we do not want to free the fc_port
2879 			 * so we set that pointer to NULL.
2880 			 */
2881 			fc_port = NULL;
2882 		}
2883 		goto abort_port_init;
2884 	}
2885 
2886 	/*
2887 	 * 2. Get the memory to instantiate a fc port.
2888 	 */
2889 	fc_port = calloc(1, sizeof(struct spdk_nvmf_fc_port) +
2890 			 (args->io_queue_cnt * sizeof(struct spdk_nvmf_fc_hwqp)));
2891 	if (fc_port == NULL) {
2892 		SPDK_ERRLOG("Failed to allocate memory for fc_port %d.\n", args->port_handle);
2893 		err = -ENOMEM;
2894 		goto abort_port_init;
2895 	}
2896 
2897 	/* assign the io_queues array */
2898 	fc_port->io_queues = (struct spdk_nvmf_fc_hwqp *)((uint8_t *)fc_port + sizeof(
2899 				     struct spdk_nvmf_fc_port));
2900 
2901 	/*
2902 	 * 3. Initialize the contents for the FC-port
2903 	 */
2904 	err = nvmf_fc_adm_hw_port_data_init(fc_port, args);
2905 
2906 	if (err != 0) {
2907 		SPDK_ERRLOG("Data initialization failed for fc_port %d.\n", args->port_handle);
2908 		DEV_VERIFY(!"Data initialization failed for fc_port");
2909 		goto abort_port_init;
2910 	}
2911 
2912 	/*
2913 	 * 4. Add this port to the global fc port list in the library.
2914 	 */
2915 	spdk_nvmf_fc_port_add(fc_port);
2916 
2917 abort_port_init:
2918 	if (err && fc_port) {
2919 		free(fc_port);
2920 	}
2921 	if (api_data->cb_func != NULL) {
2922 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_INIT, args->cb_ctx, err);
2923 	}
2924 
2925 	free(arg);
2926 
2927 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d initialize done, rc = %d.\n",
2928 		      args->port_handle, err);
2929 }
2930 
2931 /*
2932  * Online a HW port.
2933  */
2934 static void
2935 nvmf_fc_adm_evnt_hw_port_online(void *arg)
2936 {
2937 	ASSERT_SPDK_FC_MASTER_THREAD();
2938 	struct spdk_nvmf_fc_port *fc_port = NULL;
2939 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2940 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2941 	struct spdk_nvmf_fc_hw_port_online_args *args = (struct spdk_nvmf_fc_hw_port_online_args *)
2942 			api_data->api_args;
2943 	int i = 0;
2944 	int err = 0;
2945 
2946 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2947 	if (fc_port) {
2948 		/* Set the port state to online */
2949 		err = spdk_nvmf_fc_port_set_online(fc_port);
2950 		if (err != 0) {
2951 			SPDK_ERRLOG("Hw port %d online failed. err = %d\n", fc_port->port_hdl, err);
2952 			DEV_VERIFY(!"Hw port online failed");
2953 			goto out;
2954 		}
2955 
2956 		hwqp = &fc_port->ls_queue;
2957 		hwqp->context = NULL;
2958 		(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2959 
2960 		/* Cycle through all the io queues and setup a hwqp poller for each. */
2961 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
2962 			hwqp = &fc_port->io_queues[i];
2963 			hwqp->context = NULL;
2964 			(void)spdk_nvmf_fc_hwqp_set_online(hwqp);
2965 			spdk_nvmf_fc_poll_group_add_hwqp(hwqp);
2966 		}
2967 	} else {
2968 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
2969 		err = -EINVAL;
2970 	}
2971 
2972 out:
2973 	if (api_data->cb_func != NULL) {
2974 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_ONLINE, args->cb_ctx, err);
2975 	}
2976 
2977 	free(arg);
2978 
2979 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d online done, rc = %d.\n", args->port_handle,
2980 		      err);
2981 }
2982 
2983 /*
2984  * Offline a HW port.
2985  */
2986 static void
2987 nvmf_fc_adm_evnt_hw_port_offline(void *arg)
2988 {
2989 	ASSERT_SPDK_FC_MASTER_THREAD();
2990 	struct spdk_nvmf_fc_port *fc_port = NULL;
2991 	struct spdk_nvmf_fc_hwqp *hwqp = NULL;
2992 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
2993 	struct spdk_nvmf_fc_hw_port_offline_args *args = (struct spdk_nvmf_fc_hw_port_offline_args *)
2994 			api_data->api_args;
2995 	int i = 0;
2996 	int err = 0;
2997 
2998 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
2999 	if (fc_port) {
3000 		/* Set the port state to offline, if it is not already. */
3001 		err = spdk_nvmf_fc_port_set_offline(fc_port);
3002 		if (err != 0) {
3003 			SPDK_ERRLOG("Hw port %d already offline. err = %d\n", fc_port->port_hdl, err);
3004 			err = 0;
3005 			goto out;
3006 		}
3007 
3008 		hwqp = &fc_port->ls_queue;
3009 		(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
3010 
3011 		/* Remove poller for all the io queues. */
3012 		for (i = 0; i < (int)fc_port->num_io_queues; i++) {
3013 			hwqp = &fc_port->io_queues[i];
3014 			(void)spdk_nvmf_fc_hwqp_set_offline(hwqp);
3015 			spdk_nvmf_fc_poll_group_remove_hwqp(hwqp);
3016 		}
3017 
3018 		/*
3019 		 * Delete all the nports. Ideally, the nports should have been purged
3020 		 * before the offline event, in which case, only a validation is required.
3021 		 */
3022 		nvmf_fc_adm_hw_port_offline_nport_delete(fc_port);
3023 	} else {
3024 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3025 		err = -EINVAL;
3026 	}
3027 out:
3028 	if (api_data->cb_func != NULL) {
3029 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_OFFLINE, args->cb_ctx, err);
3030 	}
3031 
3032 	free(arg);
3033 
3034 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d offline done, rc = %d.\n", args->port_handle,
3035 		      err);
3036 }
3037 
/* Context carried through the subsystem pause/resume sequence used to add
 * or remove an nport's listening address on each subsystem. Allocated in
 * nvmf_fc_adm_add_rem_nport_listener, freed in the resume callback. */
struct nvmf_fc_add_rem_listener_ctx {
	bool add_listener;	/* true: add the listener; false: remove it */
	struct spdk_nvme_transport_id trid;	/* FC transport ID built from the nport WWNs */
};
3042 
static void
nvmf_fc_adm_subsystem_resume_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
{
	ASSERT_SPDK_FC_MASTER_THREAD();

	/* The listener add/remove is complete; release the pause/resume
	 * context (a struct nvmf_fc_add_rem_listener_ctx). */
	free(cb_arg);
}
3050 
3051 static void
3052 nvmf_fc_adm_subsystem_paused_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
3053 {
3054 	ASSERT_SPDK_FC_MASTER_THREAD();
3055 	struct nvmf_fc_add_rem_listener_ctx *ctx = (struct nvmf_fc_add_rem_listener_ctx *)cb_arg;
3056 
3057 	if (ctx->add_listener) {
3058 		spdk_nvmf_subsystem_add_listener(subsystem, &ctx->trid);
3059 	} else {
3060 		spdk_nvmf_subsystem_remove_listener(subsystem, &ctx->trid);
3061 	}
3062 	if (spdk_nvmf_subsystem_resume(subsystem, nvmf_fc_adm_subsystem_resume_cb, ctx)) {
3063 		SPDK_ERRLOG("Failed to resume subsystem: %s\n", subsystem->subnqn);
3064 		free(ctx);
3065 	}
3066 }
3067 
3068 static int
3069 nvmf_fc_adm_add_rem_nport_listener(struct spdk_nvmf_fc_nport *nport, bool add)
3070 {
3071 	struct spdk_nvmf_tgt *tgt = spdk_nvmf_fc_get_tgt();
3072 	struct spdk_nvmf_subsystem *subsystem;
3073 
3074 	if (!tgt) {
3075 		SPDK_ERRLOG("No nvmf target defined\n");
3076 		return -EINVAL;
3077 	}
3078 
3079 	subsystem = spdk_nvmf_subsystem_get_first(tgt);
3080 	while (subsystem) {
3081 		struct nvmf_fc_add_rem_listener_ctx *ctx;
3082 
3083 		ctx = calloc(1, sizeof(struct nvmf_fc_add_rem_listener_ctx));
3084 		if (ctx) {
3085 			ctx->add_listener = add;
3086 			spdk_nvmf_fc_create_trid(&ctx->trid, nport->fc_nodename.u.wwn,
3087 						 nport->fc_portname.u.wwn);
3088 			if (spdk_nvmf_subsystem_pause(subsystem, nvmf_fc_adm_subsystem_paused_cb, ctx)) {
3089 				SPDK_ERRLOG("Failed to pause subsystem: %s\n", subsystem->subnqn);
3090 				free(ctx);
3091 			}
3092 		}
3093 		subsystem = spdk_nvmf_subsystem_get_next(subsystem);
3094 	}
3095 
3096 	return 0;
3097 }
3098 
3099 /*
3100  * Create a Nport.
3101  */
3102 static void
3103 nvmf_fc_adm_evnt_nport_create(void *arg)
3104 {
3105 	ASSERT_SPDK_FC_MASTER_THREAD();
3106 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3107 	struct spdk_nvmf_fc_nport_create_args *args = (struct spdk_nvmf_fc_nport_create_args *)
3108 			api_data->api_args;
3109 	struct spdk_nvmf_fc_nport *nport = NULL;
3110 	struct spdk_nvmf_fc_port *fc_port = NULL;
3111 	int err = 0;
3112 
3113 	/*
3114 	 * Get the physical port.
3115 	 */
3116 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3117 	if (fc_port == NULL) {
3118 		err = -EINVAL;
3119 		goto out;
3120 	}
3121 
3122 	/*
3123 	 * Check for duplicate initialization.
3124 	 */
3125 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3126 	if (nport != NULL) {
3127 		SPDK_ERRLOG("Duplicate SPDK FC nport %d exists for FC port:%d.\n", args->nport_handle,
3128 			    args->port_handle);
3129 		err = -EINVAL;
3130 		goto out;
3131 	}
3132 
3133 	/*
3134 	 * Get the memory to instantiate a fc nport.
3135 	 */
3136 	nport = calloc(1, sizeof(struct spdk_nvmf_fc_nport));
3137 	if (nport == NULL) {
3138 		SPDK_ERRLOG("Failed to allocate memory for nport %d.\n",
3139 			    args->nport_handle);
3140 		err = -ENOMEM;
3141 		goto out;
3142 	}
3143 
3144 	/*
3145 	 * Initialize the contents for the nport
3146 	 */
3147 	nport->nport_hdl    = args->nport_handle;
3148 	nport->port_hdl     = args->port_handle;
3149 	nport->nport_state  = SPDK_NVMF_FC_OBJECT_CREATED;
3150 	nport->fc_nodename  = args->fc_nodename;
3151 	nport->fc_portname  = args->fc_portname;
3152 	nport->d_id         = args->d_id;
3153 	nport->fc_port      = spdk_nvmf_fc_port_lookup(args->port_handle);
3154 
3155 	(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_CREATED);
3156 	TAILQ_INIT(&nport->rem_port_list);
3157 	nport->rport_count = 0;
3158 	TAILQ_INIT(&nport->fc_associations);
3159 	nport->assoc_count = 0;
3160 
3161 	/*
3162 	 * Populate the nport address (as listening address) to the nvmf subsystems.
3163 	 */
3164 	err = nvmf_fc_adm_add_rem_nport_listener(nport, true);
3165 
3166 	(void)spdk_nvmf_fc_port_add_nport(fc_port, nport);
3167 out:
3168 	if (err && nport) {
3169 		free(nport);
3170 	}
3171 
3172 	if (api_data->cb_func != NULL) {
3173 		(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_CREATE, args->cb_ctx, err);
3174 	}
3175 
3176 	free(arg);
3177 }
3178 
/*
 * Completion callback for each IT-delete issued during an nport delete.
 * Runs once per rport removal; when the nport has no rports left it unlinks
 * the nport from its port, frees it, and fires the caller's original
 * nport-delete callback.
 */
static void
nvmf_fc_adm_delete_nport_cb(uint8_t port_handle, enum spdk_fc_event event_type,
			    void *cb_args, int spdk_err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = cb_args;
	struct spdk_nvmf_fc_nport *nport = cb_data->nport;
	spdk_nvmf_fc_callback cb_func = cb_data->fc_cb_func;
	int err = 0;
	uint16_t nport_hdl = 0;
	char log_str[256];

	/*
	 * Assert on any delete failure.
	 */
	if (nport == NULL) {
		SPDK_ERRLOG("Nport delete callback returned null nport");
		DEV_VERIFY(!"nport is null.");
		goto out;
	}

	nport_hdl = nport->nport_hdl;
	if (0 != spdk_err) {
		/* The underlying IT-delete reported an error; log it but keep
		 * going so the nport teardown can still complete. */
		SPDK_ERRLOG("Nport delete callback returned error. FC Port: "
			    "%d, Nport: %d\n",
			    nport->port_hdl, nport->nport_hdl);
		DEV_VERIFY(!"nport delete callback error.");
	}

	/*
	 * Free the nport if this is the last rport being deleted and
	 * execute the callback(s).
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* All associations are expected to be gone by this point. */
		if (0 != nport->assoc_count) {
			SPDK_ERRLOG("association count != 0\n");
			DEV_VERIFY(!"association count != 0");
		}

		err = spdk_nvmf_fc_port_remove_nport(nport->fc_port, nport);
		if (0 != err) {
			SPDK_ERRLOG("Nport delete callback: Failed to remove "
				    "nport from nport list. FC Port:%d Nport:%d\n",
				    nport->port_hdl, nport->nport_hdl);
		}
		/* Free the nport */
		free(nport);

		/* Fire the caller's original nport-delete callback, then release
		 * the callback context shared by all IT-delete events. */
		if (cb_func != NULL) {
			(void)cb_func(cb_data->port_handle, SPDK_FC_NPORT_DELETE, cb_data->fc_cb_ctx, spdk_err);
		}
		free(cb_data);
	}
out:
	snprintf(log_str, sizeof(log_str),
		 "port:%d nport:%d delete cb exit, evt_type:%d rc:%d.\n",
		 port_handle, nport_hdl, event_type, spdk_err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}
}
3243 
3244 /*
3245  * Delete Nport.
3246  */
/*
 * Delete an nport: remove its listening addresses from all subsystems and
 * enqueue an IT-delete event for every remote port. Final cleanup (freeing
 * the nport and calling the user's callback) happens in
 * nvmf_fc_adm_delete_nport_cb once the last IT-delete completes.
 */
static void
nvmf_fc_adm_evnt_nport_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_nport_delete_args *args = (struct spdk_nvmf_fc_nport_delete_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_nport_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	int err = 0;
	uint32_t rport_cnt = 0;	/* number of IT-delete events enqueued */
	int rc = 0;

	/*
	 * Make sure that the nport exists.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport %d for FC Port: %d.\n", args->nport_handle,
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_nport_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data %d.\n", args->nport_handle);
		err = -ENOMEM;
		goto out;
	}

	/* This context is shared by every IT-delete enqueued below and is
	 * freed by nvmf_fc_adm_delete_nport_cb after the last one completes. */
	cb_data->nport = nport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Begin nport tear down
	 */
	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this nport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		err = -ENODEV;
		goto out;
	} else {
		/* nport partially created/deleted */
		DEV_VERIFY(nport->nport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(0 != "Nport in zombie state");
		err = -ENODEV;
		goto out;
	}

	/*
	 * Remove this nport from listening addresses across subsystems
	 */
	rc = nvmf_fc_adm_add_rem_nport_listener(nport, false);

	if (0 != rc) {
		/* Listener removal failed; park the nport in zombie state
		 * rather than continuing a partial teardown. */
		err = spdk_nvmf_fc_nport_set_state(nport, SPDK_NVMF_FC_OBJECT_ZOMBIE);
		SPDK_ERRLOG("Unable to remove the listen addr in the subsystems for nport %d.\n",
			    nport->nport_hdl);
		goto out;
	}

	/*
	 * Delete all the remote ports (if any) for the nport
	 */
	/* TODO - Need to do this with a "first" and a "next" accessor function
	 * for completeness. Look at app-subsystem as examples.
	 */
	if (spdk_nvmf_fc_nport_has_no_rport(nport)) {
		/* No rports to delete. Complete the nport deletion. */
		nvmf_fc_adm_delete_nport_cb(nport->port_hdl, SPDK_FC_NPORT_DELETE, cb_data, 0);
		goto out;
	}

	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		struct spdk_nvmf_fc_hw_i_t_delete_args *it_del_args = calloc(
					1, sizeof(struct spdk_nvmf_fc_hw_i_t_delete_args));

		if (it_del_args == NULL) {
			err = -ENOMEM;
			SPDK_ERRLOG("SPDK_FC_IT_DELETE no mem to delete rport with rpi:%d s_id:%d.\n",
				    rport_iter->rpi, rport_iter->s_id);
			DEV_VERIFY(!"SPDK_FC_IT_DELETE failed, cannot allocate memory");
			/* NOTE(review): if earlier iterations already enqueued
			 * IT-delete events, the out-path below frees cb_data while
			 * those events still reference it — looks like a potential
			 * use-after-free; confirm against the event queue behavior. */
			goto out;
		}

		/* One IT-delete event per rport; all share cb_data as context. */
		rport_cnt++;
		it_del_args->port_handle = nport->port_hdl;
		it_del_args->nport_handle = nport->nport_hdl;
		it_del_args->cb_ctx = (void *)cb_data;
		it_del_args->rpi = rport_iter->rpi;
		it_del_args->s_id = rport_iter->s_id;

		spdk_nvmf_fc_master_enqueue_event(SPDK_FC_IT_DELETE, (void *)it_del_args,
						  nvmf_fc_adm_delete_nport_cb);
	}

out:
	/* On failure, execute the callback function now */
	if ((err != 0) || (rc != 0)) {
		SPDK_ERRLOG("NPort %d delete failed, error:%d, fc port:%d, "
			    "rport_cnt:%d rc:%d.\n",
			    args->nport_handle, err, args->port_handle,
			    rport_cnt, rc);
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_NPORT_DELETE, args->cb_ctx, err);
		}

	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
			      "NPort %d delete done succesfully, fc port:%d. "
			      "rport_cnt:%d\n",
			      args->nport_handle, args->port_handle, rport_cnt);
	}

	free(arg);
}
3378 
3379 /*
3380  * Process an PRLI/IT add.
3381  */
3382 static void
3383 nvmf_fc_adm_evnt_i_t_add(void *arg)
3384 {
3385 	ASSERT_SPDK_FC_MASTER_THREAD();
3386 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3387 	struct spdk_nvmf_fc_hw_i_t_add_args *args = (struct spdk_nvmf_fc_hw_i_t_add_args *)
3388 			api_data->api_args;
3389 	struct spdk_nvmf_fc_nport *nport = NULL;
3390 	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
3391 	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
3392 	int err = 0;
3393 
3394 	/*
3395 	 * Make sure the nport port exists.
3396 	 */
3397 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3398 	if (nport == NULL) {
3399 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3400 		err = -EINVAL;
3401 		goto out;
3402 	}
3403 
3404 	/*
3405 	 * Check for duplicate i_t_add.
3406 	 */
3407 	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
3408 		if ((rport_iter->s_id == args->s_id) && (rport_iter->rpi == args->rpi)) {
3409 			SPDK_ERRLOG("Duplicate rport found for FC nport %d: sid:%d rpi:%d\n",
3410 				    args->nport_handle, rport_iter->s_id, rport_iter->rpi);
3411 			err = -EEXIST;
3412 			goto out;
3413 		}
3414 	}
3415 
3416 	/*
3417 	 * Get the memory to instantiate the remote port
3418 	 */
3419 	rport = calloc(1, sizeof(struct spdk_nvmf_fc_remote_port_info));
3420 	if (rport == NULL) {
3421 		SPDK_ERRLOG("Memory allocation for rem port failed.\n");
3422 		err = -ENOMEM;
3423 		goto out;
3424 	}
3425 
3426 	/*
3427 	 * Initialize the contents for the rport
3428 	 */
3429 	(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_CREATED);
3430 	rport->s_id = args->s_id;
3431 	rport->rpi = args->rpi;
3432 	rport->fc_nodename = args->fc_nodename;
3433 	rport->fc_portname = args->fc_portname;
3434 
3435 	/*
3436 	 * Add remote port to nport
3437 	 */
3438 	if (spdk_nvmf_fc_nport_add_rem_port(nport, rport) != 0) {
3439 		DEV_VERIFY(!"Error while adding rport to list");
3440 	};
3441 
3442 	/*
3443 	 * TODO: Do we validate the initiators service parameters?
3444 	 */
3445 
3446 	/*
3447 	 * Get the targets service parameters from the library
3448 	 * to return back to the driver.
3449 	 */
3450 	args->target_prli_info = spdk_nvmf_fc_get_prli_service_params();
3451 
3452 out:
3453 	if (api_data->cb_func != NULL) {
3454 		/*
3455 		 * Passing pointer to the args struct as the first argument.
3456 		 * The cb_func should handle this appropriately.
3457 		 */
3458 		(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_ADD, args->cb_ctx, err);
3459 	}
3460 
3461 	free(arg);
3462 
3463 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3464 		      "IT add on nport %d done, rc = %d.\n",
3465 		      args->nport_handle, err);
3466 }
3467 
3468 /**
3469  * Process a IT delete.
3470  */
static void
nvmf_fc_adm_evnt_i_t_delete(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_fc_hw_i_t_delete_args *args = (struct spdk_nvmf_fc_hw_i_t_delete_args *)
			api_data->api_args;
	int rc = 0;
	struct spdk_nvmf_fc_nport *nport = NULL;
	struct spdk_nvmf_fc_adm_i_t_del_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport_iter = NULL;
	struct spdk_nvmf_fc_remote_port_info *rport = NULL;
	uint32_t num_rport = 0;	/* rports visited while searching (for logging) */
	char log_str[256];

	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "IT delete on nport:%d begin.\n", args->nport_handle);

	/*
	 * Make sure the nport port exists. If it does not, error out.
	 */
	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
	if (nport == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC nport:%d\n", args->nport_handle);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Find this ITN / rport (remote port).
	 */
	/* Match on the s_id/rpi pair, but only accept an rport that is still
	 * in the CREATED state (not already being torn down). */
	TAILQ_FOREACH(rport_iter, &nport->rem_port_list, link) {
		num_rport++;
		if ((rport_iter->s_id == args->s_id) &&
		    (rport_iter->rpi == args->rpi) &&
		    (rport_iter->rport_state == SPDK_NVMF_FC_OBJECT_CREATED)) {
			rport = rport_iter;
			break;
		}
	}

	/*
	 * We should find either zero or exactly one rport.
	 *
	 * If we find zero rports, that means that a previous request has
	 * removed the rport by the time we reached here. In this case,
	 * simply return out.
	 */
	if (rport == NULL) {
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have found exactly one rport. Allocate memory for callback data.
	 */
	cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_i_t_del_cb_data));
	if (NULL == cb_data) {
		SPDK_ERRLOG("Failed to allocate memory for cb_data for nport:%d.\n", args->nport_handle);
		rc = -ENOMEM;
		goto out;
	}

	/* Context handed to nvmf_fc_adm_i_t_delete_assoc, which owns (and
	 * eventually frees) it once IT-delete processing completes. */
	cb_data->nport = nport;
	cb_data->rport = rport;
	cb_data->port_handle = args->port_handle;
	cb_data->fc_cb_func = api_data->cb_func;
	cb_data->fc_cb_ctx = args->cb_ctx;

	/*
	 * Validate rport object state.
	 */
	if (rport->rport_state == SPDK_NVMF_FC_OBJECT_CREATED) {
		(void)spdk_nvmf_fc_rport_set_state(rport, SPDK_NVMF_FC_OBJECT_TO_BE_DELETED);
	} else if (rport->rport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
		/*
		 * Deletion of this rport already in progress. Register callback
		 * and return.
		 */
		/* TODO: Register callback in callback vector. For now, set the error and return. */
		rc = -ENODEV;
		goto out;
	} else {
		/* rport partially created/deleted */
		DEV_VERIFY(rport->rport_state == SPDK_NVMF_FC_OBJECT_ZOMBIE);
		DEV_VERIFY(!"Invalid rport_state");
		rc = -ENODEV;
		goto out;
	}

	/*
	 * We have successfully found a rport to delete. Call
	 * nvmf_fc_i_t_delete_assoc(), which will perform further
	 * IT-delete processing as well as free the cb_data.
	 */
	nvmf_fc_adm_i_t_delete_assoc(nport, rport, nvmf_fc_adm_i_t_delete_cb,
				     (void *)cb_data);

out:
	if (rc != 0) {
		/*
		 * We have entered here because either we encountered an
		 * error, or we did not find a rport to delete.
		 * As a result, we will not call the function
		 * nvmf_fc_i_t_delete_assoc() for further IT-delete
		 * processing. Therefore, execute the callback function now.
		 */
		if (cb_data) {
			free(cb_data);
		}
		if (api_data->cb_func != NULL) {
			(void)api_data->cb_func(args->port_handle, SPDK_FC_IT_DELETE, args->cb_ctx, rc);
		}
	}

	snprintf(log_str, sizeof(log_str),
		 "IT delete on nport:%d end. num_rport:%d rc = %d.\n",
		 args->nport_handle, num_rport, rc);

	if (rc != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	free(arg);
}
3597 
3598 /*
3599  * Process ABTS received
3600  */
3601 static void
3602 nvmf_fc_adm_evnt_abts_recv(void *arg)
3603 {
3604 	ASSERT_SPDK_FC_MASTER_THREAD();
3605 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3606 	struct spdk_nvmf_fc_abts_args *args = (struct spdk_nvmf_fc_abts_args *)api_data->api_args;
3607 	struct spdk_nvmf_fc_nport *nport = NULL;
3608 	int err = 0;
3609 
3610 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "FC ABTS received. RPI:%d, oxid:%d, rxid:%d\n", args->rpi,
3611 		      args->oxid, args->rxid);
3612 
3613 	/*
3614 	 * 1. Make sure the nport port exists.
3615 	 */
3616 	nport = spdk_nvmf_fc_nport_find(args->port_handle, args->nport_handle);
3617 	if (nport == NULL) {
3618 		SPDK_ERRLOG("Unable to find the SPDK FC nport %d\n", args->nport_handle);
3619 		err = -EINVAL;
3620 		goto out;
3621 	}
3622 
3623 	/*
3624 	 * 2. If the nport is in the process of being deleted, drop the ABTS.
3625 	 */
3626 	if (nport->nport_state == SPDK_NVMF_FC_OBJECT_TO_BE_DELETED) {
3627 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API,
3628 			      "FC ABTS dropped because the nport is being deleted; RPI:%d, oxid:%d, rxid:%d\n",
3629 			      args->rpi, args->oxid, args->rxid);
3630 		err = 0;
3631 		goto out;
3632 
3633 	}
3634 
3635 	/*
3636 	 * 3. Pass the received ABTS-LS to the library for handling.
3637 	 */
3638 	spdk_nvmf_fc_handle_abts_frame(nport, args->rpi, args->oxid, args->rxid);
3639 
3640 out:
3641 	if (api_data->cb_func != NULL) {
3642 		/*
3643 		 * Passing pointer to the args struct as the first argument.
3644 		 * The cb_func should handle this appropriately.
3645 		 */
3646 		(void)api_data->cb_func(args->port_handle, SPDK_FC_ABTS_RECV, args, err);
3647 	} else {
3648 		/* No callback set, free the args */
3649 		free(args);
3650 	}
3651 
3652 	free(arg);
3653 }
3654 
3655 /*
3656  * Callback function for hw port quiesce.
3657  */
static void
nvmf_fc_adm_hw_port_quiesce_reset_cb(void *ctx, int err)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *reset_ctx =
		(struct spdk_nvmf_fc_adm_hw_port_reset_ctx *)ctx;
	struct spdk_nvmf_fc_hw_port_reset_args *args = reset_ctx->reset_args;
	spdk_nvmf_fc_callback cb_func = reset_ctx->reset_cb_func;
	struct spdk_nvmf_fc_queue_dump_info dump_info;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	char *dump_buf = NULL;
	uint32_t dump_buf_size = SPDK_FC_HW_DUMP_BUF_SIZE;

	/*
	 * Free the callback context struct.
	 */
	free(ctx);

	if (err != 0) {
		SPDK_ERRLOG("Port %d  quiesce operation failed.\n", args->port_handle);
		goto out;
	}

	if (args->dump_queues == false) {
		/*
		 * Queues need not be dumped.
		 */
		goto out;
	}

	SPDK_ERRLOG("Dumping queues for HW port %d\n", args->port_handle);

	/*
	 * Get the fc port.
	 */
	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
	if (fc_port == NULL) {
		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Allocate memory for the dump buffer.
	 * This memory will be freed by FCT.
	 */
	dump_buf = (char *)calloc(1, dump_buf_size);
	if (dump_buf == NULL) {
		err = -ENOMEM;
		SPDK_ERRLOG("Memory allocation for dump buffer failed, SPDK FC port %d\n", args->port_handle);
		goto out;
	}
	/* Hand the buffer back to the caller through the args struct;
	 * ownership transfers to the FC driver ("FCT") per the note above. */
	*args->dump_buf  = (uint32_t *)dump_buf;
	dump_info.buffer = dump_buf;
	dump_info.offset = 0;

	/*
	 * Add the dump reason to the top of the buffer.
	 */
	spdk_nvmf_fc_dump_buf_print(&dump_info, "%s\n", args->reason);

	/*
	 * Dump the hwqp.
	 */
	spdk_nvmf_fc_dump_all_queues(fc_port, &dump_info);

out:
	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d reset done, queues_dumped = %d, rc = %d.\n",
		      args->port_handle, args->dump_queues, err);

	if (cb_func != NULL) {
		(void)cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
	}
}
3732 
3733 /*
3734  * HW port reset
3735 
3736  */
3737 static void
3738 nvmf_fc_adm_evnt_hw_port_reset(void *arg)
3739 {
3740 	ASSERT_SPDK_FC_MASTER_THREAD();
3741 	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
3742 	struct spdk_nvmf_fc_hw_port_reset_args *args = (struct spdk_nvmf_fc_hw_port_reset_args *)
3743 			api_data->api_args;
3744 	struct spdk_nvmf_fc_port *fc_port = NULL;
3745 	struct spdk_nvmf_fc_adm_hw_port_reset_ctx *ctx = NULL;
3746 	int err = 0;
3747 
3748 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump\n", args->port_handle);
3749 
3750 	/*
3751 	 * Make sure the physical port exists.
3752 	 */
3753 	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
3754 	if (fc_port == NULL) {
3755 		SPDK_ERRLOG("Unable to find the SPDK FC port %d\n", args->port_handle);
3756 		err = -EINVAL;
3757 		goto out;
3758 	}
3759 
3760 	/*
3761 	 * Save the reset event args and the callback in a context struct.
3762 	 */
3763 	ctx = calloc(1, sizeof(struct spdk_nvmf_fc_adm_hw_port_reset_ctx));
3764 
3765 	if (ctx == NULL) {
3766 		err = -ENOMEM;
3767 		SPDK_ERRLOG("Memory allocation for reset ctx failed, SPDK FC port %d\n", args->port_handle);
3768 		goto fail;
3769 	}
3770 
3771 	ctx->reset_args = arg;
3772 	ctx->reset_cb_func = api_data->cb_func;
3773 
3774 	/*
3775 	 * Quiesce the hw port.
3776 	 */
3777 	err = nvmf_fc_adm_hw_port_quiesce(fc_port, ctx, nvmf_fc_adm_hw_port_quiesce_reset_cb);
3778 	if (err != 0) {
3779 		goto fail;
3780 	}
3781 
3782 	/*
3783 	 * Once the ports are successfully quiesced the reset processing
3784 	 * will continue in the callback function: spdk_fc_port_quiesce_reset_cb
3785 	 */
3786 	return;
3787 fail:
3788 	free(ctx);
3789 
3790 out:
3791 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "HW port %d dump done, rc = %d.\n", args->port_handle,
3792 		      err);
3793 
3794 	if (api_data->cb_func != NULL) {
3795 		(void)api_data->cb_func(args->port_handle, SPDK_FC_HW_PORT_RESET, args->cb_ctx, err);
3796 	}
3797 
3798 	free(arg);
3799 }
3800 
3801 /*
3802  * Process a link break event on a HW port.
3803  */
static void
nvmf_fc_adm_evnt_hw_port_link_break(void *arg)
{
	ASSERT_SPDK_FC_MASTER_THREAD();
	struct spdk_nvmf_fc_adm_api_data *api_data = (struct spdk_nvmf_fc_adm_api_data *)arg;
	struct spdk_nvmf_hw_port_link_break_args *args = (struct spdk_nvmf_hw_port_link_break_args *)
			api_data->api_args;
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err = 0;
	struct spdk_nvmf_fc_adm_port_link_break_cb_data *cb_data = NULL;
	struct spdk_nvmf_fc_nport *nport = NULL;
	uint32_t nport_deletes_sent = 0;	/* nport-delete events enqueued */
	uint32_t nport_deletes_skipped = 0;	/* nports not in CREATED state */
	struct spdk_nvmf_fc_nport_delete_args *nport_del_args = NULL;
	char log_str[256];

	/*
	 * Get the fc port using the port handle.
	 */
	fc_port = spdk_nvmf_fc_port_lookup(args->port_handle);
	if (!fc_port) {
		SPDK_ERRLOG("port link break: Unable to find the SPDK FC port %d\n",
			    args->port_handle);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Set the port state to offline, if it is not already.
	 */
	err = spdk_nvmf_fc_port_set_offline(fc_port);
	if (err != 0) {
		/* Already offline is not treated as an error. */
		SPDK_ERRLOG("port link break: HW port %d already offline. rc = %d\n",
			    fc_port->port_hdl, err);
		err = 0;
		goto out;
	}

	/*
	 * Delete all the nports, if any.
	 */
	if (!TAILQ_EMPTY(&fc_port->nport_list)) {
		TAILQ_FOREACH(nport, &fc_port->nport_list, link) {
			/* Skipped the nports that are not in CREATED state */
			if (nport->nport_state != SPDK_NVMF_FC_OBJECT_CREATED) {
				nport_deletes_skipped++;
				continue;
			}

			/* Allocate memory for callback data. */
			/* One cb_data per nport-delete event; the event callback
			 * (nvmf_fc_adm_hw_port_link_break_cb) owns and frees it. */
			cb_data = calloc(1, sizeof(struct spdk_nvmf_fc_adm_port_link_break_cb_data));
			if (NULL == cb_data) {
				SPDK_ERRLOG("port link break: Failed to allocate memory for cb_data %d.\n",
					    args->port_handle);
				err = -ENOMEM;
				goto out;
			}
			cb_data->args = args;
			cb_data->cb_func = api_data->cb_func;
			nport_del_args = &cb_data->nport_del_args;
			nport_del_args->port_handle = args->port_handle;
			nport_del_args->nport_handle = nport->nport_hdl;
			nport_del_args->cb_ctx = cb_data;

			spdk_nvmf_fc_master_enqueue_event(SPDK_FC_NPORT_DELETE,
							  (void *)nport_del_args,
							  nvmf_fc_adm_hw_port_link_break_cb);

			nport_deletes_sent++;
		}
	}

	if (nport_deletes_sent == 0 && err == 0) {
		/*
		 * Mark the hwqps as offline and unregister the pollers.
		 */
		(void)nvmf_fc_adm_port_hwqp_offline_del_poller(fc_port);
	}

out:
	snprintf(log_str, sizeof(log_str),
		 "port link break done: port:%d nport_deletes_sent:%d nport_deletes_skipped:%d rc:%d.\n",
		 args->port_handle, nport_deletes_sent, nport_deletes_skipped, err);

	if (err != 0) {
		SPDK_ERRLOG("%s", log_str);
	} else {
		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "%s", log_str);
	}

	if ((api_data->cb_func != NULL) && (nport_deletes_sent == 0)) {
		/*
		 * No nport_deletes are sent, which would have eventually
		 * called the port_link_break callback. Therefore, call the
		 * port_link_break callback here.
		 */
		(void)api_data->cb_func(args->port_handle, SPDK_FC_LINK_BREAK, args->cb_ctx, err);
	}

	free(arg);
}
3905 
3906 static inline void
3907 nvmf_fc_adm_run_on_master_thread(spdk_msg_fn fn, void *args)
3908 {
3909 	if (spdk_nvmf_fc_get_master_thread()) {
3910 		spdk_thread_send_msg(spdk_nvmf_fc_get_master_thread(), fn, args);
3911 	}
3912 }
3913 
3914 /*
3915  * Queue up an event in the SPDK masters event queue.
3916  * Used by the FC driver to notify the SPDK master of FC related events.
3917  */
3918 int
3919 spdk_nvmf_fc_master_enqueue_event(enum spdk_fc_event event_type, void *args,
3920 				  spdk_nvmf_fc_callback cb_func)
3921 {
3922 	int err = 0;
3923 	struct spdk_nvmf_fc_adm_api_data *api_data = NULL;
3924 
3925 	SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d.\n", event_type);
3926 
3927 	if (event_type >= SPDK_FC_EVENT_MAX) {
3928 		SPDK_ERRLOG("Invalid spdk_fc_event_t %d.\n", event_type);
3929 		err = -EINVAL;
3930 		goto done;
3931 	}
3932 
3933 	if (args == NULL) {
3934 		SPDK_ERRLOG("Null args for event %d.\n", event_type);
3935 		err = -EINVAL;
3936 		goto done;
3937 	}
3938 
3939 	api_data = calloc(1, sizeof(*api_data));
3940 
3941 	if (api_data == NULL) {
3942 		SPDK_ERRLOG("Failed to alloc api data for event %d.\n", event_type);
3943 		err = -ENOMEM;
3944 		goto done;
3945 	}
3946 
3947 	api_data->api_args = args;
3948 	api_data->cb_func = cb_func;
3949 
3950 	switch (event_type) {
3951 	case SPDK_FC_HW_PORT_INIT:
3952 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_init,
3953 						 (void *)api_data);
3954 		break;
3955 
3956 	case SPDK_FC_HW_PORT_ONLINE:
3957 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_online,
3958 						 (void *)api_data);
3959 		break;
3960 
3961 	case SPDK_FC_HW_PORT_OFFLINE:
3962 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_offline,
3963 						 (void *)api_data);
3964 		break;
3965 
3966 	case SPDK_FC_NPORT_CREATE:
3967 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_create,
3968 						 (void *)api_data);
3969 		break;
3970 
3971 	case SPDK_FC_NPORT_DELETE:
3972 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_nport_delete,
3973 						 (void *)api_data);
3974 		break;
3975 
3976 	case SPDK_FC_IT_ADD:
3977 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_add,
3978 						 (void *)api_data);
3979 		break;
3980 
3981 	case SPDK_FC_IT_DELETE:
3982 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_i_t_delete,
3983 						 (void *)api_data);
3984 		break;
3985 
3986 	case SPDK_FC_ABTS_RECV:
3987 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_abts_recv,
3988 						 (void *)api_data);
3989 		break;
3990 
3991 	case SPDK_FC_LINK_BREAK:
3992 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_link_break,
3993 						 (void *)api_data);
3994 		break;
3995 
3996 	case SPDK_FC_HW_PORT_RESET:
3997 		nvmf_fc_adm_run_on_master_thread(nvmf_fc_adm_evnt_hw_port_reset,
3998 						 (void *)api_data);
3999 		break;
4000 
4001 	case SPDK_FC_UNRECOVERABLE_ERR:
4002 	default:
4003 		SPDK_ERRLOG("Invalid spdk_fc_event_t: %d\n", event_type);
4004 		err = -EINVAL;
4005 		break;
4006 	}
4007 
4008 done:
4009 
4010 	if (err == 0) {
4011 		SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_ADM_API, "Enqueue event %d done successfully\n", event_type);
4012 	} else {
4013 		SPDK_ERRLOG("Enqueue event %d failed, err = %d\n", event_type, err);
4014 		if (api_data) {
4015 			free(api_data);
4016 		}
4017 	}
4018 
4019 	return err;
4020 }
4021 
/* Register the debug-log flags used by the admin API and FC transport code. */
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc_adm_api", SPDK_LOG_NVMF_FC_ADM_API);
SPDK_LOG_REGISTER_COMPONENT("nvmf_fc", SPDK_LOG_NVMF_FC)
4024