1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #if defined(__FreeBSD__)
10 #include <sys/event.h>
11 #define SPDK_KEVENT
12 #else
13 #include <sys/epoll.h>
14 #define SPDK_EPOLL
15 #endif
16 
17 #if defined(__linux__)
18 #include <linux/errqueue.h>
19 #endif
20 
21 #include "spdk/env.h"
22 #include "spdk/log.h"
23 #include "spdk/pipe.h"
24 #include "spdk/sock.h"
25 #include "spdk/util.h"
26 #include "spdk/string.h"
27 #include "spdk_internal/sock.h"
28 #include "../sock_kernel.h"
29 
30 #include "openssl/crypto.h"
31 #include "openssl/err.h"
32 #include "openssl/ssl.h"
33 
34 #define MAX_TMPBUF 1024
35 #define PORTNUMLEN 32
36 
37 #if defined(SO_ZEROCOPY) && defined(MSG_ZEROCOPY)
38 #define SPDK_ZEROCOPY
39 #endif
40 
41 struct spdk_posix_sock {
42 	struct spdk_sock	base;
43 	int			fd;
44 
45 	uint32_t		sendmsg_idx;
46 
47 	struct spdk_pipe	*recv_pipe;
48 	void			*recv_buf;
49 	int			recv_buf_sz;
50 	bool			pipe_has_data;
51 	bool			socket_has_data;
52 	bool			zcopy;
53 
54 	int			placement_id;
55 
56 	SSL_CTX			*ctx;
57 	SSL			*ssl;
58 
59 	TAILQ_ENTRY(spdk_posix_sock)	link;
60 };
61 
62 TAILQ_HEAD(spdk_has_data_list, spdk_posix_sock);
63 
64 struct spdk_posix_sock_group_impl {
65 	struct spdk_sock_group_impl	base;
66 	int				fd;
67 	struct spdk_has_data_list	socks_with_data;
68 	int				placement_id;
69 };
70 
71 static struct spdk_sock_impl_opts g_spdk_posix_sock_impl_opts = {
72 	.recv_buf_size = MIN_SO_RCVBUF_SIZE,
73 	.send_buf_size = MIN_SO_SNDBUF_SIZE,
74 	.enable_recv_pipe = true,
75 	.enable_quickack = false,
76 	.enable_placement_id = PLACEMENT_NONE,
77 	.enable_zerocopy_send_server = true,
78 	.enable_zerocopy_send_client = false,
79 	.zerocopy_threshold = 0,
80 	.tls_version = 0,
81 	.enable_ktls = false,
82 	.psk_key = NULL,
83 	.psk_identity = NULL
84 };
85 
86 static struct spdk_sock_map g_map = {
87 	.entries = STAILQ_HEAD_INITIALIZER(g_map.entries),
88 	.mtx = PTHREAD_MUTEX_INITIALIZER
89 };
90 
91 __attribute__((destructor)) static void
92 posix_sock_map_cleanup(void)
93 {
94 	spdk_sock_map_cleanup(&g_map);
95 }
96 
97 #define __posix_sock(sock) (struct spdk_posix_sock *)sock
98 #define __posix_group_impl(group) (struct spdk_posix_sock_group_impl *)group
99 
100 static void
101 posix_sock_copy_impl_opts(struct spdk_sock_impl_opts *dest, const struct spdk_sock_impl_opts *src,
102 			  size_t len)
103 {
104 #define FIELD_OK(field) \
105 	offsetof(struct spdk_sock_impl_opts, field) + sizeof(src->field) <= len
106 
107 #define SET_FIELD(field) \
108 	if (FIELD_OK(field)) { \
109 		dest->field = src->field; \
110 	}
111 
112 	SET_FIELD(recv_buf_size);
113 	SET_FIELD(send_buf_size);
114 	SET_FIELD(enable_recv_pipe);
115 	SET_FIELD(enable_zerocopy_send);
116 	SET_FIELD(enable_quickack);
117 	SET_FIELD(enable_placement_id);
118 	SET_FIELD(enable_zerocopy_send_server);
119 	SET_FIELD(enable_zerocopy_send_client);
120 	SET_FIELD(zerocopy_threshold);
121 	SET_FIELD(tls_version);
122 	SET_FIELD(enable_ktls);
123 	SET_FIELD(psk_key);
124 	SET_FIELD(psk_identity);
125 
126 #undef SET_FIELD
127 #undef FIELD_OK
128 }
129 
130 static int
131 posix_sock_impl_get_opts(struct spdk_sock_impl_opts *opts, size_t *len)
132 {
133 	if (!opts || !len) {
134 		errno = EINVAL;
135 		return -1;
136 	}
137 
138 	assert(sizeof(*opts) >= *len);
139 	memset(opts, 0, *len);
140 
141 	posix_sock_copy_impl_opts(opts, &g_spdk_posix_sock_impl_opts, *len);
142 	*len = spdk_min(*len, sizeof(g_spdk_posix_sock_impl_opts));
143 
144 	return 0;
145 }
146 
147 static int
148 posix_sock_impl_set_opts(const struct spdk_sock_impl_opts *opts, size_t len)
149 {
150 	if (!opts) {
151 		errno = EINVAL;
152 		return -1;
153 	}
154 
155 	assert(sizeof(*opts) >= len);
156 	posix_sock_copy_impl_opts(&g_spdk_posix_sock_impl_opts, opts, len);
157 
158 	return 0;
159 }
160 
161 static void
162 posix_opts_get_impl_opts(const struct spdk_sock_opts *opts, struct spdk_sock_impl_opts *dest)
163 {
164 	/* Copy the default impl_opts first to cover cases when user's impl_opts is smaller */
165 	memcpy(dest, &g_spdk_posix_sock_impl_opts, sizeof(*dest));
166 
167 	if (opts->impl_opts != NULL) {
168 		assert(sizeof(*dest) >= opts->impl_opts_size);
169 		posix_sock_copy_impl_opts(dest, opts->impl_opts, opts->impl_opts_size);
170 	}
171 }
172 
173 static int
174 posix_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
175 		   char *caddr, int clen, uint16_t *cport)
176 {
177 	struct spdk_posix_sock *sock = __posix_sock(_sock);
178 	struct sockaddr_storage sa;
179 	socklen_t salen;
180 	int rc;
181 
182 	assert(sock != NULL);
183 
184 	memset(&sa, 0, sizeof sa);
185 	salen = sizeof sa;
186 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
187 	if (rc != 0) {
188 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
189 		return -1;
190 	}
191 
192 	switch (sa.ss_family) {
193 	case AF_UNIX:
194 		/* Acceptable connection types that don't have IPs */
195 		return 0;
196 	case AF_INET:
197 	case AF_INET6:
198 		/* Code below will get IP addresses */
199 		break;
200 	default:
201 		/* Unsupported socket family */
202 		return -1;
203 	}
204 
205 	rc = get_addr_str((struct sockaddr *)&sa, saddr, slen);
206 	if (rc != 0) {
207 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
208 		return -1;
209 	}
210 
211 	if (sport) {
212 		if (sa.ss_family == AF_INET) {
213 			*sport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
214 		} else if (sa.ss_family == AF_INET6) {
215 			*sport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
216 		}
217 	}
218 
219 	memset(&sa, 0, sizeof sa);
220 	salen = sizeof sa;
221 	rc = getpeername(sock->fd, (struct sockaddr *) &sa, &salen);
222 	if (rc != 0) {
223 		SPDK_ERRLOG("getpeername() failed (errno=%d)\n", errno);
224 		return -1;
225 	}
226 
227 	rc = get_addr_str((struct sockaddr *)&sa, caddr, clen);
228 	if (rc != 0) {
229 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
230 		return -1;
231 	}
232 
233 	if (cport) {
234 		if (sa.ss_family == AF_INET) {
235 			*cport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
236 		} else if (sa.ss_family == AF_INET6) {
237 			*cport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 enum posix_sock_create_type {
245 	SPDK_SOCK_CREATE_LISTEN,
246 	SPDK_SOCK_CREATE_CONNECT,
247 };
248 
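/*
 * Allocate (or resize) the socket's receive pipe. The pipe is a staging buffer
 * used to batch small reads: posix_sock_read() fills it with one large readv()
 * from the kernel socket and posix_sock_recv_from_pipe() then serves user reads
 * out of it without additional syscalls. Any data still buffered in the old
 * pipe is copied into the new one before the old pipe is destroyed.
 */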
249 static int
250 posix_sock_alloc_pipe(struct spdk_posix_sock *sock, int sz)
251 {
252 	uint8_t *new_buf;
253 	struct spdk_pipe *new_pipe;
254 	struct iovec siov[2];
255 	struct iovec diov[2];
256 	int sbytes;
257 	ssize_t bytes;
258 
259 	if (sock->recv_buf_sz == sz) {
260 		return 0;
261 	}
262 
263 	/* If the new size is 0, just free the pipe */
264 	if (sz == 0) {
265 		spdk_pipe_destroy(sock->recv_pipe);
266 		free(sock->recv_buf);
267 		sock->recv_pipe = NULL;
268 		sock->recv_buf = NULL;
269 		return 0;
270 	} else if (sz < MIN_SOCK_PIPE_SIZE) {
271 		SPDK_ERRLOG("The size of the pipe must be at least %d\n", MIN_SOCK_PIPE_SIZE);
272 		return -1;
273 	}
274 
275 	/* Round up to next 64 byte multiple */
276 	new_buf = calloc(SPDK_ALIGN_CEIL(sz, 64), sizeof(uint8_t));
277 	if (!new_buf) {
278 		SPDK_ERRLOG("socket recv buf allocation failed\n");
279 		return -ENOMEM;
280 	}
281 
282 	new_pipe = spdk_pipe_create(new_buf, sz);
283 	if (new_pipe == NULL) {
284 		SPDK_ERRLOG("socket pipe allocation failed\n");
285 		free(new_buf);
286 		return -ENOMEM;
287 	}
288 
289 	if (sock->recv_pipe != NULL) {
290 		/* Pull all of the data out of the old pipe */
291 		sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
292 		if (sbytes > sz) {
293 			/* Too much data to fit into the new pipe size */
294 			spdk_pipe_destroy(new_pipe);
295 			free(new_buf);
296 			return -EINVAL;
297 		}
298 
299 		sbytes = spdk_pipe_writer_get_buffer(new_pipe, sz, diov);
300 		assert(sbytes == sz);
301 
302 		bytes = spdk_iovcpy(siov, 2, diov, 2);
303 		spdk_pipe_writer_advance(new_pipe, bytes);
304 
305 		spdk_pipe_destroy(sock->recv_pipe);
306 		free(sock->recv_buf);
307 	}
308 
309 	sock->recv_buf_sz = sz;
310 	sock->recv_buf = new_buf;
311 	sock->recv_pipe = new_pipe;
312 
313 	return 0;
314 }
315 
316 static int
317 posix_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
318 {
319 	struct spdk_posix_sock *sock = __posix_sock(_sock);
320 	int min_size;
321 	int rc;
322 
323 	assert(sock != NULL);
324 
325 	if (_sock->impl_opts.enable_recv_pipe) {
326 		rc = posix_sock_alloc_pipe(sock, sz);
327 		if (rc) {
328 			return rc;
329 		}
330 	}
331 
332 	/* Set the kernel buffer size to at least the larger of MIN_SO_RCVBUF_SIZE
333 	 * and g_spdk_posix_sock_impl_opts.recv_buf_size. */
334 	min_size = spdk_max(MIN_SO_RCVBUF_SIZE, g_spdk_posix_sock_impl_opts.recv_buf_size);
335 
336 	if (sz < min_size) {
337 		sz = min_size;
338 	}
339 
340 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
341 	if (rc < 0) {
342 		return rc;
343 	}
344 
345 	_sock->impl_opts.recv_buf_size = sz;
346 
347 	return 0;
348 }
349 
350 static int
351 posix_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
352 {
353 	struct spdk_posix_sock *sock = __posix_sock(_sock);
354 	int min_size;
355 	int rc;
356 
357 	assert(sock != NULL);
358 
359 	/* Set the kernel buffer size to at least the larger of MIN_SO_SNDBUF_SIZE
360 	 * and g_spdk_posix_sock_impl_opts.send_buf_size. */
361 	min_size = spdk_max(MIN_SO_SNDBUF_SIZE, g_spdk_posix_sock_impl_opts.send_buf_size);
362 
363 	if (sz < min_size) {
364 		sz = min_size;
365 	}
366 
367 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
368 	if (rc < 0) {
369 		return rc;
370 	}
371 
372 	_sock->impl_opts.send_buf_size = sz;
373 
374 	return 0;
375 }
376 
377 static void
378 posix_sock_init(struct spdk_posix_sock *sock, bool enable_zero_copy)
379 {
380 #if defined(SPDK_ZEROCOPY) || defined(__linux__)
381 	int flag;
382 	int rc;
383 #endif
384 
385 #if defined(SPDK_ZEROCOPY)
386 	flag = 1;
387 
388 	if (enable_zero_copy) {
389 		/* Try to turn on zero copy sends */
390 		rc = setsockopt(sock->fd, SOL_SOCKET, SO_ZEROCOPY, &flag, sizeof(flag));
391 		if (rc == 0) {
392 			sock->zcopy = true;
393 		}
394 	}
395 #endif
396 
397 #if defined(__linux__)
398 	flag = 1;
399 
400 	if (sock->base.impl_opts.enable_quickack) {
401 		rc = setsockopt(sock->fd, IPPROTO_TCP, TCP_QUICKACK, &flag, sizeof(flag));
402 		if (rc != 0) {
403 			SPDK_ERRLOG("Failed to set TCP_QUICKACK\n");
404 		}
405 	}
406 
407 	spdk_sock_get_placement_id(sock->fd, sock->base.impl_opts.enable_placement_id,
408 				   &sock->placement_id);
409 
410 	if (sock->base.impl_opts.enable_placement_id == PLACEMENT_MARK) {
411 		/* Save placement_id */
412 		spdk_sock_map_insert(&g_map, sock->placement_id, NULL);
413 	}
414 #endif
415 }
416 
417 static struct spdk_posix_sock *
418 posix_sock_alloc(int fd, struct spdk_sock_impl_opts *impl_opts, bool enable_zero_copy)
419 {
420 	struct spdk_posix_sock *sock;
421 
422 	sock = calloc(1, sizeof(*sock));
423 	if (sock == NULL) {
424 		SPDK_ERRLOG("sock allocation failed\n");
425 		return NULL;
426 	}
427 
428 	sock->fd = fd;
429 	memcpy(&sock->base.impl_opts, impl_opts, sizeof(*impl_opts));
430 	posix_sock_init(sock, enable_zero_copy);
431 
432 	return sock;
433 }
434 
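/*
 * Create a TCP socket for the given addrinfo entry and apply the common
 * options (SO_RCVBUF/SO_SNDBUF, SO_REUSEADDR, TCP_NODELAY, plus SO_PRIORITY,
 * IPV6_V6ONLY and TCP_USER_TIMEOUT where applicable). Returns the fd on
 * success or -1 on failure; buffer-size failures are treated as non-fatal.
 */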
435 static int
436 posix_fd_create(struct addrinfo *res, struct spdk_sock_opts *opts,
437 		struct spdk_sock_impl_opts *impl_opts)
438 {
439 	int fd;
440 	int val = 1;
441 	int rc, sz;
442 #if defined(__linux__)
443 	int to;
444 #endif
445 
446 	fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
447 	if (fd < 0) {
448 		/* error */
449 		return -1;
450 	}
451 
452 	sz = impl_opts->recv_buf_size;
453 	rc = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
454 	if (rc) {
455 		/* Not fatal */
456 	}
457 
458 	sz = impl_opts->send_buf_size;
459 	rc = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
460 	if (rc) {
461 		/* Not fatal */
462 	}
463 
464 	rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof val);
465 	if (rc != 0) {
466 		close(fd);
467 		/* error */
468 		return -1;
469 	}
470 	rc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof val);
471 	if (rc != 0) {
472 		close(fd);
473 		/* error */
474 		return -1;
475 	}
476 
477 #if defined(SO_PRIORITY)
478 	if (opts->priority) {
479 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &opts->priority, sizeof val);
480 		if (rc != 0) {
481 			close(fd);
482 			/* error */
483 			return -1;
484 		}
485 	}
486 #endif
487 
488 	if (res->ai_family == AF_INET6) {
489 		rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof val);
490 		if (rc != 0) {
491 			close(fd);
492 			/* error */
493 			return -1;
494 		}
495 	}
496 
497 	if (opts->ack_timeout) {
498 #if defined(__linux__)
499 		to = opts->ack_timeout;
500 		rc = setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &to, sizeof(to));
501 		if (rc != 0) {
502 			close(fd);
503 			/* error */
504 			return -1;
505 		}
506 #else
507 		SPDK_WARNLOG("TCP_USER_TIMEOUT is not supported.\n");
508 #endif
509 	}
510 
511 	return fd;
512 }
513 
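/*
 * TLS-PSK server callback invoked by OpenSSL during the handshake. It checks
 * the client's PSK identity against the configured psk_identity and, on a
 * match, copies the unhexlified psk_key into the buffer provided by OpenSSL.
 * Returns the key length, or 0 to reject the handshake.
 */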
514 static unsigned int
515 posix_sock_tls_psk_server_cb(SSL *ssl,
516 			     const char *id,
517 			     unsigned char *psk,
518 			     unsigned int max_psk_len)
519 {
520 	long key_len;
521 	unsigned char *default_psk;
522 	struct spdk_sock_impl_opts *impl_opts;
523 
524 	impl_opts = SSL_get_app_data(ssl);
525 
526 	if (impl_opts->psk_key == NULL) {
527 		SPDK_ERRLOG("PSK is not set\n");
528 		goto err;
529 	}
530 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK ID %lu\n", strlen(impl_opts->psk_identity));
531 	if (id == NULL) {
532 		SPDK_ERRLOG("Received empty PSK ID\n");
533 		goto err;
534 	}
535 	SPDK_DEBUGLOG(sock_posix, "Received PSK ID '%s'\n", id);
536 	if (strcmp(impl_opts->psk_identity, id) != 0) {
537 		SPDK_ERRLOG("Unknown Client's PSK ID\n");
538 		goto err;
539 	}
540 
541 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK KEY %u\n", max_psk_len);
542 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
543 	if (default_psk == NULL) {
544 		SPDK_ERRLOG("Could not unhexlify PSK\n");
545 		goto err;
546 	}
547 	if (key_len > max_psk_len) {
548 		SPDK_ERRLOG("Insufficient buffer size to copy PSK\n");
549 		OPENSSL_free(default_psk);
550 		goto err;
551 	}
552 
553 	memcpy(psk, default_psk, key_len);
554 	OPENSSL_free(default_psk);
555 
556 	return key_len;
557 
558 err:
559 	return 0;
560 }
561 
562 static unsigned int
563 posix_sock_tls_psk_client_cb(SSL *ssl, const char *hint,
564 			     char *identity,
565 			     unsigned int max_identity_len,
566 			     unsigned char *psk,
567 			     unsigned int max_psk_len)
568 {
569 	long key_len;
570 	unsigned char *default_psk;
571 	struct spdk_sock_impl_opts *impl_opts;
572 
573 	impl_opts = SSL_get_app_data(ssl);
574 
575 	if (hint) {
576 		SPDK_DEBUGLOG(sock_posix, "Received PSK identity hint '%s'\n", hint);
577 	}
578 
579 	if (impl_opts->psk_key == NULL) {
580 		SPDK_ERRLOG("PSK is not set\n");
581 		goto err;
582 	}
583 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
584 	if (default_psk == NULL) {
585 		SPDK_ERRLOG("Could not unhexlify PSK\n");
586 		goto err;
587 	}
588 	if ((strlen(impl_opts->psk_identity) + 1 > max_identity_len)
589 	    || (key_len > max_psk_len)) {
590 		OPENSSL_free(default_psk);
591 		SPDK_ERRLOG("PSK ID or Key buffer is not sufficient\n");
592 		goto err;
593 	}
594 	spdk_strcpy_pad(identity, impl_opts->psk_identity, strlen(impl_opts->psk_identity), 0);
595 	SPDK_DEBUGLOG(sock_posix, "Sending PSK identity '%s'\n", identity);
596 
597 	memcpy(psk, default_psk, key_len);
598 	SPDK_DEBUGLOG(sock_posix, "Provided out-of-band (OOB) PSK for TLS1.3 client\n");
599 	OPENSSL_free(default_psk);
600 
601 	return key_len;
602 
603 err:
604 	return 0;
605 }
606 
607 static SSL_CTX *
608 posix_sock_create_ssl_context(const SSL_METHOD *method, struct spdk_sock_opts *opts,
609 			      struct spdk_sock_impl_opts *impl_opts)
610 {
611 	SSL_CTX *ctx;
612 	int tls_version = 0;
613 	bool ktls_enabled = false;
614 #ifdef SSL_OP_ENABLE_KTLS
615 	long options;
616 #endif
617 
618 	SSL_library_init();
619 	OpenSSL_add_all_algorithms();
620 	SSL_load_error_strings();
621 	/* Create an SSL context using the provided TLS method */
622 	ctx = SSL_CTX_new(method);
623 	if (!ctx) {
624 		SPDK_ERRLOG("SSL_CTX_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
625 		return NULL;
626 	}
627 	SPDK_DEBUGLOG(sock_posix, "SSL context created\n");
628 
629 	switch (impl_opts->tls_version) {
630 	case 0:
631 		/* auto-negotiation */
632 		break;
633 	case SPDK_TLS_VERSION_1_1:
634 		tls_version = TLS1_1_VERSION;
635 		break;
636 	case SPDK_TLS_VERSION_1_2:
637 		tls_version = TLS1_2_VERSION;
638 		break;
639 	case SPDK_TLS_VERSION_1_3:
640 		tls_version = TLS1_3_VERSION;
641 		break;
642 	default:
643 		SPDK_ERRLOG("Incorrect TLS version provided: %d\n", impl_opts->tls_version);
644 		goto err;
645 	}
646 
647 	if (tls_version) {
648 		SPDK_DEBUGLOG(sock_posix, "Hardening TLS version to '%d'='0x%X'\n", impl_opts->tls_version,
649 			      tls_version);
650 		if (!SSL_CTX_set_min_proto_version(ctx, tls_version)) {
651 			SPDK_ERRLOG("Unable to set Min TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
652 			goto err;
653 		}
654 		if (!SSL_CTX_set_max_proto_version(ctx, tls_version)) {
655 			SPDK_ERRLOG("Unable to set Max TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
656 			goto err;
657 		}
658 	}
659 	if (impl_opts->enable_ktls) {
660 		SPDK_DEBUGLOG(sock_posix, "Enabling kTLS offload\n");
661 #ifdef SSL_OP_ENABLE_KTLS
662 		options = SSL_CTX_set_options(ctx, SSL_OP_ENABLE_KTLS);
663 		ktls_enabled = options & SSL_OP_ENABLE_KTLS;
664 #else
665 		ktls_enabled = false;
666 #endif
667 		if (!ktls_enabled) {
668 			SPDK_ERRLOG("Unable to set kTLS offload via SSL_CTX_set_options(). Configure openssl with 'enable-ktls'\n");
669 			goto err;
670 		}
671 	}
672 
673 	return ctx;
674 
675 err:
676 	SSL_CTX_free(ctx);
677 	return NULL;
678 }
679 
680 static SSL *
681 ssl_sock_connect_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
682 {
683 	int rc;
684 	SSL *ssl;
685 	int ssl_get_error;
686 
687 	ssl = SSL_new(ctx);
688 	if (!ssl) {
689 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
690 		return NULL;
691 	}
692 	SSL_set_fd(ssl, fd);
693 	SSL_set_app_data(ssl, impl_opts);
694 	SSL_set_psk_client_callback(ssl, posix_sock_tls_psk_client_cb);
695 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
696 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
697 	while ((rc = SSL_connect(ssl)) != 1) {
698 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
699 		ssl_get_error = SSL_get_error(ssl, rc);
700 		SPDK_DEBUGLOG(sock_posix, "SSL_connect failed %d = SSL_connect(%p), %d = SSL_get_error(%p, %d)\n",
701 			      rc, ssl, ssl_get_error, ssl, rc);
702 		switch (ssl_get_error) {
703 		case SSL_ERROR_WANT_READ:
704 		case SSL_ERROR_WANT_WRITE:
705 			continue;
706 		default:
707 			break;
708 		}
709 		SPDK_ERRLOG("SSL_connect() failed, errno = %d\n", errno);
710 		SSL_free(ssl);
711 		return NULL;
712 	}
713 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
714 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
715 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
716 	return ssl;
717 }
718 
719 static SSL *
720 ssl_sock_accept_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
721 {
722 	int rc;
723 	SSL *ssl;
724 	int ssl_get_error;
725 
726 	ssl = SSL_new(ctx);
727 	if (!ssl) {
728 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
729 		return NULL;
730 	}
731 	SSL_set_fd(ssl, fd);
732 	SSL_set_app_data(ssl, impl_opts);
733 	SSL_set_psk_server_callback(ssl, posix_sock_tls_psk_server_cb);
734 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
735 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
736 	while ((rc = SSL_accept(ssl)) != 1) {
737 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
738 		ssl_get_error = SSL_get_error(ssl, rc);
739 		SPDK_DEBUGLOG(sock_posix, "SSL_accept failed %d = SSL_accept(%p), %d = SSL_get_error(%p, %d)\n", rc,
740 			      ssl, ssl_get_error, ssl, rc);
741 		switch (ssl_get_error) {
742 		case SSL_ERROR_WANT_READ:
743 		case SSL_ERROR_WANT_WRITE:
744 			continue;
745 		default:
746 			break;
747 		}
748 		SPDK_ERRLOG("SSL_accept() failed, errno = %d\n", errno);
749 		SSL_free(ssl);
750 		return NULL;
751 	}
752 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
753 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
754 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
755 	return ssl;
756 }
757 
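/*
 * readv()-like wrapper over SSL_read(). Each iovec element is filled with a
 * separate SSL_read() call; on a short read the loop stops and the partial
 * total is returned. When nothing was read, the SSL error is translated into
 * an errno (EAGAIN for retryable conditions, ENOTCONN otherwise).
 */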
758 static ssize_t
759 SSL_readv(SSL *ssl, const struct iovec *iov, int iovcnt)
760 {
761 	int i, rc = 0;
762 	ssize_t total = 0;
763 
764 	for (i = 0; i < iovcnt; i++) {
765 		rc = SSL_read(ssl, iov[i].iov_base, iov[i].iov_len);
766 
767 		if (rc > 0) {
768 			total += rc;
769 		}
770 		if (rc != (int)iov[i].iov_len) {
771 			break;
772 		}
773 	}
774 	if (total > 0) {
775 		errno = 0;
776 		return total;
777 	}
778 	switch (SSL_get_error(ssl, rc)) {
779 	case SSL_ERROR_ZERO_RETURN:
780 		errno = ENOTCONN;
781 		return 0;
782 	case SSL_ERROR_WANT_READ:
783 	case SSL_ERROR_WANT_WRITE:
784 	case SSL_ERROR_WANT_CONNECT:
785 	case SSL_ERROR_WANT_ACCEPT:
786 	case SSL_ERROR_WANT_X509_LOOKUP:
787 	case SSL_ERROR_WANT_ASYNC:
788 	case SSL_ERROR_WANT_ASYNC_JOB:
789 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
790 		errno = EAGAIN;
791 		return -1;
792 	case SSL_ERROR_SYSCALL:
793 	case SSL_ERROR_SSL:
794 		errno = ENOTCONN;
795 		return -1;
796 	default:
797 		errno = ENOTCONN;
798 		return -1;
799 	}
800 }
801 
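/*
 * writev()-like wrapper over SSL_write(), mirroring SSL_readv() above: iovec
 * elements are written one at a time and SSL errors are mapped to errno when
 * no data could be written.
 */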
802 static ssize_t
803 SSL_writev(SSL *ssl, struct iovec *iov, int iovcnt)
804 {
805 	int i, rc = 0;
806 	ssize_t total = 0;
807 
808 	for (i = 0; i < iovcnt; i++) {
809 		rc = SSL_write(ssl, iov[i].iov_base, iov[i].iov_len);
810 
811 		if (rc > 0) {
812 			total += rc;
813 		}
814 		if (rc != (int)iov[i].iov_len) {
815 			break;
816 		}
817 	}
818 	if (total > 0) {
819 		errno = 0;
820 		return total;
821 	}
822 	switch (SSL_get_error(ssl, rc)) {
823 	case SSL_ERROR_ZERO_RETURN:
824 		errno = ENOTCONN;
825 		return 0;
826 	case SSL_ERROR_WANT_READ:
827 	case SSL_ERROR_WANT_WRITE:
828 	case SSL_ERROR_WANT_CONNECT:
829 	case SSL_ERROR_WANT_ACCEPT:
830 	case SSL_ERROR_WANT_X509_LOOKUP:
831 	case SSL_ERROR_WANT_ASYNC:
832 	case SSL_ERROR_WANT_ASYNC_JOB:
833 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
834 		errno = EAGAIN;
835 		return -1;
836 	case SSL_ERROR_SYSCALL:
837 	case SSL_ERROR_SSL:
838 		errno = ENOTCONN;
839 		return -1;
840 	default:
841 		errno = ENOTCONN;
842 		return -1;
843 	}
844 }
845 
846 static struct spdk_sock *
847 posix_sock_create(const char *ip, int port,
848 		  enum posix_sock_create_type type,
849 		  struct spdk_sock_opts *opts,
850 		  bool enable_ssl)
851 {
852 	struct spdk_posix_sock *sock;
853 	struct spdk_sock_impl_opts impl_opts;
854 	char buf[MAX_TMPBUF];
855 	char portnum[PORTNUMLEN];
856 	char *p;
857 	struct addrinfo hints, *res, *res0;
858 	int fd, flag;
859 	int rc;
860 	bool enable_zcopy_user_opts = true;
861 	bool enable_zcopy_impl_opts = true;
862 	SSL_CTX *ctx = 0;
863 	SSL *ssl = 0;
864 
865 	assert(opts != NULL);
866 	posix_opts_get_impl_opts(opts, &impl_opts);
867 
868 	if (ip == NULL) {
869 		return NULL;
870 	}
871 	if (ip[0] == '[') {
872 		snprintf(buf, sizeof(buf), "%s", ip + 1);
873 		p = strchr(buf, ']');
874 		if (p != NULL) {
875 			*p = '\0';
876 		}
877 		ip = (const char *) &buf[0];
878 	}
879 
880 	snprintf(portnum, sizeof portnum, "%d", port);
881 	memset(&hints, 0, sizeof hints);
882 	hints.ai_family = PF_UNSPEC;
883 	hints.ai_socktype = SOCK_STREAM;
884 	hints.ai_flags = AI_NUMERICSERV;
885 	hints.ai_flags |= AI_PASSIVE;
886 	hints.ai_flags |= AI_NUMERICHOST;
887 	rc = getaddrinfo(ip, portnum, &hints, &res0);
888 	if (rc != 0) {
889 		SPDK_ERRLOG("getaddrinfo() failed %s (%d)\n", gai_strerror(rc), rc);
890 		return NULL;
891 	}
892 
893 	/* Try each resolved address: bind()+listen() or connect() depending on the create type */
894 	fd = -1;
895 	for (res = res0; res != NULL; res = res->ai_next) {
896 retry:
897 		fd = posix_fd_create(res, opts, &impl_opts);
898 		if (fd < 0) {
899 			continue;
900 		}
901 		if (type == SPDK_SOCK_CREATE_LISTEN) {
902 			rc = bind(fd, res->ai_addr, res->ai_addrlen);
903 			if (rc != 0) {
904 				SPDK_ERRLOG("bind() failed at port %d, errno = %d\n", port, errno);
905 				switch (errno) {
906 				case EINTR:
907 					/* interrupted? */
908 					close(fd);
909 					goto retry;
910 				case EADDRNOTAVAIL:
911 					SPDK_ERRLOG("IP address %s not available. "
912 						    "Verify IP address in config file "
913 						    "and make sure setup script is "
914 						    "run before starting spdk app.\n", ip);
915 				/* FALLTHROUGH */
916 				default:
917 					/* try next family */
918 					close(fd);
919 					fd = -1;
920 					continue;
921 				}
922 			}
923 			/* bind OK */
924 			rc = listen(fd, 512);
925 			if (rc != 0) {
926 				SPDK_ERRLOG("listen() failed, errno = %d\n", errno);
927 				close(fd);
928 				fd = -1;
929 				break;
930 			}
931 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_server;
932 		} else if (type == SPDK_SOCK_CREATE_CONNECT) {
933 			rc = connect(fd, res->ai_addr, res->ai_addrlen);
934 			if (rc != 0) {
935 				SPDK_ERRLOG("connect() failed, errno = %d\n", errno);
936 				/* try next family */
937 				close(fd);
938 				fd = -1;
939 				continue;
940 			}
941 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_client;
942 			if (enable_ssl) {
943 				ctx = posix_sock_create_ssl_context(TLS_client_method(), opts, &impl_opts);
944 				if (!ctx) {
945 					SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
946 					close(fd);
947 					fd = -1;
948 					break;
949 				}
950 				ssl = ssl_sock_connect_loop(ctx, fd, &impl_opts);
951 				if (!ssl) {
952 					SPDK_ERRLOG("ssl_sock_connect_loop() failed, errno = %d\n", errno);
953 					close(fd);
954 					fd = -1;
955 					SSL_CTX_free(ctx);
956 					break;
957 				}
958 			}
959 		}
960 
961 		flag = fcntl(fd, F_GETFL);
962 		if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
963 			SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
964 			SSL_free(ssl);
965 			SSL_CTX_free(ctx);
966 			close(fd);
967 			fd = -1;
968 			break;
969 		}
970 		break;
971 	}
972 	freeaddrinfo(res0);
973 
974 	if (fd < 0) {
975 		return NULL;
976 	}
977 
978 	/* Only enable zero copy for non-loopback and non-ssl sockets. */
979 	enable_zcopy_user_opts = opts->zcopy && !sock_is_loopback(fd) && !enable_ssl;
980 
981 	sock = posix_sock_alloc(fd, &impl_opts, enable_zcopy_user_opts && enable_zcopy_impl_opts);
982 	if (sock == NULL) {
983 		SPDK_ERRLOG("sock allocation failed\n");
984 		SSL_free(ssl);
985 		SSL_CTX_free(ctx);
986 		close(fd);
987 		return NULL;
988 	}
989 
990 	if (ctx) {
991 		sock->ctx = ctx;
992 	}
993 
994 	if (ssl) {
995 		sock->ssl = ssl;
996 	}
997 
998 	return &sock->base;
999 }
1000 
1001 static struct spdk_sock *
1002 posix_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
1003 {
1004 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, false);
1005 }
1006 
1007 static struct spdk_sock *
1008 posix_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
1009 {
1010 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, false);
1011 }
1012 
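/*
 * Accept a connection on the listening socket, switch the new fd to
 * non-blocking mode, re-apply SO_PRIORITY (it is not inherited) and, when
 * enable_ssl is set, perform the TLS server-side handshake before wrapping
 * the fd in a new spdk_posix_sock. Zero copy is inherited from the listener.
 */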
1013 static struct spdk_sock *
1014 _posix_sock_accept(struct spdk_sock *_sock, bool enable_ssl)
1015 {
1016 	struct spdk_posix_sock		*sock = __posix_sock(_sock);
1017 	struct sockaddr_storage		sa;
1018 	socklen_t			salen;
1019 	int				rc, fd;
1020 	struct spdk_posix_sock		*new_sock;
1021 	int				flag;
1022 	SSL_CTX *ctx = 0;
1023 	SSL *ssl = 0;
1024 
1025 	memset(&sa, 0, sizeof(sa));
1026 	salen = sizeof(sa);
1027 
1028 	assert(sock != NULL);
1029 
1030 	rc = accept(sock->fd, (struct sockaddr *)&sa, &salen);
1031 
1032 	if (rc == -1) {
1033 		return NULL;
1034 	}
1035 
1036 	fd = rc;
1037 
1038 	flag = fcntl(fd, F_GETFL);
1039 	if ((!(flag & O_NONBLOCK)) && (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0)) {
1040 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
1041 		close(fd);
1042 		return NULL;
1043 	}
1044 
1045 #if defined(SO_PRIORITY)
1046 	/* The priority is not inherited, so set it again on the accepted socket */
1047 	if (sock->base.opts.priority) {
1048 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &sock->base.opts.priority, sizeof(int));
1049 		if (rc != 0) {
1050 			close(fd);
1051 			return NULL;
1052 		}
1053 	}
1054 #endif
1055 
1056 	/* Establish SSL connection */
1057 	if (enable_ssl) {
1058 		ctx = posix_sock_create_ssl_context(TLS_server_method(), &sock->base.opts, &sock->base.impl_opts);
1059 		if (!ctx) {
1060 			SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
1061 			close(fd);
1062 			return NULL;
1063 		}
1064 		ssl = ssl_sock_accept_loop(ctx, fd, &sock->base.impl_opts);
1065 		if (!ssl) {
1066 			SPDK_ERRLOG("ssl_sock_accept_loop() failed, errno = %d\n", errno);
1067 			close(fd);
1068 			SSL_CTX_free(ctx);
1069 			return NULL;
1070 		}
1071 	}
1072 
1073 	/* Inherit the zero copy feature from the listen socket */
1074 	new_sock = posix_sock_alloc(fd, &sock->base.impl_opts, sock->zcopy);
1075 	if (new_sock == NULL) {
1076 		close(fd);
1077 		SSL_free(ssl);
1078 		SSL_CTX_free(ctx);
1079 		return NULL;
1080 	}
1081 
1082 	if (ctx) {
1083 		new_sock->ctx = ctx;
1084 	}
1085 
1086 	if (ssl) {
1087 		new_sock->ssl = ssl;
1088 	}
1089 
1090 	return &new_sock->base;
1091 }
1092 
1093 static struct spdk_sock *
1094 posix_sock_accept(struct spdk_sock *_sock)
1095 {
1096 	return _posix_sock_accept(_sock, false);
1097 }
1098 
1099 static int
1100 posix_sock_close(struct spdk_sock *_sock)
1101 {
1102 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1103 
1104 	assert(TAILQ_EMPTY(&_sock->pending_reqs));
1105 
1106 	if (sock->ssl != NULL) {
1107 		SSL_shutdown(sock->ssl);
1108 	}
1109 
1110 	/* If the socket fails to close, the best choice is to
1111 	 * leak the fd but continue to free the rest of the sock
1112 	 * memory. */
1113 	close(sock->fd);
1114 
1115 	SSL_free(sock->ssl);
1116 	SSL_CTX_free(sock->ctx);
1117 
1118 	spdk_pipe_destroy(sock->recv_pipe);
1119 	free(sock->recv_buf);
1120 	free(sock);
1121 
1122 	return 0;
1123 }
1124 
1125 #ifdef SPDK_ZEROCOPY
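/*
 * Drain MSG_ZEROCOPY completion notifications from the socket's error queue.
 * Each sock_extended_err carries an [ee_info, ee_data] range of sendmsg call
 * indices; requests whose saved index (req->internal.offset) falls in that
 * range have been fully transmitted and can be completed.
 */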
1126 static int
1127 _sock_check_zcopy(struct spdk_sock *sock)
1128 {
1129 	struct spdk_posix_sock *psock = __posix_sock(sock);
1130 	struct msghdr msgh = {};
1131 	uint8_t buf[sizeof(struct cmsghdr) + sizeof(struct sock_extended_err)];
1132 	ssize_t rc;
1133 	struct sock_extended_err *serr;
1134 	struct cmsghdr *cm;
1135 	uint32_t idx;
1136 	struct spdk_sock_request *req, *treq;
1137 	bool found;
1138 
1139 	msgh.msg_control = buf;
1140 	msgh.msg_controllen = sizeof(buf);
1141 
1142 	while (true) {
1143 		rc = recvmsg(psock->fd, &msgh, MSG_ERRQUEUE);
1144 
1145 		if (rc < 0) {
1146 			if (errno == EWOULDBLOCK || errno == EAGAIN) {
1147 				return 0;
1148 			}
1149 
1150 			if (!TAILQ_EMPTY(&sock->pending_reqs)) {
1151 				SPDK_ERRLOG("Attempting to receive from ERRQUEUE yielded error, but pending list still has orphaned entries\n");
1152 			} else {
1153 				SPDK_WARNLOG("Recvmsg yielded an error!\n");
1154 			}
1155 			return 0;
1156 		}
1157 
1158 		cm = CMSG_FIRSTHDR(&msgh);
1159 		if (!(cm &&
1160 		      ((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) ||
1161 		       (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR)))) {
1162 			SPDK_WARNLOG("Unexpected cmsg level or type!\n");
1163 			return 0;
1164 		}
1165 
1166 		serr = (struct sock_extended_err *)CMSG_DATA(cm);
1167 		if (serr->ee_errno != 0 || serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
1168 			SPDK_WARNLOG("Unexpected extended error origin\n");
1169 			return 0;
1170 		}
1171 
1172 		/* Most of the time, the pending_reqs array is in the exact
1173 		 * order we need such that all of the requests to complete are
1174 		 * in order, in the front. It is guaranteed that all requests
1175 		 * belonging to the same sendmsg call are sequential, so once
1176 		 * we encounter one match we can stop looping as soon as a
1177 		 * non-match is found.
1178 		 */
1179 		for (idx = serr->ee_info; idx <= serr->ee_data; idx++) {
1180 			found = false;
1181 			TAILQ_FOREACH_SAFE(req, &sock->pending_reqs, internal.link, treq) {
1182 				if (!req->internal.is_zcopy) {
1183 					/* This wasn't a zcopy request. It was just waiting in line to complete */
1184 					rc = spdk_sock_request_put(sock, req, 0);
1185 					if (rc < 0) {
1186 						return rc;
1187 					}
1188 				} else if (req->internal.offset == idx) {
1189 					found = true;
1190 					rc = spdk_sock_request_put(sock, req, 0);
1191 					if (rc < 0) {
1192 						return rc;
1193 					}
1194 				} else if (found) {
1195 					break;
1196 				}
1197 			}
1198 		}
1199 	}
1200 
1201 	return 0;
1202 }
1203 #endif
1204 
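/*
 * Flush queued asynchronous writes. The queued requests are gathered into a
 * single iovec batch and sent with one sendmsg() (or SSL_writev() for TLS
 * sockets). Fully written requests are moved to the pending list; with zero
 * copy they stay there until the MSG_ZEROCOPY completion arrives, otherwise
 * they are completed immediately. Returns the number of bytes sent.
 */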
1205 static int
1206 _sock_flush(struct spdk_sock *sock)
1207 {
1208 	struct spdk_posix_sock *psock = __posix_sock(sock);
1209 	struct msghdr msg = {};
1210 	int flags;
1211 	struct iovec iovs[IOV_BATCH_SIZE];
1212 	int iovcnt;
1213 	int retval;
1214 	struct spdk_sock_request *req;
1215 	int i;
1216 	ssize_t rc, sent;
1217 	unsigned int offset;
1218 	size_t len;
1219 	bool is_zcopy = false;
1220 
1221 	/* Can't flush from within a callback or we end up with recursive calls */
1222 	if (sock->cb_cnt > 0) {
1223 		errno = EAGAIN;
1224 		return -1;
1225 	}
1226 
1227 #ifdef SPDK_ZEROCOPY
1228 	if (psock->zcopy) {
1229 		flags = MSG_ZEROCOPY | MSG_NOSIGNAL;
1230 	} else
1231 #endif
1232 	{
1233 		flags = MSG_NOSIGNAL;
1234 	}
1235 
1236 	iovcnt = spdk_sock_prep_reqs(sock, iovs, 0, NULL, &flags);
1237 	if (iovcnt == 0) {
1238 		return 0;
1239 	}
1240 
1241 #ifdef SPDK_ZEROCOPY
1242 	is_zcopy = flags & MSG_ZEROCOPY;
1243 #endif
1244 
1245 	/* Perform the vectored write */
1246 	msg.msg_iov = iovs;
1247 	msg.msg_iovlen = iovcnt;
1248 
1249 	if (psock->ssl) {
1250 		rc = SSL_writev(psock->ssl, iovs, iovcnt);
1251 	} else {
1252 		rc = sendmsg(psock->fd, &msg, flags);
1253 	}
1254 	if (rc <= 0) {
1255 		if (rc == 0 || errno == EAGAIN || errno == EWOULDBLOCK || (errno == ENOBUFS && psock->zcopy)) {
1256 			errno = EAGAIN;
1257 		}
1258 		return -1;
1259 	}
1260 
1261 	sent = rc;
1262 
1263 	if (is_zcopy) {
1264 		/* Handle overflow: req->internal.offset stores psock->sendmsg_idx - 1,
1265 		 * so sendmsg_idx must never wrap around to zero. */
1266 		if (spdk_unlikely(psock->sendmsg_idx == UINT32_MAX)) {
1267 			psock->sendmsg_idx = 1;
1268 		} else {
1269 			psock->sendmsg_idx++;
1270 		}
1271 	}
1272 
1273 	/* Consume the requests that were actually written */
1274 	req = TAILQ_FIRST(&sock->queued_reqs);
1275 	while (req) {
1276 		offset = req->internal.offset;
1277 
1278 		/* req->internal.is_zcopy is true when the whole req or part of it is sent with zerocopy */
1279 		req->internal.is_zcopy = is_zcopy;
1280 
1281 		for (i = 0; i < req->iovcnt; i++) {
1282 			/* Advance by the offset first */
1283 			if (offset >= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len) {
1284 				offset -= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len;
1285 				continue;
1286 			}
1287 
1288 			/* Calculate the remaining length of this element */
1289 			len = SPDK_SOCK_REQUEST_IOV(req, i)->iov_len - offset;
1290 
1291 			if (len > (size_t)rc) {
1292 				/* This element was partially sent. */
1293 				req->internal.offset += rc;
1294 				return sent;
1295 			}
1296 
1297 			offset = 0;
1298 			req->internal.offset += len;
1299 			rc -= len;
1300 		}
1301 
1302 		/* Handled a full request. */
1303 		spdk_sock_request_pend(sock, req);
1304 
1305 		if (!req->internal.is_zcopy && req == TAILQ_FIRST(&sock->pending_reqs)) {
1306 			/* The sendmsg syscall above isn't currently asynchronous,
1307 			 * so it's already done. */
1308 			retval = spdk_sock_request_put(sock, req, 0);
1309 			if (retval) {
1310 				break;
1311 			}
1312 		} else {
1313 			/* Re-use the offset field to hold the sendmsg call index. The
1314 			 * index is 0 based, so subtract one here because we've already
1315 			 * incremented above. */
1316 			req->internal.offset = psock->sendmsg_idx - 1;
1317 		}
1318 
1319 		if (rc == 0) {
1320 			break;
1321 		}
1322 
1323 		req = TAILQ_FIRST(&sock->queued_reqs);
1324 	}
1325 
1326 	return sent;
1327 }
1328 
1329 static int
1330 posix_sock_flush(struct spdk_sock *sock)
1331 {
1332 #ifdef SPDK_ZEROCOPY
1333 	struct spdk_posix_sock *psock = __posix_sock(sock);
1334 
1335 	if (psock->zcopy && !TAILQ_EMPTY(&sock->pending_reqs)) {
1336 		_sock_check_zcopy(sock);
1337 	}
1338 #endif
1339 
1340 	return _sock_flush(sock);
1341 }
1342 
1343 static ssize_t
1344 posix_sock_recv_from_pipe(struct spdk_posix_sock *sock, struct iovec *diov, int diovcnt)
1345 {
1346 	struct iovec siov[2];
1347 	int sbytes;
1348 	ssize_t bytes;
1349 	struct spdk_posix_sock_group_impl *group;
1350 
1351 	sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
1352 	if (sbytes < 0) {
1353 		errno = EINVAL;
1354 		return -1;
1355 	} else if (sbytes == 0) {
1356 		errno = EAGAIN;
1357 		return -1;
1358 	}
1359 
1360 	bytes = spdk_iovcpy(siov, 2, diov, diovcnt);
1361 
1362 	if (bytes == 0) {
1363 		/* The only way this happens is if diov is 0 length */
1364 		errno = EINVAL;
1365 		return -1;
1366 	}
1367 
1368 	spdk_pipe_reader_advance(sock->recv_pipe, bytes);
1369 
1370 	/* If we drained the pipe, mark it appropriately */
1371 	if (spdk_pipe_reader_bytes_available(sock->recv_pipe) == 0) {
1372 		assert(sock->pipe_has_data == true);
1373 
1374 		group = __posix_group_impl(sock->base.group_impl);
1375 		if (group && !sock->socket_has_data) {
1376 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1377 		}
1378 
1379 		sock->pipe_has_data = false;
1380 	}
1381 
1382 	return bytes;
1383 }
1384 
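/*
 * Refill the receive pipe with one large readv() (or SSL_readv()) from the
 * kernel socket and update the pipe_has_data/socket_has_data bookkeeping the
 * poll group uses to decide which sockets still need servicing.
 */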
1385 static inline ssize_t
1386 posix_sock_read(struct spdk_posix_sock *sock)
1387 {
1388 	struct iovec iov[2];
1389 	int bytes_avail, bytes_recvd;
1390 	struct spdk_posix_sock_group_impl *group;
1391 
1392 	bytes_avail = spdk_pipe_writer_get_buffer(sock->recv_pipe, sock->recv_buf_sz, iov);
1393 
1394 	if (bytes_avail <= 0) {
1395 		return bytes_avail;
1396 	}
1397 
1398 	if (sock->ssl) {
1399 		bytes_recvd = SSL_readv(sock->ssl, iov, 2);
1400 	} else {
1401 		bytes_recvd = readv(sock->fd, iov, 2);
1402 	}
1403 
1404 	assert(sock->pipe_has_data == false);
1405 
1406 	if (bytes_recvd <= 0) {
1407 		/* Errors count as draining the socket data */
1408 		if (sock->base.group_impl && sock->socket_has_data) {
1409 			group = __posix_group_impl(sock->base.group_impl);
1410 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1411 		}
1412 
1413 		sock->socket_has_data = false;
1414 
1415 		return bytes_recvd;
1416 	}
1417 
1418 	spdk_pipe_writer_advance(sock->recv_pipe, bytes_recvd);
1419 
1420 #if DEBUG
1421 	if (sock->base.group_impl) {
1422 		assert(sock->socket_has_data == true);
1423 	}
1424 #endif
1425 
1426 	sock->pipe_has_data = true;
1427 	if (bytes_recvd < bytes_avail) {
1428 		/* We drained the kernel socket entirely. */
1429 		sock->socket_has_data = false;
1430 	}
1431 
1432 	return bytes_recvd;
1433 }
1434 
1435 static ssize_t
1436 posix_sock_readv(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1437 {
1438 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1439 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(sock->base.group_impl);
1440 	int rc, i;
1441 	size_t len;
1442 
1443 	if (sock->recv_pipe == NULL) {
1444 		assert(sock->pipe_has_data == false);
1445 		if (group && sock->socket_has_data) {
1446 			sock->socket_has_data = false;
1447 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1448 		}
1449 		if (sock->ssl) {
1450 			return SSL_readv(sock->ssl, iov, iovcnt);
1451 		} else {
1452 			return readv(sock->fd, iov, iovcnt);
1453 		}
1454 	}
1455 
1456 	/* If the socket is not in a group, we must assume it always has
1457 	 * data waiting for us because it is not epolled */
1458 	if (!sock->pipe_has_data && (group == NULL || sock->socket_has_data)) {
1459 		/* If the user is receiving a sufficiently large amount of data,
1460 		 * receive directly to their buffers. */
1461 		len = 0;
1462 		for (i = 0; i < iovcnt; i++) {
1463 			len += iov[i].iov_len;
1464 		}
1465 
1466 		if (len >= MIN_SOCK_PIPE_SIZE) {
1467 			/* TODO: Should this detect if kernel socket is drained? */
1468 			if (sock->ssl) {
1469 				return SSL_readv(sock->ssl, iov, iovcnt);
1470 			} else {
1471 				return readv(sock->fd, iov, iovcnt);
1472 			}
1473 		}
1474 
1475 		/* Otherwise, do a big read into our pipe */
1476 		rc = posix_sock_read(sock);
1477 		if (rc <= 0) {
1478 			return rc;
1479 		}
1480 	}
1481 
1482 	return posix_sock_recv_from_pipe(sock, iov, iovcnt);
1483 }
1484 
1485 static ssize_t
1486 posix_sock_recv(struct spdk_sock *sock, void *buf, size_t len)
1487 {
1488 	struct iovec iov[1];
1489 
1490 	iov[0].iov_base = buf;
1491 	iov[0].iov_len = len;
1492 
1493 	return posix_sock_readv(sock, iov, 1);
1494 }
1495 
1496 static void
1497 posix_sock_readv_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1498 {
1499 	req->cb_fn(req->cb_arg, -ENOTSUP);
1500 }
1501 
1502 static ssize_t
1503 posix_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1504 {
1505 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1506 	int rc;
1507 
1508 	/* In order to process a writev, we need to flush any asynchronous writes
1509 	 * first. */
1510 	rc = _sock_flush(_sock);
1511 	if (rc < 0) {
1512 		return rc;
1513 	}
1514 
1515 	if (!TAILQ_EMPTY(&_sock->queued_reqs)) {
1516 		/* We weren't able to flush all requests */
1517 		errno = EAGAIN;
1518 		return -1;
1519 	}
1520 
1521 	if (sock->ssl) {
1522 		return SSL_writev(sock->ssl, iov, iovcnt);
1523 	} else {
1524 		return writev(sock->fd, iov, iovcnt);
1525 	}
1526 }
1527 
1528 static void
1529 posix_sock_writev_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1530 {
1531 	int rc;
1532 
1533 	spdk_sock_request_queue(sock, req);
1534 
1535 	/* If there are a sufficient number queued, just flush them out immediately. */
1536 	if (sock->queued_iovcnt >= IOV_BATCH_SIZE) {
1537 		rc = _sock_flush(sock);
1538 		if (rc < 0 && errno != EAGAIN) {
1539 			spdk_sock_abort_requests(sock);
1540 		}
1541 	}
1542 }
1543 
1544 static int
1545 posix_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
1546 {
1547 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1548 	int val;
1549 	int rc;
1550 
1551 	assert(sock != NULL);
1552 
1553 	val = nbytes;
1554 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVLOWAT, &val, sizeof val);
1555 	if (rc != 0) {
1556 		return -1;
1557 	}
1558 	return 0;
1559 }
1560 
1561 static bool
1562 posix_sock_is_ipv6(struct spdk_sock *_sock)
1563 {
1564 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1565 	struct sockaddr_storage sa;
1566 	socklen_t salen;
1567 	int rc;
1568 
1569 	assert(sock != NULL);
1570 
1571 	memset(&sa, 0, sizeof sa);
1572 	salen = sizeof sa;
1573 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1574 	if (rc != 0) {
1575 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1576 		return false;
1577 	}
1578 
1579 	return (sa.ss_family == AF_INET6);
1580 }
1581 
1582 static bool
1583 posix_sock_is_ipv4(struct spdk_sock *_sock)
1584 {
1585 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1586 	struct sockaddr_storage sa;
1587 	socklen_t salen;
1588 	int rc;
1589 
1590 	assert(sock != NULL);
1591 
1592 	memset(&sa, 0, sizeof sa);
1593 	salen = sizeof sa;
1594 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1595 	if (rc != 0) {
1596 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1597 		return false;
1598 	}
1599 
1600 	return (sa.ss_family == AF_INET);
1601 }
1602 
1603 static bool
1604 posix_sock_is_connected(struct spdk_sock *_sock)
1605 {
1606 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1607 	uint8_t byte;
1608 	int rc;
1609 
1610 	rc = recv(sock->fd, &byte, 1, MSG_PEEK);
1611 	if (rc == 0) {
1612 		return false;
1613 	}
1614 
1615 	if (rc < 0) {
1616 		if (errno == EAGAIN || errno == EWOULDBLOCK) {
1617 			return true;
1618 		}
1619 
1620 		return false;
1621 	}
1622 
1623 	return true;
1624 }
1625 
1626 static struct spdk_sock_group_impl *
1627 posix_sock_group_impl_get_optimal(struct spdk_sock *_sock, struct spdk_sock_group_impl *hint)
1628 {
1629 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1630 	struct spdk_sock_group_impl *group_impl;
1631 
1632 	if (sock->placement_id != -1) {
1633 		spdk_sock_map_lookup(&g_map, sock->placement_id, &group_impl, hint);
1634 		return group_impl;
1635 	}
1636 
1637 	return NULL;
1638 }
1639 
1640 static struct spdk_sock_group_impl *
1641 posix_sock_group_impl_create(void)
1642 {
1643 	struct spdk_posix_sock_group_impl *group_impl;
1644 	int fd;
1645 
1646 #if defined(SPDK_EPOLL)
1647 	fd = epoll_create1(0);
1648 #elif defined(SPDK_KEVENT)
1649 	fd = kqueue();
1650 #endif
1651 	if (fd == -1) {
1652 		return NULL;
1653 	}
1654 
1655 	group_impl = calloc(1, sizeof(*group_impl));
1656 	if (group_impl == NULL) {
1657 		SPDK_ERRLOG("group_impl allocation failed\n");
1658 		close(fd);
1659 		return NULL;
1660 	}
1661 
1662 	group_impl->fd = fd;
1663 	TAILQ_INIT(&group_impl->socks_with_data);
1664 	group_impl->placement_id = -1;
1665 
1666 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
1667 		spdk_sock_map_insert(&g_map, spdk_env_get_current_core(), &group_impl->base);
1668 		group_impl->placement_id = spdk_env_get_current_core();
1669 	}
1670 
1671 	return &group_impl->base;
1672 }
1673 
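/*
 * PLACEMENT_MARK mode: tag the socket with the poll group's placement id via
 * SO_MARK and record the group in the global placement map so that
 * group_impl_get_optimal() can later route sockets with the same mark to
 * this group.
 */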
1674 static void
1675 posix_sock_mark(struct spdk_posix_sock_group_impl *group, struct spdk_posix_sock *sock,
1676 		int placement_id)
1677 {
1678 #if defined(SO_MARK)
1679 	int rc;
1680 
1681 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_MARK,
1682 			&placement_id, sizeof(placement_id));
1683 	if (rc != 0) {
1684 		/* Not fatal */
1685 		SPDK_ERRLOG("Error setting SO_MARK\n");
1686 		return;
1687 	}
1688 
1689 	rc = spdk_sock_map_insert(&g_map, placement_id, &group->base);
1690 	if (rc != 0) {
1691 		/* Not fatal */
1692 		SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1693 		return;
1694 	}
1695 
1696 	sock->placement_id = placement_id;
1697 #endif
1698 }
1699 
1700 static void
1701 posix_sock_update_mark(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1702 {
1703 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1704 
1705 	if (group->placement_id == -1) {
1706 		group->placement_id = spdk_sock_map_find_free(&g_map);
1707 
1708 		/* If a free placement id is found, update existing sockets in this group */
1709 		if (group->placement_id != -1) {
1710 			struct spdk_sock  *sock, *tmp;
1711 
1712 			TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1713 				posix_sock_mark(group, __posix_sock(sock), group->placement_id);
1714 			}
1715 		}
1716 	}
1717 
1718 	if (group->placement_id != -1) {
1719 		/*
1720 		 * group placement id is already determined for this poll group.
1721 		 * Mark socket with group's placement id.
1722 		 */
1723 		posix_sock_mark(group, __posix_sock(_sock), group->placement_id);
1724 	}
1725 }
1726 
1727 static int
1728 posix_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1729 {
1730 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1731 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1732 	int rc;
1733 
1734 #if defined(SPDK_EPOLL)
1735 	struct epoll_event event;
1736 
1737 	memset(&event, 0, sizeof(event));
1738 	/* EPOLLERR is always on even if we don't set it, but be explicit for clarity */
1739 	event.events = EPOLLIN | EPOLLERR;
1740 	event.data.ptr = sock;
1741 
1742 	rc = epoll_ctl(group->fd, EPOLL_CTL_ADD, sock->fd, &event);
1743 #elif defined(SPDK_KEVENT)
1744 	struct kevent event;
1745 	struct timespec ts = {0};
1746 
1747 	EV_SET(&event, sock->fd, EVFILT_READ, EV_ADD, 0, 0, sock);
1748 
1749 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1750 #endif
1751 
1752 	if (rc != 0) {
1753 		return rc;
1754 	}
1755 
1756 	/* switched from another polling group due to scheduling */
1757 	if (spdk_unlikely(sock->recv_pipe != NULL  &&
1758 			  (spdk_pipe_reader_bytes_available(sock->recv_pipe) > 0))) {
1759 		sock->pipe_has_data = true;
1760 		sock->socket_has_data = false;
1761 		TAILQ_INSERT_TAIL(&group->socks_with_data, sock, link);
1762 	}
1763 
1764 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_MARK) {
1765 		posix_sock_update_mark(_group, _sock);
1766 	} else if (sock->placement_id != -1) {
1767 		rc = spdk_sock_map_insert(&g_map, sock->placement_id, &group->base);
1768 		if (rc != 0) {
1769 			SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1770 			/* Do not treat this as an error. The system will continue running. */
1771 		}
1772 	}
1773 
1774 	return rc;
1775 }
1776 
1777 static int
1778 posix_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1779 {
1780 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1781 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1782 	int rc;
1783 
1784 	if (sock->pipe_has_data || sock->socket_has_data) {
1785 		TAILQ_REMOVE(&group->socks_with_data, sock, link);
1786 		sock->pipe_has_data = false;
1787 		sock->socket_has_data = false;
1788 	}
1789 
1790 	if (sock->placement_id != -1) {
1791 		spdk_sock_map_release(&g_map, sock->placement_id);
1792 	}
1793 
1794 #if defined(SPDK_EPOLL)
1795 	struct epoll_event event;
1796 
1797 	/* The event parameter is ignored, but some old kernel versions still require it. */
1798 	rc = epoll_ctl(group->fd, EPOLL_CTL_DEL, sock->fd, &event);
1799 #elif defined(SPDK_KEVENT)
1800 	struct kevent event;
1801 	struct timespec ts = {0};
1802 
1803 	EV_SET(&event, sock->fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
1804 
1805 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1806 	if (rc == 0 && event.flags & EV_ERROR) {
1807 		rc = -1;
1808 		errno = event.data;
1809 	}
1810 #endif
1811 
1812 	spdk_sock_abort_requests(_sock);
1813 
1814 	return rc;
1815 }
1816 
1817 static int
1818 posix_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
1819 			   struct spdk_sock **socks)
1820 {
1821 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1822 	struct spdk_sock *sock, *tmp;
1823 	int num_events, i, rc;
1824 	struct spdk_posix_sock *psock, *ptmp;
1825 #if defined(SPDK_EPOLL)
1826 	struct epoll_event events[MAX_EVENTS_PER_POLL];
1827 #elif defined(SPDK_KEVENT)
1828 	struct kevent events[MAX_EVENTS_PER_POLL];
1829 	struct timespec ts = {0};
1830 #endif
1831 
1832 #ifdef SPDK_ZEROCOPY
1833 	/* When all of the following conditions are met
1834 	 * - non-blocking socket
1835 	 * - zero copy is enabled
1836 	 * - interrupts suppressed (i.e. busy polling)
1837 	 * - the NIC tx queue is full at the time sendmsg() is called
1838 	 * - epoll_wait determines there is an EPOLLIN event for the socket
1839 	 * then we can get into a situation where data we've sent is queued
1840 	 * up in the kernel network stack, but interrupts have been suppressed
1841 	 * because other traffic is flowing so the kernel misses the signal
1842 	 * to flush the software tx queue. If there wasn't incoming data
1843 	 * pending on the socket, then epoll_wait would have been sufficient
1844 	 * to kick off the send operation, but since there is a pending event
1845 	 * epoll_wait does not trigger the necessary operation.
1846 	 *
1847 	 * We deal with this by checking for all of the above conditions and
1848 	 * additionally looking for EPOLLIN events that were not consumed from
1849 	 * the last poll loop. We take this to mean that the upper layer is
1850 	 * unable to consume them because it is blocked waiting for resources
1851 	 * to free up, and those resources are most likely freed in response
1852 	 * to a pending asynchronous write completing.
1853 	 *
1854 	 * Additionally, sockets that have the same placement_id actually share
1855 	 * an underlying hardware queue. That means polling one of them is
1856 	 * equivalent to polling all of them. As a quick mechanism to avoid
1857 	 * making extra poll() calls, stash the last placement_id during the loop
1858 	 * and only poll if it's not the same. The overwhelmingly common case
1859 	 * is that all sockets in this list have the same placement_id because
1860 	 * SPDK is intentionally grouping sockets by that value, so even
1861 	 * though this won't stop all extra calls to poll(), it's very fast
1862 	 * and will catch all of them in practice.
1863 	 */
1864 	int last_placement_id = -1;
1865 
1866 	TAILQ_FOREACH(psock, &group->socks_with_data, link) {
1867 		if (psock->zcopy && psock->placement_id >= 0 &&
1868 		    psock->placement_id != last_placement_id) {
1869 			struct pollfd pfd = {psock->fd, POLLIN | POLLERR, 0};
1870 
1871 			poll(&pfd, 1, 0);
1872 			last_placement_id = psock->placement_id;
1873 		}
1874 	}
1875 #endif
1876 
1877 	/* This must be a TAILQ_FOREACH_SAFE because while flushing,
1878 	 * a completion callback could remove the sock from the
1879 	 * group. */
1880 	TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1881 		rc = _sock_flush(sock);
1882 		if (rc < 0 && errno != EAGAIN) {
1883 			spdk_sock_abort_requests(sock);
1884 		}
1885 	}
1886 
1887 	assert(max_events > 0);
1888 
1889 #if defined(SPDK_EPOLL)
1890 	num_events = epoll_wait(group->fd, events, max_events, 0);
1891 #elif defined(SPDK_KEVENT)
1892 	num_events = kevent(group->fd, NULL, 0, events, max_events, &ts);
1893 #endif
1894 
1895 	if (num_events == -1) {
1896 		return -1;
1897 	} else if (num_events == 0 && !TAILQ_EMPTY(&_group->socks)) {
1898 		sock = TAILQ_FIRST(&_group->socks);
1899 		psock = __posix_sock(sock);
1900 		/* poll() is called here to busy poll the queue associated with
1901 		 * the first socket in the list and potentially reap incoming data.
1902 		 */
1903 		if (sock->opts.priority) {
1904 			struct pollfd pfd = {0, 0, 0};
1905 
1906 			pfd.fd = psock->fd;
1907 			pfd.events = POLLIN | POLLERR;
1908 			poll(&pfd, 1, 0);
1909 		}
1910 	}
1911 
1912 	for (i = 0; i < num_events; i++) {
1913 #if defined(SPDK_EPOLL)
1914 		sock = events[i].data.ptr;
1915 		psock = __posix_sock(sock);
1916 
1917 #ifdef SPDK_ZEROCOPY
1918 		if (events[i].events & EPOLLERR) {
1919 			rc = _sock_check_zcopy(sock);
1920 			/* If the socket was closed or removed from
1921 			 * the group in response to a send ack, don't
1922 			 * add it to the array here. */
1923 			if (rc || sock->cb_fn == NULL) {
1924 				continue;
1925 			}
1926 		}
1927 #endif
1928 		if ((events[i].events & EPOLLIN) == 0) {
1929 			continue;
1930 		}
1931 
1932 #elif defined(SPDK_KEVENT)
1933 		sock = events[i].udata;
1934 		psock = __posix_sock(sock);
1935 #endif
1936 
1937 		/* If the socket is not already in the list, add it now */
1938 		if (!psock->socket_has_data && !psock->pipe_has_data) {
1939 			TAILQ_INSERT_TAIL(&group->socks_with_data, psock, link);
1940 		}
1941 		psock->socket_has_data = true;
1942 	}
1943 
1944 	num_events = 0;
1945 
1946 	TAILQ_FOREACH_SAFE(psock, &group->socks_with_data, link, ptmp) {
1947 		if (num_events == max_events) {
1948 			break;
1949 		}
1950 
1951 		/* If the socket's cb_fn is NULL, just remove it from the
1952 		 * list and do not add it to socks array */
1953 		if (spdk_unlikely(psock->base.cb_fn == NULL)) {
1954 			psock->socket_has_data = false;
1955 			psock->pipe_has_data = false;
1956 			TAILQ_REMOVE(&group->socks_with_data, psock, link);
1957 			continue;
1958 		}
1959 
1960 		socks[num_events++] = &psock->base;
1961 	}
1962 
1963 	/* Cycle the has_data list so that each time we poll things aren't
1964 	 * in the same order. Say we have 6 sockets in the list, named as follows:
1965 	 * A B C D E F
1966 	 * And all 6 sockets had epoll events, but max_events is only 3. That means
1967 	 * psock currently points at D. We want to rearrange the list to the following:
1968 	 * D E F A B C
1969 	 *
1970 	 * The variables below are named according to this example to make it easier to
1971 	 * follow the swaps.
1972 	 */
1973 	if (psock != NULL) {
1974 		struct spdk_posix_sock *pa, *pc, *pd, *pf;
1975 
1976 		/* Capture pointers to the elements we need */
1977 		pd = psock;
1978 		pc = TAILQ_PREV(pd, spdk_has_data_list, link);
1979 		pa = TAILQ_FIRST(&group->socks_with_data);
1980 		pf = TAILQ_LAST(&group->socks_with_data, spdk_has_data_list);
1981 
1982 		/* Break the link between C and D */
1983 		pc->link.tqe_next = NULL;
1984 
1985 		/* Connect F to A */
1986 		pf->link.tqe_next = pa;
1987 		pa->link.tqe_prev = &pf->link.tqe_next;
1988 
1989 		/* Fix up the list first/last pointers */
1990 		group->socks_with_data.tqh_first = pd;
1991 		group->socks_with_data.tqh_last = &pc->link.tqe_next;
1992 
1993 		/* D is in front of the list, make tqe prev pointer point to the head of list */
1994 		pd->link.tqe_prev = &group->socks_with_data.tqh_first;
1995 	}
1996 
1997 	return num_events;
1998 }
1999 
2000 static int
2001 posix_sock_group_impl_close(struct spdk_sock_group_impl *_group)
2002 {
2003 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
2004 	int rc;
2005 
2006 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
2007 		spdk_sock_map_release(&g_map, spdk_env_get_current_core());
2008 	}
2009 
2010 	rc = close(group->fd);
2011 	free(group);
2012 	return rc;
2013 }
2014 
2015 static struct spdk_net_impl g_posix_net_impl = {
2016 	.name		= "posix",
2017 	.getaddr	= posix_sock_getaddr,
2018 	.connect	= posix_sock_connect,
2019 	.listen		= posix_sock_listen,
2020 	.accept		= posix_sock_accept,
2021 	.close		= posix_sock_close,
2022 	.recv		= posix_sock_recv,
2023 	.readv		= posix_sock_readv,
2024 	.readv_async	= posix_sock_readv_async,
2025 	.writev		= posix_sock_writev,
2026 	.writev_async	= posix_sock_writev_async,
2027 	.flush		= posix_sock_flush,
2028 	.set_recvlowat	= posix_sock_set_recvlowat,
2029 	.set_recvbuf	= posix_sock_set_recvbuf,
2030 	.set_sendbuf	= posix_sock_set_sendbuf,
2031 	.is_ipv6	= posix_sock_is_ipv6,
2032 	.is_ipv4	= posix_sock_is_ipv4,
2033 	.is_connected	= posix_sock_is_connected,
2034 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2035 	.group_impl_create	= posix_sock_group_impl_create,
2036 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2037 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2038 	.group_impl_poll	= posix_sock_group_impl_poll,
2039 	.group_impl_close	= posix_sock_group_impl_close,
2040 	.get_opts	= posix_sock_impl_get_opts,
2041 	.set_opts	= posix_sock_impl_set_opts,
2042 };
2043 
2044 SPDK_NET_IMPL_REGISTER(posix, &g_posix_net_impl, DEFAULT_SOCK_PRIORITY + 1);
2045 
2046 static struct spdk_sock *
2047 ssl_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
2048 {
2049 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, true);
2050 }
2051 
2052 static struct spdk_sock *
2053 ssl_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
2054 {
2055 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, true);
2056 }
2057 
2058 static struct spdk_sock *
2059 ssl_sock_accept(struct spdk_sock *_sock)
2060 {
2061 	return _posix_sock_accept(_sock, true);
2062 }
2063 
2064 static struct spdk_net_impl g_ssl_net_impl = {
2065 	.name		= "ssl",
2066 	.getaddr	= posix_sock_getaddr,
2067 	.connect	= ssl_sock_connect,
2068 	.listen		= ssl_sock_listen,
2069 	.accept		= ssl_sock_accept,
2070 	.close		= posix_sock_close,
2071 	.recv		= posix_sock_recv,
2072 	.readv		= posix_sock_readv,
2073 	.writev		= posix_sock_writev,
2074 	.writev_async	= posix_sock_writev_async,
2075 	.flush		= posix_sock_flush,
2076 	.set_recvlowat	= posix_sock_set_recvlowat,
2077 	.set_recvbuf	= posix_sock_set_recvbuf,
2078 	.set_sendbuf	= posix_sock_set_sendbuf,
2079 	.is_ipv6	= posix_sock_is_ipv6,
2080 	.is_ipv4	= posix_sock_is_ipv4,
2081 	.is_connected	= posix_sock_is_connected,
2082 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2083 	.group_impl_create	= posix_sock_group_impl_create,
2084 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2085 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2086 	.group_impl_poll	= posix_sock_group_impl_poll,
2087 	.group_impl_close	= posix_sock_group_impl_close,
2088 	.get_opts	= posix_sock_impl_get_opts,
2089 	.set_opts	= posix_sock_impl_set_opts,
2090 };
2091 
2092 SPDK_NET_IMPL_REGISTER(ssl, &g_ssl_net_impl, DEFAULT_SOCK_PRIORITY);
2093 SPDK_LOG_REGISTER_COMPONENT(sock_posix)
2094