xref: /spdk/module/sock/posix/posix.c (revision 877573897ad52be4fa8989f7617bd655b87e05c4)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #if defined(__FreeBSD__)
10 #include <sys/event.h>
11 #define SPDK_KEVENT
12 #else
13 #include <sys/epoll.h>
14 #define SPDK_EPOLL
15 #endif
16 
17 #if defined(__linux__)
18 #include <linux/errqueue.h>
19 #endif
20 
21 #include "spdk/env.h"
22 #include "spdk/log.h"
23 #include "spdk/pipe.h"
24 #include "spdk/sock.h"
25 #include "spdk/util.h"
26 #include "spdk/string.h"
27 #include "spdk_internal/sock.h"
28 #include "../sock_kernel.h"
29 
30 #include "openssl/crypto.h"
31 #include "openssl/err.h"
32 #include "openssl/ssl.h"
33 
34 #define MAX_TMPBUF 1024
35 #define PORTNUMLEN 32
36 
37 #if defined(SO_ZEROCOPY) && defined(MSG_ZEROCOPY)
38 #define SPDK_ZEROCOPY
39 #endif
40 
41 struct spdk_posix_sock {
42 	struct spdk_sock	base;
43 	int			fd;
44 
45 	uint32_t		sendmsg_idx;
46 
47 	struct spdk_pipe	*recv_pipe;
48 	void			*recv_buf;
49 	int			recv_buf_sz;
50 	bool			pipe_has_data;
51 	bool			socket_has_data;
52 	bool			zcopy;
53 
54 	int			placement_id;
55 
56 	SSL_CTX			*ctx;
57 	SSL			*ssl;
58 
59 	TAILQ_ENTRY(spdk_posix_sock)	link;
60 };
61 
62 TAILQ_HEAD(spdk_has_data_list, spdk_posix_sock);
63 
64 struct spdk_posix_sock_group_impl {
65 	struct spdk_sock_group_impl	base;
66 	int				fd;
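	/* Sockets that currently have data available in their recv pipe or kernel socket */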
67 	struct spdk_has_data_list	socks_with_data;
68 	int				placement_id;
69 };
70 
71 static struct spdk_sock_impl_opts g_spdk_posix_sock_impl_opts = {
72 	.recv_buf_size = MIN_SO_RCVBUF_SIZE,
73 	.send_buf_size = MIN_SO_SNDBUF_SIZE,
74 	.enable_recv_pipe = true,
75 	.enable_quickack = false,
76 	.enable_placement_id = PLACEMENT_NONE,
77 	.enable_zerocopy_send_server = true,
78 	.enable_zerocopy_send_client = false,
79 	.zerocopy_threshold = 0,
80 	.tls_version = 0,
81 	.enable_ktls = false,
82 	.psk_key = NULL,
83 	.psk_identity = NULL
84 };
85 
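/* Global map from placement_id to socket group, used for placement-aware group lookups */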
86 static struct spdk_sock_map g_map = {
87 	.entries = STAILQ_HEAD_INITIALIZER(g_map.entries),
88 	.mtx = PTHREAD_MUTEX_INITIALIZER
89 };
90 
91 __attribute((destructor)) static void
92 posix_sock_map_cleanup(void)
93 {
94 	spdk_sock_map_cleanup(&g_map);
95 }
96 
97 #define __posix_sock(sock) (struct spdk_posix_sock *)sock
98 #define __posix_group_impl(group) (struct spdk_posix_sock_group_impl *)group
99 
100 static void
101 posix_sock_copy_impl_opts(struct spdk_sock_impl_opts *dest, const struct spdk_sock_impl_opts *src,
102 			  size_t len)
103 {
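	/* Copy only the fields that fit within the caller-provided length, so callers
	 * built against a smaller spdk_sock_impl_opts struct remain compatible. */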
104 #define FIELD_OK(field) \
105 	offsetof(struct spdk_sock_impl_opts, field) + sizeof(src->field) <= len
106 
107 #define SET_FIELD(field) \
108 	if (FIELD_OK(field)) { \
109 		dest->field = src->field; \
110 	}
111 
112 	SET_FIELD(recv_buf_size);
113 	SET_FIELD(send_buf_size);
114 	SET_FIELD(enable_recv_pipe);
115 	SET_FIELD(enable_zerocopy_send);
116 	SET_FIELD(enable_quickack);
117 	SET_FIELD(enable_placement_id);
118 	SET_FIELD(enable_zerocopy_send_server);
119 	SET_FIELD(enable_zerocopy_send_client);
120 	SET_FIELD(zerocopy_threshold);
121 	SET_FIELD(tls_version);
122 	SET_FIELD(enable_ktls);
123 	SET_FIELD(psk_key);
124 	SET_FIELD(psk_identity);
125 
126 #undef SET_FIELD
127 #undef FIELD_OK
128 }
129 
130 static int
131 posix_sock_impl_get_opts(struct spdk_sock_impl_opts *opts, size_t *len)
132 {
133 	if (!opts || !len) {
134 		errno = EINVAL;
135 		return -1;
136 	}
137 
138 	assert(sizeof(*opts) >= *len);
139 	memset(opts, 0, *len);
140 
141 	posix_sock_copy_impl_opts(opts, &g_spdk_posix_sock_impl_opts, *len);
142 	*len = spdk_min(*len, sizeof(g_spdk_posix_sock_impl_opts));
143 
144 	return 0;
145 }
146 
147 static int
148 posix_sock_impl_set_opts(const struct spdk_sock_impl_opts *opts, size_t len)
149 {
150 	if (!opts) {
151 		errno = EINVAL;
152 		return -1;
153 	}
154 
155 	assert(sizeof(*opts) >= len);
156 	posix_sock_copy_impl_opts(&g_spdk_posix_sock_impl_opts, opts, len);
157 
158 	return 0;
159 }
160 
161 static void
162 posix_opts_get_impl_opts(const struct spdk_sock_opts *opts, struct spdk_sock_impl_opts *dest)
163 {
164 	/* Copy the default impl_opts first to cover cases when user's impl_opts is smaller */
165 	memcpy(dest, &g_spdk_posix_sock_impl_opts, sizeof(*dest));
166 
167 	if (opts->impl_opts != NULL) {
168 		assert(sizeof(*dest) >= opts->impl_opts_size);
169 		posix_sock_copy_impl_opts(dest, opts->impl_opts, opts->impl_opts_size);
170 	}
171 }
172 
173 static int
174 posix_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
175 		   char *caddr, int clen, uint16_t *cport)
176 {
177 	struct spdk_posix_sock *sock = __posix_sock(_sock);
178 	struct sockaddr_storage sa;
179 	socklen_t salen;
180 	int rc;
181 
182 	assert(sock != NULL);
183 
184 	memset(&sa, 0, sizeof sa);
185 	salen = sizeof sa;
186 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
187 	if (rc != 0) {
188 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
189 		return -1;
190 	}
191 
192 	switch (sa.ss_family) {
193 	case AF_UNIX:
194 		/* Acceptable connection types that don't have IPs */
195 		return 0;
196 	case AF_INET:
197 	case AF_INET6:
198 		/* Code below will get IP addresses */
199 		break;
200 	default:
201 		/* Unsupported socket family */
202 		return -1;
203 	}
204 
205 	rc = get_addr_str((struct sockaddr *)&sa, saddr, slen);
206 	if (rc != 0) {
207 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
208 		return -1;
209 	}
210 
211 	if (sport) {
212 		if (sa.ss_family == AF_INET) {
213 			*sport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
214 		} else if (sa.ss_family == AF_INET6) {
215 			*sport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
216 		}
217 	}
218 
219 	memset(&sa, 0, sizeof sa);
220 	salen = sizeof sa;
221 	rc = getpeername(sock->fd, (struct sockaddr *) &sa, &salen);
222 	if (rc != 0) {
223 		SPDK_ERRLOG("getpeername() failed (errno=%d)\n", errno);
224 		return -1;
225 	}
226 
227 	rc = get_addr_str((struct sockaddr *)&sa, caddr, clen);
228 	if (rc != 0) {
229 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
230 		return -1;
231 	}
232 
233 	if (cport) {
234 		if (sa.ss_family == AF_INET) {
235 			*cport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
236 		} else if (sa.ss_family == AF_INET6) {
237 			*cport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 enum posix_sock_create_type {
245 	SPDK_SOCK_CREATE_LISTEN,
246 	SPDK_SOCK_CREATE_CONNECT,
247 };
248 
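/* Resize the socket's receive pipe to sz bytes (sz == 0 destroys the pipe),
 * preserving any data still buffered in the old pipe. */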
249 static int
250 posix_sock_alloc_pipe(struct spdk_posix_sock *sock, int sz)
251 {
252 	uint8_t *new_buf;
253 	struct spdk_pipe *new_pipe;
254 	struct iovec siov[2];
255 	struct iovec diov[2];
256 	int sbytes;
257 	ssize_t bytes;
258 
259 	if (sock->recv_buf_sz == sz) {
260 		return 0;
261 	}
262 
263 	/* If the new size is 0, just free the pipe */
264 	if (sz == 0) {
265 		spdk_pipe_destroy(sock->recv_pipe);
266 		free(sock->recv_buf);
267 		sock->recv_pipe = NULL;
268 		sock->recv_buf = NULL;
269 		return 0;
270 	} else if (sz < MIN_SOCK_PIPE_SIZE) {
271 		SPDK_ERRLOG("The size of the pipe must be at least %d\n", MIN_SOCK_PIPE_SIZE);
272 		return -1;
273 	}
274 
275 	/* Round up to next 64 byte multiple */
276 	new_buf = calloc(SPDK_ALIGN_CEIL(sz + 1, 64), sizeof(uint8_t));
277 	if (!new_buf) {
278 		SPDK_ERRLOG("socket recv buf allocation failed\n");
279 		return -ENOMEM;
280 	}
281 
282 	new_pipe = spdk_pipe_create(new_buf, sz + 1);
283 	if (new_pipe == NULL) {
284 		SPDK_ERRLOG("socket pipe allocation failed\n");
285 		free(new_buf);
286 		return -ENOMEM;
287 	}
288 
289 	if (sock->recv_pipe != NULL) {
290 		/* Pull all of the data out of the old pipe */
291 		sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
292 		if (sbytes > sz) {
293 			/* Too much data to fit into the new pipe size */
294 			spdk_pipe_destroy(new_pipe);
295 			free(new_buf);
296 			return -EINVAL;
297 		}
298 
299 		sbytes = spdk_pipe_writer_get_buffer(new_pipe, sz, diov);
300 		assert(sbytes == sz);
301 
302 		bytes = spdk_iovcpy(siov, 2, diov, 2);
303 		spdk_pipe_writer_advance(new_pipe, bytes);
304 
305 		spdk_pipe_destroy(sock->recv_pipe);
306 		free(sock->recv_buf);
307 	}
308 
309 	sock->recv_buf_sz = sz;
310 	sock->recv_buf = new_buf;
311 	sock->recv_pipe = new_pipe;
312 
313 	return 0;
314 }
315 
316 static int
317 posix_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
318 {
319 	struct spdk_posix_sock *sock = __posix_sock(_sock);
320 	int min_size;
321 	int rc;
322 
323 	assert(sock != NULL);
324 
325 	if (_sock->impl_opts.enable_recv_pipe) {
326 		rc = posix_sock_alloc_pipe(sock, sz);
327 		if (rc) {
328 			return rc;
329 		}
330 	}
331 
332 	/* Set the kernel buffer size to at least the larger of MIN_SO_RCVBUF_SIZE
333 	 * and g_spdk_posix_sock_impl_opts.recv_buf_size. */
334 	min_size = spdk_max(MIN_SO_RCVBUF_SIZE, g_spdk_posix_sock_impl_opts.recv_buf_size);
335 
336 	if (sz < min_size) {
337 		sz = min_size;
338 	}
339 
340 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
341 	if (rc < 0) {
342 		return rc;
343 	}
344 
345 	_sock->impl_opts.recv_buf_size = sz;
346 
347 	return 0;
348 }
349 
350 static int
351 posix_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
352 {
353 	struct spdk_posix_sock *sock = __posix_sock(_sock);
354 	int min_size;
355 	int rc;
356 
357 	assert(sock != NULL);
358 
359 	/* Set the kernel buffer size to at least the larger of MIN_SO_SNDBUF_SIZE
360 	 * and g_spdk_posix_sock_impl_opts.send_buf_size. */
361 	min_size = spdk_max(MIN_SO_SNDBUF_SIZE, g_spdk_posix_sock_impl_opts.send_buf_size);
362 
363 	if (sz < min_size) {
364 		sz = min_size;
365 	}
366 
367 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
368 	if (rc < 0) {
369 		return rc;
370 	}
371 
372 	_sock->impl_opts.send_buf_size = sz;
373 
374 	return 0;
375 }
376 
377 static void
378 posix_sock_init(struct spdk_posix_sock *sock, bool enable_zero_copy)
379 {
380 #if defined(SPDK_ZEROCOPY) || defined(__linux__)
381 	int flag;
382 	int rc;
383 #endif
384 
385 #if defined(SPDK_ZEROCOPY)
386 	flag = 1;
387 
388 	if (enable_zero_copy) {
389 		/* Try to turn on zero copy sends */
390 		rc = setsockopt(sock->fd, SOL_SOCKET, SO_ZEROCOPY, &flag, sizeof(flag));
391 		if (rc == 0) {
392 			sock->zcopy = true;
393 		}
394 	}
395 #endif
396 
397 #if defined(__linux__)
398 	flag = 1;
399 
400 	if (sock->base.impl_opts.enable_quickack) {
401 		rc = setsockopt(sock->fd, IPPROTO_TCP, TCP_QUICKACK, &flag, sizeof(flag));
402 		if (rc != 0) {
403 			SPDK_ERRLOG("Failed to set TCP_QUICKACK\n");
404 		}
405 	}
406 
407 	spdk_sock_get_placement_id(sock->fd, sock->base.impl_opts.enable_placement_id,
408 				   &sock->placement_id);
409 
410 	if (sock->base.impl_opts.enable_placement_id == PLACEMENT_MARK) {
411 		/* Save placement_id */
412 		spdk_sock_map_insert(&g_map, sock->placement_id, NULL);
413 	}
414 #endif
415 }
416 
417 static struct spdk_posix_sock *
418 posix_sock_alloc(int fd, struct spdk_sock_impl_opts *impl_opts, bool enable_zero_copy)
419 {
420 	struct spdk_posix_sock *sock;
421 
422 	sock = calloc(1, sizeof(*sock));
423 	if (sock == NULL) {
424 		SPDK_ERRLOG("sock allocation failed\n");
425 		return NULL;
426 	}
427 
428 	sock->fd = fd;
429 	memcpy(&sock->base.impl_opts, impl_opts, sizeof(*impl_opts));
430 	posix_sock_init(sock, enable_zero_copy);
431 
432 	return sock;
433 }
434 
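/* Create a socket for the given addrinfo entry and apply the configured buffer
 * sizes plus the SO_REUSEADDR, TCP_NODELAY, priority and ack-timeout options. */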
435 static int
436 posix_fd_create(struct addrinfo *res, struct spdk_sock_opts *opts,
437 		struct spdk_sock_impl_opts *impl_opts)
438 {
439 	int fd;
440 	int val = 1;
441 	int rc, sz;
442 #if defined(__linux__)
443 	int to;
444 #endif
445 
446 	fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
447 	if (fd < 0) {
448 		/* error */
449 		return -1;
450 	}
451 
452 	sz = impl_opts->recv_buf_size;
453 	rc = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
454 	if (rc) {
455 		/* Not fatal */
456 	}
457 
458 	sz = impl_opts->send_buf_size;
459 	rc = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
460 	if (rc) {
461 		/* Not fatal */
462 	}
463 
464 	rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof val);
465 	if (rc != 0) {
466 		close(fd);
467 		/* error */
468 		return -1;
469 	}
470 	rc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof val);
471 	if (rc != 0) {
472 		close(fd);
473 		/* error */
474 		return -1;
475 	}
476 
477 #if defined(SO_PRIORITY)
478 	if (opts->priority) {
479 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &opts->priority, sizeof val);
480 		if (rc != 0) {
481 			close(fd);
482 			/* error */
483 			return -1;
484 		}
485 	}
486 #endif
487 
488 	if (res->ai_family == AF_INET6) {
489 		rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof val);
490 		if (rc != 0) {
491 			close(fd);
492 			/* error */
493 			return -1;
494 		}
495 	}
496 
497 	if (opts->ack_timeout) {
498 #if defined(__linux__)
499 		to = opts->ack_timeout;
500 		rc = setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &to, sizeof(to));
501 		if (rc != 0) {
502 			close(fd);
503 			/* error */
504 			return -1;
505 		}
506 #else
507 		SPDK_WARNLOG("TCP_USER_TIMEOUT is not supported.\n");
508 #endif
509 	}
510 
511 	return fd;
512 }
513 
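/* TLS-PSK server callback: verify the client's PSK identity against the
 * configured psk_identity and copy the configured pre-shared key into psk. */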
514 static unsigned int
515 posix_sock_tls_psk_server_cb(SSL *ssl,
516 			     const char *id,
517 			     unsigned char *psk,
518 			     unsigned int max_psk_len)
519 {
520 	long key_len;
521 	unsigned char *default_psk;
522 	struct spdk_sock_impl_opts *impl_opts;
523 
524 	impl_opts = SSL_get_app_data(ssl);
525 
526 	if (impl_opts->psk_key == NULL) {
527 		SPDK_ERRLOG("PSK is not set\n");
528 		goto err;
529 	}
530 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK ID %lu\n", strlen(impl_opts->psk_identity));
531 	if (id == NULL) {
532 		SPDK_ERRLOG("Received empty PSK ID\n");
533 		goto err;
534 	}
535 	SPDK_DEBUGLOG(sock_posix, "Received PSK ID '%s'\n", id);
536 	if (strcmp(impl_opts->psk_identity, id) != 0) {
537 		SPDK_ERRLOG("Unknown Client's PSK ID\n");
538 		goto err;
539 	}
540 
541 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK KEY %u\n", max_psk_len);
542 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
543 	if (default_psk == NULL) {
544 		SPDK_ERRLOG("Could not unhexlify PSK\n");
545 		goto err;
546 	}
547 	if (key_len > max_psk_len) {
548 		SPDK_ERRLOG("Insufficient buffer size to copy PSK\n");
549 		OPENSSL_free(default_psk);
550 		goto err;
551 	}
552 
553 	memcpy(psk, default_psk, key_len);
554 	OPENSSL_free(default_psk);
555 
556 	return key_len;
557 
558 err:
559 	return 0;
560 }
561 
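/* TLS-PSK client callback: provide the configured psk_identity and copy the
 * configured pre-shared key into psk. */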
562 static unsigned int
563 posix_sock_tls_psk_client_cb(SSL *ssl, const char *hint,
564 			     char *identity,
565 			     unsigned int max_identity_len,
566 			     unsigned char *psk,
567 			     unsigned int max_psk_len)
568 {
569 	long key_len;
570 	unsigned char *default_psk;
571 	struct spdk_sock_impl_opts *impl_opts;
572 
573 	impl_opts = SSL_get_app_data(ssl);
574 
575 	if (hint) {
576 		SPDK_DEBUGLOG(sock_posix, "Received PSK identity hint '%s'\n", hint);
577 	}
578 
579 	if (impl_opts->psk_key == NULL) {
580 		SPDK_ERRLOG("PSK is not set\n");
581 		goto err;
582 	}
583 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
584 	if (default_psk == NULL) {
585 		SPDK_ERRLOG("Could not unhexlify PSK\n");
586 		goto err;
587 	}
588 	if ((strlen(impl_opts->psk_identity) + 1 > max_identity_len)
589 	    || (key_len > max_psk_len)) {
590 		OPENSSL_free(default_psk);
591 		SPDK_ERRLOG("PSK ID or Key buffer is not sufficient\n");
592 		goto err;
593 	}
594 	spdk_strcpy_pad(identity, impl_opts->psk_identity, strlen(impl_opts->psk_identity), 0);
595 	SPDK_DEBUGLOG(sock_posix, "Sending PSK identity '%s'\n", identity);
596 
597 	memcpy(psk, default_psk, key_len);
598 	SPDK_DEBUGLOG(sock_posix, "Provided out-of-band (OOB) PSK for TLS1.3 client\n");
599 	OPENSSL_free(default_psk);
600 
601 	return key_len;
602 
603 err:
604 	return 0;
605 }
606 
607 static SSL_CTX *
608 posix_sock_create_ssl_context(const SSL_METHOD *method, struct spdk_sock_opts *opts,
609 			      struct spdk_sock_impl_opts *impl_opts)
610 {
611 	SSL_CTX *ctx;
612 	int tls_version = 0;
613 	bool ktls_enabled = false;
614 #ifdef SSL_OP_ENABLE_KTLS
615 	long options;
616 #endif
617 
618 	SSL_library_init();
619 	OpenSSL_add_all_algorithms();
620 	SSL_load_error_strings();
621 	/* Create an SSL context for the given TLS method */
622 	ctx = SSL_CTX_new(method);
623 	if (!ctx) {
624 		SPDK_ERRLOG("SSL_CTX_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
625 		return NULL;
626 	}
627 	SPDK_DEBUGLOG(sock_posix, "SSL context created\n");
628 
629 	switch (impl_opts->tls_version) {
630 	case 0:
631 		/* auto-negotiation */
632 		break;
633 	case SPDK_TLS_VERSION_1_1:
634 		tls_version = TLS1_1_VERSION;
635 		break;
636 	case SPDK_TLS_VERSION_1_2:
637 		tls_version = TLS1_2_VERSION;
638 		break;
639 	case SPDK_TLS_VERSION_1_3:
640 		tls_version = TLS1_3_VERSION;
641 		break;
642 	default:
643 		SPDK_ERRLOG("Incorrect TLS version provided: %d\n", impl_opts->tls_version);
644 		goto err;
645 	}
646 
647 	if (tls_version) {
648 		SPDK_DEBUGLOG(sock_posix, "Hardening TLS version to '%d'='0x%X'\n", impl_opts->tls_version,
649 			      tls_version);
650 		if (!SSL_CTX_set_min_proto_version(ctx, tls_version)) {
651 			SPDK_ERRLOG("Unable to set Min TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
652 			goto err;
653 		}
654 		if (!SSL_CTX_set_max_proto_version(ctx, tls_version)) {
655 			SPDK_ERRLOG("Unable to set Max TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
656 			goto err;
657 		}
658 	}
659 	if (impl_opts->enable_ktls) {
660 		SPDK_DEBUGLOG(sock_posix, "Enabling kTLS offload\n");
661 #ifdef SSL_OP_ENABLE_KTLS
662 		options = SSL_CTX_set_options(ctx, SSL_OP_ENABLE_KTLS);
663 		ktls_enabled = options & SSL_OP_ENABLE_KTLS;
664 #else
665 		ktls_enabled = false;
666 #endif
667 		if (!ktls_enabled) {
668 			SPDK_ERRLOG("Unable to set kTLS offload via SSL_CTX_set_options(). Configure openssl with 'enable-ktls'\n");
669 			goto err;
670 		}
671 	}
672 
673 	return ctx;
674 
675 err:
676 	SSL_CTX_free(ctx);
677 	return NULL;
678 }
679 
680 static SSL *
681 ssl_sock_connect_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
682 {
683 	int rc;
684 	SSL *ssl;
685 	int ssl_get_error;
686 
687 	ssl = SSL_new(ctx);
688 	if (!ssl) {
689 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
690 		return NULL;
691 	}
692 	SSL_set_fd(ssl, fd);
693 	SSL_set_app_data(ssl, impl_opts);
694 	SSL_set_psk_client_callback(ssl, posix_sock_tls_psk_client_cb);
695 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
696 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
697 	while ((rc = SSL_connect(ssl)) != 1) {
698 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
699 		ssl_get_error = SSL_get_error(ssl, rc);
700 		SPDK_DEBUGLOG(sock_posix, "SSL_connect failed %d = SSL_connect(%p), %d = SSL_get_error(%p, %d)\n",
701 			      rc, ssl, ssl_get_error, ssl, rc);
702 		switch (ssl_get_error) {
703 		case SSL_ERROR_WANT_READ:
704 		case SSL_ERROR_WANT_WRITE:
705 			continue;
706 		default:
707 			break;
708 		}
709 		SPDK_ERRLOG("SSL_connect() failed, errno = %d\n", errno);
710 		SSL_free(ssl);
711 		return NULL;
712 	}
713 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
714 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
715 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
716 	return ssl;
717 }
718 
719 static SSL *
720 ssl_sock_accept_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
721 {
722 	int rc;
723 	SSL *ssl;
724 	int ssl_get_error;
725 
726 	ssl = SSL_new(ctx);
727 	if (!ssl) {
728 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
729 		return NULL;
730 	}
731 	SSL_set_fd(ssl, fd);
732 	SSL_set_app_data(ssl, impl_opts);
733 	SSL_set_psk_server_callback(ssl, posix_sock_tls_psk_server_cb);
734 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
735 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
736 	while ((rc = SSL_accept(ssl)) != 1) {
737 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
738 		ssl_get_error = SSL_get_error(ssl, rc);
739 		SPDK_DEBUGLOG(sock_posix, "SSL_accept failed %d = SSL_accept(%p), %d = SSL_get_error(%p, %d)\n", rc,
740 			      ssl, ssl_get_error, ssl, rc);
741 		switch (ssl_get_error) {
742 		case SSL_ERROR_WANT_READ:
743 		case SSL_ERROR_WANT_WRITE:
744 			continue;
745 		default:
746 			break;
747 		}
748 		SPDK_ERRLOG("SSL_accept() failed, errno = %d\n", errno);
749 		SSL_free(ssl);
750 		return NULL;
751 	}
752 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
753 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
754 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
755 	return ssl;
756 }
757 
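/* Emulate readv() on top of SSL_read(): fill each iovec in turn and map
 * OpenSSL error codes onto the errno values readv() callers expect. */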
758 static ssize_t
759 SSL_readv(SSL *ssl, const struct iovec *iov, int iovcnt)
760 {
761 	int i, rc = 0;
762 	ssize_t total = 0;
763 
764 	for (i = 0; i < iovcnt; i++) {
765 		rc = SSL_read(ssl, iov[i].iov_base, iov[i].iov_len);
766 
767 		if (rc > 0) {
768 			total += rc;
769 		}
770 		if (rc != (int)iov[i].iov_len) {
771 			break;
772 		}
773 	}
774 	if (total > 0) {
775 		errno = 0;
776 		return total;
777 	}
778 	switch (SSL_get_error(ssl, rc)) {
779 	case SSL_ERROR_ZERO_RETURN:
780 		errno = ENOTCONN;
781 		return 0;
782 	case SSL_ERROR_WANT_READ:
783 	case SSL_ERROR_WANT_WRITE:
784 	case SSL_ERROR_WANT_CONNECT:
785 	case SSL_ERROR_WANT_ACCEPT:
786 	case SSL_ERROR_WANT_X509_LOOKUP:
787 	case SSL_ERROR_WANT_ASYNC:
788 	case SSL_ERROR_WANT_ASYNC_JOB:
789 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
790 		errno = EAGAIN;
791 		return -1;
792 	case SSL_ERROR_SYSCALL:
793 	case SSL_ERROR_SSL:
794 		errno = ENOTCONN;
795 		return -1;
796 	default:
797 		errno = ENOTCONN;
798 		return -1;
799 	}
800 }
801 
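/* Emulate writev() on top of SSL_write(), mapping OpenSSL error codes onto
 * errno values the same way as SSL_readv() above. */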
802 static ssize_t
803 SSL_writev(SSL *ssl, struct iovec *iov, int iovcnt)
804 {
805 	int i, rc = 0;
806 	ssize_t total = 0;
807 
808 	for (i = 0; i < iovcnt; i++) {
809 		rc = SSL_write(ssl, iov[i].iov_base, iov[i].iov_len);
810 
811 		if (rc > 0) {
812 			total += rc;
813 		}
814 		if (rc != (int)iov[i].iov_len) {
815 			break;
816 		}
817 	}
818 	if (total > 0) {
819 		errno = 0;
820 		return total;
821 	}
822 	switch (SSL_get_error(ssl, rc)) {
823 	case SSL_ERROR_ZERO_RETURN:
824 		errno = ENOTCONN;
825 		return 0;
826 	case SSL_ERROR_WANT_READ:
827 	case SSL_ERROR_WANT_WRITE:
828 	case SSL_ERROR_WANT_CONNECT:
829 	case SSL_ERROR_WANT_ACCEPT:
830 	case SSL_ERROR_WANT_X509_LOOKUP:
831 	case SSL_ERROR_WANT_ASYNC:
832 	case SSL_ERROR_WANT_ASYNC_JOB:
833 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
834 		errno = EAGAIN;
835 		return -1;
836 	case SSL_ERROR_SYSCALL:
837 	case SSL_ERROR_SSL:
838 		errno = ENOTCONN;
839 		return -1;
840 	default:
841 		errno = ENOTCONN;
842 		return -1;
843 	}
844 }
845 
846 static struct spdk_sock *
847 posix_sock_create(const char *ip, int port,
848 		  enum posix_sock_create_type type,
849 		  struct spdk_sock_opts *opts,
850 		  bool enable_ssl)
851 {
852 	struct spdk_posix_sock *sock;
853 	struct spdk_sock_impl_opts impl_opts;
854 	char buf[MAX_TMPBUF];
855 	char portnum[PORTNUMLEN];
856 	char *p;
857 	struct addrinfo hints, *res, *res0;
858 	int fd, flag;
859 	int rc;
860 	bool enable_zcopy_user_opts = true;
861 	bool enable_zcopy_impl_opts = true;
862 	SSL_CTX *ctx = 0;
863 	SSL *ssl = 0;
864 
865 	assert(opts != NULL);
866 	posix_opts_get_impl_opts(opts, &impl_opts);
867 
868 	if (ip == NULL) {
869 		return NULL;
870 	}
871 	if (ip[0] == '[') {
872 		snprintf(buf, sizeof(buf), "%s", ip + 1);
873 		p = strchr(buf, ']');
874 		if (p != NULL) {
875 			*p = '\0';
876 		}
877 		ip = (const char *) &buf[0];
878 	}
879 
880 	snprintf(portnum, sizeof portnum, "%d", port);
881 	memset(&hints, 0, sizeof hints);
882 	hints.ai_family = PF_UNSPEC;
883 	hints.ai_socktype = SOCK_STREAM;
884 	hints.ai_flags = AI_NUMERICSERV;
885 	hints.ai_flags |= AI_PASSIVE;
886 	hints.ai_flags |= AI_NUMERICHOST;
887 	rc = getaddrinfo(ip, portnum, &hints, &res0);
888 	if (rc != 0) {
889 		SPDK_ERRLOG("getaddrinfo() failed %s (%d)\n", gai_strerror(rc), rc);
890 		return NULL;
891 	}
892 
893 	/* try listen */
894 	fd = -1;
895 	for (res = res0; res != NULL; res = res->ai_next) {
896 retry:
897 		fd = posix_fd_create(res, opts, &impl_opts);
898 		if (fd < 0) {
899 			continue;
900 		}
901 		if (type == SPDK_SOCK_CREATE_LISTEN) {
902 			rc = bind(fd, res->ai_addr, res->ai_addrlen);
903 			if (rc != 0) {
904 				SPDK_ERRLOG("bind() failed at port %d, errno = %d\n", port, errno);
905 				switch (errno) {
906 				case EINTR:
907 					/* interrupted? */
908 					close(fd);
909 					goto retry;
910 				case EADDRNOTAVAIL:
911 					SPDK_ERRLOG("IP address %s not available. "
912 						    "Verify IP address in config file "
913 						    "and make sure setup script is "
914 						    "run before starting spdk app.\n", ip);
915 				/* FALLTHROUGH */
916 				default:
917 					/* try next family */
918 					close(fd);
919 					fd = -1;
920 					continue;
921 				}
922 			}
923 			/* bind OK */
924 			rc = listen(fd, 512);
925 			if (rc != 0) {
926 				SPDK_ERRLOG("listen() failed, errno = %d\n", errno);
927 				close(fd);
928 				fd = -1;
929 				break;
930 			}
931 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_server;
932 		} else if (type == SPDK_SOCK_CREATE_CONNECT) {
933 			rc = connect(fd, res->ai_addr, res->ai_addrlen);
934 			if (rc != 0) {
935 				SPDK_ERRLOG("connect() failed, errno = %d\n", errno);
936 				/* try next family */
937 				close(fd);
938 				fd = -1;
939 				continue;
940 			}
941 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_client;
942 			if (enable_ssl) {
943 				ctx = posix_sock_create_ssl_context(TLS_client_method(), opts, &impl_opts);
944 				if (!ctx) {
945 					SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
946 					close(fd);
947 					fd = -1;
948 					break;
949 				}
950 				ssl = ssl_sock_connect_loop(ctx, fd, &impl_opts);
951 				if (!ssl) {
952 					SPDK_ERRLOG("ssl_sock_connect_loop() failed, errno = %d\n", errno);
953 					close(fd);
954 					fd = -1;
955 					SSL_CTX_free(ctx);
956 					break;
957 				}
958 			}
959 		}
960 
961 		flag = fcntl(fd, F_GETFL);
962 		if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
963 			SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
964 			SSL_free(ssl);
965 			SSL_CTX_free(ctx);
966 			close(fd);
967 			fd = -1;
968 			break;
969 		}
970 		break;
971 	}
972 	freeaddrinfo(res0);
973 
974 	if (fd < 0) {
975 		return NULL;
976 	}
977 
978 	/* Only enable zero copy for non-loopback and non-ssl sockets. */
979 	enable_zcopy_user_opts = opts->zcopy && !sock_is_loopback(fd) && !enable_ssl;
980 
981 	sock = posix_sock_alloc(fd, &impl_opts, enable_zcopy_user_opts && enable_zcopy_impl_opts);
982 	if (sock == NULL) {
983 		SPDK_ERRLOG("sock allocation failed\n");
984 		SSL_free(ssl);
985 		SSL_CTX_free(ctx);
986 		close(fd);
987 		return NULL;
988 	}
989 
990 	if (ctx) {
991 		sock->ctx = ctx;
992 	}
993 
994 	if (ssl) {
995 		sock->ssl = ssl;
996 	}
997 
998 	return &sock->base;
999 }
1000 
1001 static struct spdk_sock *
1002 posix_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
1003 {
1004 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, false);
1005 }
1006 
1007 static struct spdk_sock *
1008 posix_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
1009 {
1010 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, false);
1011 }
1012 
1013 static struct spdk_sock *
1014 _posix_sock_accept(struct spdk_sock *_sock, bool enable_ssl)
1015 {
1016 	struct spdk_posix_sock		*sock = __posix_sock(_sock);
1017 	struct sockaddr_storage		sa;
1018 	socklen_t			salen;
1019 	int				rc, fd;
1020 	struct spdk_posix_sock		*new_sock;
1021 	int				flag;
1022 	SSL_CTX *ctx = 0;
1023 	SSL *ssl = 0;
1024 
1025 	memset(&sa, 0, sizeof(sa));
1026 	salen = sizeof(sa);
1027 
1028 	assert(sock != NULL);
1029 
1030 	rc = accept(sock->fd, (struct sockaddr *)&sa, &salen);
1031 
1032 	if (rc == -1) {
1033 		return NULL;
1034 	}
1035 
1036 	fd = rc;
1037 
1038 	flag = fcntl(fd, F_GETFL);
1039 	if ((!(flag & O_NONBLOCK)) && (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0)) {
1040 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
1041 		close(fd);
1042 		return NULL;
1043 	}
1044 
1045 #if defined(SO_PRIORITY)
1046 	/* The priority is not inherited from the listen socket, so set it again here */
1047 	if (sock->base.opts.priority) {
1048 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &sock->base.opts.priority, sizeof(int));
1049 		if (rc != 0) {
1050 			close(fd);
1051 			return NULL;
1052 		}
1053 	}
1054 #endif
1055 
1056 	/* Establish SSL connection */
1057 	if (enable_ssl) {
1058 		ctx = posix_sock_create_ssl_context(TLS_server_method(), &sock->base.opts, &sock->base.impl_opts);
1059 		if (!ctx) {
1060 			SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
1061 			close(fd);
1062 			return NULL;
1063 		}
1064 		ssl = ssl_sock_accept_loop(ctx, fd, &sock->base.impl_opts);
1065 		if (!ssl) {
1066 			SPDK_ERRLOG("ssl_sock_accept_loop() failed, errno = %d\n", errno);
1067 			close(fd);
1068 			SSL_CTX_free(ctx);
1069 			return NULL;
1070 		}
1071 	}
1072 
1073 	/* Inherit the zero copy feature from the listen socket */
1074 	new_sock = posix_sock_alloc(fd, &sock->base.impl_opts, sock->zcopy);
1075 	if (new_sock == NULL) {
1076 		close(fd);
1077 		SSL_free(ssl);
1078 		SSL_CTX_free(ctx);
1079 		return NULL;
1080 	}
1081 
1082 	if (ctx) {
1083 		new_sock->ctx = ctx;
1084 	}
1085 
1086 	if (ssl) {
1087 		new_sock->ssl = ssl;
1088 	}
1089 
1090 	return &new_sock->base;
1091 }
1092 
1093 static struct spdk_sock *
1094 posix_sock_accept(struct spdk_sock *_sock)
1095 {
1096 	return _posix_sock_accept(_sock, false);
1097 }
1098 
1099 static int
1100 posix_sock_close(struct spdk_sock *_sock)
1101 {
1102 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1103 
1104 	assert(TAILQ_EMPTY(&_sock->pending_reqs));
1105 
1106 	if (sock->ssl != NULL) {
1107 		SSL_shutdown(sock->ssl);
1108 	}
1109 
1110 	/* If the socket fails to close, the best choice is to
1111 	 * leak the fd but continue to free the rest of the sock
1112 	 * memory. */
1113 	close(sock->fd);
1114 
1115 	SSL_free(sock->ssl);
1116 	SSL_CTX_free(sock->ctx);
1117 
1118 	spdk_pipe_destroy(sock->recv_pipe);
1119 	free(sock->recv_buf);
1120 	free(sock);
1121 
1122 	return 0;
1123 }
1124 
1125 #ifdef SPDK_ZEROCOPY
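/* Drain MSG_ZEROCOPY completion notifications from the socket's error queue
 * and complete the pending send requests that they acknowledge. */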
1126 static int
1127 _sock_check_zcopy(struct spdk_sock *sock)
1128 {
1129 	struct spdk_posix_sock *psock = __posix_sock(sock);
1130 	struct msghdr msgh = {};
1131 	uint8_t buf[sizeof(struct cmsghdr) + sizeof(struct sock_extended_err)];
1132 	ssize_t rc;
1133 	struct sock_extended_err *serr;
1134 	struct cmsghdr *cm;
1135 	uint32_t idx;
1136 	struct spdk_sock_request *req, *treq;
1137 	bool found;
1138 
1139 	msgh.msg_control = buf;
1140 	msgh.msg_controllen = sizeof(buf);
1141 
1142 	while (true) {
1143 		rc = recvmsg(psock->fd, &msgh, MSG_ERRQUEUE);
1144 
1145 		if (rc < 0) {
1146 			if (errno == EWOULDBLOCK || errno == EAGAIN) {
1147 				return 0;
1148 			}
1149 
1150 			if (!TAILQ_EMPTY(&sock->pending_reqs)) {
1151 				SPDK_ERRLOG("Attempting to receive from ERRQUEUE yielded error, but pending list still has orphaned entries\n");
1152 			} else {
1153 				SPDK_WARNLOG("Recvmsg yielded an error!\n");
1154 			}
1155 			return 0;
1156 		}
1157 
1158 		cm = CMSG_FIRSTHDR(&msgh);
1159 		if (!(cm &&
1160 		      ((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) ||
1161 		       (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR)))) {
1162 			SPDK_WARNLOG("Unexpected cmsg level or type!\n");
1163 			return 0;
1164 		}
1165 
1166 		serr = (struct sock_extended_err *)CMSG_DATA(cm);
1167 		if (serr->ee_errno != 0 || serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
1168 			SPDK_WARNLOG("Unexpected extended error origin\n");
1169 			return 0;
1170 		}
1171 
1172 		/* Most of the time, the pending_reqs list is in the exact
1173 		 * order we need such that all of the requests to complete are
1174 		 * in order, in the front. It is guaranteed that all requests
1175 		 * belonging to the same sendmsg call are sequential, so once
1176 		 * we encounter one match we can stop looping as soon as a
1177 		 * non-match is found.
1178 		 */
1179 		for (idx = serr->ee_info; idx <= serr->ee_data; idx++) {
1180 			found = false;
1181 			TAILQ_FOREACH_SAFE(req, &sock->pending_reqs, internal.link, treq) {
1182 				if (!req->internal.is_zcopy) {
1183 					/* This wasn't a zcopy request. It was just waiting in line to complete */
1184 					rc = spdk_sock_request_put(sock, req, 0);
1185 					if (rc < 0) {
1186 						return rc;
1187 					}
1188 				} else if (req->internal.offset == idx) {
1189 					found = true;
1190 					rc = spdk_sock_request_put(sock, req, 0);
1191 					if (rc < 0) {
1192 						return rc;
1193 					}
1194 				} else if (found) {
1195 					break;
1196 				}
1197 			}
1198 		}
1199 	}
1200 
1201 	return 0;
1202 }
1203 #endif
1204 
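/* Gather queued requests into a single iovec batch and submit it with one
 * sendmsg() (or SSL_writev() for TLS sockets). Fully written requests are
 * moved to the pending list and, for non-zero-copy sends, completed immediately. */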
1205 static int
1206 _sock_flush(struct spdk_sock *sock)
1207 {
1208 	struct spdk_posix_sock *psock = __posix_sock(sock);
1209 	struct msghdr msg = {};
1210 	int flags;
1211 	struct iovec iovs[IOV_BATCH_SIZE];
1212 	int iovcnt;
1213 	int retval;
1214 	struct spdk_sock_request *req;
1215 	int i;
1216 	ssize_t rc;
1217 	unsigned int offset;
1218 	size_t len;
1219 	bool is_zcopy = false;
1220 
1221 	/* Can't flush from within a callback or we end up with recursive calls */
1222 	if (sock->cb_cnt > 0) {
1223 		return 0;
1224 	}
1225 
1226 #ifdef SPDK_ZEROCOPY
1227 	if (psock->zcopy) {
1228 		flags = MSG_ZEROCOPY | MSG_NOSIGNAL;
1229 	} else
1230 #endif
1231 	{
1232 		flags = MSG_NOSIGNAL;
1233 	}
1234 
1235 	iovcnt = spdk_sock_prep_reqs(sock, iovs, 0, NULL, &flags);
1236 	if (iovcnt == 0) {
1237 		return 0;
1238 	}
1239 
1240 #ifdef SPDK_ZEROCOPY
1241 	is_zcopy = flags & MSG_ZEROCOPY;
1242 #endif
1243 
1244 	/* Perform the vectored write */
1245 	msg.msg_iov = iovs;
1246 	msg.msg_iovlen = iovcnt;
1247 
1248 	if (psock->ssl) {
1249 		rc = SSL_writev(psock->ssl, iovs, iovcnt);
1250 	} else {
1251 		rc = sendmsg(psock->fd, &msg, flags);
1252 	}
1253 	if (rc <= 0) {
1254 		if (errno == EAGAIN || errno == EWOULDBLOCK || (errno == ENOBUFS && psock->zcopy)) {
1255 			return 0;
1256 		}
1257 		return rc;
1258 	}
1259 
1260 	if (is_zcopy) {
1261 		/* Handle the overflow case: psock->sendmsg_idx - 1 is used as
1262 		 * req->internal.offset, so sendmsg_idx must never be zero. */
1263 		if (spdk_unlikely(psock->sendmsg_idx == UINT32_MAX)) {
1264 			psock->sendmsg_idx = 1;
1265 		} else {
1266 			psock->sendmsg_idx++;
1267 		}
1268 	}
1269 
1270 	/* Consume the requests that were actually written */
1271 	req = TAILQ_FIRST(&sock->queued_reqs);
1272 	while (req) {
1273 		offset = req->internal.offset;
1274 
1275 		/* req->internal.is_zcopy is true when the whole req or part of it is sent with zerocopy */
1276 		req->internal.is_zcopy = is_zcopy;
1277 
1278 		for (i = 0; i < req->iovcnt; i++) {
1279 			/* Advance by the offset first */
1280 			if (offset >= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len) {
1281 				offset -= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len;
1282 				continue;
1283 			}
1284 
1285 			/* Calculate the remaining length of this element */
1286 			len = SPDK_SOCK_REQUEST_IOV(req, i)->iov_len - offset;
1287 
1288 			if (len > (size_t)rc) {
1289 				/* This element was partially sent. */
1290 				req->internal.offset += rc;
1291 				return 0;
1292 			}
1293 
1294 			offset = 0;
1295 			req->internal.offset += len;
1296 			rc -= len;
1297 		}
1298 
1299 		/* Handled a full request. */
1300 		spdk_sock_request_pend(sock, req);
1301 
1302 		if (!req->internal.is_zcopy && req == TAILQ_FIRST(&sock->pending_reqs)) {
1303 			/* The sendmsg syscall above isn't currently asynchronous,
1304 			 * so it's already done. */
1305 			retval = spdk_sock_request_put(sock, req, 0);
1306 			if (retval) {
1307 				break;
1308 			}
1309 		} else {
1310 			/* Re-use the offset field to hold the sendmsg call index. The
1311 			 * index is 0 based, so subtract one here because we've already
1312 			 * incremented above. */
1313 			req->internal.offset = psock->sendmsg_idx - 1;
1314 		}
1315 
1316 		if (rc == 0) {
1317 			break;
1318 		}
1319 
1320 		req = TAILQ_FIRST(&sock->queued_reqs);
1321 	}
1322 
1323 	return 0;
1324 }
1325 
1326 static int
1327 posix_sock_flush(struct spdk_sock *sock)
1328 {
1329 #ifdef SPDK_ZEROCOPY
1330 	struct spdk_posix_sock *psock = __posix_sock(sock);
1331 
1332 	if (psock->zcopy && !TAILQ_EMPTY(&sock->pending_reqs)) {
1333 		_sock_check_zcopy(sock);
1334 	}
1335 #endif
1336 
1337 	return _sock_flush(sock);
1338 }
1339 
1340 static ssize_t
1341 posix_sock_recv_from_pipe(struct spdk_posix_sock *sock, struct iovec *diov, int diovcnt)
1342 {
1343 	struct iovec siov[2];
1344 	int sbytes;
1345 	ssize_t bytes;
1346 	struct spdk_posix_sock_group_impl *group;
1347 
1348 	sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
1349 	if (sbytes < 0) {
1350 		errno = EINVAL;
1351 		return -1;
1352 	} else if (sbytes == 0) {
1353 		errno = EAGAIN;
1354 		return -1;
1355 	}
1356 
1357 	bytes = spdk_iovcpy(siov, 2, diov, diovcnt);
1358 
1359 	if (bytes == 0) {
1360 		/* The only way this happens is if diov is 0 length */
1361 		errno = EINVAL;
1362 		return -1;
1363 	}
1364 
1365 	spdk_pipe_reader_advance(sock->recv_pipe, bytes);
1366 
1367 	/* If we drained the pipe, mark it appropriately */
1368 	if (spdk_pipe_reader_bytes_available(sock->recv_pipe) == 0) {
1369 		assert(sock->pipe_has_data == true);
1370 
1371 		group = __posix_group_impl(sock->base.group_impl);
1372 		if (group && !sock->socket_has_data) {
1373 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1374 		}
1375 
1376 		sock->pipe_has_data = false;
1377 	}
1378 
1379 	return bytes;
1380 }
1381 
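/* Refill the receive pipe with one large readv() from the kernel socket (or
 * SSL connection) so that small reads can then be served from the pipe. */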
1382 static inline ssize_t
1383 posix_sock_read(struct spdk_posix_sock *sock)
1384 {
1385 	struct iovec iov[2];
1386 	int bytes_avail, bytes_recvd;
1387 	struct spdk_posix_sock_group_impl *group;
1388 
1389 	bytes_avail = spdk_pipe_writer_get_buffer(sock->recv_pipe, sock->recv_buf_sz, iov);
1390 
1391 	if (bytes_avail <= 0) {
1392 		return bytes_avail;
1393 	}
1394 
1395 	if (sock->ssl) {
1396 		bytes_recvd = SSL_readv(sock->ssl, iov, 2);
1397 	} else {
1398 		bytes_recvd = readv(sock->fd, iov, 2);
1399 	}
1400 
1401 	assert(sock->pipe_has_data == false);
1402 
1403 	if (bytes_recvd <= 0) {
1404 		/* Errors count as draining the socket data */
1405 		if (sock->base.group_impl && sock->socket_has_data) {
1406 			group = __posix_group_impl(sock->base.group_impl);
1407 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1408 		}
1409 
1410 		sock->socket_has_data = false;
1411 
1412 		return bytes_recvd;
1413 	}
1414 
1415 	spdk_pipe_writer_advance(sock->recv_pipe, bytes_recvd);
1416 
1417 #if DEBUG
1418 	if (sock->base.group_impl) {
1419 		assert(sock->socket_has_data == true);
1420 	}
1421 #endif
1422 
1423 	sock->pipe_has_data = true;
1424 	if (bytes_recvd < bytes_avail) {
1425 		/* We drained the kernel socket entirely. */
1426 		sock->socket_has_data = false;
1427 	}
1428 
1429 	return bytes_recvd;
1430 }
1431 
1432 static ssize_t
1433 posix_sock_readv(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1434 {
1435 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1436 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(sock->base.group_impl);
1437 	int rc, i;
1438 	size_t len;
1439 
1440 	if (sock->recv_pipe == NULL) {
1441 		assert(sock->pipe_has_data == false);
1442 		if (group && sock->socket_has_data) {
1443 			sock->socket_has_data = false;
1444 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1445 		}
1446 		if (sock->ssl) {
1447 			return SSL_readv(sock->ssl, iov, iovcnt);
1448 		} else {
1449 			return readv(sock->fd, iov, iovcnt);
1450 		}
1451 	}
1452 
1453 	/* If the socket is not in a group, we must assume it always has
1454 	 * data waiting for us because it is not epolled */
1455 	if (!sock->pipe_has_data && (group == NULL || sock->socket_has_data)) {
1456 		/* If the user is receiving a sufficiently large amount of data,
1457 		 * receive directly to their buffers. */
1458 		len = 0;
1459 		for (i = 0; i < iovcnt; i++) {
1460 			len += iov[i].iov_len;
1461 		}
1462 
1463 		if (len >= MIN_SOCK_PIPE_SIZE) {
1464 			/* TODO: Should this detect if kernel socket is drained? */
1465 			if (sock->ssl) {
1466 				return SSL_readv(sock->ssl, iov, iovcnt);
1467 			} else {
1468 				return readv(sock->fd, iov, iovcnt);
1469 			}
1470 		}
1471 
1472 		/* Otherwise, do a big read into our pipe */
1473 		rc = posix_sock_read(sock);
1474 		if (rc <= 0) {
1475 			return rc;
1476 		}
1477 	}
1478 
1479 	return posix_sock_recv_from_pipe(sock, iov, iovcnt);
1480 }
1481 
1482 static ssize_t
1483 posix_sock_recv(struct spdk_sock *sock, void *buf, size_t len)
1484 {
1485 	struct iovec iov[1];
1486 
1487 	iov[0].iov_base = buf;
1488 	iov[0].iov_len = len;
1489 
1490 	return posix_sock_readv(sock, iov, 1);
1491 }
1492 
1493 static void
1494 posix_sock_readv_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1495 {
1496 	req->cb_fn(req->cb_arg, -ENOTSUP);
1497 }
1498 
1499 static ssize_t
1500 posix_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1501 {
1502 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1503 	int rc;
1504 
1505 	/* In order to process a writev, we need to flush any asynchronous writes
1506 	 * first. */
1507 	rc = _sock_flush(_sock);
1508 	if (rc < 0) {
1509 		return rc;
1510 	}
1511 
1512 	if (!TAILQ_EMPTY(&_sock->queued_reqs)) {
1513 		/* We weren't able to flush all requests */
1514 		errno = EAGAIN;
1515 		return -1;
1516 	}
1517 
1518 	if (sock->ssl) {
1519 		return SSL_writev(sock->ssl, iov, iovcnt);
1520 	} else {
1521 		return writev(sock->fd, iov, iovcnt);
1522 	}
1523 }
1524 
1525 static void
1526 posix_sock_writev_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1527 {
1528 	int rc;
1529 
1530 	spdk_sock_request_queue(sock, req);
1531 
1532 	/* If enough requests are queued, just flush them out immediately. */
1533 	if (sock->queued_iovcnt >= IOV_BATCH_SIZE) {
1534 		rc = _sock_flush(sock);
1535 		if (rc) {
1536 			spdk_sock_abort_requests(sock);
1537 		}
1538 	}
1539 }
1540 
1541 static int
1542 posix_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
1543 {
1544 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1545 	int val;
1546 	int rc;
1547 
1548 	assert(sock != NULL);
1549 
1550 	val = nbytes;
1551 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVLOWAT, &val, sizeof val);
1552 	if (rc != 0) {
1553 		return -1;
1554 	}
1555 	return 0;
1556 }
1557 
1558 static bool
1559 posix_sock_is_ipv6(struct spdk_sock *_sock)
1560 {
1561 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1562 	struct sockaddr_storage sa;
1563 	socklen_t salen;
1564 	int rc;
1565 
1566 	assert(sock != NULL);
1567 
1568 	memset(&sa, 0, sizeof sa);
1569 	salen = sizeof sa;
1570 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1571 	if (rc != 0) {
1572 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1573 		return false;
1574 	}
1575 
1576 	return (sa.ss_family == AF_INET6);
1577 }
1578 
1579 static bool
1580 posix_sock_is_ipv4(struct spdk_sock *_sock)
1581 {
1582 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1583 	struct sockaddr_storage sa;
1584 	socklen_t salen;
1585 	int rc;
1586 
1587 	assert(sock != NULL);
1588 
1589 	memset(&sa, 0, sizeof sa);
1590 	salen = sizeof sa;
1591 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1592 	if (rc != 0) {
1593 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1594 		return false;
1595 	}
1596 
1597 	return (sa.ss_family == AF_INET);
1598 }
1599 
1600 static bool
1601 posix_sock_is_connected(struct spdk_sock *_sock)
1602 {
1603 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1604 	uint8_t byte;
1605 	int rc;
1606 
1607 	rc = recv(sock->fd, &byte, 1, MSG_PEEK);
1608 	if (rc == 0) {
1609 		return false;
1610 	}
1611 
1612 	if (rc < 0) {
1613 		if (errno == EAGAIN || errno == EWOULDBLOCK) {
1614 			return true;
1615 		}
1616 
1617 		return false;
1618 	}
1619 
1620 	return true;
1621 }
1622 
1623 static struct spdk_sock_group_impl *
1624 posix_sock_group_impl_get_optimal(struct spdk_sock *_sock, struct spdk_sock_group_impl *hint)
1625 {
1626 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1627 	struct spdk_sock_group_impl *group_impl;
1628 
1629 	if (sock->placement_id != -1) {
1630 		spdk_sock_map_lookup(&g_map, sock->placement_id, &group_impl, hint);
1631 		return group_impl;
1632 	}
1633 
1634 	return NULL;
1635 }
1636 
1637 static struct spdk_sock_group_impl *
1638 posix_sock_group_impl_create(void)
1639 {
1640 	struct spdk_posix_sock_group_impl *group_impl;
1641 	int fd;
1642 
1643 #if defined(SPDK_EPOLL)
1644 	fd = epoll_create1(0);
1645 #elif defined(SPDK_KEVENT)
1646 	fd = kqueue();
1647 #endif
1648 	if (fd == -1) {
1649 		return NULL;
1650 	}
1651 
1652 	group_impl = calloc(1, sizeof(*group_impl));
1653 	if (group_impl == NULL) {
1654 		SPDK_ERRLOG("group_impl allocation failed\n");
1655 		close(fd);
1656 		return NULL;
1657 	}
1658 
1659 	group_impl->fd = fd;
1660 	TAILQ_INIT(&group_impl->socks_with_data);
1661 	group_impl->placement_id = -1;
1662 
1663 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
1664 		spdk_sock_map_insert(&g_map, spdk_env_get_current_core(), &group_impl->base);
1665 		group_impl->placement_id = spdk_env_get_current_core();
1666 	}
1667 
1668 	return &group_impl->base;
1669 }
1670 
1671 static void
1672 posix_sock_mark(struct spdk_posix_sock_group_impl *group, struct spdk_posix_sock *sock,
1673 		int placement_id)
1674 {
1675 #if defined(SO_MARK)
1676 	int rc;
1677 
1678 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_MARK,
1679 			&placement_id, sizeof(placement_id));
1680 	if (rc != 0) {
1681 		/* Not fatal */
1682 		SPDK_ERRLOG("Error setting SO_MARK\n");
1683 		return;
1684 	}
1685 
1686 	rc = spdk_sock_map_insert(&g_map, placement_id, &group->base);
1687 	if (rc != 0) {
1688 		/* Not fatal */
1689 		SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1690 		return;
1691 	}
1692 
1693 	sock->placement_id = placement_id;
1694 #endif
1695 }
1696 
1697 static void
1698 posix_sock_update_mark(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1699 {
1700 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1701 
1702 	if (group->placement_id == -1) {
1703 		group->placement_id = spdk_sock_map_find_free(&g_map);
1704 
1705 		/* If a free placement id is found, update existing sockets in this group */
1706 		if (group->placement_id != -1) {
1707 			struct spdk_sock  *sock, *tmp;
1708 
1709 			TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1710 				posix_sock_mark(group, __posix_sock(sock), group->placement_id);
1711 			}
1712 		}
1713 	}
1714 
1715 	if (group->placement_id != -1) {
1716 		/*
1717 		 * group placement id is already determined for this poll group.
1718 		 * Mark socket with group's placement id.
1719 		 */
1720 		posix_sock_mark(group, __posix_sock(_sock), group->placement_id);
1721 	}
1722 }
1723 
1724 static int
1725 posix_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1726 {
1727 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1728 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1729 	int rc;
1730 
1731 #if defined(SPDK_EPOLL)
1732 	struct epoll_event event;
1733 
1734 	memset(&event, 0, sizeof(event));
1735 	/* EPOLLERR is always on even if we don't set it, but be explicit for clarity */
1736 	event.events = EPOLLIN | EPOLLERR;
1737 	event.data.ptr = sock;
1738 
1739 	rc = epoll_ctl(group->fd, EPOLL_CTL_ADD, sock->fd, &event);
1740 #elif defined(SPDK_KEVENT)
1741 	struct kevent event;
1742 	struct timespec ts = {0};
1743 
1744 	EV_SET(&event, sock->fd, EVFILT_READ, EV_ADD, 0, 0, sock);
1745 
1746 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1747 #endif
1748 
1749 	if (rc != 0) {
1750 		return rc;
1751 	}
1752 
1753 	/* switched from another polling group due to scheduling */
1754 	if (spdk_unlikely(sock->recv_pipe != NULL  &&
1755 			  (spdk_pipe_reader_bytes_available(sock->recv_pipe) > 0))) {
1756 		sock->pipe_has_data = true;
1757 		sock->socket_has_data = false;
1758 		TAILQ_INSERT_TAIL(&group->socks_with_data, sock, link);
1759 	}
1760 
1761 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_MARK) {
1762 		posix_sock_update_mark(_group, _sock);
1763 	} else if (sock->placement_id != -1) {
1764 		rc = spdk_sock_map_insert(&g_map, sock->placement_id, &group->base);
1765 		if (rc != 0) {
1766 			SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1767 			/* Do not treat this as an error. The system will continue running. */
1768 		}
1769 	}
1770 
1771 	return rc;
1772 }
1773 
1774 static int
1775 posix_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1776 {
1777 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1778 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1779 	int rc;
1780 
1781 	if (sock->pipe_has_data || sock->socket_has_data) {
1782 		TAILQ_REMOVE(&group->socks_with_data, sock, link);
1783 		sock->pipe_has_data = false;
1784 		sock->socket_has_data = false;
1785 	}
1786 
1787 	if (sock->placement_id != -1) {
1788 		spdk_sock_map_release(&g_map, sock->placement_id);
1789 	}
1790 
1791 #if defined(SPDK_EPOLL)
1792 	struct epoll_event event;
1793 
1794 	/* The event parameter is ignored, but some old kernel versions still require it. */
1795 	rc = epoll_ctl(group->fd, EPOLL_CTL_DEL, sock->fd, &event);
1796 #elif defined(SPDK_KEVENT)
1797 	struct kevent event;
1798 	struct timespec ts = {0};
1799 
1800 	EV_SET(&event, sock->fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
1801 
1802 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1803 	if (rc == 0 && event.flags & EV_ERROR) {
1804 		rc = -1;
1805 		errno = event.data;
1806 	}
1807 #endif
1808 
1809 	spdk_sock_abort_requests(_sock);
1810 
1811 	return rc;
1812 }
1813 
1814 static int
1815 posix_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
1816 			   struct spdk_sock **socks)
1817 {
1818 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1819 	struct spdk_sock *sock, *tmp;
1820 	int num_events, i, rc;
1821 	struct spdk_posix_sock *psock, *ptmp;
1822 #if defined(SPDK_EPOLL)
1823 	struct epoll_event events[MAX_EVENTS_PER_POLL];
1824 #elif defined(SPDK_KEVENT)
1825 	struct kevent events[MAX_EVENTS_PER_POLL];
1826 	struct timespec ts = {0};
1827 #endif
1828 
1829 #ifdef SPDK_ZEROCOPY
1830 	/* When all of the following conditions are met
1831 	 * - non-blocking socket
1832 	 * - zero copy is enabled
1833 	 * - interrupts suppressed (i.e. busy polling)
1834 	 * - the NIC tx queue is full at the time sendmsg() is called
1835 	 * - epoll_wait determines there is an EPOLLIN event for the socket
1836 	 * then we can get into a situation where data we've sent is queued
1837 	 * up in the kernel network stack, but interrupts have been suppressed
1838 	 * because other traffic is flowing so the kernel misses the signal
1839 	 * to flush the software tx queue. If there wasn't incoming data
1840 	 * pending on the socket, then epoll_wait would have been sufficient
1841 	 * to kick off the send operation, but since there is a pending event
1842 	 * epoll_wait does not trigger the necessary operation.
1843 	 *
1844 	 * We deal with this by checking for all of the above conditions and
1845 	 * additionally looking for EPOLLIN events that were not consumed from
1846 	 * the last poll loop. We take this to mean that the upper layer is
1847 	 * unable to consume them because it is blocked waiting for resources
1848 	 * to free up, and those resources are most likely freed in response
1849 	 * to a pending asynchronous write completing.
1850 	 *
1851 	 * Additionally, sockets that have the same placement_id actually share
1852 	 * an underlying hardware queue. That means polling one of them is
1853 	 * equivalent to polling all of them. As a quick mechanism to avoid
1854 	 * making extra poll() calls, stash the last placement_id during the loop
1855 	 * and only poll if it's not the same. The overwhelmingly common case
1856 	 * is that all sockets in this list have the same placement_id because
1857 	 * SPDK is intentionally grouping sockets by that value, so even
1858 	 * though this won't stop all extra calls to poll(), it's very fast
1859 	 * and will catch all of them in practice.
1860 	 */
1861 	int last_placement_id = -1;
1862 
1863 	TAILQ_FOREACH(psock, &group->socks_with_data, link) {
1864 		if (psock->zcopy && psock->placement_id >= 0 &&
1865 		    psock->placement_id != last_placement_id) {
1866 			struct pollfd pfd = {psock->fd, POLLIN | POLLERR, 0};
1867 
1868 			poll(&pfd, 1, 0);
1869 			last_placement_id = psock->placement_id;
1870 		}
1871 	}
1872 #endif
1873 
1874 	/* This must be a TAILQ_FOREACH_SAFE because while flushing,
1875 	 * a completion callback could remove the sock from the
1876 	 * group. */
1877 	TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1878 		rc = _sock_flush(sock);
1879 		if (rc) {
1880 			spdk_sock_abort_requests(sock);
1881 		}
1882 	}
1883 
1884 	assert(max_events > 0);
1885 
1886 #if defined(SPDK_EPOLL)
1887 	num_events = epoll_wait(group->fd, events, max_events, 0);
1888 #elif defined(SPDK_KEVENT)
1889 	num_events = kevent(group->fd, NULL, 0, events, max_events, &ts);
1890 #endif
1891 
1892 	if (num_events == -1) {
1893 		return -1;
1894 	} else if (num_events == 0 && !TAILQ_EMPTY(&_group->socks)) {
1895 		sock = TAILQ_FIRST(&_group->socks);
1896 		psock = __posix_sock(sock);
1897 		/* poll() is called here to busy poll the queue associated with the
1898 		 * first socket in the list and potentially reap incoming data.
1899 		 */
1900 		if (sock->opts.priority) {
1901 			struct pollfd pfd = {0, 0, 0};
1902 
1903 			pfd.fd = psock->fd;
1904 			pfd.events = POLLIN | POLLERR;
1905 			poll(&pfd, 1, 0);
1906 		}
1907 	}
1908 
1909 	for (i = 0; i < num_events; i++) {
1910 #if defined(SPDK_EPOLL)
1911 		sock = events[i].data.ptr;
1912 		psock = __posix_sock(sock);
1913 
1914 #ifdef SPDK_ZEROCOPY
1915 		if (events[i].events & EPOLLERR) {
1916 			rc = _sock_check_zcopy(sock);
1917 			/* If the socket was closed or removed from
1918 			 * the group in response to a send ack, don't
1919 			 * add it to the array here. */
1920 			if (rc || sock->cb_fn == NULL) {
1921 				continue;
1922 			}
1923 		}
1924 #endif
1925 		if ((events[i].events & EPOLLIN) == 0) {
1926 			continue;
1927 		}
1928 
1929 #elif defined(SPDK_KEVENT)
1930 		sock = events[i].udata;
1931 		psock = __posix_sock(sock);
1932 #endif
1933 
1934 		/* If the socket is not already in the list, add it now */
1935 		if (!psock->socket_has_data && !psock->pipe_has_data) {
1936 			TAILQ_INSERT_TAIL(&group->socks_with_data, psock, link);
1937 		}
1938 		psock->socket_has_data = true;
1939 	}
1940 
1941 	num_events = 0;
1942 
1943 	TAILQ_FOREACH_SAFE(psock, &group->socks_with_data, link, ptmp) {
1944 		if (num_events == max_events) {
1945 			break;
1946 		}
1947 
1948 		/* If the socket's cb_fn is NULL, just remove it from the
1949 		 * list and do not add it to the socks array */
1950 		if (spdk_unlikely(psock->base.cb_fn == NULL)) {
1951 			psock->socket_has_data = false;
1952 			psock->pipe_has_data = false;
1953 			TAILQ_REMOVE(&group->socks_with_data, psock, link);
1954 			continue;
1955 		}
1956 
1957 		socks[num_events++] = &psock->base;
1958 	}
1959 
1960 	/* Cycle the has_data list so that each time we poll things aren't
1961 	 * in the same order. Say we have 6 sockets in the list, named as follows:
1962 	 * A B C D E F
1963 	 * And all 6 sockets had epoll events, but max_events is only 3. That means
1964 	 * psock currently points at D. We want to rearrange the list to the following:
1965 	 * D E F A B C
1966 	 *
1967 	 * The variables below are named according to this example to make it easier to
1968 	 * follow the swaps.
1969 	 */
1970 	if (psock != NULL) {
1971 		struct spdk_posix_sock *pa, *pc, *pd, *pf;
1972 
1973 		/* Capture pointers to the elements we need */
1974 		pd = psock;
1975 		pc = TAILQ_PREV(pd, spdk_has_data_list, link);
1976 		pa = TAILQ_FIRST(&group->socks_with_data);
1977 		pf = TAILQ_LAST(&group->socks_with_data, spdk_has_data_list);
1978 
1979 		/* Break the link between C and D */
1980 		pc->link.tqe_next = NULL;
1981 
1982 		/* Connect F to A */
1983 		pf->link.tqe_next = pa;
1984 		pa->link.tqe_prev = &pf->link.tqe_next;
1985 
1986 		/* Fix up the list first/last pointers */
1987 		group->socks_with_data.tqh_first = pd;
1988 		group->socks_with_data.tqh_last = &pc->link.tqe_next;
1989 
1990 		/* D is now at the front of the list; make its tqe_prev pointer point to the head of the list */
1991 		pd->link.tqe_prev = &group->socks_with_data.tqh_first;
1992 	}
1993 
1994 	return num_events;
1995 }
1996 
1997 static int
1998 posix_sock_group_impl_close(struct spdk_sock_group_impl *_group)
1999 {
2000 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
2001 	int rc;
2002 
2003 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
2004 		spdk_sock_map_release(&g_map, spdk_env_get_current_core());
2005 	}
2006 
2007 	rc = close(group->fd);
2008 	free(group);
2009 	return rc;
2010 }
2011 
2012 static struct spdk_net_impl g_posix_net_impl = {
2013 	.name		= "posix",
2014 	.getaddr	= posix_sock_getaddr,
2015 	.connect	= posix_sock_connect,
2016 	.listen		= posix_sock_listen,
2017 	.accept		= posix_sock_accept,
2018 	.close		= posix_sock_close,
2019 	.recv		= posix_sock_recv,
2020 	.readv		= posix_sock_readv,
2021 	.readv_async	= posix_sock_readv_async,
2022 	.writev		= posix_sock_writev,
2023 	.writev_async	= posix_sock_writev_async,
2024 	.flush		= posix_sock_flush,
2025 	.set_recvlowat	= posix_sock_set_recvlowat,
2026 	.set_recvbuf	= posix_sock_set_recvbuf,
2027 	.set_sendbuf	= posix_sock_set_sendbuf,
2028 	.is_ipv6	= posix_sock_is_ipv6,
2029 	.is_ipv4	= posix_sock_is_ipv4,
2030 	.is_connected	= posix_sock_is_connected,
2031 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2032 	.group_impl_create	= posix_sock_group_impl_create,
2033 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2034 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2035 	.group_impl_poll	= posix_sock_group_impl_poll,
2036 	.group_impl_close	= posix_sock_group_impl_close,
2037 	.get_opts	= posix_sock_impl_get_opts,
2038 	.set_opts	= posix_sock_impl_set_opts,
2039 };
2040 
2041 SPDK_NET_IMPL_REGISTER(posix, &g_posix_net_impl, DEFAULT_SOCK_PRIORITY + 1);
2042 
2043 static struct spdk_sock *
2044 ssl_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
2045 {
2046 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, true);
2047 }
2048 
2049 static struct spdk_sock *
2050 ssl_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
2051 {
2052 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, true);
2053 }
2054 
2055 static struct spdk_sock *
2056 ssl_sock_accept(struct spdk_sock *_sock)
2057 {
2058 	return _posix_sock_accept(_sock, true);
2059 }
2060 
2061 static struct spdk_net_impl g_ssl_net_impl = {
2062 	.name		= "ssl",
2063 	.getaddr	= posix_sock_getaddr,
2064 	.connect	= ssl_sock_connect,
2065 	.listen		= ssl_sock_listen,
2066 	.accept		= ssl_sock_accept,
2067 	.close		= posix_sock_close,
2068 	.recv		= posix_sock_recv,
2069 	.readv		= posix_sock_readv,
2070 	.writev		= posix_sock_writev,
2071 	.writev_async	= posix_sock_writev_async,
2072 	.flush		= posix_sock_flush,
2073 	.set_recvlowat	= posix_sock_set_recvlowat,
2074 	.set_recvbuf	= posix_sock_set_recvbuf,
2075 	.set_sendbuf	= posix_sock_set_sendbuf,
2076 	.is_ipv6	= posix_sock_is_ipv6,
2077 	.is_ipv4	= posix_sock_is_ipv4,
2078 	.is_connected	= posix_sock_is_connected,
2079 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2080 	.group_impl_create	= posix_sock_group_impl_create,
2081 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2082 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2083 	.group_impl_poll	= posix_sock_group_impl_poll,
2084 	.group_impl_close	= posix_sock_group_impl_close,
2085 	.get_opts	= posix_sock_impl_get_opts,
2086 	.set_opts	= posix_sock_impl_set_opts,
2087 };
2088 
2089 SPDK_NET_IMPL_REGISTER(ssl, &g_ssl_net_impl, DEFAULT_SOCK_PRIORITY);
2090 SPDK_LOG_REGISTER_COMPONENT(sock_posix)
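/*
 * Minimal usage sketch (not part of this module): the "posix" and "ssl"
 * implementations registered above are selected by name through the generic
 * spdk_sock API. This assumes the spdk_sock_connect()/spdk_sock_close()
 * prototypes from spdk/sock.h; adjust to the SPDK version actually in use.
 *
 *	struct spdk_sock *sock;
 *
 *	sock = spdk_sock_connect("127.0.0.1", 4420, "posix");
 *	if (sock == NULL) {
 *		SPDK_ERRLOG("spdk_sock_connect() failed\n");
 *		return -1;
 *	}
 *	...
 *	spdk_sock_close(&sock);
 */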
2091